repo_name (string, 6–130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
smly/Landmark2019-1st-and-3rd-Place-Solution
|
[
"9839c9cbc6bec15e69e91d1d7c8be144531d5a33"
] |
[
"src/FishNet/utils/profile.py"
] |
[
"import torch\nfrom torch.autograd.variable import Variable\nimport numpy as np\n\n\nUSE_GPU = torch.cuda.is_available()\n\n\ndef calc_flops(model, input_size):\n global USE_GPU\n\n def conv_hook(self, input, output):\n batch_size, input_channels, input_height, input_width = input[0].size()\n output_channels, output_height, output_width = output[0].size()\n\n kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups) * (\n 2 if multiply_adds else 1)\n bias_ops = 1 if self.bias is not None else 0\n\n params = output_channels * (kernel_ops + bias_ops)\n flops = batch_size * params * output_height * output_width\n\n list_conv.append(flops)\n\n def linear_hook(self, input, output):\n batch_size = input[0].size(0) if input[0].dim() == 2 else 1\n\n weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)\n bias_ops = self.bias.nelement()\n\n flops = batch_size * (weight_ops + bias_ops)\n list_linear.append(flops)\n\n def bn_hook(self, input, output):\n list_bn.append(input[0].nelement())\n\n def relu_hook(self, input, output):\n list_relu.append(input[0].nelement())\n\n def pooling_hook(self, input, output):\n batch_size, input_channels, input_height, input_width = input[0].size()\n output_channels, output_height, output_width = output[0].size()\n\n kernel_ops = self.kernel_size * self.kernel_size\n bias_ops = 0\n params = output_channels * (kernel_ops + bias_ops)\n flops = batch_size * params * output_height * output_width\n\n list_pooling.append(flops)\n\n def foo(net):\n childrens = list(net.children())\n if not childrens:\n if isinstance(net, torch.nn.Conv2d):\n net.register_forward_hook(conv_hook)\n if isinstance(net, torch.nn.Linear):\n net.register_forward_hook(linear_hook)\n if isinstance(net, torch.nn.BatchNorm2d):\n net.register_forward_hook(bn_hook)\n if isinstance(net, torch.nn.ReLU):\n net.register_forward_hook(relu_hook)\n if isinstance(net, torch.nn.MaxPool2d) or isinstance(net, torch.nn.AvgPool2d):\n net.register_forward_hook(pooling_hook)\n return\n for c in childrens:\n foo(c)\n\n multiply_adds = False\n list_conv, list_bn, list_relu, list_linear, list_pooling = [], [], [], [], []\n foo(model)\n if '0.4.' in torch.__version__ or '1.0' in torch.__version__:\n if USE_GPU:\n input = torch.cuda.FloatTensor(torch.rand(2, 3, input_size, input_size).cuda())\n else:\n input = torch.FloatTensor(torch.rand(2, 3, input_size, input_size))\n else:\n input = Variable(torch.rand(2, 3, input_size, input_size), requires_grad=True)\n _ = model(input)\n\n total_flops = (sum(list_conv) + sum(list_linear) + sum(list_bn) + sum(list_relu) + sum(list_pooling))\n\n print(' + Number of FLOPs: %.2fG' % (total_flops / 1e9 / 2))\n\n\ndef count_params(model, input_size=224):\n # param_sum = 0\n with open('models.txt', 'w') as fm:\n fm.write(str(model))\n calc_flops(model, input_size)\n\n model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n params = sum([np.prod(p.size()) for p in model_parameters])\n\n print('The network has {} params.'.format(params))\n\n"
] |
[
[
"torch.rand",
"torch.cuda.is_available"
]
] |
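For context, a minimal usage sketch of the profiler in this row: `count_params` writes the model summary to `models.txt`, estimates FLOPs by running a random 224x224 input through forward hooks, and prints the trainable-parameter count. The import path and the choice of `resnet18` are illustrative assumptions, not part of the quoted repo.

```python
# Minimal usage sketch for the FLOPs/parameter profiler above.
# The module lives at src/FishNet/utils/profile.py in the repo;
# the import path and resnet18 are illustrative stand-ins.
import torchvision.models as models

from profile import count_params  # hypothetical import path

model = models.resnet18()
count_params(model, input_size=224)  # prints FLOPs estimate and param count
```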
l-tang/sui
|
[
"b5a5ecd744b836bb13d2ec1f2a52bc263eb2fbb4"
] |
[
"sui/dl/pnn.py"
] |
[
"\"\"\"Product-based Neural Networks\nhttps://arxiv.org/pdf/1611.00144.pdf\nDate: 14/Jul/2020\nAuthor: Li Tang\n\"\"\"\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Activation, BatchNormalization, Dense, Dropout\n\nfrom .initializers import get_init\nfrom .losses import get_loss\nfrom .optimizers import get_opti\n\n__author__ = ['Li Tang']\n__copyright__ = 'Li Tang'\n__credits__ = ['Li Tang']\n__license__ = 'MIT'\n__version__ = '0.2.0'\n__maintainer__ = ['Li Tang']\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n\nclass SuiPNNError(Exception):\n pass\n\n\nclass PNN(tf.keras.Model):\n \"\"\"Product-based Neural Networks described in https://arxiv.org/pdf/1611.00144.pdf;\n this class is implemented based on tensorflow.\n\n \"\"\"\n def __init__(self, features_dim: int, fields_dim: int, hidden_layer_sizes: list, dropout_params: list,\n product_layer_dim=10, lasso=0.01, ridge=1e-5, embedding_dim=10, product_type='pnn',\n initializer='glorotuniform', activation='sigmoid', hidden_activation='relu'):\n super().__init__()\n self.features_dim = features_dim # size of features after one-hot, denoted by F\n self.fields_dim = fields_dim # number of different original features, denoted by N\n self.dropout_params = dropout_params\n self.hidden_layer_sizes = hidden_layer_sizes # number of hidden layers\n self.product_layer_dim = product_layer_dim # as same as the input size of l_1, denoted by D_1\n self.lasso = lasso\n self.ridge = ridge\n self.embedding_dim = embedding_dim # dimension of vectors after embedding, denoted by M\n # product type for product layer\n # 'ipnn' for inner product , 'opnn' for outer product, and 'pnn' for concatenating both product\n self.product_type = product_type\n self.initializer = get_init(initializer)\n self.activation = activation\n self.hidden_activation = hidden_activation\n\n # embedding layer\n # the size of embedding layer is F * M\n self.embedding_layer = tf.keras.layers.Embedding(self.features_dim, self.embedding_dim,\n embeddings_initializer='uniform')\n\n # product layer\n # linear signals l_z\n self.linear_sigals_variable = tf.Variable(\n self.initializer(shape=(self.product_layer_dim, self.fields_dim, self.embedding_dim)))\n # quadratic signals l_p\n self.__init_quadratic_signals()\n\n # hidden layers\n self.__init_hidden_layers()\n\n # output layer\n self.output_layer = tf.keras.layers.Dense(1, activation=self.activation, use_bias=True)\n\n def __init_quadratic_signals(self):\n if self.product_type == 'ipnn':\n # matrix decomposition based on the assumption: W_p^n = \\theta ^n * {\\theta^n}^T\n # then the size of W_p^n is D_1 * N\n self.theta = tf.Variable(self.initializer(shape=(self.product_layer_dim, self.fields_dim)))\n elif self.product_type == 'opnn':\n # the size of W_p^n is D_1 * M * M\n self.quadratic_weights = tf.Variable(\n self.initializer(shape=(self.product_layer_dim, self.embedding_dim, self.embedding_dim)))\n elif self.product_type == 'pnn':\n self.theta = tf.Variable(self.initializer(shape=(self.product_layer_dim, self.fields_dim)))\n self.quadratic_weights = tf.Variable(\n self.initializer(shape=(self.product_layer_dim, self.embedding_dim, self.embedding_dim)))\n else:\n raise SuiPNNError(\"'product_type' should be 'ipnn', 'opnn', or 'pnn'.\")\n\n def __init_hidden_layers(self):\n for layer_index in range(len(self.hidden_layer_sizes)):\n setattr(self, 'dense_' + str(layer_index), Dense(self.hidden_layer_sizes[layer_index]))\n setattr(self, 'batch_norm_' + str(layer_index), BatchNormalization())\n setattr(self, 
'activation_' + str(layer_index), Activation(self.hidden_activation))\n setattr(self, 'dropout_' + str(layer_index), Dropout(self.dropout_params[layer_index]))\n\n def call(self, feature_value, embedding_index, training=False):\n \"\"\"Function to obtain the series vertex data by concurrent walking in the graph.\n\n Args:\n walk_depth:\n\n Returns:\n a list\n\n Examples:\n >>> from sklearn.model_selection import train_test_split\n >>> X, y = [[]], []\n >>> X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, random_state=1)\n >>> pnn = PNN(\n ... features_dim=8,\n ... fields_dim=4,\n ... hidden_layer_sizes=[32, 16, 4],\n ... dropout_params=[0.5] * 3,\n ... activation='sigmoid'\n ... )\n >>> embedding_index = [np.arange(8) for _ in range(len(X_train))]\n >>> pnn.train(\n ... feature_value=X_train,\n ... embedding_index=embedding_index,\n ... label=y_train,\n ... optimizer='adam',\n ... loss='sigmoid',\n ... epochs=50\n ... )\n >>>\n\n \"\"\"\n features = tf.einsum('bnm,bn->bnm', self.embedding_layer(embedding_index), feature_value)\n # linear part\n l_z = tf.einsum('bnm,dnm->bd', features, self.linear_sigals_variable) # Batch * D_1\n\n # quadratic part\n if self.product_type == 'ipnn':\n delta = tf.einsum('dn,bnm->bdnm', self.theta, features) # Batch * D_1 * N * M\n l_p = tf.einsum('bdnm,bdnm->bd', delta, delta)\n elif self.product_type == 'opnn':\n sum_features = tf.einsum('bnm->bm', features) # Batch * M\n p = tf.einsum('bm,bn->bmn', sum_features, sum_features)\n l_p = tf.einsum('bmn,dmn->bd', p, self.quadratic_weights)\n elif self.product_type == 'pnn':\n delta = tf.einsum('dn,bnm->bdnm', self.theta, features) # Batch * D_1 * N * M\n sum_features = tf.einsum('bnm->bm', features) # Batch * M\n p = tf.einsum('bm,bn->bmn', sum_features, sum_features)\n l_p = tf.concat(\n (tf.einsum('bdnm,bdnm->bd', delta, delta), tf.einsum('bmn,dmn->bd', p, self.quadratic_weights)), axis=1)\n else:\n raise SuiPNNError(\"'product_type' should be 'ipnn', 'opnn', or 'pnn'.\")\n\n model = tf.concat((l_z, l_p), axis=1)\n if training:\n model = tf.keras.layers.Dropout(self.dropout_params[0])(model)\n\n for i in range(len(self.hidden_layer_sizes)):\n model = getattr(self, 'dense_' + str(i))(model)\n model = getattr(self, 'batch_norm_' + str(i))(model)\n model = getattr(self, 'activation_' + str(i))(model)\n if training:\n model = getattr(self, 'dropout_' + str(i))(model)\n\n return self.output_layer(model)\n\n def train(self, feature_value, embedding_index, label, optimizer='adam', learning_rate=1e-4, loss='sigmoid',\n epochs=50, batch=32, shuffle=10000):\n for epoch in range(epochs):\n train_set = tf.data.Dataset.from_tensor_slices((feature_value, embedding_index, label)).shuffle(\n shuffle).batch(batch, drop_remainder=True)\n for batch_set in train_set:\n with tf.GradientTape() as tape:\n prediction = self.call(feature_value=batch_set[0], embedding_index=batch_set[1], training=True)\n self.loss_obj = get_loss(loss)()\n self.optimizer = get_opti(optimizer)(learning_rate=learning_rate)\n batch_loss = self.loss_obj(batch_set[2], prediction)\n gradients = tape.gradient(batch_loss, self.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n mean_loss = tf.keras.metrics.Mean(name='train_loss')\n print('epoch: {} ==> loss: {}'.format(epoch + 1, mean_loss(batch_loss)))\n\n def predict(self, feature_value, embedding_index):\n feature_value = tf.convert_to_tensor(feature_value)\n embedding_index = tf.convert_to_tensor(embedding_index)\n return 
self.call(feature_value=feature_value, embedding_index=embedding_index, training=False)\n\n # TODO\n def dump(self, path):\n self.save(filepath=path)\n\n # TODO\n @staticmethod\n def restore():\n return None\n"
] |
[
[
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.Dense",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.einsum",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.metrics.Mean",
"tensorflow.GradientTape"
]
] |
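A toy-data training sketch for the `PNN` class in this row. The shapes (4 fields one-hot encoded into 8 feature columns), the random data, and the assumption that the package's `get_init`/`get_loss`/`get_opti` helpers accept the default names `'glorotuniform'`, `'sigmoid'`, and `'adam'` (as the constructor defaults suggest) are all illustrative.

```python
# Toy-data sketch for the PNN class above; shapes are assumptions:
# N = 4 fields, one-hot encoded into F = 8 total feature columns.
import numpy as np

n_samples, fields_dim, features_dim = 256, 4, 8
feature_value = np.random.rand(n_samples, fields_dim).astype('float32')
# one embedding index per field, drawn from the F-sized feature vocabulary
embedding_index = np.random.randint(0, features_dim, size=(n_samples, fields_dim))
label = np.random.randint(0, 2, size=(n_samples, 1)).astype('float32')

pnn = PNN(features_dim=features_dim, fields_dim=fields_dim,
          hidden_layer_sizes=[32, 16, 4], dropout_params=[0.5] * 3)
pnn.train(feature_value, embedding_index, label,
          optimizer='adam', loss='sigmoid', epochs=5)
preds = pnn.predict(feature_value[:10], embedding_index[:10])  # shape (10, 1)
```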
christiancosgrove/cs767hw4
|
[
"4a3a1e2eeee448ab5b8eadfcf5d5becf7268630e"
] |
[
"parlai/agents/transformer/polyencoder.py"
] |
[
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# hack to make sure -m transformer/generator works as expected\n\"\"\"\nPoly-encoder Agent.\n\"\"\"\nfrom .biencoder import AddLabelFixedCandsTRA\nfrom .modules import TransformerEncoder\nfrom .modules import get_n_positions_from_options\nfrom parlai.core.torch_ranker_agent import TorchRankerAgent\nfrom .transformer import TransformerRankerAgent\nfrom .modules import BasicAttention, MultiHeadAttention\nimport torch\n\n\nclass PolyencoderAgent(TorchRankerAgent):\n \"\"\"\n Poly-encoder Agent.\n\n Equivalent of bert_ranker/polyencoder and biencoder_multiple_output but does not\n rely on an external library (hugging face).\n \"\"\"\n\n @classmethod\n def add_cmdline_args(cls, argparser):\n \"\"\"\n Add command-line arguments specifically for this agent.\n \"\"\"\n TransformerRankerAgent.add_cmdline_args(argparser)\n agent = argparser.add_argument_group('Polyencoder Arguments')\n agent.add_argument(\n '--polyencoder-type',\n type=str,\n default='codes',\n choices=['codes', 'n_first'],\n help='Type of polyencoder, either we compute'\n 'vectors using codes + attention, or we '\n 'simply take the first N vectors.',\n recommended='codes',\n )\n agent.add_argument(\n '--poly-n-codes',\n type=int,\n default=64,\n help='number of vectors used to represent the context'\n 'in the case of n_first, those are the number'\n 'of vectors that are considered.',\n recommended=64,\n )\n agent.add_argument(\n '--poly-attention-type',\n type=str,\n default='basic',\n choices=['basic', 'sqrt', 'multihead'],\n help='Type of the top aggregation layer of the poly-'\n 'encoder (where the candidate representation is'\n 'the key)',\n recommended='basic',\n )\n agent.add_argument(\n '--polyencoder-attention-keys',\n type=str,\n default='context',\n choices=['context', 'position'],\n help='Input emb vectors for the first level of attention. 
'\n 'Context refers to the context outputs; position refers to the '\n 'computed position embeddings.',\n recommended='context',\n )\n agent.add_argument(\n '--poly-attention-num-heads',\n type=int,\n default=4,\n help='In case poly-attention-type is multihead, '\n 'specify the number of heads',\n )\n\n # Those arguments are here in case where polyencoder type is 'code'\n agent.add_argument(\n '--codes-attention-type',\n type=str,\n default='basic',\n choices=['basic', 'sqrt', 'multihead'],\n help='Type ',\n recommended='basic',\n )\n agent.add_argument(\n '--codes-attention-num-heads',\n type=int,\n default=4,\n help='In case codes-attention-type is multihead, '\n 'specify the number of heads',\n )\n return agent\n\n def __init__(self, opt, shared=None):\n super().__init__(opt, shared)\n self.rank_loss = torch.nn.CrossEntropyLoss(reduce=True, size_average=True)\n if self.use_cuda:\n self.rank_loss.cuda()\n self.data_parallel = opt.get('data_parallel') and self.use_cuda\n if self.data_parallel:\n from parlai.utils.distributed import is_distributed\n\n if is_distributed():\n raise ValueError('Cannot combine --data-parallel and distributed mode')\n if shared is None:\n self.model = torch.nn.DataParallel(self.model)\n\n def build_model(self, states=None):\n \"\"\"\n Return built model.\n \"\"\"\n return PolyEncoderModule(self.opt, self.dict, self.NULL_IDX)\n\n def vectorize(self, *args, **kwargs):\n \"\"\"\n Add the start and end token to the labels.\n \"\"\"\n kwargs['add_start'] = True\n kwargs['add_end'] = True\n obs = super().vectorize(*args, **kwargs)\n return obs\n\n def _set_text_vec(self, *args, **kwargs):\n \"\"\"\n Add the start and end token to the text.\n \"\"\"\n obs = super()._set_text_vec(*args, **kwargs)\n if 'text_vec' in obs and 'added_start_end_tokens' not in obs:\n obs.force_set(\n 'text_vec', self._add_start_end_tokens(obs['text_vec'], True, True)\n )\n obs['added_start_end_tokens'] = True\n return obs\n\n def vectorize_fixed_candidates(self, *args, **kwargs):\n \"\"\"\n Vectorize fixed candidates.\n\n Override to add start and end token when computing the candidate encodings in\n interactive mode.\n \"\"\"\n kwargs['add_start'] = True\n kwargs['add_end'] = True\n return super().vectorize_fixed_candidates(*args, **kwargs)\n\n def _make_candidate_encs(self, vecs):\n \"\"\"\n Make candidate encs.\n\n The polyencoder module expects cand vecs to be 3D while torch_ranker_agent\n expects it to be 2D. This requires a little adjustment (used in interactive mode\n only)\n \"\"\"\n rep = super()._make_candidate_encs(vecs)\n return rep.transpose(0, 1).contiguous()\n\n def encode_candidates(self, padded_cands):\n \"\"\"\n Encode candidates.\n \"\"\"\n padded_cands = padded_cands.unsqueeze(1)\n _, _, _, cand_rep = self.model(cand_tokens=padded_cands)\n return cand_rep\n\n def score_candidates(self, batch, cand_vecs, cand_encs=None):\n \"\"\"\n Score candidates.\n\n The Poly-encoder encodes the candidate and context independently. 
Then, the\n model applies additional attention before ultimately scoring a candidate.\n \"\"\"\n bsz = batch.text_vec.size(0)\n ctxt_rep, ctxt_rep_mask, ctxt_pos, _ = self.model(ctxt_tokens=batch.text_vec)\n\n if cand_encs is not None:\n if bsz == 1:\n cand_rep = cand_encs\n else:\n cand_rep = cand_encs.expand(bsz, cand_encs.size(1), -1)\n # bsz x num cands x seq len\n elif len(cand_vecs.shape) == 3:\n _, _, _, cand_rep = self.model(cand_tokens=cand_vecs)\n # bsz x seq len (if batch cands) or num_cands x seq len (if fixed cands)\n elif len(cand_vecs.shape) == 2:\n _, _, _, cand_rep = self.model(cand_tokens=cand_vecs.unsqueeze(1))\n num_cands = cand_rep.size(0) # will be bsz if using batch cands\n cand_rep = cand_rep.expand(num_cands, bsz, -1).transpose(0, 1).contiguous()\n\n scores = self.model(\n ctxt_rep=ctxt_rep,\n ctxt_rep_mask=ctxt_rep_mask,\n cand_rep=cand_rep,\n ctxt_pos=ctxt_pos,\n )\n return scores\n\n def load_state_dict(self, state_dict):\n \"\"\"\n Override to account for codes.\n \"\"\"\n if self.model.type == 'codes' and 'codes' not in state_dict:\n state_dict['codes'] = self.model.codes\n super().load_state_dict(state_dict)\n\n\nclass PolyEncoderModule(torch.nn.Module):\n \"\"\"\n Poly-encoder model.\n\n See https://arxiv.org/abs/1905.01969 for more details\n \"\"\"\n\n def __init__(self, opt, dict, null_idx):\n super(PolyEncoderModule, self).__init__()\n self.null_idx = null_idx\n self.encoder_ctxt = self.get_encoder(opt, dict, null_idx, 'none_with_pos_embs')\n self.encoder_cand = self.get_encoder(opt, dict, null_idx, opt['reduction_type'])\n\n self.type = opt['polyencoder_type']\n self.n_codes = opt['poly_n_codes']\n self.attention_type = opt['poly_attention_type']\n self.attention_keys = opt.get('polyencoder_attention_keys', 'context')\n self.attention_num_heads = opt['poly_attention_num_heads']\n self.codes_attention_type = opt['codes_attention_type']\n self.codes_attention_num_heads = opt['codes_attention_num_heads']\n embed_dim = opt['embedding_size']\n\n # In case it's a polyencoder with code.\n if self.type == 'codes':\n # experimentally it seems that random with size = 1 was good.\n codes = torch.empty(self.n_codes, embed_dim)\n codes = torch.nn.init.uniform_(codes)\n self.codes = torch.nn.Parameter(codes)\n\n # The attention for the codes.\n if self.codes_attention_type == 'multihead':\n self.code_attention = MultiHeadAttention(\n self.codes_attention_num_heads, embed_dim, opt['dropout']\n )\n elif self.codes_attention_type == 'sqrt':\n self.code_attention = PolyBasicAttention(\n self.type, self.n_codes, dim=2, attn='sqrt', get_weights=False\n )\n elif self.codes_attention_type == 'basic':\n self.code_attention = PolyBasicAttention(\n self.type, self.n_codes, dim=2, attn='basic', get_weights=False\n )\n\n # The final attention (the one that takes the candidate as key)\n if self.attention_type == 'multihead':\n self.attention = MultiHeadAttention(\n self.attention_num_heads, opt['embedding_size'], opt['dropout']\n )\n else:\n self.attention = PolyBasicAttention(\n self.type,\n self.n_codes,\n dim=2,\n attn=self.attention_type,\n get_weights=False,\n )\n\n def get_encoder(self, opt, dict, null_idx, reduction_type):\n \"\"\"\n Return encoder, given options.\n\n :param opt:\n opt dict\n :param dict:\n dictionary agent\n :param null_idx:\n null/pad index into dict\n :reduction_type:\n reduction type for the encoder\n\n :return:\n a TransformerEncoder, initialized correctly\n \"\"\"\n n_positions = get_n_positions_from_options(opt)\n embeddings = torch.nn.Embedding(\n 
len(dict), opt['embedding_size'], padding_idx=null_idx\n )\n torch.nn.init.normal_(embeddings.weight, 0, opt['embedding_size'] ** -0.5)\n return TransformerEncoder(\n n_heads=opt['n_heads'],\n n_layers=opt['n_layers'],\n embedding_size=opt['embedding_size'],\n ffn_size=opt['ffn_size'],\n vocabulary_size=len(dict),\n embedding=embeddings,\n dropout=opt['dropout'],\n attention_dropout=opt['attention_dropout'],\n relu_dropout=opt['relu_dropout'],\n padding_idx=null_idx,\n learn_positional_embeddings=opt['learn_positional_embeddings'],\n embeddings_scale=opt['embeddings_scale'],\n reduction_type=reduction_type,\n n_positions=n_positions,\n n_segments=opt.get('n_segments', 2),\n activation=opt['activation'],\n variant=opt['variant'],\n output_scaling=opt['output_scaling'],\n )\n\n def attend(self, attention_layer, queries, keys, values, mask):\n \"\"\"\n Apply attention.\n\n :param attention_layer:\n nn.Module attention layer to use for the attention\n :param queries:\n the queries for attention\n :param keys:\n the keys for attention\n :param values:\n the values for attention\n :param mask:\n mask for the attention keys\n\n :return:\n the result of applying attention to the values, with weights computed\n wrt to the queries and keys.\n \"\"\"\n if keys is None:\n keys = values\n if isinstance(attention_layer, PolyBasicAttention):\n return attention_layer(queries, keys, mask_ys=mask, values=values)\n elif isinstance(attention_layer, MultiHeadAttention):\n return attention_layer(queries, keys, values, mask)\n else:\n raise Exception('Unrecognized type of attention')\n\n def encode(self, ctxt_tokens, cand_tokens):\n \"\"\"\n Encode a text sequence.\n\n :param ctxt_tokens:\n 2D long tensor, batchsize x sent_len\n :param cand_tokens:\n 3D long tensor, batchsize x num_cands x sent_len\n Note this will actually view it as a 2D tensor\n :return:\n (ctxt_rep, ctxt_mask, ctxt_pos, cand_rep)\n - ctxt_rep 3D float tensor, batchsize x n_codes x dim\n - ctxt_mask byte: batchsize x n_codes (all 1 in case\n of polyencoder with code. Which are the vectors to use\n in the ctxt_rep)\n - ctxt_pos 3D float tensor, batchsize x sent_len x dim\n - cand_rep (3D float tensor) batchsize x num_cands x dim\n \"\"\"\n cand_embed = None\n ctxt_rep = None\n ctxt_rep_mask = None\n ctxt_pos = None\n if cand_tokens is not None:\n assert len(cand_tokens.shape) == 3\n bsz = cand_tokens.size(0)\n num_cands = cand_tokens.size(1)\n cand_embed = self.encoder_cand(cand_tokens.view(bsz * num_cands, -1))\n cand_embed = cand_embed.view(bsz, num_cands, -1)\n\n if ctxt_tokens is not None:\n assert len(ctxt_tokens.shape) == 2\n bsz = ctxt_tokens.size(0)\n # get context_representation. 
Now that depends on the cases.\n ctxt_out, ctxt_mask, ctxt_pos = self.encoder_ctxt(ctxt_tokens)\n att_keys = ctxt_out if self.attention_keys == 'context' else ctxt_pos\n dim = ctxt_out.size(2)\n\n if self.type == 'codes':\n ctxt_rep = self.attend(\n self.code_attention,\n queries=self.codes.repeat(bsz, 1, 1),\n keys=att_keys,\n values=ctxt_out,\n mask=ctxt_mask,\n )\n ctxt_pos = None # we don't need this anymore\n ctxt_rep_mask = ctxt_rep.new_ones(bsz, self.n_codes).byte()\n\n elif self.type == 'n_first':\n # Expand the output if it is not long enough\n if ctxt_out.size(1) < self.n_codes:\n difference = self.n_codes - ctxt_out.size(1)\n extra_rep = ctxt_out.new_zeros(bsz, difference, dim)\n ctxt_rep = torch.cat([ctxt_out, extra_rep], dim=1)\n ctxt_pos = torch.cat([ctxt_pos, extra_rep], dim=1)\n extra_mask = ctxt_mask.new_zeros(bsz, difference)\n ctxt_rep_mask = torch.cat([ctxt_mask, extra_mask], dim=1)\n else:\n ctxt_rep = ctxt_out[:, 0 : self.n_codes, :]\n ctxt_pos = ctxt_pos[:, 0 : self.n_codes, :]\n ctxt_rep_mask = ctxt_mask[:, 0 : self.n_codes]\n\n return ctxt_rep, ctxt_rep_mask, ctxt_pos, cand_embed\n\n def score(self, ctxt_rep, ctxt_rep_mask, ctxt_pos, cand_embed):\n \"\"\"\n Score the candidates.\n\n :param ctxt_rep:\n 3D float tensor, bsz x ctxt_len x dim\n :param ctxt_rep_mask:\n 2D byte tensor, bsz x ctxt_len, in case there are some elements\n of the ctxt that we should not take into account.\n :param ctx_pos: 3D float tensor, bsz x sent_len x dim\n :param cand_embed: 3D float tensor, bsz x num_cands x dim\n\n :return: scores, 2D float tensor: bsz x num_cands\n \"\"\"\n # Attention keys determined by self.attention_keys\n # 'context' == use context final rep; otherwise use context position embs\n keys = ctxt_rep if self.attention_keys == 'context' else ctxt_pos\n # reduces the context representation to a 3D tensor bsz x num_cands x dim\n ctxt_final_rep = self.attend(\n self.attention, cand_embed, keys, ctxt_rep, ctxt_rep_mask\n )\n scores = torch.sum(ctxt_final_rep * cand_embed, 2)\n return scores\n\n def forward(\n self,\n ctxt_tokens=None,\n cand_tokens=None,\n ctxt_rep=None,\n ctxt_rep_mask=None,\n ctxt_pos=None,\n cand_rep=None,\n ):\n \"\"\"\n Forward pass of the model.\n\n Due to a limitation of parlai, we have to have one single model\n in the agent. And because we want to be able to use data-parallel,\n we need to have one single forward() method.\n Therefore the operation_type can be either 'encode' or 'score'.\n\n :param ctxt_tokens:\n tokenized contexts\n :param cand_tokens:\n tokenized candidates\n :param ctxt_rep:\n (bsz x num_codes x hsz)\n encoded representation of the context. If self.type == 'codes', these\n are the context codes. Otherwise, they are the outputs from the\n encoder\n :param ctxt_rep_mask:\n mask for ctxt rep\n :param ctxt_pos:\n position embeddings for the ctxt_rep. 
If self.type == 'codes', these\n are None, as their use is earlier in the pipeline.\n :param cand_rep:\n encoded representation of the candidates\n \"\"\"\n if ctxt_tokens is not None or cand_tokens is not None:\n return self.encode(ctxt_tokens, cand_tokens)\n elif (\n ctxt_rep is not None and ctxt_rep_mask is not None and cand_rep is not None\n ):\n # ctxt_pos can be none, if we are using codes (not first M)\n return self.score(ctxt_rep, ctxt_rep_mask, ctxt_pos, cand_rep)\n raise Exception('Unsupported operation')\n\n\nclass PolyBasicAttention(BasicAttention):\n \"\"\"\n Override basic attention to account for edge case for polyencoder.\n \"\"\"\n\n def __init__(self, poly_type, n_codes, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.poly_type = poly_type\n self.n_codes = n_codes\n\n def forward(self, *args, **kwargs):\n \"\"\"\n Forward pass.\n\n Account for accidental dimensionality reduction when num_codes is 1 and the\n polyencoder type is 'codes'\n \"\"\"\n lhs_emb = super().forward(*args, **kwargs)\n if self.poly_type == 'codes' and self.n_codes == 1 and len(lhs_emb.shape) == 2:\n lhs_emb = lhs_emb.unsqueeze(self.dim - 1)\n return lhs_emb\n\n\nclass IRFriendlyPolyencoderAgent(AddLabelFixedCandsTRA, PolyencoderAgent):\n \"\"\"\n Poly-encoder agent that allows for adding label to fixed cands.\n \"\"\"\n\n @classmethod\n def add_cmdline_args(cls, argparser):\n \"\"\"\n Add cmd line args.\n \"\"\"\n super(AddLabelFixedCandsTRA, cls).add_cmdline_args(argparser)\n super(PolyencoderAgent, cls).add_cmdline_args(argparser)\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.nn.init.uniform_",
"torch.nn.Parameter",
"torch.empty",
"torch.cat",
"torch.sum",
"torch.nn.init.normal_",
"torch.nn.DataParallel"
]
] |
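A plain-PyTorch sketch of the scoring scheme this poly-encoder row implements: learned codes attend over the context token vectors, each candidate then attends over the resulting code vectors, and the candidate is scored by a dot product. The shapes and the use of basic dot-product attention (rather than the module's configurable `basic`/`sqrt`/`multihead` variants) are simplifying assumptions.

```python
# Sketch of poly-encoder scoring with n_codes learned code vectors.
import torch

bsz, seq_len, n_codes, n_cands, dim = 2, 16, 64, 5, 32
ctxt_out = torch.randn(bsz, seq_len, dim)      # context encoder outputs
cand_embed = torch.randn(bsz, n_cands, dim)    # candidate encodings
codes = torch.nn.Parameter(torch.empty(n_codes, dim))
torch.nn.init.uniform_(codes)

# first-level attention: codes (queries) over context tokens
attn = torch.softmax(codes.repeat(bsz, 1, 1).bmm(ctxt_out.transpose(1, 2)), dim=-1)
ctxt_rep = attn.bmm(ctxt_out)                  # bsz x n_codes x dim

# second-level attention: candidates (queries) over the code vectors
attn2 = torch.softmax(cand_embed.bmm(ctxt_rep.transpose(1, 2)), dim=-1)
ctxt_final_rep = attn2.bmm(ctxt_rep)           # bsz x n_cands x dim

scores = torch.sum(ctxt_final_rep * cand_embed, dim=2)  # bsz x n_cands
```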
aleozlx/rpi-ledstrip-audio
|
[
"ab42f4815534115b98ca74aafe18f3ac293d566d"
] |
[
"main.py"
] |
[
"# official sample\n# https://python-sounddevice.readthedocs.io/en/0.3.14/examples.html#real-time-text-mode-spectrogram\n\nimport argparse\nimport math\nimport shutil\nimport socket\nimport numpy as np\nimport sounddevice as sd\nfrom matplotlib import cm\n\nusage_line = ' press <enter> to quit, +<enter> or -<enter> to change scaling '\n\n\ndef int_or_str(text):\n \"\"\"Helper function for argument parsing.\"\"\"\n try:\n return int(text)\n except ValueError:\n return text\n\n\ntry:\n columns, _ = shutil.get_terminal_size()\nexcept AttributeError:\n columns = 80\n\nparser = argparse.ArgumentParser(add_help=False)\nparser.add_argument(\n '-l', '--list-devices', action='store_true',\n help='show list of audio devices and exit')\nargs, remaining = parser.parse_known_args()\nif args.list_devices:\n print(sd.query_devices())\n parser.exit(0)\nparser = argparse.ArgumentParser(\n description='Supported keys:' + usage_line,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[parser])\nparser.add_argument(\n '-b', '--block-duration', type=float, metavar='DURATION', default=50,\n help='block size (default %(default)s milliseconds)')\nparser.add_argument(\n '-c', '--columns', type=int, default=columns,\n help='width of spectrogram')\nparser.add_argument(\n '-d', '--device', type=int_or_str,\n help='input device (numeric ID or substring)')\nparser.add_argument(\n '-g', '--gain', type=float, default=10,\n help='initial gain factor (default %(default)s)')\nparser.add_argument(\n '-r', '--range', type=float, nargs=2,\n metavar=('LOW', 'HIGH'), default=[100, 2000],\n help='frequency range (default %(default)s Hz)')\nparser.add_argument(\n 'consumer_ip', type=str)\nargs = parser.parse_args(remaining)\nlow, high = args.range\nif high <= low:\n parser.error('HIGH must be greater than LOW')\n\n# Create a nice output gradient using ANSI escape sequences.\n# Stolen from https://gist.github.com/maurisvh/df919538bcef391bc89f\ncolors = 30, 34, 35, 91, 93, 97\nchars = ' :%#\\t#%:'\ngradient = []\nfor bg, fg in zip(colors, colors[1:]):\n for char in chars:\n if char == '\\t':\n bg, fg = fg, bg\n else:\n gradient.append('\\x1b[{};{}m{}'.format(fg, bg + 10, char))\ndef encode_color(bytes_rgba):\n return bytearray((bytes_rgba[1], bytes_rgba[0], bytes_rgba[2], 0))\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ntry:\n samplerate = sd.query_devices(args.device, 'input')['default_samplerate']\n\n delta_f = (high - low) / (args.columns - 1)\n fftsize = math.ceil(samplerate / delta_f)\n low_bin = math.floor(low / delta_f)\n\n def callback(indata, frames, time, status):\n if status:\n text = ' ' + str(status) + ' '\n print('\\x1b[34;40m', text.center(args.columns, '#'),\n '\\x1b[0m', sep='')\n if any(indata):\n magnitude = np.abs(np.fft.rfft(indata[:, 0], n=fftsize))\n magnitude *= args.gain / fftsize\n normalized_magnitude = [np.clip(x, 0, 1) for x in magnitude[low_bin:low_bin + args.columns]]\n line = (gradient[int(x * (len(gradient) - 1))] for x in normalized_magnitude)\n print(*line, sep='', end='\\x1b[0m\\n')\n # line = [int(x * (len(gradient) - 1)) for x in normalized_magnitude]\n # print(len(line), line)\n line = [encode_color(cm.plasma(x, bytes=True)) for x in normalized_magnitude]\n # print(\"len(line) =\", len(line))\n line = [elem for elem in line for rep in range(2)]\n # print(\"len(line) =\", len(line))\n packet = b''.join([b'\\xCC\\xCC', int.to_bytes(len(line), 2, \"little\")] + line)\n sock.sendto(packet, (args.consumer_ip, 5151))\n # print(\"len(packet) =\", len(packet), packet[:10])\n 
else:\n print('no input')\n\n with sd.InputStream(device=args.device, channels=1, callback=callback,\n blocksize=int(samplerate * args.block_duration / 1000),\n samplerate=samplerate):\n while True:\n response = input()\n if response in ('', 'q', 'Q'):\n break\n for ch in response:\n if ch == '+':\n args.gain *= 2\n elif ch == '-':\n args.gain /= 2\n else:\n print('\\x1b[31;40m', usage_line.center(args.columns, '#'),\n '\\x1b[0m', sep='')\n break\nexcept KeyboardInterrupt:\n parser.exit('Interrupted by user')\nexcept Exception as e:\n parser.exit(type(e).__name__ + ': ' + str(e))\n"
] |
[
[
"matplotlib.cm.plasma",
"numpy.fft.rfft",
"numpy.clip"
]
] |
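An offline sketch of the per-block DSP inside this row's audio callback: FFT magnitude, clipping to [0, 1], then colormapping to RGBA bytes. The sample rate, gain, column count, and synthetic sine input are stand-ins for the live sounddevice stream.

```python
# Offline sketch of the callback's spectrum-to-color pipeline; the bin
# math mirrors the script, the 440 Hz sine replaces indata[:, 0].
import numpy as np
from matplotlib import cm

samplerate, gain, columns = 44100.0, 10.0, 80
low, high = 100.0, 2000.0
delta_f = (high - low) / (columns - 1)
fftsize = int(np.ceil(samplerate / delta_f))
low_bin = int(np.floor(low / delta_f))

t = np.arange(fftsize) / samplerate
block = np.sin(2 * np.pi * 440.0 * t)

magnitude = np.abs(np.fft.rfft(block, n=fftsize)) * gain / fftsize
normalized = np.clip(magnitude[low_bin:low_bin + columns], 0, 1)
rgba = [cm.plasma(x, bytes=True) for x in normalized]  # (r, g, b, a) tuples
```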
AluminiumOxide/pytorch_base_-tutorial
|
[
"a6d3bea6070c7c774dcd7c55d94b0a1441548c8b"
] |
[
"note5_3_nn_pool.py"
] |
[
"import torch\r\nimport torchvision\r\nfrom torch import nn\r\nfrom torch.nn import MaxPool2d\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.utils.tensorboard import SummaryWriter\r\n\r\ndataset = torchvision.datasets.CIFAR10(\"./CIFAR10\", train=False, download=True,transform=torchvision.transforms.ToTensor())\r\n\r\ndataloader = DataLoader(dataset, batch_size=64)\r\n\r\nclass Module(nn.Module):\r\n def __init__(self):\r\n super(Module, self).__init__()\r\n self.maxpool = MaxPool2d(kernel_size=3, ceil_mode=False)\r\n\r\n def forward(self, x):\r\n x = self.maxpool(x)\r\n return x\r\n\r\n\r\nif __name__ == '__main__':\r\n module = Module()\r\n writer = SummaryWriter(\"logs\")\r\n for step,data in enumerate(dataloader):\r\n imgs, targets = data\r\n output = module(imgs)\r\n print('input shape: {} output shape: {}'.format(imgs.shape,output.shape))\r\n\r\n writer.add_images(\"input\", imgs, step)\r\n writer.add_images(\"output\", output, step)\r\n\r\n writer.close()\r\n"
] |
[
[
"torch.nn.MaxPool2d",
"torch.utils.data.DataLoader",
"torch.utils.tensorboard.SummaryWriter"
]
] |
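A quick shape check for the pooling module in this row: with `kernel_size=3` (the stride defaults to the kernel size) and `ceil_mode=False`, a 32x32 CIFAR-10 image pools down to floor((32 - 3) / 3) + 1 = 10, while the batch and channel dimensions pass through unchanged.

```python
# Shape check for MaxPool2d(kernel_size=3, ceil_mode=False) on a
# CIFAR-10-sized batch, matching the dataloader above.
import torch
from torch.nn import MaxPool2d

x = torch.rand(64, 3, 32, 32)
pool = MaxPool2d(kernel_size=3, ceil_mode=False)
print(pool(x).shape)  # torch.Size([64, 3, 10, 10])
```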
tbs-lab/pycodes
|
[
"2ab296d9ebde6efd5bcefb905721d319e866cd69"
] |
[
"DirectMethods/solver.py"
] |
[
"# -*- coding: utf-8 -*-\nimport numpy as np\nimport scipy.linalg as spla\n\nfrom util import genp_factorize, gepp_factorize\nfrom butterfly import build_recursive_butterfly\n\n\ndef forward(L, b):\n \"\"\"Return a solution x of a linear system Lx = b with forward\n substitution.\n\n Arguments:\n L (array_like): A lower triangular matrix with unit diagonal.\n b (array_like): A right-hand side vector.\n\n Returns:\n numpy.ndarray: A solution of a linear system Lx = b.\n \"\"\"\n return spla.solve_triangular(L, b, lower=True, unit_diagonal=True)\n\n\ndef backward(U, b):\n \"\"\"Return a solution x of a linear system Ux = b with backward\n substitution.\n\n Arguments:\n U (array_like): A upper triangular matrix.\n b (array_like): A right-hand side vector.\n\n Returns:\n numpy.ndarray: A solution of a linear system Ux = b.\n \"\"\"\n return spla.solve_triangular(U, b, lower=False, unit_diagonal=False)\n\n\ndef gepp(A, b):\n \"\"\"Return a solution x of a linear system Ax = b with GEPP.\n\n Arguments:\n A (array_like): A square coefficient matrix.\n b (array_like): A right-hand side vector.\n\n Returns:\n numpy.ndarray: A solution of a linear system Ax = b.\n \"\"\"\n A = np.array(A, dtype=np.float)\n b = np.array(b, dtype=np.float)\n if A.shape[0] != A.shape[1]:\n raise ValueError(\"matrix must be square one\")\n if A.shape[0] != b.shape[0]:\n raise ValueError(\"matrix and vector size must be aligned\")\n\n # PLU factorization using Gaussian Elimination with Partial Pivoting (GEPP)\n P, L, U = gepp_factorize(A)\n\n # solve\n y = forward(L, np.dot(P.T, b))\n x = backward(U, y)\n\n return x\n\n\ndef prbt(A, b, depth):\n \"\"\"Return a solution x of a linear system Ax = b with partial recursive\n butterfly transformation (PRBT).\n\n This algorithm refers to the folloing article:\n Marc Baboulin et al.\n \"Accelerating linear system solutions using randomization\n techniques\", 2011,\n URL<https://hal.inria.fr/inria-00593306/document>.\n\n Arguments:\n A (array_like): A square coefficient matrix.\n b (array_like): A right-hand side vector.\n depth (int): A recursion depth (> 0).\n\n Returns:\n numpy.ndarray: A solution of a linear system Ax = b.\n \"\"\"\n A = np.array(A, dtype=np.float)\n b = np.array(b, dtype=np.float)\n if A.shape[0] != A.shape[1]:\n raise ValueError(\"matrix must be square one\")\n if A.shape[0] != b.shape[0]:\n raise ValueError(\"matrix and vector size must be aligned\")\n\n n = A.shape[0]\n augments = 0\n while (n + augments) % (2 ** (depth - 1)):\n augments += 1\n\n # augment a matrix size adaptively for any size of a system\n A = spla.block_diag(A, np.identity(augments))\n\n # get two recursive butterfly matrices\n W = build_recursive_butterfly(n + augments, depth)\n V = build_recursive_butterfly(n + augments, depth)\n\n # partial recursive butterfly transformation\n A_prbt = np.dot(np.dot(W.T, A), V)[:n, :n]\n\n # LU factorization using Gaussian Elimination with No Pivoting (GENP)\n L, U = genp_factorize(A_prbt)\n\n # solve\n y = forward(L, np.dot(W.T, b))\n y = backward(U, y)\n x = np.dot(V, y)\n\n return x\n"
] |
[
[
"numpy.dot",
"numpy.array",
"numpy.identity",
"scipy.linalg.solve_triangular"
]
] |
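A sanity-check sketch for this row's forward/backward substitution helpers, substituting scipy's own PLU factorization for the repo's `gepp_factorize` (which is not shown here). Note that the quoted code's `np.float` alias was removed in NumPy 1.24, so the sketch sticks to default dtypes.

```python
# Solve Ax = b via A = P L U, then triangular solves, mirroring the
# forward()/backward() helpers above but with scipy.linalg.lu.
import numpy as np
import scipy.linalg as spla

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
b = rng.standard_normal(5)

P, L, U = spla.lu(A)                      # A = P @ L @ U
y = spla.solve_triangular(L, P.T @ b, lower=True, unit_diagonal=True)
x = spla.solve_triangular(U, y, lower=False)
print(np.allclose(A @ x, b))              # True
```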
alan-cueva/wefe
|
[
"4c155c11c7836d05fb8d951f36ae1fc2d37c3655"
] |
[
"wefe/metrics/RND.py"
] |
[
"\"\"\"Relative Norm Distance (RND) metric implementation.\"\"\"\r\nfrom typing import Any, Callable, Dict, List, Tuple, Union\r\n\r\nimport numpy as np\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nfrom wefe.metrics.base_metric import BaseMetric\r\nfrom wefe.preprocessing import get_embeddings_from_query\r\nfrom wefe.query import Query\r\nfrom wefe.word_embedding_model import WordEmbeddingModel\r\n\r\n\r\nclass RND(BaseMetric):\r\n \"\"\"Relative Norm Distance (RND).\r\n\r\n It measures the relative strength of association of a set of neutral words\r\n with respect to two groups.\r\n\r\n References\r\n ----------\r\n | [1]: Nikhil Garg, Londa Schiebinger, Dan Jurafsky, and James Zou.\r\n | Word embeddings quantify 100 years of gender and ethnic stereotypes.\r\n | Proceedings of the National Academy of Sciences, 115(16):E3635–E3644,2018.\r\n | [2]: https://github.com/nikhgarg/EmbeddingDynamicStereotypes\r\n \"\"\"\r\n\r\n metric_template = (2, 1)\r\n metric_name = \"Relative Norm Distance\"\r\n metric_short_name = \"RND\"\r\n\r\n def __calc_distance(\r\n self, vec1: np.ndarray, vec2: np.ndarray, distance_type: str = \"norm\",\r\n ) -> float:\r\n if distance_type == \"norm\":\r\n return np.linalg.norm(np.subtract(vec1, vec2))\r\n elif distance_type == \"cos\":\r\n # c = np.dot(vec1, vec2) / np.linalg.norm(vec1) / np.linalg.norm(vec2)\r\n c = cosine_similarity([vec1], [vec2]).flatten()\r\n return c[0]\r\n else:\r\n raise ValueError(\r\n 'distance_type can be either \"norm\" or \"cos\", '\r\n \"got: {} \".format(distance_type)\r\n )\r\n\r\n def __calc_rnd(\r\n self,\r\n target_0: np.ndarray,\r\n target_1: np.ndarray,\r\n attribute: np.ndarray,\r\n attribute_words: list,\r\n distance_type: str,\r\n ) -> Tuple[float, Dict[str, float]]:\r\n\r\n # calculates the average wv for the group words.\r\n target_1_avg_vector = np.average(target_0, axis=0)\r\n target_2_avg_vector = np.average(target_1, axis=0)\r\n\r\n sum_of_distances = 0.0\r\n distance_by_words = {}\r\n\r\n for attribute_word_index, attribute_embedding in enumerate(attribute):\r\n\r\n # calculate the distance\r\n current_distance = self.__calc_distance(\r\n attribute_embedding, target_1_avg_vector, distance_type=distance_type,\r\n ) - self.__calc_distance(\r\n attribute_embedding, target_2_avg_vector, distance_type=distance_type,\r\n )\r\n\r\n # add the distance of the neutral word to the accumulated\r\n # distances.\r\n sum_of_distances += current_distance\r\n # add the distance of the neutral word to the list of distances\r\n # by word\r\n distance_by_words[attribute_words[attribute_word_index]] = current_distance\r\n\r\n sorted_distances_by_word = {\r\n k: v for k, v in sorted(distance_by_words.items(), key=lambda item: item[1])\r\n }\r\n\r\n # calculate the average of the distances and return\r\n mean_distance = sum_of_distances / len(distance_by_words)\r\n return mean_distance, sorted_distances_by_word\r\n\r\n def run_query(\r\n self,\r\n query: Query,\r\n model: WordEmbeddingModel,\r\n distance: str = \"norm\",\r\n lost_vocabulary_threshold: float = 0.2,\r\n preprocessors: List[Dict[str, Union[str, bool, Callable]]] = [{}],\r\n strategy: str = \"first\",\r\n normalize: bool = False,\r\n warn_not_found_words: bool = False,\r\n *args: Any,\r\n **kwargs: Any\r\n ) -> Dict[str, Any]:\r\n \"\"\"Calculate the RND metric over the provided parameters.\r\n\r\n Parameters\r\n ----------\r\n query : Query\r\n A Query object that contains the target and attribute sets to be tested.\r\n\r\n model : WordEmbeddingModel\r\n A word 
embedding model.\r\n\r\n distance : str, optional\r\n Specifies which type of distance will be calculated. It could be:\r\n {norm, cos} , by default 'norm'.\r\n\r\n preprocessors : List[Dict[str, Union[str, bool, Callable]]]\r\n A list with preprocessor options.\r\n\r\n A ``preprocessor`` is a dictionary that specifies what processing(s) are\r\n performed on each word before it is looked up in the model vocabulary.\r\n For example, the ``preprocessor``\r\n ``{'lowecase': True, 'strip_accents': True}`` allows you to lowercase\r\n and remove the accent from each word before searching for them in the\r\n model vocabulary. Note that an empty dictionary ``{}`` indicates that no\r\n preprocessing is done.\r\n\r\n The possible options for a preprocessor are:\r\n\r\n * ``lowercase``: ``bool``. Indicates that the words are transformed to\r\n lowercase.\r\n * ``uppercase``: ``bool``. Indicates that the words are transformed to\r\n uppercase.\r\n * ``titlecase``: ``bool``. Indicates that the words are transformed to\r\n titlecase.\r\n * ``strip_accents``: ``bool``, ``{'ascii', 'unicode'}``: Specifies that\r\n the accents of the words are eliminated. The stripping type can be\r\n specified. True uses ‘unicode’ by default.\r\n * ``preprocessor``: ``Callable``. It receives a function that operates\r\n on each word. In the case of specifying a function, it overrides the\r\n default preprocessor (i.e., the previous options stop working).\r\n\r\n A list of preprocessor options allows you to search for several\r\n variants of the words into the model. For example, the preprocessors\r\n ``[{}, {\"lowercase\": True, \"strip_accents\": True}]``\r\n ``{}`` allows first to search for the original words in the vocabulary of\r\n the model. In case some of them are not found,\r\n ``{\"lowercase\": True, \"strip_accents\": True}`` is executed on these words\r\n and then they are searched in the model vocabulary.\r\n\r\n strategy : str, optional\r\n The strategy indicates how it will use the preprocessed words: 'first' will\r\n include only the first transformed word found. all' will include all\r\n transformed words found, by default \"first\".\r\n\r\n normalize : bool, optional\r\n True indicates that embeddings will be normalized, by default False\r\n\r\n warn_not_found_words : bool, optional\r\n Specifies if the function will warn (in the logger)\r\n the words that were not found in the model's vocabulary, by default False.\r\n\r\n Returns\r\n -------\r\n Dict[str, Any]\r\n A dictionary with the query name, the resulting score of the metric,\r\n and a dictionary with the distances of each attribute word\r\n with respect to the target sets means.\r\n\r\n Examples\r\n --------\r\n >>> from wefe.metrics import RND\r\n >>> from wefe.query import Query\r\n >>> from wefe.utils import load_test_model\r\n >>>\r\n >>> # define the query\r\n >>> query = Query(\r\n ... target_sets=[\r\n ... [\"female\", \"woman\", \"girl\", \"sister\", \"she\", \"her\", \"hers\",\r\n ... \"daughter\"],\r\n ... [\"male\", \"man\", \"boy\", \"brother\", \"he\", \"him\", \"his\", \"son\"],\r\n ... ],\r\n ... attribute_sets=[\r\n ... [\r\n ... \"home\", \"parents\", \"children\", \"family\", \"cousins\", \"marriage\",\r\n ... \"wedding\", \"relatives\",\r\n ... ],\r\n ... ],\r\n ... target_sets_names=[\"Female terms\", \"Male Terms\"],\r\n ... attribute_sets_names=[\"Family\"],\r\n ... 
)\r\n >>>\r\n >>> # load the model (in this case, the test model included in wefe)\r\n >>> model = load_test_model()\r\n >>>\r\n >>> # instance the metric and run the query\r\n >>> RND().run_query(query, model) # doctest: +SKIP\r\n {'query_name': 'Female terms and Male Terms wrt Family',\r\n 'result': 0.030381828546524048,\r\n 'rnd': 0.030381828546524048,\r\n 'distances_by_word': {'wedding': -0.1056304,\r\n 'marriage': -0.10163283,\r\n 'children': -0.068374634,\r\n 'parents': 0.00097084045,\r\n 'relatives': 0.0483346,\r\n 'family': 0.12408042,\r\n 'cousins': 0.17195654,\r\n 'home': 0.1733501}}\r\n >>>\r\n >>> # if you want the embeddings to be normalized before calculating the metrics\r\n >>> # use the normalize parameter as True before executing the query.\r\n >>> RND().run_query(query, model, normalize=True) # doctest: +SKIP\r\n {'query_name': 'Female terms and Male Terms wrt Family',\r\n 'result': -0.006278775632381439,\r\n 'rnd': -0.006278775632381439,\r\n 'distances_by_word': {'children': -0.05244279,\r\n 'wedding': -0.04642248,\r\n 'marriage': -0.04268837,\r\n 'parents': -0.022358716,\r\n 'relatives': 0.005497098,\r\n 'family': 0.023389697,\r\n 'home': 0.04009247,\r\n 'cousins': 0.044702888}}\r\n >>>\r\n >>> # if you want to use cosine distance instead of euclidean norm\r\n >>> # use the distance parameter as 'cos' before executing the query.\r\n >>> RND().run_query(query, model, normalize=True, distance='cos') # doctest: +SKIP\r\n {'query_name': 'Female terms and Male Terms wrt Family',\r\n 'result': 0.03643466345965862,\r\n 'rnd': 0.03643466345965862,\r\n 'distances_by_word': {'cousins': -0.035989374,\r\n 'home': -0.026971221,\r\n 'family': -0.009296179,\r\n 'relatives': 0.015690982,\r\n 'parents': 0.051281124,\r\n 'children': 0.09255883,\r\n 'marriage': 0.09959312,\r\n 'wedding': 0.104610026}}\r\n \"\"\"\r\n # check the types of the provided arguments (only the defaults).\r\n self._check_input(query, model, locals())\r\n\r\n # transform query word sets into embeddings\r\n embeddings = get_embeddings_from_query(\r\n model=model,\r\n query=query,\r\n lost_vocabulary_threshold=lost_vocabulary_threshold,\r\n preprocessors=preprocessors,\r\n strategy=strategy,\r\n normalize=normalize,\r\n warn_not_found_words=warn_not_found_words,\r\n )\r\n\r\n # if there is any/some set has less words than the allowed limit,\r\n # return the default value (nan)\r\n if embeddings is None:\r\n return {\r\n \"query_name\": query.query_name,\r\n \"result\": np.nan,\r\n \"rnd\": np.nan,\r\n \"distances_by_word\": {},\r\n }\r\n\r\n # get the targets and attribute sets transformed into embeddings.\r\n target_sets, attribute_sets = embeddings\r\n\r\n # get only the embeddings of the sets.\r\n target_embeddings = list(target_sets.values())\r\n attribute_embeddings = list(attribute_sets.values())\r\n\r\n target_0_embeddings = np.array(list(target_embeddings[0].values()))\r\n target_1_embeddings = np.array(list(target_embeddings[1].values()))\r\n attribute_0_embeddings = np.array(list(attribute_embeddings[0].values()))\r\n\r\n # get a list with the transformed attribute words\r\n attribute_0_words = list(attribute_embeddings[0].keys())\r\n\r\n rnd, distances_by_word = self.__calc_rnd(\r\n target_0_embeddings,\r\n target_1_embeddings,\r\n attribute_0_embeddings,\r\n attribute_0_words,\r\n distance,\r\n )\r\n\r\n return {\r\n \"query_name\": query.query_name,\r\n \"result\": rnd,\r\n \"rnd\": rnd,\r\n \"distances_by_word\": distances_by_word,\r\n }\r\n"
] |
[
[
"numpy.average",
"numpy.subtract",
"sklearn.metrics.pairwise.cosine_similarity"
]
] |
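A NumPy-only sketch of the core RND computation in this row: average each target set, then score every attribute vector by the difference of its Euclidean distances to the two averages. The embedding matrices here are random stand-ins for real word vectors.

```python
# Core of __calc_rnd with distance_type='norm', on random stand-ins.
import numpy as np

rng = np.random.default_rng(0)
target_0 = rng.standard_normal((8, 50))    # e.g., female-term embeddings
target_1 = rng.standard_normal((8, 50))    # e.g., male-term embeddings
attributes = rng.standard_normal((6, 50))  # e.g., family-term embeddings

avg_0 = np.average(target_0, axis=0)
avg_1 = np.average(target_1, axis=0)

distances = [np.linalg.norm(np.subtract(a, avg_0))
             - np.linalg.norm(np.subtract(a, avg_1)) for a in attributes]
rnd = sum(distances) / len(distances)      # mean relative norm distance
```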
gunjanpatil/keras_imagenet
|
[
"5bc50245d8061e634fda291f1d22a581ee329eb5"
] |
[
"models/efficientnet.py"
] |
[
"# Copyright 2019 The TensorFlow Authors, Pavel Yakubovskiy, Björn Barz. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Contains definitions for EfficientNet model.\n\n[1] Mingxing Tan, Quoc V. Le\n EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks.\n ICML'19, https://arxiv.org/abs/1905.11946\n\"\"\"\n\n# Code of this model implementation is mostly written by\n# Björn Barz ([@Callidior](https://github.com/Callidior))\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport math\nimport string\nimport collections\n\nfrom six.moves import xrange\nfrom keras_applications.imagenet_utils import _obtain_input_shape\nfrom keras_applications.imagenet_utils import decode_predictions\n\nfrom tensorflow.keras import backend\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import models\nfrom tensorflow.keras import utils as keras_utils\n\n\nBASE_WEIGHTS_PATH = (\n 'https://github.com/Callidior/keras-applications/'\n 'releases/download/efficientnet/')\n\nWEIGHTS_HASHES = {\n 'efficientnet-b0': ('163292582f1c6eaca8e7dc7b51b01c61'\n '5b0dbc0039699b4dcd0b975cc21533dc',\n 'c1421ad80a9fc67c2cc4000f666aa507'\n '89ce39eedb4e06d531b0c593890ccff3'),\n 'efficientnet-b1': ('d0a71ddf51ef7a0ca425bab32b7fa7f1'\n '6043ee598ecee73fc674d9560c8f09b0',\n '75de265d03ac52fa74f2f510455ba64f'\n '9c7c5fd96dc923cd4bfefa3d680c4b68'),\n 'efficientnet-b2': ('bb5451507a6418a574534aa76a91b106'\n 'f6b605f3b5dde0b21055694319853086',\n '433b60584fafba1ea3de07443b74cfd3'\n '2ce004a012020b07ef69e22ba8669333'),\n 'efficientnet-b3': ('03f1fba367f070bd2545f081cfa7f3e7'\n '6f5e1aa3b6f4db700f00552901e75ab9',\n 'c5d42eb6cfae8567b418ad3845cfd63a'\n 'a48b87f1bd5df8658a49375a9f3135c7'),\n 'efficientnet-b4': ('98852de93f74d9833c8640474b2c698d'\n 'b45ec60690c75b3bacb1845e907bf94f',\n '7942c1407ff1feb34113995864970cd4'\n 'd9d91ea64877e8d9c38b6c1e0767c411'),\n 'efficientnet-b5': ('30172f1d45f9b8a41352d4219bf930ee'\n '3339025fd26ab314a817ba8918fefc7d',\n '9d197bc2bfe29165c10a2af8c2ebc675'\n '07f5d70456f09e584c71b822941b1952'),\n 'efficientnet-b6': ('f5270466747753485a082092ac9939ca'\n 'a546eb3f09edca6d6fff842cad938720',\n '1d0923bb038f2f8060faaf0a0449db4b'\n '96549a881747b7c7678724ac79f427ed'),\n 'efficientnet-b7': ('876a41319980638fa597acbbf956a82d'\n '10819531ff2dcb1a52277f10c7aefa1a',\n '60b56ff3a8daccc8d96edfd40b204c11'\n '3e51748da657afd58034d54d3cec2bac')\n}\n\nBlockArgs = collections.namedtuple('BlockArgs', [\n 'kernel_size', 'num_repeat', 'input_filters', 'output_filters',\n 'expand_ratio', 'id_skip', 'strides', 'se_ratio'\n])\n# defaults will be a public argument for namedtuple in Python 3.7\n# https://docs.python.org/3/library/collections.html#collections.namedtuple\nBlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)\n\nDEFAULT_BLOCKS_ARGS = [\n BlockArgs(kernel_size=3, num_repeat=1, input_filters=32, 
output_filters=16,\n expand_ratio=1, id_skip=True, strides=[1, 1], se_ratio=0.25),\n BlockArgs(kernel_size=3, num_repeat=2, input_filters=16, output_filters=24,\n expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),\n BlockArgs(kernel_size=5, num_repeat=2, input_filters=24, output_filters=40,\n expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),\n BlockArgs(kernel_size=3, num_repeat=3, input_filters=40, output_filters=80,\n expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),\n BlockArgs(kernel_size=5, num_repeat=3, input_filters=80, output_filters=112,\n expand_ratio=6, id_skip=True, strides=[1, 1], se_ratio=0.25),\n BlockArgs(kernel_size=5, num_repeat=4, input_filters=112, output_filters=192,\n expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),\n BlockArgs(kernel_size=3, num_repeat=1, input_filters=192, output_filters=320,\n expand_ratio=6, id_skip=True, strides=[1, 1], se_ratio=0.25)\n]\n\nCONV_KERNEL_INITIALIZER = {\n 'class_name': 'VarianceScaling',\n 'config': {\n 'scale': 2.0,\n 'mode': 'fan_out',\n # EfficientNet actually uses an untruncated normal distribution for\n # initializing conv layers, but keras.initializers.VarianceScaling use\n # a truncated distribution.\n # We decided against a custom initializer for better serializability.\n 'distribution': 'normal'\n }\n}\n\nDENSE_KERNEL_INITIALIZER = {\n 'class_name': 'VarianceScaling',\n 'config': {\n 'scale': 1. / 3.,\n 'mode': 'fan_out',\n 'distribution': 'uniform'\n }\n}\n\n\ndef get_swish(**kwargs):\n def swish(x):\n \"\"\"Swish activation function: x * sigmoid(x).\n Reference: [Searching for Activation Functions](https://arxiv.org/abs/1710.05941)\n \"\"\"\n\n if backend.backend() == 'tensorflow':\n try:\n # The native TF implementation has a more\n # memory-efficient gradient implementation\n return backend.tf.nn.swish(x)\n except AttributeError:\n pass\n\n return x * backend.sigmoid(x)\n return swish\n\n\ndef get_dropout(**kwargs):\n \"\"\"Wrapper over custom dropout. 
Fix problem of ``None`` shape for tf.keras.\n It is not possible to define FixedDropout class as global object,\n because we do not have modules for inheritance at first time.\n\n Issue:\n https://github.com/tensorflow/tensorflow/issues/30946\n \"\"\"\n class FixedDropout(layers.Dropout):\n def _get_noise_shape(self, inputs):\n if self.noise_shape is None:\n return self.noise_shape\n\n symbolic_shape = backend.shape(inputs)\n noise_shape = [symbolic_shape[axis] if shape is None else shape\n for axis, shape in enumerate(self.noise_shape)]\n return tuple(noise_shape)\n\n return FixedDropout\n\n\ndef round_filters(filters, width_coefficient, depth_divisor):\n \"\"\"Round number of filters based on width multiplier.\"\"\"\n filters *= width_coefficient\n new_filters = int(filters + depth_divisor / 2) // depth_divisor * depth_divisor\n new_filters = max(depth_divisor, new_filters)\n # Make sure that round down does not go down by more than 10%.\n if new_filters < 0.9 * filters:\n new_filters += depth_divisor\n return int(new_filters)\n\n\ndef round_repeats(repeats, depth_coefficient):\n \"\"\"Round number of repeats based on depth multiplier.\"\"\"\n return int(math.ceil(depth_coefficient * repeats))\n\n\ndef mb_conv_block(inputs, block_args, activation, drop_rate=None, prefix='', ):\n \"\"\"Mobile Inverted Residual Bottleneck.\"\"\"\n has_se = (block_args.se_ratio is not None) and (0 < block_args.se_ratio <= 1)\n bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1\n\n # workaround over non working dropout with None in noise_shape in tf.keras\n Dropout = get_dropout(\n backend=backend,\n layers=layers,\n models=models,\n utils=keras_utils\n )\n\n # Expansion phase\n filters = block_args.input_filters * block_args.expand_ratio\n if block_args.expand_ratio != 1:\n x = layers.Conv2D(filters, 1,\n padding='same',\n use_bias=False,\n kernel_initializer=CONV_KERNEL_INITIALIZER,\n name=prefix + 'expand_conv')(inputs)\n x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'expand_bn')(x)\n x = layers.Activation(activation, name=prefix + 'expand_activation')(x)\n else:\n x = inputs\n\n # Depthwise Convolution\n x = layers.DepthwiseConv2D(block_args.kernel_size,\n strides=block_args.strides,\n padding='same',\n use_bias=False,\n depthwise_initializer=CONV_KERNEL_INITIALIZER,\n name=prefix + 'dwconv')(x)\n x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'bn')(x)\n x = layers.Activation(activation, name=prefix + 'activation')(x)\n\n # Squeeze and Excitation phase\n if has_se:\n num_reduced_filters = max(1, int(\n block_args.input_filters * block_args.se_ratio\n ))\n se_tensor = layers.GlobalAveragePooling2D(name=prefix + 'se_squeeze')(x)\n\n target_shape = (1, 1, filters) if backend.image_data_format() == 'channels_last' else (filters, 1, 1)\n se_tensor = layers.Reshape(target_shape, name=prefix + 'se_reshape')(se_tensor)\n se_tensor = layers.Conv2D(num_reduced_filters, 1,\n activation=activation,\n padding='same',\n use_bias=True,\n kernel_initializer=CONV_KERNEL_INITIALIZER,\n name=prefix + 'se_reduce')(se_tensor)\n se_tensor = layers.Conv2D(filters, 1,\n activation='sigmoid',\n padding='same',\n use_bias=True,\n kernel_initializer=CONV_KERNEL_INITIALIZER,\n name=prefix + 'se_expand')(se_tensor)\n if backend.backend() == 'theano':\n # For the Theano backend, we have to explicitly make\n # the excitation weights broadcastable.\n pattern = ([True, True, True, False] if backend.image_data_format() == 'channels_last'\n else [True, False, True, True])\n se_tensor = 
layers.Lambda(\n lambda x: backend.pattern_broadcast(x, pattern),\n name=prefix + 'se_broadcast')(se_tensor)\n x = layers.multiply([x, se_tensor], name=prefix + 'se_excite')\n\n # Output phase\n x = layers.Conv2D(block_args.output_filters, 1,\n padding='same',\n use_bias=False,\n kernel_initializer=CONV_KERNEL_INITIALIZER,\n name=prefix + 'project_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'project_bn')(x)\n if block_args.id_skip and all(\n s == 1 for s in block_args.strides\n ) and block_args.input_filters == block_args.output_filters:\n if drop_rate and (drop_rate > 0):\n x = Dropout(drop_rate,\n noise_shape=(None, 1, 1, 1),\n name=prefix + 'drop')(x)\n x = layers.add([x, inputs], name=prefix + 'add')\n\n return x\n\n\ndef EfficientNet(width_coefficient,\n depth_coefficient,\n default_resolution,\n dropout_rate=0.2,\n drop_connect_rate=0.2,\n depth_divisor=8,\n blocks_args=DEFAULT_BLOCKS_ARGS,\n model_name='efficientnet',\n include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n \"\"\"Instantiates the EfficientNet architecture using given scaling coefficients.\n Optionally loads weights pre-trained on ImageNet.\n Note that the data format convention used by the model is\n the one specified in your Keras config at `~/.keras/keras.json`.\n\n # Arguments\n width_coefficient: float, scaling coefficient for network width.\n depth_coefficient: float, scaling coefficient for network depth.\n default_resolution: int, default input image size.\n dropout_rate: float, dropout rate before final classifier layer.\n drop_connect_rate: float, dropout rate at skip connections.\n depth_divisor: int.\n blocks_args: A list of BlockArgs to construct block modules.\n model_name: string, model name.\n include_top: whether to include the fully-connected\n layer at the top of the network.\n weights: one of `None` (random initialization),\n 'imagenet' (pre-training on ImageNet),\n or the path to the weights file to be loaded.\n input_tensor: optional Keras tensor\n (i.e. 
output of `layers.Input()`)\n to use as image input for the model.\n input_shape: optional shape tuple, only to be specified\n if `include_top` is False.\n It should have exactly 3 inputs channels.\n pooling: optional pooling mode for feature extraction\n when `include_top` is `False`.\n - `None` means that the output of the model will be\n the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: optional number of classes to classify images\n into, only to be specified if `include_top` is True, and\n if no `weights` argument is specified.\n\n # Returns\n A Keras model instance.\n\n # Raises\n ValueError: in case of invalid argument for `weights`,\n or invalid input shape.\n \"\"\"\n if not (weights in {'imagenet', None} or os.path.exists(weights)):\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization), `imagenet` '\n '(pre-training on ImageNet), '\n 'or the path to the weights file to be loaded.')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as `\"imagenet\"` with `include_top`'\n ' as true, `classes` should be 1000')\n\n # Determine proper input shape\n input_shape = _obtain_input_shape(input_shape,\n default_size=default_resolution,\n min_size=32,\n data_format=backend.image_data_format(),\n require_flatten=include_top,\n weights=weights)\n\n if input_tensor is None:\n img_input = layers.Input(shape=input_shape)\n else:\n if backend.backend() == 'tensorflow':\n from tensorflow.python.keras.backend import is_keras_tensor\n else:\n is_keras_tensor = backend.is_keras_tensor\n if not is_keras_tensor(input_tensor):\n img_input = layers.Input(tensor=input_tensor, shape=input_shape)\n else:\n img_input = input_tensor\n\n bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1\n activation = get_swish(**kwargs)\n\n # Build stem\n x = img_input\n x = layers.Conv2D(round_filters(32, width_coefficient, depth_divisor), 3,\n strides=(2, 2),\n padding='same',\n use_bias=False,\n kernel_initializer=CONV_KERNEL_INITIALIZER,\n name='stem_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x)\n x = layers.Activation(activation, name='stem_activation')(x)\n\n # Build blocks\n num_blocks_total = sum(block_args.num_repeat for block_args in blocks_args)\n block_num = 0\n for idx, block_args in enumerate(blocks_args):\n assert block_args.num_repeat > 0\n # Update block input and output filters based on depth multiplier.\n block_args = block_args._replace(\n input_filters=round_filters(block_args.input_filters,\n width_coefficient, depth_divisor),\n output_filters=round_filters(block_args.output_filters,\n width_coefficient, depth_divisor),\n num_repeat=round_repeats(block_args.num_repeat, depth_coefficient))\n\n # The first block needs to take care of stride and filter size increase.\n drop_rate = drop_connect_rate * float(block_num) / num_blocks_total\n x = mb_conv_block(x, block_args,\n activation=activation,\n drop_rate=drop_rate,\n prefix='block{}a_'.format(idx + 1))\n block_num += 1\n if block_args.num_repeat > 1:\n # pylint: disable=protected-access\n block_args = block_args._replace(\n input_filters=block_args.output_filters, strides=[1, 1])\n # pylint: enable=protected-access\n for bidx in xrange(block_args.num_repeat - 1):\n drop_rate = 
drop_connect_rate * float(block_num) / num_blocks_total\n block_prefix = 'block{}{}_'.format(\n idx + 1,\n string.ascii_lowercase[bidx + 1]\n )\n x = mb_conv_block(x, block_args,\n activation=activation,\n drop_rate=drop_rate,\n prefix=block_prefix)\n block_num += 1\n\n # Build top\n x = layers.Conv2D(round_filters(1280, width_coefficient, depth_divisor), 1,\n padding='same',\n use_bias=False,\n kernel_initializer=CONV_KERNEL_INITIALIZER,\n name='top_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, name='top_bn')(x)\n x = layers.Activation(activation, name='top_activation')(x)\n\n if include_top:\n x = layers.GlobalAveragePooling2D(name='avg_pool')(x)\n if dropout_rate and dropout_rate > 0:\n x = layers.Dropout(dropout_rate, name='top_dropout')(x)\n x = layers.Dense(classes,\n activation='softmax',\n kernel_initializer=DENSE_KERNEL_INITIALIZER,\n name='probs')(x)\n else:\n if pooling == 'avg':\n x = layers.GlobalAveragePooling2D(name='avg_pool')(x)\n elif pooling == 'max':\n x = layers.GlobalMaxPooling2D(name='max_pool')(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = keras_utils.get_source_inputs(input_tensor)\n else:\n inputs = img_input\n\n # Create model.\n model = models.Model(inputs, x, name=model_name)\n\n # Load weights.\n if weights == 'imagenet':\n if include_top:\n file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_autoaugment.h5'\n file_hash = WEIGHTS_HASHES[model_name][0]\n else:\n file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'\n file_hash = WEIGHTS_HASHES[model_name][1]\n weights_path = keras_utils.get_file(file_name,\n BASE_WEIGHTS_PATH + file_name,\n cache_subdir='models',\n file_hash=file_hash)\n model.load_weights(weights_path)\n elif weights is not None:\n model.load_weights(weights)\n\n return model\n\n\ndef EfficientNetB0(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(1.0, 1.0, 224, 0.2,\n model_name='efficientnet-b0',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, classes=classes,\n **kwargs)\n\n\ndef EfficientNetB1(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(1.0, 1.1, 240, 0.2,\n model_name='efficientnet-b1',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, classes=classes,\n **kwargs)\n\n\ndef EfficientNetB2(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(1.1, 1.2, 260, 0.3,\n model_name='efficientnet-b2',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, classes=classes,\n **kwargs)\n\n\ndef EfficientNetB3(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(1.2, 1.4, 300, 0.3,\n model_name='efficientnet-b3',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, classes=classes,\n **kwargs)\n\n\ndef EfficientNetB4(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(1.4, 
1.8, 380, 0.4,\n model_name='efficientnet-b4',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, classes=classes,\n **kwargs)\n\n\ndef EfficientNetB5(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(1.6, 2.2, 456, 0.4,\n model_name='efficientnet-b5',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, classes=classes,\n **kwargs)\n\n\ndef EfficientNetB6(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(1.8, 2.6, 528, 0.5,\n model_name='efficientnet-b6',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, classes=classes,\n **kwargs)\n\n\ndef EfficientNetB7(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(2.0, 3.1, 600, 0.5,\n model_name='efficientnet-b7',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, classes=classes,\n **kwargs)\n\n\ndef EfficientNetB0_224x224(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(1.0, 1.0, 224, 0.2,\n model_name='efficientnet-b0',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, classes=classes,\n **kwargs)\n\n\ndef EfficientNetB1_224x224(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(1.0, 1.1, 224, 0.2,\n model_name='efficientnet-b1',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, classes=classes,\n **kwargs)\n\n\ndef EfficientNetB2_224x224(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(1.1, 1.2, 224, 0.3,\n model_name='efficientnet-b2',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, classes=classes,\n **kwargs)\n\n\ndef EfficientNetB3_224x224(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(1.2, 1.4, 224, 0.3,\n model_name='efficientnet-b3',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, classes=classes,\n **kwargs)\n\n\ndef EfficientNetB4_224x224(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(1.4, 1.8, 224, 0.4,\n model_name='efficientnet-b4',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, classes=classes,\n **kwargs)\n\n\ndef EfficientNetB5_224x224(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(1.6, 2.2, 224, 0.4,\n model_name='efficientnet-b5',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, 
classes=classes,\n **kwargs)\n\n\ndef EfficientNetB6_224x224(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(1.8, 2.6, 224, 0.5,\n model_name='efficientnet-b6',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, classes=classes,\n **kwargs)\n\n\ndef EfficientNetB7_224x224(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n return EfficientNet(2.0, 3.1, 224, 0.5,\n model_name='efficientnet-b7',\n include_top=include_top, weights=weights,\n input_tensor=input_tensor, input_shape=input_shape,\n pooling=pooling, classes=classes,\n **kwargs)\n\n\nsetattr(EfficientNetB0, '__doc__', EfficientNet.__doc__)\nsetattr(EfficientNetB1, '__doc__', EfficientNet.__doc__)\nsetattr(EfficientNetB2, '__doc__', EfficientNet.__doc__)\nsetattr(EfficientNetB3, '__doc__', EfficientNet.__doc__)\nsetattr(EfficientNetB4, '__doc__', EfficientNet.__doc__)\nsetattr(EfficientNetB5, '__doc__', EfficientNet.__doc__)\nsetattr(EfficientNetB6, '__doc__', EfficientNet.__doc__)\nsetattr(EfficientNetB7, '__doc__', EfficientNet.__doc__)\nsetattr(EfficientNetB0_224x224, '__doc__', EfficientNet.__doc__)\nsetattr(EfficientNetB1_224x224, '__doc__', EfficientNet.__doc__)\nsetattr(EfficientNetB2_224x224, '__doc__', EfficientNet.__doc__)\nsetattr(EfficientNetB3_224x224, '__doc__', EfficientNet.__doc__)\nsetattr(EfficientNetB4_224x224, '__doc__', EfficientNet.__doc__)\nsetattr(EfficientNetB5_224x224, '__doc__', EfficientNet.__doc__)\nsetattr(EfficientNetB6_224x224, '__doc__', EfficientNet.__doc__)\nsetattr(EfficientNetB7_224x224, '__doc__', EfficientNet.__doc__)\n"
] |
[
[
"tensorflow.keras.backend.backend",
"tensorflow.keras.layers.Conv2D",
"tensorflow.python.keras.backend.is_keras_tensor",
"tensorflow.keras.layers.DepthwiseConv2D",
"tensorflow.keras.backend.sigmoid",
"tensorflow.keras.backend.image_data_format",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.add",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.backend.tf.nn.swish",
"tensorflow.keras.utils.get_source_inputs",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.backend.pattern_broadcast",
"tensorflow.keras.layers.multiply",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.utils.get_file",
"tensorflow.keras.backend.shape",
"tensorflow.keras.layers.GlobalMaxPooling2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Input"
]
] |
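A minimal, runnable sketch of the compound-scaling arithmetic in the EfficientNet source above; round_filters and round_repeats are copied from the row's code, and the example coefficients (width 1.1, depth 1.2) are the ones the EfficientNetB2 constructor passes. Illustrative only, not part of the dataset record:

    import math

    def round_filters(filters, width_coefficient, depth_divisor=8):
        # Scale by the width multiplier, round to a multiple of depth_divisor,
        # and never round down by more than 10% (the guard in the source).
        filters *= width_coefficient
        new_filters = int(filters + depth_divisor / 2) // depth_divisor * depth_divisor
        new_filters = max(depth_divisor, new_filters)
        if new_filters < 0.9 * filters:
            new_filters += depth_divisor
        return int(new_filters)

    def round_repeats(repeats, depth_coefficient):
        # The depth multiplier always rounds up.
        return int(math.ceil(depth_coefficient * repeats))

    assert round_filters(32, 1.1) == 32    # 35.2 rounds to 32, within the 10% guard
    assert round_filters(320, 1.1) == 352
    assert round_repeats(4, 1.2) == 5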
xingularity/yellowbrick
|
[
"d13065786b25323558c493222d39b67f6b5243d9"
] |
[
"yellowbrick/utils/helpers.py"
] |
[
"# yellowbrick.utils.helpers\n# Helper functions and generic utilities for use in Yellowbrick code.\n#\n# Author: Benjamin Bengfort <[email protected]>\n# Created: Fri May 19 10:39:30 2017 -0700\n#\n# Copyright (C) 2017 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: helpers.py [79cd8cf] [email protected] $\n\n\"\"\"\nHelper functions and generic utilities for use in Yellowbrick code.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport re\nimport numpy as np\n\nfrom sklearn.pipeline import Pipeline\n\nfrom .types import is_estimator\nfrom yellowbrick.exceptions import YellowbrickTypeError\n\n\n##########################################################################\n## Model and Feature Information\n##########################################################################\n\ndef get_model_name(model):\n \"\"\"\n Detects the model name for a Scikit-Learn model or pipeline.\n\n Parameters\n ----------\n model: class or instance\n The object to determine the name for. If the model is an estimator it\n returns the class name; if it is a Pipeline it returns the class name\n of the final transformer or estimator in the Pipeline.\n\n Returns\n -------\n name : string\n The name of the model or pipeline.\n \"\"\"\n if not is_estimator(model):\n raise YellowbrickTypeError(\n \"Cannot detect the model name for non estimator: '{}'\".format(\n type(model)\n )\n )\n\n else:\n if isinstance(model, Pipeline):\n return get_model_name(model.steps[-1][-1])\n else:\n return model.__class__.__name__\n\n\ndef has_ndarray_int_columns(features, X):\n \"\"\" Checks if numeric feature columns exist in ndarray \"\"\"\n _, ncols = X.shape\n if not all(d.isdigit() for d in features if isinstance(d, str)) or not isinstance(X, np.ndarray):\n return False\n ndarray_columns = np.arange(0, ncols)\n feature_cols = np.unique([int(d) for d in features])\n return all(np.in1d(feature_cols, ndarray_columns))\n\n# Alias for closer name to isinstance and issubclass\nhasndarrayintcolumns = has_ndarray_int_columns\n\n\ndef is_monotonic(a, increasing=True):\n \"\"\"\n Tests whether a vector a has monotonicity.\n\n Parameters\n ----------\n a : array-like\n Array that should be tested for monotonicity\n\n increasing : bool, default: True\n Test if the array is montonically increasing, otherwise test if the\n array is montonically decreasing.\n \"\"\"\n a = np.asarray(a) # ensure a is array-like\n\n if a.ndim > 1:\n raise ValueError(\"not supported for multi-dimensonal arrays\")\n\n if len(a) <= 1:\n return True\n\n if increasing:\n return np.all(a[1:] >= a[:-1], axis=0)\n return np.all(a[1:] <= a[:-1], axis=0)\n\n\n##########################################################################\n## Numeric Computations\n##########################################################################\n\n#From here: http://stackoverflow.com/questions/26248654/numpy-return-0-with-divide-by-zero\ndef div_safe( numerator, denominator ):\n \"\"\"\n Ufunc-extension that returns 0 instead of nan when dividing numpy arrays\n\n Parameters\n ----------\n numerator: array-like\n\n denominator: scalar or array-like that can be validly divided by the numerator\n\n returns a numpy array\n\n example: div_safe( [-1, 0, 1], 0 ) == [0, 0, 0]\n \"\"\"\n #First handle scalars\n if np.isscalar(numerator):\n raise ValueError(\"div_safe should only be used with an array-like numerator\")\n\n #Then numpy arrays\n 
try:\n with np.errstate(divide='ignore', invalid='ignore'):\n result = np.true_divide( numerator, denominator )\n result[ ~ np.isfinite( result )] = 0 # -inf inf NaN\n return result\n except ValueError as e:\n raise e\n\n\ndef prop_to_size(vals, mi=0.0, ma=5.0, power=0.5, log=False):\n \"\"\"\n Converts an array of property values (e.g. a metric or score) to values\n that are more useful for marker sizes, line widths, or other visual\n sizes. The new sizes are computed as:\n\n y = mi + (ma - mi)\\left(\\frac{x_i - \\min(x)}{\\max(x) - \\min(x)}\\right)^{power}\n\n If ``log=True``, the natural logarithm of the property values is used instead.\n\n Parameters\n ----------\n vals : array-like, 1D\n An array of values of the property to scale between the size range.\n\n mi : float, default: 0.0\n The size to assign the smallest property (minimum size value).\n\n ma : float, default: 5.0\n The size to assign the largest property (maximum size value).\n\n power : float, default: 0.5\n Used to control how rapidly the size increases from smallest to largest.\n\n log : bool, default: False\n Use the natural logarithm to compute the property sizes\n\n Returns\n -------\n sizes : array, 1D\n The new size values, in the same shape as the input vals array\n \"\"\"\n # ensure that vals is an array\n vals = np.asarray(vals)\n\n # apply natural log if specified\n if log:\n vals = np.log(vals)\n\n # avoid division by zero error\n delta = vals.max() - vals.min()\n if delta == 0.0:\n delta = 1.0\n\n return mi + (ma-mi) * ((vals -vals.min()) / delta) ** power\n\n\n\n##########################################################################\n## String Computations\n##########################################################################\n\ndef slugify(text):\n \"\"\"\n Returns a slug of given text, normalizing unicode data for file-safe\n strings. Used for deciding where to write images to disk.\n\n Parameters\n ----------\n text : string\n The string to slugify\n\n Returns\n -------\n slug : string\n A normalized slug representation of the text\n\n .. seealso:: http://yashchandra.com/2014/05/08/how-to-generate-clean-url-or-a-slug-in-python/\n \"\"\"\n slug = re.sub(r'[^\\w]+', ' ', text)\n slug = \"-\".join(slug.lower().strip().split())\n return slug\n"
] |
[
[
"numpy.log",
"numpy.true_divide",
"numpy.isfinite",
"numpy.asarray",
"numpy.arange",
"numpy.in1d",
"numpy.all",
"numpy.isscalar",
"numpy.errstate"
]
] |
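Illustrative usage of the zero-safe division helper from the yellowbrick row above; div_safe is reproduced from the row's code, and the example input comes from its own docstring. Not part of the dataset record:

    import numpy as np

    def div_safe(numerator, denominator):
        # Returns 0 where plain division would give -inf, inf, or NaN.
        if np.isscalar(numerator):
            raise ValueError("div_safe should only be used with an array-like numerator")
        with np.errstate(divide='ignore', invalid='ignore'):
            result = np.true_divide(numerator, denominator)
            result[~np.isfinite(result)] = 0
        return result

    print(div_safe(np.array([-1.0, 0.0, 1.0]), 0))  # [0. 0. 0.]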
KonduitAI/ImportTests
|
[
"1b05adac04d1b04fe4492d3fd35f3c4573774ceb"
] |
[
"tests/OLD/mathops/test_partition_stitch_misc.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nfrom tfoptests.persistor import TensorFlowPersistor\nfrom tfoptests.test_graph import TestGraph\n\n'''\nNo training.\nTensor Transforms with rearranging values and some random ops\n'''\n\n\nclass TensorRearrange(TestGraph):\n def __init__(self, *args, **kwargs):\n super(TensorRearrange, self).__init__(*args, **kwargs)\n self.a = np.random.uniform(size=(2, 5, 4))\n self.b = np.random.uniform(size=(2, 3, 5, 4))\n self.c = np.random.uniform(size=(3, 1, 5, 4))\n\n def list_inputs(self):\n return [\"input_0\", \"input_1\", \"input_2\"]\n\n def get_placeholder_input(self, name):\n if name == \"input_0\":\n return self.a\n if name == \"input_1\":\n return self.b\n if name == \"input_2\":\n return self.c\n\n def _get_placeholder_shape(self, name):\n if name == \"input_0\":\n return self.a.shape\n if name == \"input_1\":\n return self.b.shape\n if name == \"input_2\":\n return self.c.shape\n\n\ndef test_tensor_rearrange():\n tensor_rearrange = TensorRearrange(seed=713)\n in_node_a = tensor_rearrange.get_placeholder(\"input_0\")\n in_node_b = tensor_rearrange.get_placeholder(\"input_1\")\n in_node_c = tensor_rearrange.get_placeholder(\"input_2\")\n stitched = tf.dynamic_stitch([[1, 10], [[0, 7, 9], [5, 8, 3]], [[6], [4], [2]]],\n [in_node_a, in_node_b, in_node_c]) # should be 11,5,4\n list_of_parts = tf.dynamic_partition(tf.transpose(stitched, perm=[1, 2, 0]),\n [[0, 1, 2, 3], [1, 0, 2, 3], [2, 3, 1, 0], [2, 1, 0, 3], [0, 1, 2, 3]],\n num_partitions=4) # after permute becomes 5,4,11, return all partitions 5,11\n node_a = tf.div(list_of_parts[0], list_of_parts[1])\n node_b = tf.divide(list_of_parts[2], list_of_parts[3])\n trace_node = tf.trace(node_a) + node_b # there is a broadcast here\n out_node = tf.cast(tf.count_nonzero(trace_node), dtype=tf.float32) + tf.Variable(tf.random_normal(shape=(2, 3)))\n\n placeholders = [in_node_a, in_node_b, in_node_c]\n predictions = [out_node]\n\n # Run and persist\n tfp = TensorFlowPersistor(save_dir=\"partition_stitch_misc\")\n tfp.set_placeholders(placeholders) \\\n .set_output_tensors(predictions) \\\n .set_test_data(tensor_rearrange.get_test_data()) \\\n .build_save_frozen_graph()\n\nif __name__ == '__main__':\n test_tensor_rearrange()"
] |
[
[
"tensorflow.transpose",
"tensorflow.count_nonzero",
"tensorflow.divide",
"tensorflow.div",
"tensorflow.dynamic_stitch",
"tensorflow.random_normal",
"numpy.random.uniform",
"tensorflow.trace"
]
] |
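The test in this row targets TensorFlow 1.x (tf.div, tf.trace, tf.random_normal). If it ever needs to run on a current stack, those calls have direct TF 2.x replacements; a hedged sketch of the substitutions, not taken from the row itself:

    import tensorflow as tf

    # tf.div(x, y)         -> tf.math.divide(x, y)
    # tf.trace(x)          -> tf.linalg.trace(x)
    # tf.random_normal(s)  -> tf.random.normal(s)
    # tf.count_nonzero(x)  -> tf.math.count_nonzero(x)
    x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    print(tf.linalg.trace(x))  # tf.Tensor(5.0, shape=(), dtype=float32)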
robolableonardo/franka_ros
|
[
"b9b60e8a8316593b6a82d3bfa48a5b89155f42a5"
] |
[
"franka_example_controllers/scripts/interactive_marker.py"
] |
[
"#!/usr/bin/env python\n\nimport rospy\nimport tf.transformations\nimport numpy as np\n\nfrom interactive_markers.interactive_marker_server import \\\n InteractiveMarkerServer, InteractiveMarkerFeedback\nfrom visualization_msgs.msg import InteractiveMarker, \\\n InteractiveMarkerControl\nfrom geometry_msgs.msg import PoseStamped\nfrom franka_msgs.msg import FrankaState\n\nmarker_pose = PoseStamped()\ninitial_pose_found = False\npose_pub = None\n# [[min_x, max_x], [min_y, max_y], [min_z, max_z]]\nposition_limits = [[-0.6, 0.6], [-0.6, 0.6], [0.05, 0.9]]\n\n\ndef publisherCallback(msg, link_name):\n marker_pose.header.frame_id = link_name\n marker_pose.header.stamp = rospy.Time(0)\n pose_pub.publish(marker_pose)\n\n\ndef franka_state_callback(msg):\n initial_quaternion = \\\n tf.transformations.quaternion_from_matrix(\n np.transpose(np.reshape(msg.O_T_EE,\n (4, 4))))\n initial_quaternion = initial_quaternion / np.linalg.norm(initial_quaternion)\n marker_pose.pose.orientation.x = initial_quaternion[0]\n marker_pose.pose.orientation.y = initial_quaternion[1]\n marker_pose.pose.orientation.z = initial_quaternion[2]\n marker_pose.pose.orientation.w = initial_quaternion[3]\n marker_pose.pose.position.x = msg.O_T_EE[12]\n marker_pose.pose.position.y = msg.O_T_EE[13]\n marker_pose.pose.position.z = msg.O_T_EE[14]\n global initial_pose_found\n initial_pose_found = True\n\n\ndef processFeedback(feedback):\n if feedback.event_type == InteractiveMarkerFeedback.POSE_UPDATE:\n marker_pose.pose.position.x = max([min([feedback.pose.position.x,\n position_limits[0][1]]),\n position_limits[0][0]])\n marker_pose.pose.position.y = max([min([feedback.pose.position.y,\n position_limits[1][1]]),\n position_limits[1][0]])\n marker_pose.pose.position.z = max([min([feedback.pose.position.z,\n position_limits[2][1]]),\n position_limits[2][0]])\n marker_pose.pose.orientation = feedback.pose.orientation\n server.applyChanges()\n\n\nif __name__ == \"__main__\":\n rospy.init_node(\"equilibrium_pose_node\")\n state_sub = rospy.Subscriber(\"franka_state_controller/franka_states\",\n FrankaState, franka_state_callback)\n listener = tf.TransformListener()\n link_name = rospy.get_param(\"~link_name\")\n\n # Get initial pose for the interactive marker\n while not initial_pose_found:\n rospy.sleep(1)\n state_sub.unregister()\n\n pose_pub = rospy.Publisher(\n \"equilibrium_pose\", PoseStamped, queue_size=10)\n server = InteractiveMarkerServer(\"equilibrium_pose_marker\")\n int_marker = InteractiveMarker()\n int_marker.header.frame_id = link_name\n int_marker.scale = 0.3\n int_marker.name = \"equilibrium_pose\"\n int_marker.description = (\"Equilibrium Pose\\nBE CAREFUL! 
\"\n \"If you move the \\nequilibrium \"\n \"pose the robot will follow it\\n\"\n \"so be aware of potential collisions\")\n int_marker.pose = marker_pose.pose\n # run pose publisher\n rospy.Timer(rospy.Duration(0.005),\n lambda msg: publisherCallback(msg, link_name))\n\n # insert a box\n control = InteractiveMarkerControl()\n control.orientation.w = 1\n control.orientation.x = 1\n control.orientation.y = 0\n control.orientation.z = 0\n control.name = \"rotate_x\"\n control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS\n int_marker.controls.append(control)\n\n control = InteractiveMarkerControl()\n control.orientation.w = 1\n control.orientation.x = 1\n control.orientation.y = 0\n control.orientation.z = 0\n control.name = \"move_x\"\n control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS\n int_marker.controls.append(control)\n control = InteractiveMarkerControl()\n control.orientation.w = 1\n control.orientation.x = 0\n control.orientation.y = 1\n control.orientation.z = 0\n control.name = \"rotate_y\"\n control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS\n int_marker.controls.append(control)\n control = InteractiveMarkerControl()\n control.orientation.w = 1\n control.orientation.x = 0\n control.orientation.y = 1\n control.orientation.z = 0\n control.name = \"move_y\"\n control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS\n int_marker.controls.append(control)\n control = InteractiveMarkerControl()\n control.orientation.w = 1\n control.orientation.x = 0\n control.orientation.y = 0\n control.orientation.z = 1\n control.name = \"rotate_z\"\n control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS\n int_marker.controls.append(control)\n control = InteractiveMarkerControl()\n control.orientation.w = 1\n control.orientation.x = 0\n control.orientation.y = 0\n control.orientation.z = 1\n control.name = \"move_z\"\n control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS\n int_marker.controls.append(control)\n server.insert(int_marker, processFeedback)\n\n server.applyChanges()\n\n rospy.spin()\n"
] |
[
[
"numpy.reshape",
"numpy.linalg.norm"
]
] |
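The processFeedback handler in this row clamps each marker coordinate into position_limits with nested max(min(...)) calls; the same clamp can be written with np.clip. An illustrative sketch with the limits copied from the script, not part of the record:

    import numpy as np

    position_limits = np.array([[-0.6, 0.6], [-0.6, 0.6], [0.05, 0.9]])  # [min, max] per axis

    def clamp_position(xyz):
        # Element-wise equivalent of max(min(value, upper), lower) for each axis.
        return np.clip(np.asarray(xyz), position_limits[:, 0], position_limits[:, 1])

    print(clamp_position([0.8, -1.0, 0.0]))  # [ 0.6  -0.6   0.05]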
cdyfng/pyalgotrade
|
[
"c9e8d950c8d911d5f1bed7d821c4cf6fd37f3a3c"
] |
[
"pyalgotrade/technical/linreg.py"
] |
[
"# PyAlgoTrade\n#\n# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n.. moduleauthor:: Gabriel Martin Becedillas Ruiz <[email protected]>\n\"\"\"\n\nfrom pyalgotrade import technical\nfrom pyalgotrade.utils import collections\nfrom pyalgotrade.utils import dt\n\nimport numpy as np\nfrom scipy import stats\n\n\n# Using scipy.stats.linregress instead of numpy.linalg.lstsq because of this:\n# http://stackoverflow.com/questions/20736255/numpy-linalg-lstsq-with-big-values\ndef lsreg(x, y):\n x = np.asarray(x)\n y = np.asarray(y)\n res = stats.linregress(x, y)\n return res[0], res[1]\n\n\nclass LeastSquaresRegressionWindow(technical.EventWindow):\n def __init__(self, windowSize):\n assert(windowSize > 1)\n super(LeastSquaresRegressionWindow, self).__init__(windowSize)\n self.__timestamps = collections.NumPyDeque(windowSize)\n\n def onNewValue(self, dateTime, value):\n technical.EventWindow.onNewValue(self, dateTime, value)\n if value is not None:\n timestamp = dt.datetime_to_timestamp(dateTime)\n if len(self.__timestamps):\n assert(timestamp > self.__timestamps[-1])\n self.__timestamps.append(timestamp)\n\n def __getValueAtImpl(self, timestamp):\n ret = None\n if self.windowFull():\n a, b = lsreg(self.__timestamps.data(), self.getValues())\n ret = a * timestamp + b\n return ret\n\n def getTimeStamps(self):\n return self.__timestamps\n\n def getValueAt(self, dateTime):\n return self.__getValueAtImpl(dt.datetime_to_timestamp(dateTime))\n\n def getValue(self):\n ret = None\n if self.windowFull():\n ret = self.__getValueAtImpl(self.__timestamps.data()[-1])\n return ret\n\n\nclass LeastSquaresRegression(technical.EventBasedFilter):\n \"\"\"Calculates values based on a least-squares regression.\n\n :param dataSeries: The DataSeries instance being filtered.\n :type dataSeries: :class:`pyalgotrade.dataseries.DataSeries`.\n :param windowSize: The number of values to use to calculate the regression.\n :type windowSize: int.\n :param maxLen: The maximum number of values to hold.\n Once a bounded length is full, when new items are added, a corresponding number of items are discarded from the\n opposite end. 
If None then dataseries.DEFAULT_MAX_LEN is used.\n :type maxLen: int.\n \"\"\"\n def __init__(self, dataSeries, windowSize, maxLen=None):\n super(LeastSquaresRegression, self).__init__(dataSeries, LeastSquaresRegressionWindow(windowSize), maxLen)\n\n def getValueAt(self, dateTime):\n \"\"\"Calculates the value at a given time based on the regression line.\n\n :param dateTime: The datetime to calculate the value at.\n Will return None if there are not enough values in the underlying DataSeries.\n :type dateTime: :class:`datetime.datetime`.\n \"\"\"\n return self.getEventWindow().getValueAt(dateTime)\n\n\nclass SlopeEventWindow(technical.EventWindow):\n def __init__(self, windowSize):\n super(SlopeEventWindow, self).__init__(windowSize)\n self.__x = np.asarray(range(windowSize))\n\n def getValue(self):\n ret = None\n if self.windowFull():\n y = self.getValues()\n ret = lsreg(self.__x, y)[0]\n return ret\n\n\nclass Slope(technical.EventBasedFilter):\n \"\"\"The Slope filter calculates the slope of a least-squares regression line.\n\n :param dataSeries: The DataSeries instance being filtered.\n :type dataSeries: :class:`pyalgotrade.dataseries.DataSeries`.\n :param period: The number of values to use to calculate the slope.\n :type period: int.\n :param maxLen: The maximum number of values to hold.\n Once a bounded length is full, when new items are added, a corresponding number of items are discarded from the\n opposite end. If None then dataseries.DEFAULT_MAX_LEN is used.\n :type maxLen: int.\n\n .. note::\n This filter ignores the time elapsed between the different values.\n \"\"\"\n\n def __init__(self, dataSeries, period, maxLen=None):\n super(Slope, self).__init__(dataSeries, SlopeEventWindow(period), maxLen)\n\n\nclass TrendEventWindow(SlopeEventWindow):\n def __init__(self, windowSize, positiveThreshold, negativeThreshold):\n if negativeThreshold > positiveThreshold:\n raise Exception(\"Invalid thresholds\")\n\n super(TrendEventWindow, self).__init__(windowSize)\n self.__positiveThreshold = positiveThreshold\n self.__negativeThreshold = negativeThreshold\n\n def getValue(self):\n ret = super(TrendEventWindow, self).getValue()\n if ret is not None:\n if ret > self.__positiveThreshold:\n ret = True\n elif ret < self.__negativeThreshold:\n ret = False\n else: # Between negative and postive thresholds.\n ret = None\n return ret\n\n\nclass Trend(technical.EventBasedFilter):\n def __init__(self, dataSeries, trendDays, positiveThreshold=0, negativeThreshold=0, maxLen=None):\n super(Trend, self).__init__(dataSeries, TrendEventWindow(trendDays, positiveThreshold, negativeThreshold), maxLen)\n"
] |
[
[
"numpy.asarray",
"scipy.stats.linregress"
]
] |
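Illustrative usage of the regression helper from the pyalgotrade row above; lsreg is reproduced from the row's code, and the final line mirrors how LeastSquaresRegressionWindow evaluates a * timestamp + b. Not part of the dataset record:

    import numpy as np
    from scipy import stats

    def lsreg(x, y):
        # Slope and intercept of the least-squares fit.
        res = stats.linregress(np.asarray(x), np.asarray(y))
        return res[0], res[1]

    x = np.arange(5)
    a, b = lsreg(x, 2.0 * x + 1.0)
    print(a, b)        # 2.0 1.0
    print(a * 10 + b)  # 21.0, the line extrapolated to x = 10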
pzejdl/hls4ml
|
[
"d1e4b22d05a9d12dd23fb92280e35857fc8e9b69"
] |
[
"hls4ml/model/hls_model.py"
] |
[
"from __future__ import print_function\nimport six\nimport re\nimport numpy as np\nfrom enum import Enum\nfrom collections import OrderedDict\n\nfrom .templates import get_config_template, get_function_template\n\nclass HLSConfig(object):\n def __init__(self, config):\n self.config = config\n\n self.model_precision = {}\n self.layer_type_precision = {}\n self.layer_name_precision = {}\n\n self.model_rf = None\n self.layer_type_rf = {}\n self.layer_name_rf = {}\n\n self.model_strategy = 'Latency'\n self.layer_type_strategy = {}\n self.layer_name_strategy = {}\n\n self.model_compression = False\n self.layer_type_compression = {}\n self.layer_name_compression = {}\n\n self._parse_hls_config()\n self._validate_hls_config()\n\n def get_config_value(self, key):\n return self.config.get(key, None)\n\n def get_project_name(self):\n return self.get_config_value('ProjectName')\n\n def get_output_dir(self):\n return self.get_config_value('OutputDir')\n\n def get_precision(self, layer, var='default'):\n precision = self.layer_name_precision.get(layer.name.lower() + '_' + var)\n type_name = layer.name.lower() + '_' + var + '_t'\n if precision is None:\n precision = self.layer_name_precision.get(layer.name.lower() + '_default')\n type_name = layer.name.lower() + '_default_t'\n\n if precision is None:\n precision = self.layer_type_precision.get(layer.__class__.__name__.lower() + '_' + var)\n type_name = layer.__class__.__name__ + '_' + var + '_t'\n if precision is None:\n precision = self.layer_type_precision.get(layer.__class__.__name__.lower() + '_default')\n type_name = layer.__class__.__name__ + '_default_t'\n\n if precision is None:\n precision = self.model_precision.get(var)\n type_name = var + '_default_t'\n if precision is None:\n precision = self.model_precision.get('default')\n type_name = 'model_default_t'\n\n if precision is None:\n raise Exception('No precision for {}->{} found and no default specified.'.format(layer.name, var))\n\n return (precision, type_name)\n\n def get_reuse_factor(self, layer):\n rf = self.layer_name_rf.get(layer.name.lower())\n if rf is None:\n rf = self.layer_type_rf.get(layer.__class__.__name__.lower())\n if rf is None:\n rf = self.model_rf\n\n if rf is None:\n raise Exception('No reuse factor for {} found and no default specified.'.format(layer.name))\n\n return rf\n\n def get_strategy(self, layer):\n strategy = self.layer_name_strategy.get(layer.name.lower())\n if strategy is None:\n strategy = self.layer_type_strategy.get(layer.__class__.__name__.lower())\n if strategy is None:\n strategy = self.model_strategy\n\n return strategy\n\n def is_resource_strategy(self, layer):\n return self.get_strategy(layer).lower() == 'resource'\n\n def get_compression(self, layer):\n compression = self.layer_name_compression.get(layer.name.lower())\n if compression is None:\n compression = self.layer_type_compression.get(layer.__class__.__name__.lower())\n if compression is None:\n compression = self.model_compression\n\n return compression\n\n def _parse_hls_config(self):\n hls_config = self.config['HLSConfig']\n model_cfg = hls_config.get('Model')\n if model_cfg is not None:\n precision_cfg = model_cfg.get('Precision')\n if precision_cfg is not None:\n if isinstance(precision_cfg, dict):\n for var, precision in precision_cfg.items():\n self.model_precision[var] = precision\n else:\n self.model_precision['default'] = precision_cfg # Default precision for everything\n\n self.model_rf = model_cfg.get('ReuseFactor')\n self.model_strategy = model_cfg.get('Strategy', 'Latency')\n 
self.model_compression = bool(model_cfg.get('Compression', 0))\n\n layer_type_cfg = hls_config.get('LayerType')\n if layer_type_cfg is not None:\n for layer_type, layer_cfg in layer_type_cfg.items():\n precision_cfg = layer_cfg.get('Precision')\n if isinstance(precision_cfg, dict):\n for var, precision in precision_cfg.items():\n self.layer_type_precision[layer_type.lower() + '_' + var] = precision\n else:\n self.layer_type_precision[layer_type.lower() + '_default'] = precision_cfg\n\n rf = layer_cfg.get('ReuseFactor')\n if rf is not None:\n self.layer_type_rf[layer_type.lower()] = rf\n\n strategy = layer_cfg.get('Strategy')\n if strategy is not None:\n self.layer_type_strategy[layer_type.lower()] = strategy\n\n compression = layer_cfg.get('Compression')\n if compression is not None:\n self.layer_type_compression[layer_type.lower()] = bool(compression)\n\n layer_name_cfg = hls_config.get('LayerName')\n if layer_name_cfg is not None:\n for layer_name, layer_cfg in layer_name_cfg.items():\n precision_cfg = layer_cfg.get('Precision')\n if isinstance(precision_cfg, dict):\n for var, precision in precision_cfg.items():\n self.layer_name_precision[layer_name.lower() + '_' + var] = precision\n else:\n self.layer_name_precision[layer_name.lower() + '_default'] = precision_cfg\n\n rf = layer_cfg.get('ReuseFactor')\n if rf is not None:\n self.layer_name_rf[layer_name.lower()] = rf\n\n strategy = layer_cfg.get('Strategy')\n if strategy is not None:\n self.layer_name_strategy[layer_name.lower()] = strategy\n\n compression = layer_cfg.get('Compression')\n if compression is not None:\n self.layer_name_compression[layer_name.lower()] = bool(compression)\n\n def _validate_hls_config(self):\n use_resource = False\n if self.model_strategy.lower() == 'latency' and self.model_compression:\n print('WARNING: Compression enabled while model strategy set to \"Latency\".')\n use_resource = True\n for layer_type, strategy in self.layer_type_strategy.items():\n if strategy.lower() == 'resource' and self.model_strategy.lower() == 'latency':\n print('WARNING: Strategy for layer type {} set to \"Resource\", while model strategy set to \"Latency\".'.format(layer_type))\n use_resource = True\n\n for layer_name, strategy in self.layer_name_strategy.items():\n if strategy.lower() == 'resource' and self.model_strategy.lower() == 'latency':\n print('WARNING: Strategy for layer {} set to \"Resource\", while model strategy set to \"Latency\".'.format(layer_name))\n use_resource = True\n\n for layer_type, compression in self.layer_type_compression.items():\n if compression and self.model_strategy.lower() == 'latency':\n print('WARNING: Compression enabled for layer type {}, while model strategy set to \"Latency\".'.format(layer_type))\n use_resource = True\n\n for layer_name, compression in self.layer_name_compression.items():\n if compression and self.model_strategy.lower() == 'latency':\n print('WARNING: Compression enabled for layer {}, while model strategy set to \"Latency\".'.format(layer_name))\n use_resource = True\n\n if use_resource:\n print('WARNING: Changing model strategy to \"Resource\"')\n self.model_strategy = 'Resource'\n\nclass HLSModel(object):\n def __init__(self, config, data_reader, layer_list, inputs=None, outputs=None):\n self.config = HLSConfig(config)\n self.reader = data_reader\n\n # If not provided, assumes layer_list[0] is input, and layer_list[-1] is output\n self.inputs = inputs if inputs is not None else [layer_list[0]['name']]\n self.outputs = outputs if outputs is not None else 
[layer_list[-1]['name']]\n\n self.index = 0\n self.graph = OrderedDict()\n self.output_vars = {}\n\n self._make_graph(layer_list)\n\n def _make_graph(self, layer_list):\n for layer in layer_list:\n kind = layer['class_name']\n name = layer['name']\n inputs = layer.get('inputs', [])\n outputs = layer.get('outputs', [])\n if len(inputs) == 0:\n inputs = [next(reversed(self.graph), 'input')]\n if len(outputs) == 0:\n outputs = [name]\n\n self.graph[name] = self.make_node(kind, name, layer, inputs, outputs)\n\n def make_node(self, kind, name, attributes, inputs, outputs=None):\n node = layer_map[kind](self, name, attributes, inputs, outputs)\n for o in node.outputs:\n out_var = node.get_output_variable(output_name=o)\n if o in self.outputs:\n out_var.type.name = 'result_t'\n self.output_vars[o] = out_var\n\n return node\n\n def insert_node(self, node):\n if len(node.inputs) > 1:\n raise Exception('Cannot insert a node with more than one input (for now).')\n\n prev_node = self.graph.get(node.inputs[0])\n next_node = next((x for x in self.graph.values() if x.inputs[0] == prev_node.outputs[0]), None)\n if next_node is not None:\n next_node.inputs[0] = node.outputs[0]\n\n new_graph = OrderedDict()\n for k, v in self.graph.items():\n new_graph[k] = v\n if k == prev_node.name:\n new_graph[node.name] = node\n\n self.graph = new_graph\n\n def remove_node(self, node, rewire=True):\n if rewire:\n if len(node.inputs) > 1 or len(node.outputs) > 1:\n raise Exception('Cannot rewire a node with multiple inputs/outputs')\n prev_node = self.graph.get(node.inputs[0])\n next_node = next((x for x in self.graph.values() if x.inputs[0] == node.outputs[0]), None)\n if prev_node is not None:\n if next_node is not None:\n next_node.inputs[0] = prev_node.outputs[0]\n else:\n if node.outputs[0] in self.outputs:\n self.outputs = [prev_node.outputs[0] if x == node.outputs[0] else x for x in self.outputs]\n else:\n raise Exception('Cannot rewire a node without child')\n else:\n raise Exception('Cannot rewire a node without a parent')\n\n del self.output_vars[node.outputs[0]]\n del self.graph[node.name]\n\n def replace_node(self, old_node, new_node):\n prev_node = self.graph.get(old_node.inputs[0])\n next_node = next((x for x in self.graph.values() if x.inputs[0] == old_node.outputs[0]), None)\n if next_node is not None:\n next_node.inputs[0] = new_node.outputs[0]\n if prev_node is not None:\n if new_node.inputs is None or len(new_node.inputs) == 0: # Check if already rewired\n new_node.inputs = [prev_node.outputs[0]]\n\n self.graph = OrderedDict((new_node.name, new_node) if k == old_node.name else (k, v) for k, v in self.graph.items())\n\n def get_weights_data(self, layer_name, var_name):\n return self.reader.get_weights_data(layer_name, var_name)\n\n def quantize_data(self, data, quantize):\n zeros = np.zeros_like(data)\n ones = np.ones_like(data)\n quant_data = data\n if quantize == 1:\n quant_data = np.where(data > 0, ones, zeros).astype('int')\n if quantize == 2:\n quant_data = np.where(data > 0, ones, -ones)\n elif quantize == 3:\n quant_data = np.where(data > 0.5, ones, np.where(data <= -0.5, -ones, zeros))\n return quant_data\n\n def next_layer(self):\n self.index += 1\n return self.index\n\n def get_layers(self):\n return self.graph.values()\n\n def get_input_variables(self):\n variables = []\n for inp in self.inputs:\n variables.append(self.graph[inp].get_output_variable())\n return variables\n\n def register_output_variable(self, out_name, variable):\n if out_name in self.outputs:\n variable.type.name = 
'result_t'\n self.output_vars[out_name] = variable\n\n def get_output_variables(self):\n variables = []\n for out in self.outputs:\n variables.append(self.output_vars[out])\n return variables\n\n def get_layer_output_variable(self, output_name):\n return self.output_vars[output_name]\n\nclass HLSType(object):\n def __init__(self, name, precision, **kwargs):\n self.name = name.format(**kwargs)\n self.precision = precision\n\n def definition_cpp(self):\n return 'typedef {precision} {name};\\n'.format(name=self.name, precision=self.precision)\n\nclass CompressedType(HLSType):\n def __init__(self, name, precision, index_precision, **kwargs):\n super(CompressedType, self).__init__('compressed_type{index}', precision, **kwargs)\n self.index_precision = index_precision\n\n def definition_cpp(self):\n cpp_fmt = ('typedef struct {name} {{ '\n '{index} row_index; '\n '{index} col_index; '\n '{precision} weight; }} {name};\\n')\n return cpp_fmt.format(name=self.name, index=self.index_precision, precision=self.precision)\n\nclass Variable(object):\n def __init__(self, var_name, type_name, precision, **kwargs):\n self.name = var_name.format(**kwargs)\n self.type = HLSType(type_name, precision, **kwargs)\n self.cppname = re.sub(r'\\W|^(?=\\d)','_', self.name)\n\nclass ArrayVariable(Variable):\n def __init__(self, shape, dim_names, var_name='layer{index}', type_name='layer{index}_t', precision=None, pragma='partition', **kwargs):\n super(ArrayVariable, self).__init__(var_name, type_name, precision, **kwargs)\n self.shape = shape\n self.dim_names = dim_names\n\n if pragma == 'partition':\n self.partition()\n elif pragma == 'reshape':\n self.reshape()\n elif pragma == 'stream':\n self.stream()\n else:\n self.pragma = None\n\n def partition(self, type='complete', factor=None, dim=0):\n if factor:\n pragma = '#pragma HLS ARRAY_PARTITION variable={name} {type} factor={factor} dim={dim}'\n else:\n pragma = '#pragma HLS ARRAY_PARTITION variable={name} {type} dim={dim}'\n\n self.pragma = pragma.format(name=self.name, type=type, factor=factor, dim=dim)\n\n def reshape(self, type='complete', factor=None, dim=0):\n if factor:\n pragma = '#pragma HLS ARRAY_RESHAPE variable={name} {type} factor={factor} dim={dim}'\n else:\n pragma = '#pragma HLS ARRAY_RESHAPE variable={name} {type} dim={dim}'\n\n self.pragma = pragma.format(name=self.name, type=type, factor=factor, dim=dim)\n\n def stream(self, depth=1, dim=1):\n pragma = '#pragma HLS STREAM variable={name} depth={depth} dim={dim}'\n self.pragma = pragma.format(name=self.name, depth=depth, dim=dim)\n\n def get_shape(self):\n return zip(self.dim_names, self.shape)\n\n def definition_cpp(self):\n array_shape = self.size_cpp()\n return '{type} {name}[{shape}]'.format(type=self.type.name, name=self.cppname, shape=array_shape)\n\n def size(self):\n nelem = 1\n for dim in self.shape:\n nelem *= dim\n return nelem\n\n def size_cpp(self):\n return '*'.join([str(k) for k in self.dim_names])\n\nclass WeightVariable(Variable):\n def __init__(self, var_name, type_name, precision, data, **kwargs):\n super(WeightVariable, self).__init__(var_name, type_name, precision, **kwargs)\n self.data = data\n self.nzeros = -1\n self.shape = list(self.data.shape)\n self.data_length = np.prod(self.data.shape)\n self.nonzeros = np.count_nonzero(self.data)\n self.nzeros = self.data_length - self.nonzeros\n self.min = np.min(self.data)\n self.max = np.max(self.data)\n self._iterator = None\n self.update_precision(precision)\n\n def __iter__(self):\n self._iterator = np.nditer(self.data, 
order='C')\n return self\n\n def __next__(self):\n if not self._iterator.finished:\n value = self._iterator[0]\n self._iterator.iternext()\n return self.precision_fmt % value\n else:\n raise StopIteration\n\n next = __next__\n\n def update_precision(self, new_precision):\n self.type.precision = new_precision\n if 'int' in self.type.precision:\n self.precision_fmt = '%d'\n else:\n precision_bits = re.search('.+<(.+?)>', self.type.precision).group(1).split(',')\n decimal_bits = int(precision_bits[0]) - int(precision_bits[1])\n decimal_spaces = int(np.floor(np.log10(2 ** decimal_bits - 1))) + 1\n self.precision_fmt = '%.{}f'.format(decimal_spaces)\n\n def definition_cpp(self):\n return '{type} {name}[{size}]'.format(type=self.type.name, name=self.cppname, size=self.data_length)\n\nclass CompressedWeightVariable(WeightVariable):\n def __init__(self, var_name, type_name, precision, data, reuse_factor, **kwargs):\n super(CompressedWeightVariable, self).__init__(var_name, type_name, precision, data, **kwargs)\n self.extra_zeros = 0\n self.data_length = np.prod(data.shape) - self.nzeros\n while self.data_length % reuse_factor != 0:\n self.extra_zeros += 1\n self.data_length += 1\n self.nonzeros = np.prod(data.shape) - self.nzeros + self.extra_zeros\n\n # Compress the array\n weights = []\n extra_nzero_cnt = self.extra_zeros\n it = np.nditer(data, order='C', flags=['multi_index'])\n max_idx = 0\n while not it.finished:\n val = it[0]\n if not (val == 0 and extra_nzero_cnt < 1):\n if val == 0:\n extra_nzero_cnt -= 1\n if it.multi_index[0] > max_idx:\n max_idx = it.multi_index[0]\n if it.multi_index[1] > max_idx:\n max_idx = it.multi_index[1]\n weights.append([it.multi_index[1], it.multi_index[0], val])\n it.iternext()\n weights.sort()\n\n index_precision = 32\n if max_idx > 0:\n index_precision = int(np.log2(max_idx) + 1)\n self.type = CompressedType(type_name, precision, 'ap_uint<{}>'.format(index_precision), **kwargs)\n\n self.data = weights\n\n def __iter__(self):\n self._iterator = iter(self.data)\n return self\n\n def __next__(self):\n value = next(self._iterator)\n value_fmt = self.precision_fmt % value[2]\n return '{ %u, %u, %s }' % (value[1], value[0], value_fmt)\n\n next = __next__\n\nclass Layer(object):\n def __init__(self, model, name, attributes, inputs, outputs=None):\n self.model = model\n self.name = name\n self.index = model.next_layer()\n self.inputs = inputs\n self.outputs = outputs\n if self.outputs is None:\n self.outputs = [self.name]\n\n self.attributes = attributes\n\n self._function_template = get_function_template(self.__class__.__name__)\n self._config_template = get_config_template(self.__class__.__name__)\n self.weights = OrderedDict()\n self.variables = OrderedDict()\n self.precision = OrderedDict()\n accum_t = HLSType(*reversed(self.model.config.get_precision(self, 'accum')))\n self.precision[accum_t.name] = accum_t\n self.set_attr('accum_t', accum_t.precision)\n\n self.initialize()\n\n def initialize(self):\n raise NotImplementedError\n\n def set_attr(self, key, value):\n self.attributes[key] = value\n\n def get_attr(self, key, default=None):\n return self.attributes.get(key, default)\n\n def get_input_node(self, input_name=None):\n if input_name is not None:\n return self.model.graph.get(input_name)\n else:\n return self.model.graph.get(self.inputs[0])\n\n def get_input_variable(self, input_name=None):\n if input_name is not None:\n return self.model.get_layer_output_variable(input_name)\n else:\n return self.model.get_layer_output_variable(self.inputs[0])\n\n def 
get_output_nodes(self, output_name=None):\n if output_name is None:\n output_name = self.outputs[0]\n return [node for node in self.model.graph.values() if node.inputs[0] == output_name]\n\n def get_output_variable(self, output_name=None):\n if output_name is not None:\n return self.variables[output_name]\n else:\n return next(iter(self.variables.values()))\n\n def get_weights(self, var_name=None):\n if var_name:\n return self.weights[var_name]\n\n return self.weights.values()\n\n def get_variables(self):\n return self.variables.values()\n\n def add_output_variable(self, shape, dim_names, out_name=None, var_name='layer{index}_out', type_name='layer{index}_t', precision=None, pragma='auto'):\n if out_name is None:\n out_name = self.outputs[0]\n\n if precision is None:\n precision, _ = self.model.config.get_precision(self, var='result')\n\n if pragma == 'auto':\n if self.model.config.get_config_value('IOType') == 'io_serial':\n pragma = 'stream'\n else:\n if self.name in self.model.inputs:\n pragma = 'reshape'\n else:\n pragma = 'partition'\n\n out = ArrayVariable(shape, dim_names, var_name=var_name, type_name=type_name, precision=precision, pragma=pragma, index=self.index)\n\n self.variables[out_name] = out\n self.model.register_output_variable(out_name, out)\n\n self.precision[out.type.name] = out.type\n\n def add_weights(self, quantize=0, compression=False):\n data = self.model.get_weights_data(self.name, 'kernel')\n\n self.add_weights_variable(name='weight', var_name='w{index}', data=data, quantize=quantize, compression=compression)\n\n def add_bias(self, quantize=0):\n data = self.model.get_weights_data(self.name, 'bias')\n precision = None\n type_name = None\n if data is None:\n data = np.zeros(self.get_output_variable().shape[-1])\n precision = 'ap_uint<1>'\n type_name = 'bias{index}_t'\n quantize = 0 # Don't quantize non-existant bias\n\n self.add_weights_variable(name='bias', var_name='b{index}', type_name=type_name, precision=precision, data=data, quantize=quantize)\n\n def add_weights_variable(self, name, var_name=None, type_name=None, precision=None, data=None, quantize=0, compression=False):\n if var_name is None:\n var_name = name + '{index}'\n\n if precision is None:\n precision, _ = self.model.config.get_precision(self, var=name)\n\n if type_name is None:\n _, type_name = self.model.config.get_precision(self, var=name)\n\n if data is None:\n data = self.model.get_weights_data(self.name, name)\n elif isinstance(data, six.string_types):\n data = self.model.get_weights_data(self.name, data)\n\n if quantize > 0:\n data = self.model.quantize_data(data, quantize)\n if quantize == 1:\n precision = 'ap_uint<1>'\n type_name = name + '{index}_t'\n elif quantize == 2 or quantize == 3:\n precision = 'ap_int<2>'\n type_name = name + '{index}_t'\n\n if compression:\n rf = self.model.config.get_reuse_factor(self)\n var = CompressedWeightVariable(var_name, type_name=type_name, precision=precision, data=data, reuse_factor=rf, index=self.index)\n else:\n var = WeightVariable(var_name, type_name=type_name, precision=precision, data=data, index=self.index)\n\n self.weights[name] = var\n self.precision[var.type.name] = var.type\n\n def _default_function_params(self):\n params = {}\n params['config'] = 'config{}'.format(self.index)\n params['input_t'] = self.get_input_variable().type.name\n params['output_t'] = self.get_output_variable().type.name\n params['input'] = self.get_input_variable().name\n params['output'] = self.get_output_variable().name\n\n return params\n\n def 
_default_config_params(self):\n params = {}\n params.update(self.attributes)\n params['index'] = self.index\n params['iotype'] = self.model.config.get_config_value('IOType')\n params['reuse'] = self.model.config.get_reuse_factor(self)\n\n # data types\n for weight_name, variable in self.weights.items():\n params[weight_name + '_t'] = variable.type.name\n\n return params\n\n def get_layer_precision(self):\n return self.precision\n\n # myproject.cpp/h\n def function_cpp(self):\n raise NotImplementedError\n\n # parameters.h\n def config_cpp(self):\n raise NotImplementedError\n\n def get_numbers_cpp(self):\n numbers = ''\n for k, v in self.get_output_variable().get_shape():\n numbers += '#define {} {}\\n'.format(k,v)\n\n return numbers\n\n def precision_cpp(self):\n return 'typedef {precision} layer{index}_t;'.format(precision=self.get_output_variable().precision, index=self.index)\n\nclass Input(Layer):\n def initialize(self):\n shape = self.attributes['input_shape']\n if shape[0] is None:\n shape = shape[1:]\n dims = ['N_INPUT_{}_{}'.format(i, self.index) for i in range(1, len(shape) + 1)]\n self.add_output_variable(shape, dims, var_name=self.name, type_name='input_t')\n\n def function_cpp(self):\n return None\n\n def config_cpp(self):\n return None\n\nclass Dense(Layer):\n def initialize(self):\n shape = [self.attributes['n_out']]\n dims = ['N_LAYER_{}'.format(self.index)]\n quantize = self.get_attr('quantize', default=0)\n compression = self.model.config.get_compression(self)\n if self.model.config.is_resource_strategy(self):\n if self.model.config.get_reuse_factor(self) == 1:\n print('WARNING: Using ReuseFactor 1 with \"Resource\" strategy. This may not work.')\n if compression:\n self.set_attr('strategy', 'compressed')\n else:\n self.set_attr('strategy', 'large')\n else:\n self.set_attr('strategy', 'latency')\n self.add_output_variable(shape, dims)\n self.add_weights(quantize=quantize, compression=compression)\n index_t = 'ap_uint<1>'\n if self.model.config.is_resource_strategy(self):\n if self.model.config.get_compression(self):\n index_t = self.get_weights('weight').type.index_precision\n else:\n self.weights['weight'].data = np.transpose(self.weights['weight'].data)\n self.set_attr('index_t', index_t)\n self.add_bias(quantize=quantize)\n\n def function_cpp(self):\n params = self._default_function_params()\n params['strategy'] = self.get_attr('strategy')\n params['w'] = self.get_weights('weight').name\n params['b'] = self.get_weights('bias').name\n\n return [self._function_template.format(**params)]\n\n def config_cpp(self):\n params = self._default_config_params()\n params['n_in'] = self.get_input_variable().size_cpp()\n params['n_out'] = self.get_output_variable().size_cpp()\n params['nzeros'] = self.get_weights('weight').nzeros\n params['nonzeros'] = self.get_weights('weight').nonzeros\n\n return self._config_template.format(**params)\n\nclass Conv1D(Layer):\n def initialize(self):\n shape = [self.attributes['y_out'], self.attributes['n_filt']]\n dims = ['Y_OUTPUTS_{}'.format(self.index), 'N_FILT_{}'.format(self.index)]\n self.add_output_variable(shape, dims)\n self.add_weights()\n self.add_bias()\n\n def function_cpp(self):\n params = self._default_function_params()\n params['w'] = self.get_weights('weight').name\n params['b'] = self.get_weights('bias').name\n\n return [self._function_template.format(**params)]\n\n def config_cpp(self):\n params = self._default_config_params()\n params['n_in'] = self.get_input_variable().dim_names[0]\n params['n_chan'] = 
self.get_input_variable().dim_names[1]\n params['filt_width'] = self.get_attr('y_filt')\n params['dilation'] = self.get_attr('dilation', 1)\n params['n_filt'] = 'N_FILT_{}'.format(self.index)\n params['n_out'] = 'Y_OUTPUTS_{}'.format(self.index)\n params['nzeros'] = self.get_weights('weight').nzeros\n\n return self._config_template.format(**params)\n\nclass Conv2D(Layer):\n def initialize(self):\n shape = [self.attributes['out_height'], self.attributes['out_width'], self.attributes['n_filt']]\n dims = ['OUT_HEIGHT_{}'.format(self.index), 'OUT_WIDTH_{}'.format(self.index), 'N_FILT_{}'.format(self.index)]\n self.add_output_variable(shape, dims)\n self.add_weights()\n self.add_bias()\n\n def function_cpp(self):\n params = self._default_function_params()\n params['w'] = self.get_weights('weight').name\n params['b'] = self.get_weights('bias').name\n\n return [self._function_template.format(**params)]\n\n def config_cpp(self):\n params = self._default_config_params()\n params['in_height'] = self.get_input_variable().dim_names[0]\n params['in_width'] = self.get_input_variable().dim_names[1]\n params['n_chan'] = self.get_input_variable().dim_names[2]\n params['out_height'] = self.get_output_variable().dim_names[0]\n params['out_width'] = self.get_output_variable().dim_names[1]\n params['n_filt'] = self.get_output_variable().dim_names[2]\n params['nzeros'] = self.get_weights('weight').nzeros\n\n return self._config_template.format(**params)\n\nclass Pooling1D(Layer):\n def initialize(self):\n shape = [self.attributes['n_out'], self.attributes['n_filt']]\n dims = ['N_OUTPUTS_{}'.format(self.index), 'N_FILT_{}'.format(self.index)]\n self.add_output_variable(shape, dims)\n self.set_attr('pool_op', self.get_attr('class_name').split('Pooling')[0])\n\n def function_cpp(self):\n params = self._default_function_params()\n\n return [self._function_template.format(**params)]\n\n def config_cpp(self):\n params = self._default_config_params()\n params['n_in'] = self.get_input_variable().size_cpp()\n params['n_out'] = self.get_output_variable().size_cpp()\n\n return self._config_template.format(**params)\n\nclass Pooling2D(Layer):\n def initialize(self):\n shape = [self.attributes['out_height'], self.attributes['out_width'], self.attributes['n_filt']]\n dims = ['OUT_HEIGHT_{}'.format(self.index), 'OUT_WIDTH_{}'.format(self.index), 'N_FILT_{}'.format(self.index)]\n self.add_output_variable(shape, dims)\n self.set_attr('pool_op', self.get_attr('class_name').split('Pooling')[0])\n\n def function_cpp(self):\n params = self._default_function_params()\n\n return [self._function_template.format(**params)]\n\n def config_cpp(self):\n params = self._default_config_params()\n params['n_in'] = self.get_input_variable().dim_names[0]\n params['in_width'] = self.get_input_variable().dim_names[1]\n params['out_height'] = self.get_output_variable().dim_names[0]\n params['out_width'] = self.get_output_variable().dim_names[1]\n params['n_filt'] = self.get_output_variable().dim_names[2]\n\n return self._config_template.format(**params)\n\nclass Activation(Layer):\n def initialize(self):\n inp = self.get_input_variable()\n shape = inp.shape\n dims = inp.dim_names\n self.add_output_variable(shape, dims)\n\n def function_cpp(self):\n params = self._default_function_params()\n params['activation'] = self.get_attr('activation')\n params['config'] = '{}_config{}'.format(self.get_attr('activation'), self.index)\n\n return [self._function_template.format(**params)]\n\n def config_cpp(self):\n params = self._default_config_params()\n 
params['type'] = self.get_attr('activation')\n params['n_in'] = self.get_input_variable().size_cpp()\n\n return self._config_template.format(**params)\n\nclass ParametrizedActivation(Activation):\n def function_cpp(self):\n params = self._default_function_params()\n params['activation'] = self._get_act_function_name()\n params['param'] = self.get_attr('activ_param', 1.0)\n params['config'] = '{}_config{}'.format(self.get_attr('activation'), self.index)\n\n return [self._function_template.format(**params)]\n\n def _get_act_function_name(self):\n act = self.get_attr('activation').lower()\n if act == 'leakyrelu':\n return 'leaky_relu'\n elif act == 'thresholdedrelu':\n return 'thresholded_relu'\n else:\n return act # ELU activation\n\nclass PReLU(Activation):\n def initialize(self):\n super(PReLU, self).initialize()\n self.add_weights_variable(name='alpha', var_name='a{index}')\n\n def function_cpp(self):\n params = self._default_function_params()\n params['activation'] = self.get_attr('activation').lower()\n params['param'] = self.get_weights('alpha').name\n params['config'] = '{}_config{}'.format(self.get_attr('activation'), self.index)\n\n return [self._function_template.format(**params)]\n\nclass BatchNormalization(Layer):\n def initialize(self):\n inp = self.get_input_variable()\n shape = inp.shape\n dims = inp.dim_names\n self.add_output_variable(shape, dims)\n\n gamma = self.model.get_weights_data(self.name, 'gamma')\n beta = self.model.get_weights_data(self.name, 'beta')\n mean = self.model.get_weights_data(self.name, 'moving_mean')\n var = self.model.get_weights_data(self.name, 'moving_variance')\n\n scale = gamma / np.sqrt(var + self.get_attr('epsilon'))\n bias = beta - gamma * mean / np.sqrt(var + self.get_attr('epsilon'))\n\n self.add_weights_variable(name='scale', var_name='s{index}', data=scale)\n self.add_weights_variable(name='bias', var_name='b{index}', data=bias)\n\n def function_cpp(self):\n params = self._default_function_params()\n params['scale'] = self.get_weights('scale').name\n params['bias'] = self.get_weights('bias').name\n\n return [self._function_template.format(**params)]\n\n def config_cpp(self):\n params = self._default_config_params()\n params['n_in'] = self.get_input_variable().size_cpp()\n\n return self._config_template.format(**params)\n\nclass Merge(Layer):\n def initialize(self):\n assert(len(self.inputs) == 2)\n inp1 = self.get_input_variable(self.inputs[0])\n inp2 = self.get_input_variable(self.inputs[1])\n shape = inp1.shape\n assert(inp1.shape == inp2.shape)\n dims = inp1.dim_names\n self.add_output_variable(shape, dims)\n\n def function_cpp(self):\n params = {}\n params['merge'] = self.get_attr('op').lower()\n params['config'] = 'config{}'.format(self.index)\n params['input1_t'] = self.get_input_variable(self.inputs[0]).type.name\n params['input2_t'] = self.get_input_variable(self.inputs[1]).type.name\n params['output_t'] = self.get_output_variable().type.name\n params['input1'] = self.get_input_variable(self.inputs[0]).name\n params['input2'] = self.get_input_variable(self.inputs[1]).name\n params['output'] = self.get_output_variable().name\n\n return [self._function_template.format(**params)]\n\n def config_cpp(self):\n params = self._default_config_params()\n params['n_elem'] = self.get_input_variable(self.inputs[0]).size_cpp()\n\n return self._config_template.format(**params)\n\nclass Concatenate(Merge):\n def initialize(self):\n assert(len(self.inputs) == 2)\n inp1 = self.get_input_variable(self.inputs[0])\n inp2 = 
self.get_input_variable(self.inputs[1])\n shape = [sum(x) for x in zip(inp1.shape, inp2.shape)]\n rank = len(shape)\n if rank > 1:\n dims = ['OUT_CONCAT_{}_{}'.format(i, self.index) for i in range(rank)]\n else:\n dims = ['OUT_CONCAT_{}'.format(self.index)]\n self.add_output_variable(shape, dims)\n\n def config_cpp(self):\n params = self._default_config_params()\n for i in range(3):\n params.setdefault('n_elem1_{}'.format(i), 0)\n params.setdefault('n_elem2_{}'.format(i), 0)\n inp1 = self.get_input_variable(self.inputs[0])\n inp2 = self.get_input_variable(self.inputs[1])\n for i, (s1, s2) in enumerate(zip(inp1.shape, inp2.shape)):\n params['n_elem1_{}'.format(i)] = s1\n params['n_elem2_{}'.format(i)] = s2\n\n return self._config_template.format(**params)\n\nlayer_map = {\n 'InputLayer' : Input,\n 'Activation' : Activation,\n 'LeakyReLU' : ParametrizedActivation,\n 'ThresholdedReLU' : ParametrizedActivation,\n 'ELU' : ParametrizedActivation,\n 'PReLU' : PReLU,\n 'Dense' : Dense,\n 'BinaryDense' : Dense,\n 'TernaryDense' : Dense,\n 'Conv1D' : Conv1D,\n 'Conv2D' : Conv2D,\n 'BatchNormalization' : BatchNormalization,\n 'MaxPooling1D' : Pooling1D,\n 'AveragePooling1D' : Pooling1D,\n 'MaxPooling2D' : Pooling2D,\n 'AveragePooling2D' : Pooling2D,\n 'Merge' : Merge,\n 'Concatenate' : Concatenate,\n}\n\ndef register_layer(name, clazz):\n global layer_map\n layer_map[name] = clazz\n"
] |
[
[
"numpy.log2",
"numpy.ones_like",
"numpy.nditer",
"numpy.min",
"numpy.transpose",
"numpy.max",
"numpy.log10",
"numpy.zeros_like",
"numpy.count_nonzero",
"numpy.prod",
"numpy.where"
]
] |
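The code cell in the row above ends with the converter's layer registry. A minimal, self-contained sketch of that registry pattern follows; every name in it is illustrative rather than part of the original module:

    # Handlers are looked up by the framework layer's class name, and
    # register_layer() lets external code extend the mapping without
    # editing the dict literal. All names here are placeholders.
    layer_map = {}

    def register_layer(name, clazz):
        layer_map[name] = clazz

    class Layer:
        def __init__(self, attributes):
            self.attributes = attributes

    class Dense(Layer):
        pass

    register_layer('Dense', Dense)
    register_layer('QDense', Dense)  # route a hypothetical variant to the same handler

    layer = layer_map['QDense']({'n_out': 16})
    print(type(layer).__name__, layer.attributes)  # Dense {'n_out': 16}

This mirrors how the file itself maps 'BinaryDense' and 'TernaryDense' onto the same Dense handler.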
oitoku/goshichigo
|
[
"d9e323ee023838907faf9af58ab2056e16224bc9"
] |
[
"scratch/haiku_maker.py"
] |
[
"import numpy as np\nfrom nltk.corpus import cmudict\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nstopWords = set(stopwords.words('english'))\n\n\nd = cmudict.dict()\ndef nsyl(word):\n\t\"\"\"Count syllables, or -1 if not found\"\"\"\n\ttry:\n\t\treturn [len(list(y for y in x if y[-1].isdigit())) for x in d[word.lower()]][0]\n\texcept:\n\t\treturn -1\n\ndef text_to_word_list(text):\n\t\"\"\"Given text, clean it and format it into a word list\"\"\"\n\ttext = text.lower()\n\ttext = text.strip()\n\tfor i in ',.?!:;*-\"(){}[]@$%\\n':\n\t\ttext = text.replace(i, ' ')\n\n\ttext = text.replace(\"' \", ' ')\n\ttext = text.replace(\" '\", ' ')\n\twords = text.split()\n\n\tfiltered_words = []\n\tfor i in words:\n\t\tif len(i) == 1 and not i in ('i'):\n\t\t\tcontinue\n\t\tfiltered_words.append(i)\n\treturn filtered_words\n\ndef lazy_haiku_maker(words):\n\t\"\"\"\n\tGiven a list of words, return a haiku as a string\n\tReturns None if fails\n\t\"\"\"\n\tsyllables = [nsyl(x) for x in words]\n\tstate = 0\n\tcount = 0\n\thaiku = [[],[],[]]\n\tfor i, j in zip(words, syllables):\n\t\tif j == -1:\n\t\t\tcontinue\n\t\tif j + count > 5:\n\t\t\tcontinue\n\t\thaiku[state].append(i)\n\t\tcount += j\n\t\tif count == 5:\n\t\t\tif state == 0:\n\t\t\t\tstate = 1\n\t\t\t\tcount = -2\n\t\t\telif state == 1:\n\t\t\t\tstate = 2\n\t\t\t\tcount = 0\n\t\t\telif state == 2:\n\t\t\t\treturn haiku\n\treturn None\n\ndef bagging_haiku_maker(words, keep_chance=0.75, iterations=2000):\n\t\"\"\"\n\tGiven a list of words, generate a haiku by randomly dropping words\n\tReturns None if fails\n\t\"\"\"\n\tfor i in range(iterations):\n\t\tnew_words = [x for x in words if np.random.rand() < keep_chance]\n\t\tif not new_words or len(new_words) <= 1:\n\t\t\tcontinue\n\t\tnew_words = new_words[np.random.randint(len(new_words) - 1):]\n\t\thaiku = lazy_haiku_maker(new_words)\n\t\tif not haiku:\n\t\t\tcontinue\n\t\tif haiku[0][-1] in stopWords:\n\t\t\tcontinue\n\t\tif haiku[1][-1] in stopWords:\n\t\t\tcontinue\n\t\tif haiku[2][-1] in stopWords:\n\t\t\tcontinue\n\t\tif np.sum([1 for x in sum(haiku, []) if x in stopWords]) > 3:\n\t\t\tcontinue\n\t\treturn \"\\n\".join(list(map(lambda x : \" \".join(x), haiku)))\n\treturn None\n\ndef text2haiku(text, keep_chance=0.75, iterations=2000):\n\t\"\"\"\n\tGiven a string of text, try to make a haiku\n\treturns a haiku as a string or None if it fails\n\t\"\"\"\n\tword_list = text_to_word_list(text)\n\thaiku = bagging_haiku_maker(word_list, keep_chance=keep_chance, iterations=iterations)\n\treturn haiku\n\n\nif __name__ == '__main__':\n\timport sys\n\tinfile = sys.argv[1]\n\twith open(infile, 'r') as f:\n\t\tfor i in f:\n\t\t\thaiku = text2haiku(i, keep_chance=0.75)\n\t\t\tif haiku:\n\t\t\t\tprint(\"*************************\")\n\t\t\t\tprint(i)\n\t\t\t\tprint(\"_________________________\")\n\t\t\t\tprint(haiku)\n\t\t\t\tprint(\"*************************\\n\")\n\t\t\t\tinput()\n"
] |
[
[
"numpy.random.rand"
]
] |
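The haiku maker above counts syllables with CMUdict via nsyl(). A dependency-free sketch of the same idea, using a vowel-group heuristic as a stand-in so it runs without downloading NLTK corpora:

    import re

    # Rough proxy for syllable count: each maximal run of vowels counts once.
    # This heuristic is an assumption for illustration; the original consults
    # CMUdict pronunciations and returns -1 for unknown words.
    def rough_syllables(word):
        return len(re.findall(r'[aeiouy]+', word.lower()))

    line = "an old silent pond"
    counts = [rough_syllables(w) for w in line.split()]
    print(counts, sum(counts))  # [1, 1, 2, 1] 5 -> a valid 5-syllable haiku line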
lewisjared/scmdataframe
|
[
"d8893235b3d2898a21ef8cc45bb04d94c6b038fe"
] |
[
"tests/integration/test_processing.py"
] |
[
"import datetime as dt\nimport os.path\nimport re\n\nimport numpy as np\nimport pandas as pd\nimport pandas.testing as pdt\nimport pint.errors\nimport pytest\n\nimport scmdata.processing\nfrom scmdata import ScmRun\nfrom scmdata.errors import MissingRequiredColumnError, NonUniqueMetadataError\nfrom scmdata.testing import _check_pandas_less_120\n\n\[email protected](scope=\"function\")\ndef test_processing_scm_df():\n data = np.array(\n [\n [1, 1.1, 1.2, 1.1],\n [1.1, 1.2, 1.3, 1.41],\n [1.3, 1.4, 1.5, 1.6],\n [1.3, 1.5, 1.6, 1.2],\n [1.48, 1.51, 1.72, 1.56],\n ]\n ).T\n yield ScmRun(\n data=data,\n columns={\n \"model\": [\"a_iam\"],\n \"climate_model\": [\"a_model\"],\n \"scenario\": [\"a_scenario\"],\n \"region\": [\"World\"],\n \"variable\": [\"Surface Air Temperature Change\"],\n \"unit\": [\"K\"],\n \"ensemble_member\": range(data.shape[1]),\n },\n index=[2005, 2006, 2007, 2100],\n )\n\n\[email protected]()\ndef test_processing_scm_df_multi_climate_model(test_processing_scm_df):\n other = test_processing_scm_df + 0.1\n other[\"climate_model\"] = \"z_model\"\n\n return test_processing_scm_df.append(other)\n\n\ncrossing_times_year_conversions = pytest.mark.parametrize(\n \"return_year,conv_to_year\",\n (\n (None, True),\n (True, True),\n (False, False),\n ),\n)\n\n\ndef _get_calculate_crossing_times_call_kwargs(return_year):\n call_kwargs = {}\n if return_year is not None:\n call_kwargs[\"return_year\"] = return_year\n\n return call_kwargs\n\n\ndef _get_expected_crossing_times(exp_vals, conv_to_year):\n if conv_to_year:\n exp_vals = [v if pd.isnull(v) else v.year for v in exp_vals]\n else:\n exp_vals = [pd.NaT if pd.isnull(v) else v for v in exp_vals]\n\n return exp_vals\n\n\[email protected](\n \"threshold,exp_vals\",\n (\n (\n 1.0,\n [\n dt.datetime(2006, 1, 1), # doesn't cross 1.0 until 2006\n dt.datetime(2005, 1, 1),\n dt.datetime(2005, 1, 1),\n dt.datetime(2005, 1, 1),\n dt.datetime(2005, 1, 1),\n ],\n ),\n (\n 1.5,\n [\n np.nan, # never crosses\n np.nan, # never crosses\n dt.datetime(2100, 1, 1), # doesn't cross 1.5 until 2100\n dt.datetime(2007, 1, 1), # 2007 is first year to actually exceed 1.5\n dt.datetime(2006, 1, 1),\n ],\n ),\n (2.0, [np.nan, np.nan, np.nan, np.nan, np.nan]),\n ),\n)\n@crossing_times_year_conversions\ndef test_crossing_times(\n threshold, exp_vals, return_year, conv_to_year, test_processing_scm_df\n):\n call_kwargs = _get_calculate_crossing_times_call_kwargs(return_year)\n res = scmdata.processing.calculate_crossing_times(\n test_processing_scm_df,\n threshold=threshold,\n **call_kwargs,\n )\n\n exp_vals = _get_expected_crossing_times(exp_vals, conv_to_year)\n\n exp = pd.Series(exp_vals, pd.MultiIndex.from_frame(test_processing_scm_df.meta))\n\n pdt.assert_series_equal(res, exp, check_dtype=False)\n\n\[email protected](\n \"end_year\",\n (\n 5000,\n pytest.param(\n 10**3, marks=pytest.mark.xfail(reason=\"ScmRun fails to initialise #179\")\n ),\n pytest.param(\n 10**4, marks=pytest.mark.xfail(reason=\"ScmRun fails to initialise #179\")\n ),\n ),\n)\n@crossing_times_year_conversions\ndef test_crossing_times_long_runs(\n end_year, return_year, conv_to_year, test_processing_scm_df\n):\n test_processing_scm_df = test_processing_scm_df.timeseries(time_axis=\"year\").rename(\n {2100: end_year}, axis=\"columns\"\n )\n test_processing_scm_df = scmdata.ScmRun(test_processing_scm_df)\n\n call_kwargs = _get_calculate_crossing_times_call_kwargs(return_year)\n res = scmdata.processing.calculate_crossing_times(\n test_processing_scm_df,\n threshold=1.5,\n **call_kwargs,\n 
)\n\n exp_vals = [\n np.nan,\n np.nan,\n dt.datetime(end_year, 1, 1),\n dt.datetime(2007, 1, 1),\n dt.datetime(2006, 1, 1),\n ]\n exp_vals = _get_expected_crossing_times(exp_vals, conv_to_year)\n\n exp = pd.Series(exp_vals, pd.MultiIndex.from_frame(test_processing_scm_df.meta))\n\n pdt.assert_series_equal(res, exp)\n\n\n@crossing_times_year_conversions\ndef test_crossing_times_multi_climate_model(\n return_year, conv_to_year, test_processing_scm_df_multi_climate_model\n):\n call_kwargs = _get_calculate_crossing_times_call_kwargs(return_year)\n\n threshold = 1.5\n exp_vals = [\n # a_model\n np.nan,\n np.nan,\n dt.datetime(2100, 1, 1),\n dt.datetime(2007, 1, 1),\n dt.datetime(2006, 1, 1),\n # z_model\n np.nan,\n dt.datetime(2100, 1, 1),\n dt.datetime(2007, 1, 1),\n dt.datetime(2006, 1, 1),\n dt.datetime(2005, 1, 1),\n ]\n\n res = scmdata.processing.calculate_crossing_times(\n test_processing_scm_df_multi_climate_model,\n threshold=threshold,\n **call_kwargs,\n )\n\n exp_vals = _get_expected_crossing_times(exp_vals, conv_to_year)\n\n exp = pd.Series(\n exp_vals,\n pd.MultiIndex.from_frame(test_processing_scm_df_multi_climate_model.meta),\n )\n\n pdt.assert_series_equal(res, exp)\n\n\ndef _get_expected_crossing_time_quantiles(\n cts, groups, exp_quantiles, interpolation, nan_fill_value, nan_fill_threshold\n):\n cts = cts.fillna(nan_fill_value)\n cts_qs = cts.groupby(groups).quantile(q=exp_quantiles, interpolation=interpolation)\n out = cts_qs.where(cts_qs < nan_fill_threshold)\n out.index = out.index.set_names(\"quantile\", level=-1)\n\n return out\n\n\[email protected](\n \"groups\", ([\"model\", \"scenario\"], [\"climate_model\", \"model\", \"scenario\"])\n)\[email protected](\n \"quantiles,exp_quantiles\",\n (\n (None, [0.05, 0.5, 0.95]),\n ([0.05, 0.17, 0.5, 0.83, 0.95], [0.05, 0.17, 0.5, 0.83, 0.95]),\n ),\n)\[email protected](\n \"interpolation,exp_interpolation\",\n (\n (None, \"linear\"),\n (\"linear\", \"linear\"),\n (\"nearest\", \"nearest\"),\n ),\n)\ndef test_crossing_times_quantiles(\n groups,\n quantiles,\n exp_quantiles,\n interpolation,\n exp_interpolation,\n test_processing_scm_df_multi_climate_model,\n):\n threshold = 1.5\n crossing_times = scmdata.processing.calculate_crossing_times(\n test_processing_scm_df_multi_climate_model,\n threshold=threshold,\n # return_year False handled in\n # test_crossing_times_quantiles_datetime_error\n return_year=True,\n )\n\n exp = _get_expected_crossing_time_quantiles(\n crossing_times,\n groups,\n exp_quantiles,\n exp_interpolation,\n nan_fill_value=10**6,\n nan_fill_threshold=10**5,\n )\n\n call_kwargs = {\"groupby\": groups}\n if quantiles is not None:\n call_kwargs[\"quantiles\"] = quantiles\n\n if interpolation is not None:\n call_kwargs[\"interpolation\"] = interpolation\n\n res = scmdata.processing.calculate_crossing_times_quantiles(\n crossing_times, **call_kwargs\n )\n\n if _check_pandas_less_120():\n check_dtype = False\n else:\n check_dtype = True\n\n pdt.assert_series_equal(res, exp, check_dtype=check_dtype)\n\n\ndef test_crossing_times_quantiles_datetime_error(\n test_processing_scm_df_multi_climate_model,\n):\n crossing_times = scmdata.processing.calculate_crossing_times(\n test_processing_scm_df_multi_climate_model,\n threshold=1.5,\n return_year=False,\n )\n with pytest.raises(NotImplementedError):\n scmdata.processing.calculate_crossing_times_quantiles(\n crossing_times, [\"model\", \"scenario\"]\n )\n\n\[email protected](\n \"nan_fill_value,out_nan_threshold,exp_vals\",\n (\n (None, None, [2025.4, 2027.0, np.nan]),\n 
(None, 10**4, [2025.4, 2027.0, np.nan]),\n (10**5, 10**4, [2025.4, 2027.0, np.nan]),\n (10**6, 10**5, [2025.4, 2027.0, np.nan]),\n (\n # fill value less than threshold means calculated quantiles are used\n 3000,\n 10**5,\n [2025.4, 2027.0, 2805.4],\n ),\n (3000, 2806, [2025.4, 2027.0, 2805.4]),\n (3000, 2805, [2025.4, 2027.0, np.nan]),\n ),\n)\ndef test_crossing_times_quantiles_nan_fill_values(\n nan_fill_value, out_nan_threshold, exp_vals\n):\n data = np.array(\n [\n [1.3, 1.35, 1.5, 1.52],\n [1.37, 1.43, 1.54, 1.58],\n [1.48, 1.51, 1.72, 2.02],\n [1.55, 1.65, 1.85, 2.1],\n [1.42, 1.46, 1.55, 1.62],\n ]\n ).T\n ensemble = scmdata.ScmRun(\n data=data,\n index=[2025, 2026, 2027, 2100],\n columns={\n \"model\": [\"a_iam\"],\n \"climate_model\": [\"a_model\"],\n \"scenario\": [\"a_scenario\"],\n \"region\": [\"World\"],\n \"variable\": [\"Surface Air Temperature Change\"],\n \"unit\": [\"K\"],\n \"ensemble_member\": range(data.shape[1]),\n },\n )\n\n call_kwargs = {}\n if nan_fill_value is not None:\n call_kwargs[\"nan_fill_value\"] = nan_fill_value\n\n if out_nan_threshold is not None:\n call_kwargs[\"out_nan_threshold\"] = out_nan_threshold\n\n crossing_times = scmdata.processing.calculate_crossing_times(\n ensemble,\n threshold=1.53,\n return_year=True,\n )\n res = scmdata.processing.calculate_crossing_times_quantiles(\n crossing_times,\n [\"climate_model\", \"scenario\"],\n quantiles=(0.05, 0.5, 0.95),\n **call_kwargs,\n )\n\n exp = pd.Series(\n exp_vals,\n pd.MultiIndex.from_product(\n [[\"a_model\"], [\"a_scenario\"], [0.05, 0.5, 0.95]],\n names=[\"climate_model\", \"scenario\", \"quantile\"],\n ),\n )\n\n if _check_pandas_less_120():\n check_dtype = False\n else:\n check_dtype = True\n\n pdt.assert_series_equal(res, exp, check_dtype=check_dtype)\n\n\noutput_name_options = pytest.mark.parametrize(\n \"output_name\", (None, \"test\", \"test other\")\n)\n\n\ndef _get_calculate_exceedance_probs_call_kwargs(output_name):\n call_kwargs = {}\n if output_name is not None:\n call_kwargs[\"output_name\"] = output_name\n\n return call_kwargs\n\n\ndef _get_calculate_exeedance_probs_expected_name(output_name, threshold):\n if output_name is not None:\n return output_name\n\n return \"{} exceedance probability\".format(threshold)\n\n\[email protected](\n \"threshold,exp_vals\",\n (\n (1.0, [0.8, 1.0, 1.0, 1.0]),\n (1.5, [0.0, 0.2, 0.4, 0.4]),\n (2.0, [0.0, 0.0, 0.0, 0.0]),\n ),\n)\n@output_name_options\ndef test_exceedance_probabilities_over_time(\n output_name, threshold, exp_vals, test_processing_scm_df\n):\n call_kwargs = _get_calculate_exceedance_probs_call_kwargs(output_name)\n res = scmdata.processing.calculate_exceedance_probabilities_over_time(\n test_processing_scm_df,\n process_over_cols=\"ensemble_member\",\n threshold=threshold,\n **call_kwargs,\n )\n\n exp_idx = pd.MultiIndex.from_frame(\n test_processing_scm_df.meta.drop(\n \"ensemble_member\", axis=\"columns\"\n ).drop_duplicates()\n )\n\n exp = pd.DataFrame(\n np.array(exp_vals)[np.newaxis, :],\n index=exp_idx,\n columns=test_processing_scm_df.time_points.to_index(),\n )\n exp.index = exp.index.set_levels(\n [_get_calculate_exeedance_probs_expected_name(output_name, threshold)],\n level=\"variable\",\n ).set_levels(\n [\"dimensionless\"],\n level=\"unit\",\n )\n\n pdt.assert_frame_equal(res, exp, check_like=True, check_column_type=False)\n\n\ndef test_exceedance_probabilities_over_time_multiple_res(\n test_processing_scm_df_multi_climate_model,\n):\n start = test_processing_scm_df_multi_climate_model.copy()\n threshold = 1.5\n 
exp_vals = np.array([[0, 1, 2, 2], [1, 2, 3, 3]]) / 5\n\n res = scmdata.processing.calculate_exceedance_probabilities_over_time(\n start,\n process_over_cols=[\"ensemble_member\"],\n threshold=threshold,\n )\n\n exp_idx = pd.MultiIndex.from_frame(\n start.meta.drop([\"ensemble_member\"], axis=\"columns\").drop_duplicates()\n )\n\n exp = pd.DataFrame(exp_vals, index=exp_idx, columns=start.time_points.to_index())\n exp.index = exp.index.set_levels(\n [_get_calculate_exeedance_probs_expected_name(None, threshold)],\n level=\"variable\",\n ).set_levels(\n [\"dimensionless\"],\n level=\"unit\",\n )\n\n pdt.assert_frame_equal(res, exp, check_like=True, check_column_type=False)\n\n\ndef test_exceedance_probabilities_over_time_multiple_grouping(\n test_processing_scm_df_multi_climate_model,\n):\n start = test_processing_scm_df_multi_climate_model.copy()\n threshold = 1.5\n exp_vals = np.array([1, 3, 5, 5]) / 10\n\n res = scmdata.processing.calculate_exceedance_probabilities_over_time(\n start,\n process_over_cols=[\"climate_model\", \"ensemble_member\"],\n threshold=threshold,\n )\n\n exp_idx = pd.MultiIndex.from_frame(\n start.meta.drop(\n [\"climate_model\", \"ensemble_member\"], axis=\"columns\"\n ).drop_duplicates()\n )\n\n exp = pd.DataFrame(\n exp_vals[np.newaxis, :],\n index=exp_idx,\n columns=start.time_points.to_index(),\n )\n exp.index = exp.index.set_levels(\n [_get_calculate_exeedance_probs_expected_name(None, threshold)],\n level=\"variable\",\n ).set_levels(\n [\"dimensionless\"],\n level=\"unit\",\n )\n\n pdt.assert_frame_equal(res, exp, check_like=True, check_column_type=False)\n\n\[email protected](\n \"threshold,exp_val\",\n (\n (1.0, 1.0),\n (1.5, 0.6),\n (2.0, 0.0),\n ),\n)\n@output_name_options\ndef test_exceedance_probabilities(\n output_name, threshold, exp_val, test_processing_scm_df\n):\n call_kwargs = _get_calculate_exceedance_probs_call_kwargs(output_name)\n res = scmdata.processing.calculate_exceedance_probabilities(\n test_processing_scm_df,\n process_over_cols=\"ensemble_member\",\n threshold=threshold,\n **call_kwargs,\n )\n\n exp_idx = pd.MultiIndex.from_frame(\n test_processing_scm_df.meta.drop(\n \"ensemble_member\", axis=\"columns\"\n ).drop_duplicates()\n )\n\n exp = pd.Series(exp_val, index=exp_idx)\n exp.name = _get_calculate_exeedance_probs_expected_name(output_name, threshold)\n exp.index = exp.index.set_levels(\n [\"dimensionless\"],\n level=\"unit\",\n )\n\n pdt.assert_series_equal(res, exp)\n\n\ndef test_exceedance_probabilities_multiple_res(\n test_processing_scm_df_multi_climate_model,\n):\n start = test_processing_scm_df_multi_climate_model.copy()\n threshold = 1.5\n exp_vals = [0.6, 0.8]\n\n res = scmdata.processing.calculate_exceedance_probabilities(\n start,\n process_over_cols=[\"ensemble_member\"],\n threshold=threshold,\n )\n\n exp_idx = pd.MultiIndex.from_frame(\n start.meta.drop(\"ensemble_member\", axis=\"columns\").drop_duplicates()\n )\n\n exp = pd.Series(exp_vals, index=exp_idx)\n exp.name = _get_calculate_exeedance_probs_expected_name(None, threshold)\n exp.index = exp.index.set_levels(\n [\"dimensionless\"],\n level=\"unit\",\n )\n\n pdt.assert_series_equal(res, exp)\n\n\ndef test_exceedance_probabilities_multiple_grouping(\n test_processing_scm_df_multi_climate_model,\n):\n start = test_processing_scm_df_multi_climate_model.copy()\n threshold = 1.5\n exp_vals = [0.7]\n\n res = scmdata.processing.calculate_exceedance_probabilities(\n start,\n process_over_cols=[\"ensemble_member\", \"climate_model\"],\n threshold=threshold,\n )\n\n 
exp_idx = pd.MultiIndex.from_frame(\n start.meta.drop(\n [\"ensemble_member\", \"climate_model\"], axis=\"columns\"\n ).drop_duplicates()\n )\n\n exp = pd.Series(exp_vals, index=exp_idx)\n exp.name = _get_calculate_exeedance_probs_expected_name(None, threshold)\n exp.index = exp.index.set_levels(\n [\"dimensionless\"],\n level=\"unit\",\n )\n\n pdt.assert_series_equal(res, exp)\n\n\[email protected](\"col\", [\"unit\", \"variable\"])\[email protected](\n \"func,kwargs\",\n (\n (scmdata.processing.calculate_exceedance_probabilities, {\"threshold\": 1.5}),\n (\n scmdata.processing.calculate_exceedance_probabilities_over_time,\n {\"threshold\": 1.5},\n ),\n ),\n)\ndef test_requires_preprocessing(test_processing_scm_df, col, func, kwargs):\n test_processing_scm_df[col] = [\n str(i) for i in range(test_processing_scm_df.shape[0])\n ]\n\n error_msg = (\n \"More than one value for {}. \"\n \"This is unlikely to be what you want.\".format(col)\n )\n with pytest.raises(ValueError, match=error_msg):\n func(\n test_processing_scm_df,\n process_over_cols=[\"ensemble_member\", col],\n **kwargs,\n )\n\n\ndef _get_calculate_peak_call_kwargs(output_name, variable):\n call_kwargs = {}\n if output_name is not None:\n call_kwargs[\"output_name\"] = output_name\n\n return call_kwargs\n\n\n@output_name_options\ndef test_peak(output_name, test_processing_scm_df):\n call_kwargs = _get_calculate_peak_call_kwargs(\n output_name,\n test_processing_scm_df.get_unique_meta(\"variable\", True),\n )\n\n exp_vals = [1.2, 1.41, 1.6, 1.6, 1.72]\n res = scmdata.processing.calculate_peak(\n test_processing_scm_df,\n **call_kwargs,\n )\n\n exp_idx = pd.MultiIndex.from_frame(test_processing_scm_df.meta)\n\n exp = pd.Series(exp_vals, index=exp_idx)\n if output_name is not None:\n exp.index = exp.index.set_levels([output_name], level=\"variable\")\n else:\n idx = exp.index.names\n exp = exp.reset_index()\n exp[\"variable\"] = exp[\"variable\"].apply(lambda x: \"Peak {}\".format(x))\n exp = exp.set_index(idx)[0]\n\n pdt.assert_series_equal(res, exp)\n\n\ndef test_peak_multi_variable(test_processing_scm_df_multi_climate_model):\n test_processing_scm_df_multi_climate_model[\"variable\"] = [\n str(i) for i in range(test_processing_scm_df_multi_climate_model.shape[0])\n ]\n\n exp_vals = [1.2, 1.41, 1.6, 1.6, 1.72, 1.3, 1.51, 1.7, 1.7, 1.82]\n res = scmdata.processing.calculate_peak(\n test_processing_scm_df_multi_climate_model,\n )\n\n exp_idx = pd.MultiIndex.from_frame(test_processing_scm_df_multi_climate_model.meta)\n\n exp = pd.Series(exp_vals, index=exp_idx)\n idx = exp.index.names\n exp = exp.reset_index()\n exp[\"variable\"] = exp[\"variable\"].apply(lambda x: \"Peak {}\".format(x))\n exp = exp.set_index(idx)[0]\n\n pdt.assert_series_equal(res, exp)\n\n\ndef _get_calculate_peak_time_call_kwargs(return_year, output_name):\n call_kwargs = {}\n\n if return_year is not None:\n call_kwargs[\"return_year\"] = return_year\n\n if output_name is not None:\n call_kwargs[\"output_name\"] = output_name\n\n return call_kwargs\n\n\n@output_name_options\n@crossing_times_year_conversions\ndef test_peak_time(output_name, return_year, conv_to_year, test_processing_scm_df):\n call_kwargs = _get_calculate_peak_time_call_kwargs(return_year, output_name)\n\n exp_vals = [\n dt.datetime(2007, 1, 1),\n dt.datetime(2100, 1, 1),\n dt.datetime(2100, 1, 1),\n dt.datetime(2007, 1, 1),\n dt.datetime(2007, 1, 1),\n ]\n res = scmdata.processing.calculate_peak_time(\n test_processing_scm_df,\n **call_kwargs,\n )\n\n exp_idx = 
pd.MultiIndex.from_frame(test_processing_scm_df.meta)\n\n if conv_to_year:\n exp_vals = [v.year if conv_to_year else v for v in exp_vals]\n time_name = \"Year\"\n else:\n time_name = \"Time\"\n\n exp = pd.Series(exp_vals, index=exp_idx)\n if output_name is not None:\n exp.index = exp.index.set_levels([output_name], level=\"variable\")\n else:\n idx = exp.index.names\n exp = exp.reset_index()\n exp[\"variable\"] = exp[\"variable\"].apply(\n lambda x: \"{} of peak {}\".format(time_name, x)\n )\n exp = exp.set_index(idx)[0]\n\n pdt.assert_series_equal(res, exp)\n\n\n@crossing_times_year_conversions\ndef test_peak_time_multi_variable(\n return_year, conv_to_year, test_processing_scm_df_multi_climate_model\n):\n test_processing_scm_df_multi_climate_model[\"variable\"] = [\n str(i) for i in range(test_processing_scm_df_multi_climate_model.shape[0])\n ]\n\n call_kwargs = _get_calculate_peak_time_call_kwargs(return_year, None)\n\n exp_vals = [\n dt.datetime(2007, 1, 1),\n dt.datetime(2100, 1, 1),\n dt.datetime(2100, 1, 1),\n dt.datetime(2007, 1, 1),\n dt.datetime(2007, 1, 1),\n ] * 2\n\n res = scmdata.processing.calculate_peak_time(\n test_processing_scm_df_multi_climate_model, **call_kwargs\n )\n\n if conv_to_year:\n exp_vals = [v.year if conv_to_year else v for v in exp_vals]\n time_name = \"Year\"\n else:\n time_name = \"Time\"\n\n exp_idx = pd.MultiIndex.from_frame(test_processing_scm_df_multi_climate_model.meta)\n\n exp = pd.Series(exp_vals, index=exp_idx)\n idx = exp.index.names\n exp = exp.reset_index()\n\n exp[\"variable\"] = exp[\"variable\"].apply(\n lambda x: \"{} of peak {}\".format(time_name, x)\n )\n exp = exp.set_index(idx)[0]\n\n pdt.assert_series_equal(res, exp)\n\n\[email protected](scope=\"session\")\ndef sr15_inferred_temperature_quantiles(test_data_path):\n # fake the temperature quantiles in preparation for the categorisation tests\n # we do this as P33 is not included in the SR1.5 output, yet we need it for\n # the categorisation\n sr15_output = scmdata.ScmRun(\n os.path.join(test_data_path, \"sr15\", \"sr15-output.csv\"),\n )\n sr15_exceedance_probs = sr15_output.filter(variable=\"*Exceedance*\")\n\n out = []\n for cm in [\"MAGICC\", \"FAIR\"]:\n cm_ep = sr15_exceedance_probs.filter(variable=\"*{}*\".format(cm))\n cm_median = sr15_output.filter(variable=\"*{}*MED\".format(cm)).timeseries()\n for p in [0.67, 0.5, 0.34]:\n quantile = 1 - p\n cm_q = cm_median.reset_index()\n cm_q[\"variable\"] = cm_q[\"variable\"].str.replace(\n \"MED\", \"P{}\".format(int(np.round(quantile * 100, 0)))\n )\n cm_q = cm_q.set_index(cm_median.index.names).sort_index()\n cm_q.iloc[:, :] = 10\n for t in [2.0, 1.5]:\n cm_ep_t = cm_ep.filter(variable=\"*{}*\".format(t)).timeseries()\n # null values in FaIR should be treated as being small\n cm_ep_t_lt = (cm_ep_t <= p) | cm_ep_t.isnull()\n cm_ep_t_lt = cm_ep_t_lt.reorder_levels(cm_q.index.names).sort_index()\n cm_ep_t_lt.index = cm_q.index\n cm_q[cm_ep_t_lt] = t\n\n out.append(scmdata.ScmRun(cm_q))\n\n out = scmdata.run_append(out)\n return out\n\n\[email protected](scope=\"session\")\ndef sr15_temperatures_unmangled_names(sr15_inferred_temperature_quantiles):\n out = sr15_inferred_temperature_quantiles.copy()\n out[\"quantile\"] = out[\"variable\"].apply(\n lambda x: float(x.split(\"|\")[-1].strip(\"P\")) / 100\n )\n out[\"variable\"] = out[\"variable\"].apply(lambda x: \"|\".join(x.split(\"|\")[:-1]))\n\n return out\n\n\[email protected](\"unit\", (\"K\", \"mK\"))\ndef test_categorisation_sr15(unit, sr15_temperatures_unmangled_names):\n index = 
[\"model\", \"scenario\"]\n exp = (\n sr15_temperatures_unmangled_names.meta[index + [\"category\"]]\n .drop_duplicates()\n .set_index(index)[\"category\"]\n )\n\n inp = (\n sr15_temperatures_unmangled_names.drop_meta([\"category\", \"version\"])\n .filter(variable=\"*MAGICC*\")\n .convert_unit(unit)\n )\n\n res = scmdata.processing.categorisation_sr15(inp, index=index)\n\n pdt.assert_series_equal(exp, res)\n\n category_counts = res.value_counts()\n assert category_counts[\"Above 2C\"] == 189\n assert category_counts[\"Lower 2C\"] == 74\n assert category_counts[\"Higher 2C\"] == 58\n assert category_counts[\"1.5C low overshoot\"] == 44\n assert category_counts[\"1.5C high overshoot\"] == 37\n assert category_counts[\"Below 1.5C\"] == 9\n\n\ndef test_categorisation_sr15_multimodel(sr15_temperatures_unmangled_names):\n index = [\"model\", \"scenario\", \"climate_model\"]\n\n inp = sr15_temperatures_unmangled_names.drop_meta([\"category\", \"version\"])\n inp[\"climate_model\"] = inp[\"variable\"].apply(lambda x: x.split(\"|\")[-1])\n inp[\"variable\"] = inp[\"variable\"].apply(lambda x: \"|\".join(x.split(\"|\")[:-1]))\n\n res = scmdata.processing.categorisation_sr15(inp, index=index)\n\n exp = pd.concat(\n [\n scmdata.processing.categorisation_sr15(inp_cm, index=index)\n for inp_cm in inp.groupby(\"climate_model\")\n ]\n )\n\n pdt.assert_series_equal(exp.sort_index(), res.sort_index())\n\n category_counts = res.groupby(\"climate_model\").value_counts()\n\n assert category_counts.loc[\"MAGICC6\", \"Above 2C\"] == 189\n assert category_counts.loc[\"MAGICC6\", \"Higher 2C\"] == 58\n assert category_counts.loc[\"MAGICC6\", \"Lower 2C\"] == 74\n assert category_counts.loc[\"MAGICC6\", \"1.5C high overshoot\"] == 37\n assert category_counts.loc[\"MAGICC6\", \"1.5C low overshoot\"] == 44\n assert category_counts.loc[\"MAGICC6\", \"Below 1.5C\"] == 9\n\n assert category_counts.loc[\"FAIR\", \"Above 2C\"] == 134\n assert category_counts.loc[\"FAIR\", \"Higher 2C\"] == 14\n assert category_counts.loc[\"FAIR\", \"Lower 2C\"] == 80\n assert category_counts.loc[\"FAIR\", \"1.5C high overshoot\"] == 1\n assert category_counts.loc[\"FAIR\", \"1.5C low overshoot\"] == 22\n assert category_counts.loc[\"FAIR\", \"Below 1.5C\"] == 159\n\n\ndef test_categorisation_sr15_multi_variable(sr15_temperatures_unmangled_names):\n inp = sr15_temperatures_unmangled_names.copy()\n inp[\"variable\"] = range(inp[\"variable\"].shape[0])\n\n error_msg = (\n \"More than one value for variable. 
\" \"This is unlikely to be what you want.\"\n )\n with pytest.raises(ValueError, match=error_msg):\n scmdata.processing.categorisation_sr15(inp, index=[\"model\", \"scenario\"])\n\n\ndef test_categorisation_sr15_bad_unit(sr15_temperatures_unmangled_names):\n inp = sr15_temperatures_unmangled_names.filter(variable=\"*MAGICC*\").copy()\n inp[\"unit\"] = \"GtC\"\n\n with pytest.raises(pint.errors.DimensionalityError):\n scmdata.processing.categorisation_sr15(inp, index=[\"model\", \"scenario\"])\n\n\ndef test_categorisation_sr15_no_quantile(sr15_temperatures_unmangled_names):\n error_msg = (\n \"No `quantile` column, calculate quantiles using `.quantiles_over` \"\n \"to calculate the 0.33, 0.5 and 0.66 quantiles before calling \"\n \"this function\"\n )\n with pytest.raises(MissingRequiredColumnError, match=error_msg):\n scmdata.processing.categorisation_sr15(\n sr15_temperatures_unmangled_names.filter(quantile=0.5).drop_meta(\n \"quantile\"\n ),\n index=[\"model\", \"scenario\"],\n )\n\n\ndef test_categorisation_sr15_missing_quantiles(sr15_temperatures_unmangled_names):\n error_msg = re.escape(\n \"Not all required quantiles are available, we require the \"\n \"0.33, 0.5 and 0.66 quantiles, available quantiles: `[0.5]`\"\n )\n with pytest.raises(ValueError, match=error_msg):\n scmdata.processing.categorisation_sr15(\n sr15_temperatures_unmangled_names.filter(quantile=0.5),\n index=[\"model\", \"scenario\"],\n )\n\n\[email protected](\n _check_pandas_less_120(),\n reason=\"pandas<1.2.0 can't handle non-numeric types in pivot\",\n)\[email protected](\n \"index\",\n (\n [\"climate_model\", \"model\", \"scenario\", \"region\"],\n [\"climate_model\", \"scenario\", \"region\"],\n [\"climate_model\", \"model\", \"scenario\", \"region\", \"unit\"],\n ),\n)\[email protected](\n \",\".join(\n [\n \"exceedance_probabilities_thresholds\",\n \"exp_exceedance_prob_thresholds\",\n \"exceedance_probabilities_output_name\",\n \"exp_exceedance_probabilities_output_name\",\n \"exceedance_probabilities_variable\",\n \"exp_exceedance_probabilities_variable\",\n \"categorisation_variable\",\n \"exp_categorisation_variable\",\n \"categorisation_quantile_cols\",\n \"exp_categorisation_quantile_cols\",\n ]\n ),\n (\n (\n None,\n [1.5, 2.0, 2.5],\n None,\n \"{} exceedance probability\",\n None,\n \"Surface Air Temperature Change\",\n \"Surface Temperature\",\n \"Surface Temperature\",\n \"run_id\",\n \"run_id\",\n ),\n (\n [1.0, 1.5, 2.0, 2.5],\n [1.0, 1.5, 2.0, 2.5],\n \"Exceedance Probability|{:.2f}C\",\n \"Exceedance Probability|{:.2f}C\",\n \"Surface Temperature\",\n \"Surface Temperature\",\n None,\n \"Surface Air Temperature Change\",\n None,\n \"ensemble_member\",\n ),\n ),\n)\[email protected](\n \",\".join(\n [\n \"peak_variable\",\n \"exp_peak_variable\",\n \"peak_quantiles\",\n \"exp_peak_quantiles\",\n \"peak_naming_base\",\n \"exp_peak_naming_base\",\n \"peak_time_naming_base\",\n \"exp_peak_time_naming_base\",\n \"peak_return_year\",\n \"exp_peak_return_year\",\n ]\n ),\n (\n (\n \"Surface Temperature\",\n \"Surface Temperature\",\n None,\n [0.05, 0.17, 0.5, 0.83, 0.95],\n None,\n \"{} peak\",\n None,\n \"{} peak year\",\n None,\n True,\n ),\n (\n \"Surface Temperature\",\n \"Surface Temperature\",\n None,\n [0.05, 0.17, 0.5, 0.83, 0.95],\n None,\n \"{} peak\",\n \"test {}\",\n \"test {}\",\n None,\n True,\n ),\n (\n \"Surface Temperature\",\n \"Surface Temperature\",\n None,\n [0.05, 0.17, 0.5, 0.83, 0.95],\n None,\n \"{} peak\",\n None,\n \"{} peak year\",\n True,\n True,\n ),\n (\n None,\n \"Surface 
Air Temperature Change\",\n [0.05, 0.95],\n [0.05, 0.95],\n \"test {}\",\n \"test {}\",\n \"test peak {}\",\n \"test peak {}\",\n True,\n True,\n ),\n (\n None,\n \"Surface Air Temperature Change\",\n [0.05, 0.95],\n [0.05, 0.95],\n \"test {}\",\n \"test {}\",\n None,\n \"{} peak time\",\n False,\n False,\n ),\n (\n None,\n \"Surface Air Temperature Change\",\n [0.05, 0.95],\n [0.05, 0.95],\n \"test {}\",\n \"test {}\",\n \"test peak {}\",\n \"test peak {}\",\n False,\n False,\n ),\n ),\n)\[email protected](\"progress\", (True, False))\ndef test_calculate_summary_stats(\n exceedance_probabilities_thresholds,\n exp_exceedance_prob_thresholds,\n index,\n exceedance_probabilities_output_name,\n exp_exceedance_probabilities_output_name,\n exceedance_probabilities_variable,\n exp_exceedance_probabilities_variable,\n peak_quantiles,\n exp_peak_quantiles,\n peak_variable,\n exp_peak_variable,\n peak_naming_base,\n exp_peak_naming_base,\n peak_time_naming_base,\n exp_peak_time_naming_base,\n peak_return_year,\n exp_peak_return_year,\n categorisation_variable,\n exp_categorisation_variable,\n categorisation_quantile_cols,\n exp_categorisation_quantile_cols,\n progress,\n test_processing_scm_df_multi_climate_model,\n):\n inp = test_processing_scm_df_multi_climate_model.copy()\n\n if \"unit\" not in index:\n exp_index = index + [\"unit\"]\n else:\n exp_index = index\n\n process_over_cols = inp.get_meta_columns_except(exp_index)\n\n exp = []\n for threshold in exp_exceedance_prob_thresholds:\n tmp = scmdata.processing.calculate_exceedance_probabilities(\n inp,\n threshold,\n process_over_cols,\n exp_exceedance_probabilities_output_name.format(threshold),\n )\n exp.append(tmp)\n\n peaks = scmdata.processing.calculate_peak(inp)\n peak_times = scmdata.processing.calculate_peak_time(\n inp, return_year=exp_peak_return_year\n )\n for q in exp_peak_quantiles:\n peak_q = peaks.groupby(exp_index).quantile(q)\n peak_q.name = exp_peak_naming_base.format(q)\n\n peak_time_q = peak_times.groupby(exp_index).quantile(q)\n peak_time_q.name = exp_peak_time_naming_base.format(q)\n\n exp.append(peak_q)\n exp.append(peak_time_q)\n\n inp_categories = scmdata.ScmRun(\n inp.quantiles_over(\"ensemble_member\", quantiles=[0.33, 0.5, 0.66])\n )\n sr15_cats = scmdata.processing.categorisation_sr15(\n inp_categories,\n exp_index,\n )\n sr15_cats.name = \"SR1.5 category\"\n exp.append(sr15_cats)\n\n exp = [v.reorder_levels(exp_index).astype(\"object\") for v in exp]\n exp = pd.DataFrame(exp).T\n exp.columns.name = \"statistic\"\n exp = exp.stack(\"statistic\")\n exp.name = \"value\"\n\n call_kwargs = {}\n if exceedance_probabilities_thresholds is not None:\n call_kwargs[\n \"exceedance_probabilities_thresholds\"\n ] = exceedance_probabilities_thresholds\n\n if exceedance_probabilities_output_name is not None:\n call_kwargs[\n \"exceedance_probabilities_naming_base\"\n ] = exceedance_probabilities_output_name\n\n inp_renamed = inp.copy()\n inp_renamed[\"variable\"] = exp_exceedance_probabilities_variable\n if exceedance_probabilities_variable is not None:\n call_kwargs[\n \"exceedance_probabilities_variable\"\n ] = exceedance_probabilities_variable\n\n if peak_quantiles is not None:\n call_kwargs[\"peak_quantiles\"] = peak_quantiles\n\n tmp = inp.copy()\n tmp[\"variable\"] = exp_peak_variable\n try:\n inp_renamed = inp_renamed.append(tmp)\n except NonUniqueMetadataError:\n # variable already included\n pass\n if peak_variable is not None:\n call_kwargs[\"peak_variable\"] = peak_variable\n\n if peak_naming_base is not None:\n 
call_kwargs[\"peak_naming_base\"] = peak_naming_base\n\n if peak_time_naming_base is not None:\n call_kwargs[\"peak_time_naming_base\"] = peak_time_naming_base\n\n if peak_time_naming_base is not None:\n call_kwargs[\"peak_time_naming_base\"] = peak_time_naming_base\n\n if peak_return_year is not None:\n call_kwargs[\"peak_return_year\"] = peak_return_year\n\n tmp = inp.copy()\n tmp[\"variable\"] = exp_categorisation_variable\n try:\n inp_renamed = inp_renamed.append(tmp)\n except NonUniqueMetadataError:\n # variable already included\n pass\n\n if categorisation_variable is not None:\n call_kwargs[\"categorisation_variable\"] = categorisation_variable\n\n if categorisation_quantile_cols is not None:\n call_kwargs[\"categorisation_quantile_cols\"] = categorisation_quantile_cols\n\n if exp_categorisation_quantile_cols != \"ensemble_member\":\n inp_renamed[exp_categorisation_quantile_cols] = inp_renamed[\"ensemble_member\"]\n inp_renamed = inp_renamed.drop_meta(\"ensemble_member\")\n\n res = scmdata.processing.calculate_summary_stats(\n inp_renamed,\n index,\n progress=progress,\n **call_kwargs,\n )\n\n pdt.assert_series_equal(res.sort_index(), exp.sort_index())\n\n # then user can stack etc. if they want, see notebooks\n\n\ndef test_calculate_summary_stats_no_exceedance_probability_var(\n test_processing_scm_df_multi_climate_model,\n):\n error_msg = re.escape(\n \"exceedance_probabilities_variable `junk` is not available. \"\n \"Available variables:{}\".format(\n test_processing_scm_df_multi_climate_model.get_unique_meta(\"variable\")\n )\n )\n with pytest.raises(ValueError, match=error_msg):\n scmdata.processing.calculate_summary_stats(\n test_processing_scm_df_multi_climate_model,\n [\"model\", \"scenario\"],\n exceedance_probabilities_variable=\"junk\",\n )\n\n\ndef test_calculate_summary_stats_no_peak_variable(\n test_processing_scm_df_multi_climate_model,\n):\n error_msg = re.escape(\n \"peak_variable `junk` is not available. \"\n \"Available variables:{}\".format(\n test_processing_scm_df_multi_climate_model.get_unique_meta(\"variable\")\n )\n )\n with pytest.raises(ValueError, match=error_msg):\n scmdata.processing.calculate_summary_stats(\n test_processing_scm_df_multi_climate_model,\n [\"model\", \"scenario\"],\n peak_variable=\"junk\",\n )\n\n\ndef test_calculate_summary_stats_no_categorisation_variable(\n test_processing_scm_df_multi_climate_model,\n):\n error_msg = re.escape(\n \"categorisation_variable `junk` is not available. \"\n \"Available variables:{}\".format(\n test_processing_scm_df_multi_climate_model.get_unique_meta(\"variable\")\n )\n )\n with pytest.raises(ValueError, match=error_msg):\n scmdata.processing.calculate_summary_stats(\n test_processing_scm_df_multi_climate_model,\n [\"model\", \"scenario\"],\n categorisation_variable=\"junk\",\n )\n\n\[email protected](\"dud_cols\", (\"junk\", [\"junk\"], [\"junk\", \"ensemble_member\"]))\ndef test_calculate_summary_stats_no_categorisation_quantile_cols(\n test_processing_scm_df_multi_climate_model,\n dud_cols,\n):\n error_msg = re.escape(\n \"categorisation_quantile_cols `{}` not in `scmrun`. \"\n \"Available columns:{}\".format(\n dud_cols, test_processing_scm_df_multi_climate_model.meta.columns.tolist()\n )\n )\n with pytest.raises(ValueError, match=error_msg):\n scmdata.processing.calculate_summary_stats(\n test_processing_scm_df_multi_climate_model,\n [\"model\", \"scenario\"],\n categorisation_quantile_cols=dud_cols,\n )\n"
] |
[
[
"pandas.MultiIndex.from_frame",
"pandas.testing.assert_series_equal",
"pandas.Series",
"pandas.isnull",
"pandas.DataFrame",
"numpy.round",
"pandas.testing.assert_frame_equal",
"pandas.MultiIndex.from_product",
"numpy.array"
]
] |
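Two of the statistics these tests pin down can be sketched with plain pandas. Per the expectations above, a crossing time is the first year a timeseries goes strictly above the threshold (missing if it never does), and an exceedance probability is the fraction of ensemble members that ever exceed it; this is a simplification of scmdata.processing, not its implementation:

    import numpy as np
    import pandas as pd

    # Columns are ensemble members, the index is years (as in the fixtures above).
    ensemble = pd.DataFrame(
        {0: [1.0, 1.1, 1.2, 1.1],
         1: [1.3, 1.5, 1.6, 1.2],
         2: [1.48, 1.51, 1.72, 1.56]},
        index=[2005, 2006, 2007, 2100],
    )

    def crossing_year(series, threshold):
        hits = series.index[series > threshold]  # strict, matching the test comments
        return hits[0] if len(hits) else np.nan

    print([crossing_year(ensemble[m], 1.5) for m in ensemble])  # [nan, 2007, 2006]
    print((ensemble.max() > 1.5).mean())  # fraction of members ever above 1.5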
sys-bio/roadrunner
|
[
"f0a757771ef0e337ddf7409284910e1627c3ad71"
] |
[
"rrplugins/examples/python/parameter_minimization/telChiSquareAsFunctionOfTwoParameters.py"
] |
[
"#-------------------------------------------------------------------------------\n# Purpose: Example demonstrating how to calculate the ChiSquare, using the\n# ChiSquare plugin, as a function of a TWO model parameters.\n#\n# This example are using example data in the file TwoParameters.dat, which is\n# generated using the model in 'two_parameters.xml'. Both of these files are\n# needed for the example\n#\n# The generated contour plot will indicate the value of k1 and k2 that was\n# used in generating the 'Experimental' data.\n# Author: Totte Karlsson ([email protected])\n#-------------------------------------------------------------------------------\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport roadrunner\nimport rrplugins as tel\nfrom roadrunner import Config\n\n\ndef chiFun(k1,k2):\n rr.reset()\n rr.setValue(\"k1\", k1)\n rr.setValue(\"k2\", k2) \n data = rr.simulate(timeStart, timeEnd, nrPoints - 1) \n chiSquare.ModelData = tel.getDataSeries(data)\n \n if not chiSquare.execute():\n raise Exception( tel.getLastError() )\n \n return chiSquare.ReducedChiSquare \n \nConfig.setValue(Config.LOADSBMLOPTIONS_CONSERVED_MOIETIES, False) \nConfig.setValue(Config.SIMULATEOPTIONS_STRUCTURED_RESULT, True)\n#Read some 'experimental' data \nexpData = tel.DataSeries()\n\n#This does not look right.. but it works..\nexpData = expData.readDataSeries('TwoParametersData.dat') \n \ntest_model = 'two_parameters.xml' \n\n# Create a roadrunner instance and create some MODEL data\nrr = roadrunner.RoadRunner()\nrr.load(test_model)\n\n#Get chi square plugin and set it up\nchiSquare = tel.Plugin(\"tel_chisquare\") \nchiSquare.ExperimentalData = expData \nchiSquare.NrOfModelParameters = 2\n\n#Simulate using the same numbers as in the 'Experimental data \nk1Start = 1 ; k1End = 1.8 \nk2Start = 1 ; k2End = 4.5\ntimeStart = 0; timeEnd = 1.5 ; nrPoints = 16\n \nX = np.linspace(k1Start, k1End, 55)\nY = np.linspace(k2Start, k2End, 55)\nnrX = len(X) ; nrY = len(Y)\nprint('nrPoints = ', nrX * nrY)\n\nZ = np.zeros([nrX, nrY]) \nfor row in range(nrX): \n for col in range(nrY):\n print('row,col = ', row, \", \", col)\n Z[col,row] = chiFun(X[row], Y[col]) \n \nX, Y = np.meshgrid(X, Y) \n \nCS = plt.contour(X, Y, Z, 400)\nplt.clabel(CS, inline=1, fontsize=10) \nplt.title('Reduced ChiSquare for SBML model with two parameters')\nplt.xlabel('k1')\nplt.ylabel('k2') \nplt.show() \nprint(\"done\")\n"
] |
[
[
"matplotlib.pyplot.clabel",
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.xlabel",
"numpy.meshgrid",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
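Stripped of roadrunner, the scan in the example above is "evaluate a scalar objective on a 2-D parameter grid and contour it". A self-contained sketch with a quadratic objective standing in for the plugin's reduced chi-square (the minimum at the assumed "true" k1, k2 plays the role of the generating parameters):

    import numpy as np
    import matplotlib.pyplot as plt

    def objective(k1, k2):
        # placeholder objective; minimum at the assumed true values (1.4, 2.5)
        return (k1 - 1.4) ** 2 + (k2 - 2.5) ** 2

    X = np.linspace(1.0, 1.8, 55)
    Y = np.linspace(1.0, 4.5, 55)
    Z = np.zeros((len(Y), len(X)))  # plt.contour expects Z shaped (len(Y), len(X))
    for i, k2 in enumerate(Y):
        for j, k1 in enumerate(X):
            Z[i, j] = objective(k1, k2)

    XX, YY = np.meshgrid(X, Y)
    cs = plt.contour(XX, YY, Z, 40)
    plt.clabel(cs, inline=1, fontsize=8)
    plt.xlabel('k1')
    plt.ylabel('k2')
    plt.show()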
353622088/tianchi
|
[
"e1f378e5fd783eb4cfbfaf8ecdd944b8fcfdd733"
] |
[
"nets/inception_v4.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains the definition of the Inception V4 architecture.\n\nAs described in http://arxiv.org/abs/1602.07261.\n\n Inception-v4, Inception-ResNet and the Impact of Residual Connections\n on Learning\n Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom nets import inception_utils\n\nslim = tf.contrib.slim\n\n\ndef block_inception_a(inputs, scope=None, reuse=None):\n \"\"\"Builds Inception-A block for Inception v4 network.\"\"\"\n # By default use stride=1 and SAME padding\n with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],\n stride=1, padding='SAME'):\n with tf.variable_scope(scope, 'BlockInceptionA', [inputs], reuse=reuse):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(inputs, 96, [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')\n branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, 96, [1, 1], scope='Conv2d_0b_1x1')\n return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\n\n\ndef block_reduction_a(inputs, scope=None, reuse=None):\n \"\"\"Builds Reduction-A block for Inception v4 network.\"\"\"\n # By default use stride=1 and SAME padding\n with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],\n stride=1, padding='SAME'):\n with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')\n branch_1 = slim.conv2d(branch_1, 256, [3, 3], stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])\n\n\ndef block_inception_b(inputs, scope=None, reuse=None):\n \"\"\"Builds Inception-B block for Inception v4 network.\"\"\"\n # By default use stride=1 and SAME padding\n with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],\n stride=1, padding='SAME'):\n with 
tf.variable_scope(scope, 'BlockInceptionB', [inputs], reuse=reuse):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 224, [1, 7], scope='Conv2d_0b_1x7')\n branch_1 = slim.conv2d(branch_1, 256, [7, 1], scope='Conv2d_0c_7x1')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0b_7x1')\n branch_2 = slim.conv2d(branch_2, 224, [1, 7], scope='Conv2d_0c_1x7')\n branch_2 = slim.conv2d(branch_2, 224, [7, 1], scope='Conv2d_0d_7x1')\n branch_2 = slim.conv2d(branch_2, 256, [1, 7], scope='Conv2d_0e_1x7')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')\n return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])\n\n\ndef block_reduction_b(inputs, scope=None, reuse=None):\n \"\"\"Builds Reduction-B block for Inception v4 network.\"\"\"\n # By default use stride=1 and SAME padding\n with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],\n stride=1, padding='SAME'):\n with tf.variable_scope(scope, 'BlockReductionB', [inputs], reuse=reuse):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')\n branch_0 = slim.conv2d(branch_0, 192, [3, 3], stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 256, [1, 7], scope='Conv2d_0b_1x7')\n branch_1 = slim.conv2d(branch_1, 320, [7, 1], scope='Conv2d_0c_7x1')\n branch_1 = slim.conv2d(branch_1, 320, [3, 3], stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])\n\n\ndef block_inception_c(inputs, scope=None, reuse=None):\n \"\"\"Builds Inception-C block for Inception v4 network.\"\"\"\n # By default use stride=1 and SAME padding\n with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],\n stride=1, padding='SAME'):\n with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = tf.concat(axis=3, values=[\n slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'),\n slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')])\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')\n branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')\n branch_2 = tf.concat(axis=3, values=[\n slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'),\n slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')])\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')\n branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')\n return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, 
branch_3])\n\n\ndef inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):\n \"\"\"Creates the Inception V4 network up to the given final endpoint.\n\n Args:\n inputs: a 4-D tensor of size [batch_size, height, width, 3].\n final_endpoint: specifies the endpoint to construct the network up to.\n It can be one of [ 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',\n 'Mixed_3a', 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',\n 'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e',\n 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c',\n 'Mixed_7d']\n scope: Optional variable_scope.\n\n Returns:\n logits: the logits outputs of the model.\n end_points: the set of end_points from the inception model.\n\n Raises:\n ValueError: if final_endpoint is not set to one of the predefined values,\n \"\"\"\n end_points = {}\n\n def add_and_check_final(name, net):\n end_points[name] = net\n return name == final_endpoint\n\n with tf.variable_scope(scope, 'InceptionV4', [inputs]):\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\n stride=1, padding='SAME'):\n # 299 x 299 x 3\n net = slim.conv2d(inputs, 32, [3, 3], stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points\n # 149 x 149 x 32\n net = slim.conv2d(net, 32, [3, 3], padding='VALID',\n scope='Conv2d_2a_3x3')\n if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points\n # 147 x 147 x 32\n net = slim.conv2d(net, 64, [3, 3], scope='Conv2d_2b_3x3')\n if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points\n # 147 x 147 x 64\n with tf.variable_scope('Mixed_3a'):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',\n scope='MaxPool_0a_3x3')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID',\n scope='Conv2d_0a_3x3')\n net = tf.concat(axis=3, values=[branch_0, branch_1])\n if add_and_check_final('Mixed_3a', net): return net, end_points\n\n # 73 x 73 x 160\n with tf.variable_scope('Mixed_4a'):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')\n branch_0 = slim.conv2d(branch_0, 96, [3, 3], padding='VALID',\n scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, 64, [1, 7], scope='Conv2d_0b_1x7')\n branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1')\n branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID',\n scope='Conv2d_1a_3x3')\n net = tf.concat(axis=3, values=[branch_0, branch_1])\n if add_and_check_final('Mixed_4a', net): return net, end_points\n\n # 71 x 71 x 192\n with tf.variable_scope('Mixed_5a'):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, 192, [3, 3], stride=2, padding='VALID',\n scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n net = tf.concat(axis=3, values=[branch_0, branch_1])\n if add_and_check_final('Mixed_5a', net): return net, end_points\n\n # 35 x 35 x 384\n # 4 x Inception-A blocks\n for idx in range(4):\n block_scope = 'Mixed_5' + chr(ord('b') + idx)\n net = block_inception_a(net, block_scope)\n if add_and_check_final(block_scope, net): return net, end_points\n\n # 35 x 35 x 384\n # Reduction-A block\n net = block_reduction_a(net, 'Mixed_6a')\n if 
add_and_check_final('Mixed_6a', net): return net, end_points\n\n # 17 x 17 x 1024\n # 7 x Inception-B blocks\n for idx in range(7):\n block_scope = 'Mixed_6' + chr(ord('b') + idx)\n net = block_inception_b(net, block_scope)\n if add_and_check_final(block_scope, net): return net, end_points\n\n # 17 x 17 x 1024\n # Reduction-B block\n net = block_reduction_b(net, 'Mixed_7a')\n if add_and_check_final('Mixed_7a', net): return net, end_points\n\n # 8 x 8 x 1536\n # 3 x Inception-C blocks\n for idx in range(3):\n block_scope = 'Mixed_7' + chr(ord('b') + idx)\n net = block_inception_c(net, block_scope)\n if add_and_check_final(block_scope, net): return net, end_points\n raise ValueError('Unknown final endpoint %s' % final_endpoint)\n\n\ndef inception_v4(inputs, num_classes=1001, is_training=True,\n dropout_keep_prob=0.8,\n reuse=None,\n scope='InceptionV4',\n create_aux_logits=True):\n \"\"\"Creates the Inception V4 model.\n\n Args:\n inputs: a 4-D tensor of size [batch_size, height, width, 3].\n num_classes: number of predicted classes. If 0 or None, the logits layer\n is omitted and the input features to the logits layer (before dropout)\n are returned instead.\n is_training: whether is training or not.\n dropout_keep_prob: float, the fraction to keep before final layer.\n reuse: whether or not the network and its variables should be reused. To be\n able to reuse 'scope' must be given.\n scope: Optional variable_scope.\n create_aux_logits: Whether to include the auxiliary logits.\n\n Returns:\n net: a Tensor with the logits (pre-softmax activations) if num_classes\n is a non-zero integer, or the non-dropped input to the logits layer\n if num_classes is 0 or None.\n end_points: the set of end_points from the inception model.\n \"\"\"\n end_points = {}\n with tf.variable_scope(scope, 'InceptionV4', [inputs], reuse=reuse) as scope:\n with slim.arg_scope([slim.batch_norm, slim.dropout],\n is_training=is_training):\n net, end_points = inception_v4_base(inputs, scope=scope)\n\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\n stride=1, padding='SAME'):\n # Auxiliary Head logits\n if create_aux_logits and num_classes:\n with tf.variable_scope('AuxLogits'):\n # 17 x 17 x 1024\n aux_logits = end_points['Mixed_6h']\n aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3,\n padding='VALID',\n scope='AvgPool_1a_5x5')\n aux_logits = slim.conv2d(aux_logits, 128, [1, 1],\n scope='Conv2d_1b_1x1')\n aux_logits = slim.conv2d(aux_logits, 768,\n aux_logits.get_shape()[1:3],\n padding='VALID', scope='Conv2d_2a')\n aux_logits = slim.flatten(aux_logits)\n aux_logits = slim.fully_connected(aux_logits, num_classes,\n activation_fn=None,\n scope='Aux_logits')\n end_points['AuxLogits'] = aux_logits\n\n # Final pooling and prediction\n # TODO(sguada,arnoegw): Consider adding a parameter global_pool which\n # can be set to False to disable pooling here (as in resnet_*()).\n with tf.variable_scope('Logits'):\n # 8 x 8 x 1536\n kernel_size = net.get_shape()[1:3]\n if kernel_size.is_fully_defined():\n net = slim.avg_pool2d(net, kernel_size, padding='VALID',\n scope='AvgPool_1a')\n else:\n net = tf.reduce_mean(net, [1, 2], keepdims=True,\n name='global_pool')\n end_points['global_pool'] = net\n if not num_classes:\n return net, end_points\n # 1 x 1 x 1536\n net = slim.dropout(net, dropout_keep_prob, scope='Dropout_1b')\n net = slim.flatten(net, scope='PreLogitsFlatten')\n end_points['PreLogitsFlatten'] = net\n # 1536\n logits = slim.fully_connected(net, num_classes, activation_fn=None,\n 
scope='Logits')\n end_points['Logits'] = logits\n end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')\n return logits, end_points\n\n\ninception_v4.default_image_size = 299\n\ninception_v4_arg_scope = inception_utils.inception_arg_scope\n"
] |
[
[
"tensorflow.variable_scope",
"tensorflow.nn.softmax",
"tensorflow.concat",
"tensorflow.reduce_mean"
]
] |
Tobby666/polars
|
[
"8cedd5edf4c1056a255c2fb29c3cb5d173068e32"
] |
[
"py-polars/polars/internals/series.py"
] |
[
"import sys\nfrom datetime import date, datetime, timedelta\nfrom numbers import Number\nfrom typing import (\n Any,\n Callable,\n Dict,\n List,\n Optional,\n Sequence,\n Tuple,\n Type,\n Union,\n overload,\n)\n\nimport numpy as np\n\ntry:\n import pyarrow as pa\n\n _PYARROW_AVAILABLE = True\nexcept ImportError: # pragma: no cover\n _PYARROW_AVAILABLE = False\n\nfrom polars import internals as pli\nfrom polars.internals.construction import (\n arrow_to_pyseries,\n numpy_to_pyseries,\n pandas_to_pyseries,\n sequence_to_pyseries,\n series_to_pyseries,\n)\n\ntry:\n from polars.polars import PyDataFrame, PySeries\n\n _DOCUMENTING = False\nexcept ImportError: # pragma: no cover\n _DOCUMENTING = True\n\nfrom polars.datatypes import (\n Boolean,\n DataType,\n Date,\n Datetime,\n Float32,\n Float64,\n Int8,\n Int16,\n Int32,\n Int64,\n)\nfrom polars.datatypes import List as PlList\nfrom polars.datatypes import (\n Object,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n Utf8,\n dtype_to_ctype,\n dtype_to_ffiname,\n maybe_cast,\n py_type_to_dtype,\n)\nfrom polars.utils import (\n _date_to_pl_date,\n _datetime_to_pl_timestamp,\n _ptr_to_numpy,\n range_to_slice,\n)\n\ntry:\n import pandas as pd\n\n _PANDAS_AVAILABLE = True\nexcept ImportError: # pragma: no cover\n _PANDAS_AVAILABLE = False\n\nif sys.version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import Literal # pragma: no cover\n\n\ndef get_ffi_func(\n name: str,\n dtype: Type[\"DataType\"],\n obj: \"PySeries\",\n) -> Optional[Callable[..., Any]]:\n \"\"\"\n Dynamically obtain the proper ffi function/ method.\n\n Parameters\n ----------\n name\n function or method name where dtype is replaced by <>\n for example\n \"call_foo_<>\"\n dtype\n polars dtype.\n obj\n Object to find the method for.\n\n Returns\n -------\n ffi function, or None if not found\n \"\"\"\n ffi_name = dtype_to_ffiname(dtype)\n fname = name.replace(\"<>\", ffi_name)\n return getattr(obj, fname, None)\n\n\ndef wrap_s(s: \"PySeries\") -> \"Series\":\n return Series._from_pyseries(s)\n\n\nArrayLike = Union[\n Sequence[Any], \"Series\", \"pa.Array\", np.ndarray, \"pd.Series\", \"pd.DatetimeIndex\"\n]\n\n\nclass Series:\n \"\"\"\n A Series represents a single column in a polars DataFrame.\n\n Parameters\n ----------\n name : str, default None\n Name of the series. Will be used as a column name when used in a DataFrame.\n When not specified, name is set to an empty string.\n values : ArrayLike, default None\n One-dimensional data in various forms. Supported are: Sequence, Series,\n pyarrow Array, and numpy ndarray.\n dtype : DataType, default None\n Polars dtype of the Series data. If not specified, the dtype is inferred.\n strict\n Throw error on numeric overflow\n nan_to_null\n In case a numpy arrow is used to create this Series, indicate how to deal with np.nan\n\n Examples\n --------\n Constructing a Series by specifying name and values positionally:\n\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s\n shape: (3,)\n Series: 'a' [i64]\n [\n 1\n 2\n 3\n ]\n\n Notice that the dtype is automatically inferred as a polars Int64:\n\n >>> s.dtype\n <class 'polars.datatypes.Int64'>\n\n Constructing a Series with a specific dtype:\n\n >>> s2 = pl.Series(\"a\", [1, 2, 3], dtype=pl.Float32)\n >>> s2\n shape: (3,)\n Series: 'a' [f32]\n [\n 1\n 2\n 3\n ]\n\n It is possible to construct a Series with values as the first positional argument.\n This syntax considered an anti-pattern, but it can be useful in certain\n scenarios. 
You must specify any other arguments through keywords.\n\n >>> s3 = pl.Series([1, 2, 3])\n >>> s3\n shape: (3,)\n Series: '' [i64]\n [\n 1\n 2\n 3\n ]\n\n \"\"\"\n\n def __init__(\n self,\n name: Optional[Union[str, ArrayLike]] = None,\n values: Optional[ArrayLike] = None,\n dtype: Optional[Type[DataType]] = None,\n strict: bool = True,\n nan_to_null: bool = False,\n ):\n\n # Handle case where values are passed as the first argument\n if name is not None and not isinstance(name, str):\n if values is None:\n values = name\n name = None\n else:\n raise ValueError(\"Series name must be a string.\")\n\n # TODO: Remove if-statement below once Series name is allowed to be None\n if name is None:\n name = \"\"\n\n if values is None:\n self._s = sequence_to_pyseries(name, [], dtype=dtype)\n elif isinstance(values, Series):\n self._s = series_to_pyseries(name, values)\n elif _PYARROW_AVAILABLE and isinstance(values, pa.Array):\n self._s = arrow_to_pyseries(name, values)\n elif isinstance(values, np.ndarray):\n self._s = numpy_to_pyseries(name, values, strict, nan_to_null)\n elif isinstance(values, Sequence):\n self._s = sequence_to_pyseries(name, values, dtype=dtype, strict=strict)\n elif _PANDAS_AVAILABLE and isinstance(values, (pd.Series, pd.DatetimeIndex)):\n self._s = pandas_to_pyseries(name, values)\n else:\n raise ValueError(\"Series constructor not called properly.\")\n\n @classmethod\n def _from_pyseries(cls, pyseries: \"PySeries\") -> \"Series\":\n series = cls.__new__(cls)\n series._s = pyseries\n return series\n\n @classmethod\n def _repeat(\n cls, name: str, val: Union[int, float, str, bool], n: int, dtype: Type[DataType]\n ) -> \"Series\":\n return cls._from_pyseries(PySeries.repeat(name, val, n, dtype))\n\n @classmethod\n def _from_arrow(\n cls, name: str, values: \"pa.Array\", rechunk: bool = True\n ) -> \"Series\":\n \"\"\"\n Construct a Series from an Arrow Array.\n \"\"\"\n return cls._from_pyseries(arrow_to_pyseries(name, values, rechunk))\n\n @classmethod\n def _from_pandas(\n cls,\n name: str,\n values: Union[\"pd.Series\", \"pd.DatetimeIndex\"],\n nan_to_none: bool = True,\n ) -> \"Series\":\n \"\"\"\n Construct a Series from a pandas Series or DatetimeIndex.\n \"\"\"\n return cls._from_pyseries(\n pandas_to_pyseries(name, values, nan_to_none=nan_to_none)\n )\n\n def inner(self) -> \"PySeries\":\n return self._s\n\n def __getstate__(self): # type: ignore\n return self._s.__getstate__()\n\n def __setstate__(self, state): # type: ignore\n self._s = sequence_to_pyseries(\"\", [], Float32)\n self._s.__setstate__(state)\n\n def __str__(self) -> str:\n return self._s.as_str()\n\n def __repr__(self) -> str:\n return self.__str__()\n\n def __and__(self, other: \"Series\") -> \"Series\":\n if not isinstance(other, Series):\n other = Series([other])\n return wrap_s(self._s.bitand(other._s))\n\n def __rand__(self, other: \"Series\") -> \"Series\":\n return self.__and__(other)\n\n def __or__(self, other: \"Series\") -> \"Series\":\n if not isinstance(other, Series):\n other = Series([other])\n return wrap_s(self._s.bitor(other._s))\n\n def __ror__(self, other: \"Series\") -> \"Series\":\n return self.__or__(other)\n\n def __xor__(self, other: \"Series\") -> \"Series\":\n if not isinstance(other, Series):\n other = Series([other])\n return wrap_s(self._s.bitxor(other._s))\n\n def __rxor__(self, other: \"Series\") -> \"Series\":\n return self.__xor__(other)\n\n def _comp(self, other: Any, op: str) -> \"Series\":\n if isinstance(other, datetime) and self.dtype == Datetime:\n ts = 
_datetime_to_pl_timestamp(other)\n f = get_ffi_func(op + \"_<>\", Int64, self._s)\n return wrap_s(f(ts)) # type: ignore\n if isinstance(other, date) and self.dtype == Date:\n d = _date_to_pl_date(other)\n f = get_ffi_func(op + \"_<>\", Int32, self._s)\n return wrap_s(f(d)) # type: ignore\n\n if isinstance(other, Sequence) and not isinstance(other, str):\n other = Series(\"\", other)\n if isinstance(other, Series):\n return wrap_s(getattr(self._s, op)(other._s))\n other = maybe_cast(other, self.dtype)\n f = get_ffi_func(op + \"_<>\", self.dtype, self._s)\n if f is None:\n return NotImplemented\n return wrap_s(f(other))
\n\n def __eq__(self, other: Any) -> \"Series\": # type: ignore[override]\n return self._comp(other, \"eq\")\n\n def __ne__(self, other: Any) -> \"Series\": # type: ignore[override]\n return self._comp(other, \"neq\")\n\n def __gt__(self, other: Any) -> \"Series\":\n return self._comp(other, \"gt\")\n\n def __lt__(self, other: Any) -> \"Series\":\n return self._comp(other, \"lt\")\n\n def __ge__(self, other: Any) -> \"Series\":\n return self._comp(other, \"gt_eq\")\n\n def __le__(self, other: Any) -> \"Series\":\n return self._comp(other, \"lt_eq\")
\n\n def _arithmetic(self, other: Any, op_s: str, op_ffi: str) -> \"Series\":\n if isinstance(other, Series):\n return wrap_s(getattr(self._s, op_s)(other._s))\n other = maybe_cast(other, self.dtype)\n f = get_ffi_func(op_ffi, self.dtype, self._s)\n if f is None:\n return NotImplemented\n return wrap_s(f(other))
\n\n def __add__(self, other: Any) -> \"Series\":\n if isinstance(other, str):\n other = Series(\"\", [other])\n return self._arithmetic(other, \"add\", \"add_<>\")\n\n def __sub__(self, other: Any) -> \"Series\":\n return self._arithmetic(other, \"sub\", \"sub_<>\")
\n\n def __truediv__(self, other: Any) -> \"Series\":\n # this branch is exactly the floordiv function without rounding the floats\n if self.is_float():\n return self._arithmetic(other, \"div\", \"div_<>\")\n\n return self.cast(Float64) / other\n\n def __floordiv__(self, other: Any) -> \"Series\":\n result = self._arithmetic(other, \"div\", \"div_<>\")\n if self.is_float():\n result = result.floor()\n return result
\n\n def __mul__(self, other: Any) -> \"Series\":\n return self._arithmetic(other, \"mul\", \"mul_<>\")\n\n def __mod__(self, other: Any) -> \"Series\":\n return self._arithmetic(other, \"rem\", \"rem_<>\")\n\n def __rmod__(self, other: Any) -> \"Series\":\n return self._arithmetic(other, \"rem\", \"rem_<>_rhs\")\n\n def __radd__(self, other: Any) -> \"Series\":\n return self._arithmetic(other, \"add\", \"add_<>_rhs\")\n\n def __rsub__(self, other: Any) -> \"Series\":\n return self._arithmetic(other, \"sub\", \"sub_<>_rhs\")
\n\n def __invert__(self) -> \"Series\":\n if self.dtype == Boolean:\n return wrap_s(self._s._not())\n return NotImplemented
\n\n def __rtruediv__(self, other: Any) -> \"Series\":\n # for float dtypes true division equals the rhs division, so return it directly\n if self.is_float():\n return self.__rfloordiv__(other)\n\n if isinstance(other, int):\n other = float(other)\n\n return self.cast(Float64).__rfloordiv__(other)
\n\n def __rfloordiv__(self, other: Any) -> \"Series\":\n return self._arithmetic(other, \"div\", \"div_<>_rhs\")\n\n def __rmul__(self, other: Any) -> \"Series\":\n return self._arithmetic(other, \"mul\", \"mul_<>\")\n\n def __pow__(self, power: float, modulo: None = None) -> \"Series\":\n return np.power(self, power) # type: ignore\n\n def __neg__(self) -> \"Series\":\n return 0 - self
\n\n def __getitem__(self, item: Union[int, \"Series\", range, slice]) -> Any:\n if isinstance(item, int):\n 
if item < 0:\n item = self.len() + item\n if self.dtype in (PlList, Date, Datetime, Object):\n f = get_ffi_func(\"get_<>\", self.dtype, self._s)\n if f is None:\n return NotImplemented\n out = f(item)\n if self.dtype == PlList:\n if out is None:\n return None\n return wrap_s(out)\n return out\n\n return self._s.get_idx(item)\n # assume it is boolean mask\n if isinstance(item, Series):\n return wrap_s(self._s.filter(item._s))\n\n if isinstance(item, range):\n return self[range_to_slice(item)]\n\n # slice\n if isinstance(item, slice):\n start, stop, stride = item.indices(self.len())\n out = self.slice(start, stop - start)\n if stride != 1:\n return out.take_every(stride)\n else:\n return out\n\n raise NotImplementedError\n\n def __setitem__(\n self, key: Union[int, \"Series\", np.ndarray, List, Tuple], value: Any\n ) -> None:\n if isinstance(value, list):\n raise ValueError(\"cannot set with a list as value, use a primitive value\")\n if isinstance(key, Series):\n if key.dtype == Boolean:\n self._s = self.set(key, value)._s\n elif key.dtype == UInt64:\n self._s = self.set_at_idx(key.cast(UInt32), value)._s\n elif key.dtype == UInt32:\n self._s = self.set_at_idx(key, value)._s\n # TODO: implement for these types without casting to series\n elif isinstance(key, np.ndarray) and key.dtype == np.bool_:\n # boolean numpy mask\n self._s = self.set_at_idx(np.argwhere(key)[:, 0], value)._s\n elif isinstance(key, (np.ndarray, list, tuple)):\n s = wrap_s(PySeries.new_u32(\"\", np.array(key, np.uint32), True))\n self.__setitem__(s, value)\n elif isinstance(key, int) and not isinstance(key, bool):\n self.__setitem__([key], value)\n else:\n raise ValueError(f'cannot use \"{key}\" for indexing')\n\n def sqrt(self) -> \"Series\":\n \"\"\"\n Compute the square root of the elements\n\n Syntactic sugar for\n\n >>> pl.Series([1, 2]) ** 0.5\n shape: (2,)\n Series: '' [f64]\n [\n 1\n 1.4142135623730951\n ]\n\n \"\"\"\n return self ** 0.5\n\n def log(self) -> \"Series\":\n \"\"\"\n Natural logarithm, element-wise.\n\n The natural logarithm log is the inverse of the exponential function, so that log(exp(x)) = x.\n The natural logarithm is logarithm in base e.\n \"\"\"\n return np.log(self) # type: ignore\n\n def log10(self) -> \"Series\":\n \"\"\"\n Return the base 10 logarithm of the input array, element-wise.\n \"\"\"\n return np.log10(self) # type: ignore\n\n def exp(self) -> \"Series\":\n \"\"\"\n Return the exponential element-wise\n \"\"\"\n return np.exp(self) # type: ignore\n\n def drop_nulls(self) -> \"Series\":\n \"\"\"\n Create a new Series that copies data from this Series without null values.\n \"\"\"\n return wrap_s(self._s.drop_nulls())\n\n def to_frame(self) -> \"pli.DataFrame\":\n \"\"\"\n Cast this Series to a DataFrame.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> df = s.to_frame()\n >>> df\n shape: (3, 1)\n ┌─────┐\n │ a │\n │ --- │\n │ i64 │\n ╞═════╡\n │ 1 │\n ├╌╌╌╌╌┤\n │ 2 │\n ├╌╌╌╌╌┤\n │ 3 │\n └─────┘\n\n >>> type(df)\n <class 'polars.internals.frame.DataFrame'>\n\n \"\"\"\n return pli.wrap_df(PyDataFrame([self._s]))\n\n @property\n def dtype(self) -> Type[DataType]:\n \"\"\"\n Get the data type of this Series.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.dtype\n <class 'polars.datatypes.Int64'>\n\n \"\"\"\n return self._s.dtype()\n\n @property\n def inner_dtype(self) -> Optional[Type[DataType]]:\n \"\"\"\n Get the inner dtype in of a List typed Series\n\n Returns\n -------\n DataType\n \"\"\"\n return self._s.inner_dtype()\n\n def 
describe(self) -> \"pli.DataFrame\":\n \"\"\"\n Quick summary statistics of a series. Series with mixed datatypes will return summary statistics for the datatype of the first value.\n\n Returns\n -------\n DataFrame with summary statistics of the Series.
\n\n Examples\n --------\n >>> series_num = pl.Series([1, 2, 3, 4, 5])\n >>> series_num.describe()\n shape: (6, 2)\n ┌────────────┬────────────────────┐\n │ statistic ┆ value │\n │ --- ┆ --- │\n │ str ┆ f64 │\n ╞════════════╪════════════════════╡\n │ min ┆ 1 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ max ┆ 5 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ null_count ┆ 0.0 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ mean ┆ 3 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ std ┆ 1.5811388300841898 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ count ┆ 5 │\n └────────────┴────────────────────┘
\n\n >>> series_str = pl.Series([\"a\", \"a\", None, \"b\", \"c\"])\n >>> series_str.describe()\n shape: (3, 2)\n ┌────────────┬───────┐\n │ statistic ┆ value │\n │ --- ┆ --- │\n │ str ┆ i64 │\n ╞════════════╪═══════╡\n │ unique ┆ 4 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤\n │ null_count ┆ 1 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌┤\n │ count ┆ 5 │\n └────────────┴───────┘\n\n \"\"\"
\n stats: Dict[str, Union[Optional[float], int, str]]\n\n if self.len() == 0:\n raise ValueError(\"Series must contain at least one value\")\n elif self.is_numeric():\n s = self.cast(Float64)\n stats = {\n \"min\": s.min(),\n \"max\": s.max(),\n \"null_count\": s.null_count(),\n \"mean\": s.mean(),\n \"std\": s.std(),\n \"count\": s.len(),\n }\n elif self.is_boolean():\n stats = {\n \"sum\": self.sum(),\n \"null_count\": self.null_count(),\n \"count\": self.len(),\n }\n elif self.is_utf8():\n stats = {\n \"unique\": len(self.unique()),\n \"null_count\": self.null_count(),\n \"count\": self.len(),\n }\n elif self.is_datelike():\n # we coerce all to string, because a polars column\n # only has a single dtype and dates: datetime and count: int don't match\n stats = {\n \"min\": str(self.dt.min()),\n \"max\": str(self.dt.max()),\n \"null_count\": str(self.null_count()),\n \"count\": str(self.len()),\n }\n else:\n raise TypeError(\"This type is not supported\")\n\n return pli.DataFrame(\n {\"statistic\": list(stats.keys()), \"value\": list(stats.values())}\n )
\n\n def sum(self) -> Union[int, float]:\n \"\"\"\n Reduce this Series to the sum value.\n\n Notes\n -----\n Dtypes in {Int8, UInt8, Int16, UInt16} are cast to\n Int64 before summing to prevent overflow issues.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.sum()\n 6\n\n \"\"\"\n return self._s.sum()
\n\n def mean(self) -> Union[int, float]:\n \"\"\"\n Reduce this Series to the mean value.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.mean()\n 2.0\n\n \"\"\"\n return self._s.mean()
\n\n def min(self) -> Union[int, float]:\n \"\"\"\n Get the minimal value in this Series.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.min()\n 1\n\n \"\"\"\n return self._s.min()
\n\n def max(self) -> Union[int, float]:\n \"\"\"\n Get the maximum value in this Series.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.max()\n 3\n\n \"\"\"\n return self._s.max()
\n\n def std(self, ddof: int = 1) -> Optional[float]:\n \"\"\"\n Get the standard deviation of this Series.\n\n Parameters\n ----------\n ddof\n “Delta Degrees of Freedom”: the divisor used in the calculation is N - ddof,\n where N represents the number of elements.\n By default ddof is 1.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 
3])\n >>> s.std()\n 1.0\n\n \"\"\"\n if not self.is_numeric():\n return None\n return np.std(self.drop_nulls().view(), ddof=ddof)\n\n def var(self, ddof: int = 1) -> Optional[float]:\n \"\"\"\n Get variance of this Series.\n\n Parameters\n ----------\n ddof\n “Delta Degrees of Freedom”: the divisor used in the calculation is N - ddof,\n where N represents the number of elements.\n By default ddof is 1.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.var()\n 1.0\n\n \"\"\"\n if not self.is_numeric():\n return None\n return np.var(self.drop_nulls().view(), ddof=ddof)\n\n def median(self) -> float:\n \"\"\"\n Get the median of this Series.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.median()\n 2.0\n\n \"\"\"\n return self._s.median()\n\n def quantile(self, quantile: float, interpolation: str = \"nearest\") -> float:\n \"\"\"\n Get the quantile value of this Series.\n\n Parameters\n ----------\n quantile\n quantile between 0.0 and 1.0\n\n interpolation\n interpolation type, options: ['nearest', 'higher', 'lower', 'midpoint', 'linear']\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.quantile(0.5)\n 2\n\n \"\"\"\n return self._s.quantile(quantile, interpolation)\n\n def to_dummies(self) -> \"pli.DataFrame\":\n \"\"\"\n Get dummy variables.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.to_dummies()\n shape: (3, 3)\n ┌─────┬─────┬─────┐\n │ a_1 ┆ a_2 ┆ a_3 │\n │ --- ┆ --- ┆ --- │\n │ u8 ┆ u8 ┆ u8 │\n ╞═════╪═════╪═════╡\n │ 1 ┆ 0 ┆ 0 │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 0 ┆ 1 ┆ 0 │\n ├╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌┤\n │ 0 ┆ 0 ┆ 1 │\n └─────┴─────┴─────┘\n\n \"\"\"\n return pli.wrap_df(self._s.to_dummies())\n\n def value_counts(self) -> \"pli.DataFrame\":\n \"\"\"\n Count the unique values in a Series.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 2, 3])\n >>> s.value_counts().sort(by=\"a\")\n shape: (3, 2)\n ┌─────┬────────┐\n │ a ┆ counts │\n │ --- ┆ --- │\n │ i64 ┆ u32 │\n ╞═════╪════════╡\n │ 1 ┆ 1 │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 2 ┆ 2 │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 3 ┆ 1 │\n └─────┴────────┘\n\n \"\"\"\n return pli.wrap_df(self._s.value_counts())\n\n @property\n def name(self) -> str:\n \"\"\"\n Get the name of this Series.\n \"\"\"\n return self._s.name()\n\n def alias(self, name: str) -> \"Series\":\n \"\"\"\n Rename the Series\n\n Parameters\n ----------\n name\n New name\n\n Returns\n -------\n\n \"\"\"\n s = self.clone()\n s._s.rename(name)\n return s\n\n @overload\n def rename(self, name: str, in_place: Literal[False] = ...) 
-> \"Series\":\n ...\n\n @overload\n def rename(self, name: str, in_place: Literal[True]) -> None:\n ...\n\n @overload\n def rename(self, name: str, in_place: bool) -> Optional[\"Series\"]:\n ...\n\n def rename(self, name: str, in_place: bool = False) -> Optional[\"Series\"]:\n \"\"\"\n Rename this Series.\n\n Parameters\n ----------\n name\n New name.\n in_place\n Modify the Series in-place.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.rename(\"b\")\n shape: (3,)\n Series: 'b' [i64]\n [\n 1\n 2\n 3\n ]\n\n \"\"\"\n if in_place:\n self._s.rename(name)\n return None\n else:\n return self.alias(name)\n\n def chunk_lengths(self) -> List[int]:\n \"\"\"\n Get the length of each individual chunk.\n \"\"\"\n return self._s.chunk_lengths()\n\n def n_chunks(self) -> int:\n \"\"\"\n Get the number of chunks that this Series contains.\n \"\"\"\n return self._s.n_chunks()\n\n def cumsum(self, reverse: bool = False) -> \"Series\":\n \"\"\"\n Get an array with the cumulative sum computed at every element.\n\n Parameters\n ----------\n reverse\n reverse the operation.\n\n Notes\n -----\n Dtypes in {Int8, UInt8, Int16, UInt16} are cast to\n Int64 before summing to prevent overflow issues.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.cumsum()\n shape: (3,)\n Series: 'a' [i64]\n [\n 1\n 3\n 6\n ]\n\n \"\"\"\n return wrap_s(self._s.cumsum(reverse))\n\n def cummin(self, reverse: bool = False) -> \"Series\":\n \"\"\"\n Get an array with the cumulative min computed at every element.\n\n Parameters\n ----------\n reverse\n reverse the operation.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.cummin()\n shape: (3,)\n Series: 'a' [i64]\n [\n 1\n 1\n 1\n ]\n\n \"\"\"\n return wrap_s(self._s.cummin(reverse))\n\n def cummax(self, reverse: bool = False) -> \"Series\":\n \"\"\"\n Get an array with the cumulative max computed at every element.\n\n Parameters\n ----------\n reverse\n reverse the operation.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.cummax()\n shape: (3,)\n Series: 'a' [i64]\n [\n 1\n 2\n 3\n ]\n\n \"\"\"\n return wrap_s(self._s.cummax(reverse))\n\n def cumprod(self, reverse: bool = False) -> \"Series\":\n \"\"\"\n Get an array with the cumulative product computed at every element.\n\n Parameters\n ----------\n reverse\n reverse the operation.\n\n Notes\n -----\n Dtypes in {Int8, UInt8, Int16, UInt16} are cast to\n Int64 before summing to prevent overflow issues.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.cumprod()\n shape: (3,)\n Series: 'a' [i64]\n [\n 1\n 2\n 6\n ]\n\n \"\"\"\n return wrap_s(self._s.cumprod(reverse))\n\n def limit(self, num_elements: int = 10) -> \"Series\":\n \"\"\"\n Take n elements from this Series.\n\n Parameters\n ----------\n num_elements\n Amount of elements to take.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.limit(2)\n shape: (2,)\n Series: 'a' [i64]\n [\n 1\n 2\n ]\n\n \"\"\"\n return wrap_s(self._s.limit(num_elements))\n\n def slice(self, offset: int, length: int) -> \"Series\":\n \"\"\"\n Get a slice of this Series.\n\n Parameters\n ----------\n offset\n Offset index.\n length\n Length of the slice.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.slice(1, 2)\n shape: (2,)\n Series: 'a' [i64]\n [\n 2\n 3\n ]\n\n \"\"\"\n return wrap_s(self._s.slice(offset, length))\n\n def append(self, other: \"Series\") -> None:\n \"\"\"\n Append a Series to this one.\n\n Parameters\n ----------\n other\n Series to 
append.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s2 = pl.Series(\"b\", [4, 5, 6])\n >>> s.append(s2)\n >>> s\n shape: (6,)\n Series: 'a' [i64]\n [\n 1\n 2\n 3\n 4\n 5\n 6\n ]\n\n \"\"\"\n self._s.append(other._s)\n\n def filter(self, predicate: Union[\"Series\", list]) -> \"Series\":\n \"\"\"\n Filter elements by a boolean mask.\n\n Parameters\n ----------\n predicate\n Boolean mask.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> mask = pl.Series(\"\", [True, False, True])\n >>> s.filter(mask)\n shape: (2,)\n Series: 'a' [i64]\n [\n 1\n 3\n ]\n\n \"\"\"\n if isinstance(predicate, list):\n predicate = Series(\"\", predicate)\n return wrap_s(self._s.filter(predicate._s))\n\n def head(self, length: Optional[int] = None) -> \"Series\":\n \"\"\"\n Get first N elements as Series.\n\n Parameters\n ----------\n length\n Length of the head.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.head(2)\n shape: (2,)\n Series: 'a' [i64]\n [\n 1\n 2\n ]\n\n \"\"\"\n return wrap_s(self._s.head(length))\n\n def tail(self, length: Optional[int] = None) -> \"Series\":\n \"\"\"\n Get last N elements as Series.\n\n Parameters\n ----------\n length\n Length of the tail.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.tail(2)\n shape: (2,)\n Series: 'a' [i64]\n [\n 2\n 3\n ]\n\n \"\"\"\n return wrap_s(self._s.tail(length))\n\n def take_every(self, n: int) -> \"Series\":\n \"\"\"\n Take every nth value in the Series and return as new Series.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3, 4])\n >>> s.take_every(2)\n shape: (2,)\n Series: 'a' [i64]\n [\n 1\n 3\n ]\n\n \"\"\"\n return wrap_s(self._s.take_every(n))\n\n @overload\n def sort(\n self, reverse: bool = False, *, in_place: Literal[False] = ...\n ) -> \"Series\":\n ...\n\n @overload\n def sort(self, reverse: bool = False, *, in_place: Literal[True]) -> None:\n ...\n\n @overload\n def sort(\n self, reverse: bool = False, *, in_place: bool = False\n ) -> Optional[\"Series\"]:\n ...\n\n def sort(\n self, reverse: bool = False, *, in_place: bool = False\n ) -> Optional[\"Series\"]:\n \"\"\"\n Sort this Series.\n\n Parameters\n ----------\n reverse\n Reverse sort.\n in_place\n Sort in place.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 3, 4, 2])\n >>> s.sort()\n shape: (4,)\n Series: 'a' [i64]\n [\n 1\n 2\n 3\n 4\n ]\n >>> s.sort(reverse=True)\n shape: (4,)\n Series: 'a' [i64]\n [\n 4\n 3\n 2\n 1\n ]\n\n \"\"\"\n if in_place:\n self._s = self._s.sort(reverse)\n return None\n else:\n return wrap_s(self._s.sort(reverse))\n\n def argsort(self, reverse: bool = False) -> \"Series\":\n \"\"\"\n Index location of the sorted variant of this Series.\n\n Returns\n -------\n indexes\n Indexes that can be used to sort this array.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [5, 3, 4, 1, 2])\n >>> s.argsort()\n shape: (5,)\n Series: 'a' [u32]\n [\n 3\n 4\n 1\n 2\n 0\n ]\n\n \"\"\"\n return wrap_s(self._s.argsort(reverse))\n\n def arg_unique(self) -> \"Series\":\n \"\"\"\n Get unique index as Series.\n \"\"\"\n return wrap_s(self._s.arg_unique())\n\n def arg_min(self) -> Optional[int]:\n \"\"\"\n Get the index of the minimal value.\n \"\"\"\n return self._s.arg_min()\n\n def arg_max(self) -> Optional[int]:\n \"\"\"\n Get the index of the maximal value.\n \"\"\"\n return self._s.arg_max()\n\n def unique(self) -> \"Series\":\n \"\"\"\n Get unique elements in series.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 2, 3])\n >>> s.unique().sort()\n 
shape: (3,)\n Series: 'a' [i64]\n [\n 1\n 2\n 3\n ]\n\n \"\"\"\n return wrap_s(self._s.unique())
\n\n def take(self, indices: Union[np.ndarray, List[int]]) -> \"Series\":\n \"\"\"\n Take values by index.\n\n Parameters\n ----------\n indices\n Index location used for selection.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3, 4])\n >>> s.take([1, 3])\n shape: (2,)\n Series: 'a' [i64]\n [\n 2\n 4\n ]\n\n \"\"\"\n if isinstance(indices, list):\n indices = np.array(indices)\n return wrap_s(self._s.take(indices))
\n\n def null_count(self) -> int:\n \"\"\"\n Count the null values in this Series.\n \"\"\"\n return self._s.null_count()\n\n def has_validity(self) -> bool:\n \"\"\"\n Returns True if the Series has a validity bitmask. If there is none, it means that there are no null values.\n Use this to swiftly assert a Series does not have null values.\n \"\"\"\n return self._s.has_validity()
\n\n def is_null(self) -> \"Series\":\n \"\"\"\n Get mask of null values.\n\n Returns\n -------\n Boolean Series\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1.0, 2.0, 3.0, None])\n >>> s.is_null()\n shape: (4,)\n Series: 'a' [bool]\n [\n false\n false\n false\n true\n ]\n\n \"\"\"\n return wrap_s(self._s.is_null())
\n\n def is_not_null(self) -> \"Series\":\n \"\"\"\n Get mask of non null values.\n\n Returns\n -------\n Boolean Series\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1.0, 2.0, 3.0, None])\n >>> s.is_not_null()\n shape: (4,)\n Series: 'a' [bool]\n [\n true\n true\n true\n false\n ]\n\n \"\"\"\n return wrap_s(self._s.is_not_null())
\n\n def is_finite(self) -> \"Series\":\n \"\"\"\n Get mask of finite values if Series dtype is Float.\n\n Returns\n -------\n Boolean Series\n\n Examples\n --------\n >>> import numpy as np\n >>> s = pl.Series(\"a\", [1.0, 2.0, np.inf])\n >>> s.is_finite()\n shape: (3,)\n Series: 'a' [bool]\n [\n true\n true\n false\n ]\n\n \"\"\"\n return wrap_s(self._s.is_finite())
\n\n def is_infinite(self) -> \"Series\":\n \"\"\"\n Get mask of infinite values if Series dtype is Float.\n\n Returns\n -------\n Boolean Series\n\n Examples\n --------\n >>> import numpy as np\n >>> s = pl.Series(\"a\", [1.0, 2.0, np.inf])\n >>> s.is_infinite()\n shape: (3,)\n Series: 'a' [bool]\n [\n false\n false\n true\n ]\n\n \"\"\"\n return wrap_s(self._s.is_infinite())
\n\n def is_nan(self) -> \"Series\":\n \"\"\"\n Get mask of NaN values if Series dtype is Float.\n\n Returns\n -------\n Boolean Series\n\n Examples\n --------\n >>> import numpy as np\n >>> s = pl.Series(\"a\", [1.0, 2.0, 3.0, np.NaN])\n >>> s.is_nan()\n shape: (4,)\n Series: 'a' [bool]\n [\n false\n false\n false\n true\n ]\n\n \"\"\"\n return wrap_s(self._s.is_nan())
\n\n def is_not_nan(self) -> \"Series\":\n \"\"\"\n Get negated mask of NaN values if Series dtype is Float.\n\n Returns\n -------\n Boolean Series\n\n Examples\n --------\n >>> import numpy as np\n >>> s = pl.Series(\"a\", [1.0, 2.0, 3.0, np.NaN])\n >>> s.is_not_nan()\n shape: (4,)\n Series: 'a' [bool]\n [\n true\n true\n true\n false\n ]\n\n \"\"\"\n return wrap_s(self._s.is_not_nan())
\n\n def is_in(self, other: Union[\"Series\", List]) -> \"Series\":\n \"\"\"\n Check if elements of this Series are in the right Series, or List values of the right Series.\n\n Returns\n -------\n Boolean Series\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s2 = pl.Series(\"b\", [2, 4])\n >>> s2.is_in(s)\n shape: (2,)\n Series: 'b' [bool]\n [\n true\n false\n ]\n\n >>> # check if some values are a member of sublists\n >>> sets = 
pl.Series(\"sets\", [[1, 2, 3], [1, 2], [9, 10]])\n >>> optional_members = pl.Series(\"optional_members\", [1, 2, 3])\n >>> print(sets)\n shape: (3,)\n Series: 'sets' [list]\n [\n [1, 2, 3]\n [1, 2]\n [9, 10]\n ]\n >>> print(optional_members)\n shape: (3,)\n Series: 'optional_members' [i64]\n [\n 1\n 2\n 3\n ]\n >>> optional_members.is_in(sets)\n shape: (3,)\n Series: 'optional_members' [bool]\n [\n true\n true\n false\n ]\n\n \"\"\"\n if isinstance(other, list):\n other = Series(\"\", other)\n return wrap_s(self._s.is_in(other._s))\n\n def arg_true(self) -> \"Series\":\n \"\"\"\n Get index values where Boolean Series evaluate True.\n\n Returns\n -------\n UInt32 Series\n \"\"\"\n return wrap_s(self._s.arg_true())\n\n def is_unique(self) -> \"Series\":\n \"\"\"\n Get mask of all unique values.\n\n Returns\n -------\n Boolean Series\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 2, 3])\n >>> s.is_unique()\n shape: (4,)\n Series: 'a' [bool]\n [\n true\n false\n false\n true\n ]\n\n \"\"\"\n return wrap_s(self._s.is_unique())\n\n def is_first(self) -> \"Series\":\n \"\"\"\n Get a mask of the first unique value.\n\n Returns\n -------\n Boolean Series\n \"\"\"\n return wrap_s(self._s.is_first())\n\n def is_duplicated(self) -> \"Series\":\n \"\"\"\n Get mask of all duplicated values.\n\n Returns\n -------\n Boolean Series\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 2, 3])\n >>> s.is_duplicated()\n shape: (4,)\n Series: 'a' [bool]\n [\n false\n true\n true\n false\n ]\n\n \"\"\"\n return wrap_s(self._s.is_duplicated())\n\n def explode(self) -> \"Series\":\n \"\"\"\n Explode a list or utf8 Series. This means that every item is expanded to a new row.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [[1, 2], [3, 4], [9, 10]])\n >>> s.explode()\n shape: (6,)\n Series: 'a' [i64]\n [\n 1\n 2\n 3\n 4\n 9\n 10\n ]\n\n Returns\n -------\n Exploded Series of same dtype\n \"\"\"\n return wrap_s(self._s.explode())\n\n def series_equal(\n self, other: \"Series\", null_equal: bool = False, strict: bool = False\n ) -> bool:\n \"\"\"\n Check if series is equal with another Series.\n\n Parameters\n ----------\n other\n Series to compare with.\n null_equal\n Consider null values as equal.\n strict\n Don't allow different numerical dtypes, e.g. 
comparing `pl.UInt32` with a `pl.Int64` will return `False`.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s2 = pl.Series(\"b\", [4, 5, 6])\n >>> s.series_equal(s)\n True\n >>> s.series_equal(s2)\n False\n\n \"\"\"\n return self._s.series_equal(other._s, null_equal, strict)\n\n def len(self) -> int:\n \"\"\"\n Length of this Series.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.len()\n 3\n\n \"\"\"\n return self._s.len()\n\n @property\n def shape(self) -> Tuple[int]:\n \"\"\"\n Shape of this Series.\n \"\"\"\n return (self._s.len(),)\n\n def __len__(self) -> int:\n return self.len()\n\n def cast(\n self,\n dtype: Union[Type[DataType], Type[int], Type[float], Type[str], Type[bool]],\n strict: bool = True,\n ) -> \"Series\":\n \"\"\"\n Cast between data types.\n\n Parameters\n ----------\n dtype\n DataType to cast to\n strict\n Throw an error if a cast could not be done for instance due to an overflow\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [True, False, True])\n >>> s\n shape: (3,)\n Series: 'a' [bool]\n [\n true\n false\n true\n ]\n\n >>> s.cast(pl.UInt32)\n shape: (3,)\n Series: 'a' [u32]\n [\n 1\n 0\n 1\n ]\n\n \"\"\"\n pl_dtype = py_type_to_dtype(dtype)\n return wrap_s(self._s.cast(str(pl_dtype), strict))\n\n def to_physical(self) -> \"Series\":\n \"\"\"\n Cast to physical representation of the logical dtype.\n\n Date -> Int32\n Datetime -> Int64\n Time -> Int64\n other -> other\n \"\"\"\n return wrap_s(self._s.to_physical())\n\n def to_list(self, use_pyarrow: bool = False) -> List[Optional[Any]]:\n \"\"\"\n Convert this Series to a Python List. This operation clones data.\n\n Parameters\n ----------\n use_pyarrow\n Use pyarrow for the conversion.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.to_list()\n [1, 2, 3]\n >>> type(s.to_list())\n <class 'list'>\n\n \"\"\"\n if use_pyarrow:\n return self.to_arrow().to_pylist()\n return self._s.to_list()\n\n def __iter__(self) -> \"SeriesIter\":\n return SeriesIter(self.len(), self)\n\n @overload\n def rechunk(self, in_place: Literal[False] = ...) 
-> \"Series\":\n ...\n\n @overload\n def rechunk(self, in_place: Literal[True]) -> None:\n ...\n\n @overload\n def rechunk(self, in_place: bool) -> Optional[\"Series\"]:\n ...\n\n def rechunk(self, in_place: bool = False) -> Optional[\"Series\"]:\n \"\"\"\n Create a single chunk of memory for this Series.\n\n Parameters\n ----------\n in_place\n In place or not.\n \"\"\"\n opt_s = self._s.rechunk(in_place)\n if in_place:\n return None\n else:\n return wrap_s(opt_s)\n\n def is_numeric(self) -> bool:\n \"\"\"\n Check if this Series datatype is numeric.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.is_numeric()\n True\n\n \"\"\"\n return self.dtype in (\n Int8,\n Int16,\n Int32,\n Int64,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n Float32,\n Float64,\n )\n\n def is_datelike(self) -> bool:\n \"\"\"\n Check if this Series datatype is datelike.\n\n Examples\n --------\n >>> from datetime import date\n >>> s = pl.Series([date(2021, 1, 1), date(2021, 1, 2), date(2021, 1, 3)])\n >>> s.is_datelike()\n True\n\n \"\"\"\n return self.dtype in (Date, Datetime)\n\n def is_float(self) -> bool:\n \"\"\"\n Check if this Series has floating point numbers.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1.0, 2.0, 3.0])\n >>> s.is_float()\n True\n\n \"\"\"\n return self.dtype in (Float32, Float64)\n\n def is_boolean(self) -> bool:\n \"\"\"\n Check if this Series is a Boolean.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [True, False, True])\n >>> s.is_boolean()\n True\n\n \"\"\"\n return self.dtype is Boolean\n\n def is_utf8(self) -> bool:\n \"\"\"\n Checks if this Series datatype is a Utf8.\n\n Examples\n --------\n >>> s = pl.Series(\"x\", [\"a\", \"b\", \"c\"])\n >>> s.is_utf8()\n True\n\n \"\"\"\n return self.dtype is Utf8\n\n def view(self, ignore_nulls: bool = False) -> np.ndarray:\n \"\"\"\n Get a view into this Series data with a numpy array. This operation doesn't clone data, but does not include\n missing values. Don't use this unless you know what you are doing.\n\n .. 
warning::\n\n This function can lead to undefined behavior in the following cases:\n\n Returns a view to a piece of memory that is already dropped:\n\n >>> pl.Series([1, 3, 5]).sort().view() # doctest: +IGNORE_RESULT\n\n Sums invalid data that is missing:\n\n >>> pl.Series([1, 2, None]).view().sum() # doctest: +SKIP\n\n \"\"\"\n if not ignore_nulls:\n assert not self.has_validity()\n\n ptr_type = dtype_to_ctype(self.dtype)\n ptr = self._s.as_single_ptr()\n array = _ptr_to_numpy(ptr, self.len(), ptr_type)\n array.setflags(write=False)\n return array\n\n def __array__(self, dtype: Any = None) -> np.ndarray:\n return self.to_numpy().__array__(dtype)\n\n def __array_ufunc__(\n self, ufunc: Callable[..., Any], method: str, *inputs: Any, **kwargs: Any\n ) -> \"Series\":\n \"\"\"\n Numpy universal functions.\n \"\"\"\n if self._s.n_chunks() > 0:\n self._s.rechunk(in_place=True)\n\n if method == \"__call__\":\n args: List[Union[Number, np.ndarray]] = []\n for arg in inputs:\n if isinstance(arg, Number):\n args.append(arg)\n elif isinstance(arg, Series):\n args.append(arg.view(ignore_nulls=True))\n else:\n return NotImplemented\n\n if \"dtype\" in kwargs:\n dtype = kwargs.pop(\"dtype\")\n else:\n dtype = self.dtype\n\n try:\n f = get_ffi_func(\"apply_ufunc_<>\", dtype, self._s)\n if f is None:\n return NotImplemented\n series = f(lambda out: ufunc(*args, out=out, **kwargs))\n return wrap_s(series)\n except TypeError:\n # some integer to float ufuncs do not work, try on f64\n s = self.cast(Float64)\n args[0] = s.view(ignore_nulls=True)\n f = get_ffi_func(\"apply_ufunc_<>\", Float64, self._s)\n if f is None:\n return NotImplemented\n series = f(lambda out: ufunc(*args, out=out, **kwargs))\n return wrap_s(series)\n\n else:\n return NotImplemented\n\n def to_numpy(\n self, *args: Any, zero_copy_only: bool = False, **kwargs: Any\n ) -> np.ndarray:\n \"\"\"\n Convert this Series to numpy. This operation clones data but is completely safe.\n\n If you want a zero-copy view and know what you are doing, use `.view()`.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> arr = s.to_numpy()\n >>> arr # doctest: +IGNORE_RESULT\n array([1, 2, 3], dtype=int64)\n >>> type(arr)\n <class 'numpy.ndarray'>\n\n Parameters\n ----------\n args\n args will be sent to pyarrow.Array.to_numpy.\n zero_copy_only\n If True, an exception will be raised if the conversion to a numpy\n array would require copying the underlying data (e.g. in presence\n of nulls, or for non-primitive types).\n kwargs\n kwargs will be sent to pyarrow.Array.to_numpy\n \"\"\"\n\n def convert_to_date(arr): # type: ignore\n if self.dtype == Date:\n tp = \"datetime64[D]\"\n else:\n tp = \"datetime64[ms]\"\n return arr.astype(tp)\n\n if _PYARROW_AVAILABLE and not self.is_datelike():\n return self.to_arrow().to_numpy(\n *args, zero_copy_only=zero_copy_only, **kwargs\n )\n else:\n if not self.has_validity():\n if self.is_datelike():\n return convert_to_date(self.view(ignore_nulls=True))\n return self.view(ignore_nulls=True)\n if self.is_datelike():\n return convert_to_date(self._s.to_numpy())\n return self._s.to_numpy()\n\n def to_arrow(self) -> \"pa.Array\":\n \"\"\"\n Get the underlying Arrow Array. 
If the Series contains only a single chunk\n this operation is zero copy.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s = s.to_arrow()\n >>> s # doctest: +ELLIPSIS\n <pyarrow.lib.Int64Array object at ...>\n [\n 1,\n 2,\n 3\n ]\n\n \"\"\"\n return self._s.to_arrow()\n\n def set(self, filter: \"Series\", value: Union[int, float]) -> \"Series\":\n \"\"\"\n Set masked values.\n\n Parameters\n ----------\n filter\n Boolean mask.\n value\n Value to replace the the masked values with.\n \"\"\"\n f = get_ffi_func(\"set_with_mask_<>\", self.dtype, self._s)\n if f is None:\n return NotImplemented\n return wrap_s(f(filter._s, value))\n\n def set_at_idx(\n self,\n idx: Union[\"Series\", np.ndarray, List[int], Tuple[int]],\n value: Union[int, float, str, bool],\n ) -> \"Series\":\n \"\"\"\n Set values at the index locations.\n\n Parameters\n ----------\n idx\n Integers representing the index locations.\n value\n replacement values.\n\n Returns\n -------\n New allocated Series\n \"\"\"\n\n # the set_at_idx function expects a np.array of dtype u32\n f = get_ffi_func(\"set_at_idx_<>\", self.dtype, self._s)\n if f is None:\n raise ValueError(\n f\"could not find the FFI function needed to set at idx for series {self._s}\"\n )\n if isinstance(idx, Series):\n # make sure the dtype matches\n idx = idx.cast(UInt32)\n idx_array = idx.view()\n elif isinstance(idx, np.ndarray):\n if not idx.data.c_contiguous:\n idx_array = np.ascontiguousarray(idx, dtype=np.uint32)\n else:\n idx_array = idx\n if idx_array.dtype != np.uint32:\n idx_array = np.array(idx_array, np.uint32)\n\n else:\n idx_array = np.array(idx, dtype=np.uint32)\n\n return wrap_s(f(idx_array, value))\n\n def clone(self) -> \"Series\":\n \"\"\"\n Cheap deep clones.\n \"\"\"\n return wrap_s(self._s.clone())\n\n def __copy__(self) -> \"Series\":\n return self.clone()\n\n def __deepcopy__(self, memodict: Any = {}) -> \"Series\":\n return self.clone()\n\n def fill_null(self, strategy: Union[str, int, \"pli.Expr\"]) -> \"Series\":\n \"\"\"\n Fill null values with a filling strategy.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3, None])\n >>> s.fill_null(\"forward\")\n shape: (4,)\n Series: 'a' [i64]\n [\n 1\n 2\n 3\n 3\n ]\n >>> s.fill_null(\"min\")\n shape: (4,)\n Series: 'a' [i64]\n [\n 1\n 2\n 3\n 1\n ]\n\n Parameters\n ----------\n strategy\n\n Fill null strategy or a value\n * \"backward\"\n * \"forward\"\n * \"min\"\n * \"max\"\n * \"mean\"\n * \"one\"\n * \"zero\"\n \"\"\"\n if not isinstance(strategy, str):\n return self.to_frame().select(pli.col(self.name).fill_null(strategy))[\n self.name\n ]\n return wrap_s(self._s.fill_null(strategy))\n\n def floor(self) -> \"Series\":\n \"\"\"\n Floor underlying floating point array to the lowest integers smaller or equal to the float value.\n\n Only works on floating point Series\n \"\"\"\n return wrap_s(self._s.floor())\n\n def round(self, decimals: int) -> \"Series\":\n \"\"\"\n Round underlying floating point data by `decimals` digits.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1.12345, 2.56789, 3.901234])\n >>> s.round(2)\n shape: (3,)\n Series: 'a' [f64]\n [\n 1.12\n 2.57\n 3.9\n ]\n\n Parameters\n ----------\n decimals\n number of decimals to round by.\n \"\"\"\n return wrap_s(self._s.round(decimals))\n\n def dot(self, other: \"Series\") -> Optional[float]:\n \"\"\"\n Compute the dot/inner product between two Series\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s2 = pl.Series(\"b\", [4.0, 5.0, 6.0])\n >>> s.dot(s2)\n 32.0\n\n 
Parameters\n ----------\n other\n Series to compute dot product with\n \"\"\"\n return self._s.dot(other._s)\n\n def mode(self) -> \"Series\":\n \"\"\"\n Compute the most occurring value(s). Can return multiple Values\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 2, 3])\n >>> s.mode()\n shape: (1,)\n Series: 'a' [i64]\n [\n 2\n ]\n\n \"\"\"\n return wrap_s(self._s.mode())\n\n def sin(self) -> \"Series\":\n \"\"\"\n Compute the element-wise value for Trigonometric sine.\n\n Examples\n --------\n >>> import numpy as np\n >>> s = pl.Series(\"a\", np.array((0.0, np.pi / 2.0, np.pi)))\n >>> s.sin()\n shape: (3,)\n Series: 'a' [f64]\n [\n 0.0\n 1\n 1.2246467991473532e-16\n ]\n\n \"\"\"\n return np.sin(self) # type: ignore\n\n def cos(self) -> \"Series\":\n \"\"\"\n Compute the element-wise value for Trigonometric cosine.\n\n Examples\n --------\n >>> import numpy as np\n >>> s = pl.Series(\"a\", np.array((0.0, np.pi / 2.0, np.pi)))\n >>> s.cos()\n shape: (3,)\n Series: 'a' [f64]\n [\n 1\n 6.123233995736766e-17\n -1e0\n ]\n\n \"\"\"\n return np.cos(self) # type: ignore\n\n def tan(self) -> \"Series\":\n \"\"\"\n Compute the element-wise value for Trigonometric tangent.\n\n Examples\n --------\n >>> import numpy as np\n >>> s = pl.Series(\"a\", np.array((0.0, np.pi / 2.0, np.pi)))\n >>> s.tan()\n shape: (3,)\n Series: 'a' [f64]\n [\n 0.0\n 1.633123935319537e16\n -1.2246467991473532e-16\n ]\n\n \"\"\"\n return np.tan(self) # type: ignore\n\n def arcsin(self) -> \"Series\":\n \"\"\"\n Compute the element-wise value for Trigonometric Inverse sine.\n\n Examples\n --------\n >>> import numpy as np\n >>> s = pl.Series(\"a\", np.array((1.0, 0.0, -1)))\n >>> s.arcsin()\n shape: (3,)\n Series: 'a' [f64]\n [\n 1.5707963267948966\n 0.0\n -1.5707963267948966e0\n ]\n\n \"\"\"\n return np.arcsin(self) # type: ignore\n\n def arccos(self) -> \"Series\":\n \"\"\"\n Compute the element-wise value for Trigonometric Inverse cosine.\n\n Examples\n --------\n >>> import numpy as np\n >>> s = pl.Series(\"a\", np.array((1.0, 0.0, -1)))\n >>> s.arccos()\n shape: (3,)\n Series: 'a' [f64]\n [\n 0.0\n 1.5707963267948966\n 3.141592653589793\n ]\n\n \"\"\"\n return np.arccos(self) # type: ignore\n\n def arctan(self) -> \"Series\":\n \"\"\"\n Compute the element-wise value for Trigonometric Inverse tangent.\n\n Examples\n --------\n >>> import numpy as np\n >>> s = pl.Series(\"a\", np.array((1.0, 0.0, -1)))\n >>> s.arctan()\n shape: (3,)\n Series: 'a' [f64]\n [\n 0.7853981633974483\n 0.0\n -7.853981633974483e-1\n ]\n\n \"\"\"\n return np.arctan(self) # type: ignore\n\n def apply(\n self,\n func: Callable[[Any], Any],\n return_dtype: Optional[Type[DataType]] = None,\n ) -> \"Series\":\n \"\"\"\n Apply a function over elements in this Series and return a new Series.\n\n If the function returns another datatype, the return_dtype arg should be set, otherwise the method will fail.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.apply(lambda x: x + 10)\n shape: (3,)\n Series: 'a' [i64]\n [\n 11\n 12\n 13\n ]\n\n Parameters\n ----------\n func\n function or lambda.\n return_dtype\n Output datatype. 
If none is given, the same datatype as this Series will be used.\n\n Returns\n -------\n Series\n \"\"\"\n if return_dtype is None:\n pl_return_dtype = None\n else:\n pl_return_dtype = py_type_to_dtype(return_dtype)\n return wrap_s(self._s.apply_lambda(func, pl_return_dtype))\n\n def shift(self, periods: int = 1) -> \"Series\":\n \"\"\"\n Shift the values by a given period and fill the parts that will be empty due to this operation\n with `Nones`.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.shift(periods=1)\n shape: (3,)\n Series: 'a' [i64]\n [\n null\n 1\n 2\n ]\n >>> s.shift(periods=-1)\n shape: (3,)\n Series: 'a' [i64]\n [\n 2\n 3\n null\n ]\n\n Parameters\n ----------\n periods\n Number of places to shift (may be negative).\n \"\"\"\n return wrap_s(self._s.shift(periods))\n\n def shift_and_fill(\n self, periods: int, fill_value: Union[int, \"pli.Expr\"]\n ) -> \"Series\":\n \"\"\"\n Shift the values by a given period and fill the parts that will be empty due to this operation\n with the result of the `fill_value` expression.\n\n Parameters\n ----------\n periods\n Number of places to shift (may be negative).\n fill_value\n Fill None values with the result of this expression.\n \"\"\"\n return self.to_frame().select(\n pli.col(self.name).shift_and_fill(periods, fill_value)\n )[self.name]\n\n def zip_with(self, mask: \"Series\", other: \"Series\") -> \"Series\":\n \"\"\"\n Where mask evaluates true, take values from self. Where mask evaluates false, take values from other.\n\n Parameters\n ----------\n mask\n Boolean Series.\n other\n Series of same type.\n\n Returns\n -------\n New Series\n \"\"\"\n return wrap_s(self._s.zip_with(mask._s, other._s))\n\n def rolling_min(\n self,\n window_size: int,\n weights: Optional[List[float]] = None,\n min_periods: Optional[int] = None,\n center: bool = False,\n ) -> \"Series\":\n \"\"\"\n apply a rolling min (moving min) over the values in this array.\n A window of length `window_size` will traverse the array. The values that fill this window\n will (optionally) be multiplied with the weights given by the `weight` vector. The resultingParameters\n values will be aggregated to their sum.\n\n Parameters\n ----------\n window_size\n The length of the window.\n weights\n An optional slice with the same length as the window that will be multiplied\n elementwise with the values in the window.\n min_periods\n The number of values in the window that should be non-null before computing a result.\n If None, it will be set equal to window size.\n center\n Set the labels at the center of the window\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [100, 200, 300, 400, 500])\n >>> s.rolling_min(window_size=3)\n shape: (5,)\n Series: 'a' [i64]\n [\n null\n null\n 100\n 200\n 300\n ]\n\n \"\"\"\n if min_periods is None:\n min_periods = window_size\n return wrap_s(self._s.rolling_min(window_size, weights, min_periods, center))\n\n def rolling_max(\n self,\n window_size: int,\n weights: Optional[List[float]] = None,\n min_periods: Optional[int] = None,\n center: bool = False,\n ) -> \"Series\":\n \"\"\"\n Apply a rolling max (moving max) over the values in this array.\n A window of length `window_size` will traverse the array. The values that fill this window\n will (optionally) be multiplied with the weights given by the `weight` vector. 
The resultingParameters\n values will be aggregated to their sum.\n\n Parameters\n ----------\n window_size\n The length of the window.\n weights\n An optional slice with the same length as the window that will be multiplied\n elementwise with the values in the window.\n min_periods\n The number of values in the window that should be non-null before computing a result.\n If None, it will be set equal to window size.\n center\n Set the labels at the center of the window\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [100, 200, 300, 400, 500])\n >>> s.rolling_max(window_size=2)\n shape: (5,)\n Series: 'a' [i64]\n [\n null\n 200\n 300\n 400\n 500\n ]\n\n \"\"\"\n if min_periods is None:\n min_periods = window_size\n return wrap_s(self._s.rolling_max(window_size, weights, min_periods, center))\n\n def rolling_mean(\n self,\n window_size: int,\n weights: Optional[List[float]] = None,\n min_periods: Optional[int] = None,\n center: bool = False,\n ) -> \"Series\":\n \"\"\"\n Apply a rolling mean (moving mean) over the values in this array.\n A window of length `window_size` will traverse the array. The values that fill this window\n will (optionally) be multiplied with the weights given by the `weight` vector. The resultingParameters\n values will be aggregated to their sum.\n\n Parameters\n ----------\n window_size\n The length of the window.\n weights\n An optional slice with the same length as the window that will be multiplied\n elementwise with the values in the window.\n min_periods\n The number of values in the window that should be non-null before computing a result.\n If None, it will be set equal to window size.\n center\n Set the labels at the center of the window\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [100, 200, 300, 400, 500])\n >>> s.rolling_mean(window_size=2)\n shape: (5,)\n Series: 'a' [f64]\n [\n null\n 150\n 250\n 350\n 450\n ]\n\n \"\"\"\n if min_periods is None:\n min_periods = window_size\n return wrap_s(self._s.rolling_mean(window_size, weights, min_periods, center))\n\n def rolling_sum(\n self,\n window_size: int,\n weights: Optional[List[float]] = None,\n min_periods: Optional[int] = None,\n center: bool = False,\n ) -> \"Series\":\n \"\"\"\n Apply a rolling sum (moving sum) over the values in this array.\n A window of length `window_size` will traverse the array. The values that fill this window\n will (optionally) be multiplied with the weights given by the `weight` vector. The resultingParameters\n values will be aggregated to their sum.\n\n Parameters\n ----------\n window_size\n The length of the window.\n weights\n An optional slice with the same length of the window that will be multiplied\n elementwise with the values in the window.\n min_periods\n The number of values in the window that should be non-null before computing a result.\n If None, it will be set equal to window size.\n center\n Set the labels at the center of the window\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3, 4, 5])\n >>> s.rolling_sum(window_size=2)\n shape: (5,)\n Series: 'a' [i64]\n [\n null\n 3\n 5\n 7\n 9\n ]\n\n \"\"\"\n if min_periods is None:\n min_periods = window_size\n return wrap_s(self._s.rolling_sum(window_size, weights, min_periods, center))\n\n def rolling_std(\n self,\n window_size: int,\n weights: Optional[List[float]] = None,\n min_periods: Optional[int] = None,\n center: bool = False,\n ) -> \"Series\":\n \"\"\"\n Compute a rolling std dev\n\n A window of length `window_size` will traverse the array. 
The values that fill this window\n will (optionally) be multiplied with the weights given by the `weight` vector. The resulting\n values will be aggregated to their sum.\n\n Parameters\n ----------\n window_size\n The length of the window.\n weights\n An optional slice with the same length as the window that will be multiplied\n elementwise with the values in the window.\n min_periods\n The number of values in the window that should be non-null before computing a result.\n If None, it will be set equal to window size.\n center\n Set the labels at the center of the window\n\n \"\"\"\n if min_periods is None:\n min_periods = window_size\n return wrap_s(self._s.rolling_std(window_size, weights, min_periods, center))\n\n def rolling_var(\n self,\n window_size: int,\n weights: Optional[List[float]] = None,\n min_periods: Optional[int] = None,\n center: bool = False,\n ) -> \"Series\":\n \"\"\"\n Compute a rolling variance.\n\n A window of length `window_size` will traverse the array. The values that fill this window\n will (optionally) be multiplied with the weights given by the `weight` vector. The resulting\n values will be aggregated to their sum.\n\n Parameters\n ----------\n window_size\n The length of the window.\n weights\n An optional slice with the same length as the window that will be multiplied\n elementwise with the values in the window.\n min_periods\n The number of values in the window that should be non-null before computing a result.\n If None, it will be set equal to window size.\n center\n Set the labels at the center of the window\n\n \"\"\"\n if min_periods is None:\n min_periods = window_size\n return wrap_s(self._s.rolling_var(window_size, weights, min_periods, center))\n\n def rolling_apply(\n self, window_size: int, function: Callable[[\"pli.Series\"], Any]\n ) -> \"pli.Series\":\n \"\"\"\n Allows a custom rolling window function.\n Prefer the specific rolling window functions over this one, as they are faster.\n Prefer:\n\n * rolling_min\n * rolling_max\n * rolling_mean\n * rolling_sum\n\n Parameters\n ----------\n window_size\n Size of the rolling window\n function\n Aggregation function\n\n Examples\n --------\n >>> s = pl.Series(\"A\", [1.0, 2.0, 9.0, 2.0, 13.0])\n >>> s.rolling_apply(window_size=3, function=lambda s: s.std())\n shape: (5,)\n Series: 'A' [f64]\n [\n null\n null\n 4.358898943540674\n 4.041451884327381\n 5.5677643628300215\n ]\n\n \"\"\"\n return self.to_frame().select(\n pli.col(self.name).rolling_apply(window_size, function)\n )[self.name]\n\n def rolling_median(self, window_size: int) -> \"Series\":\n \"\"\"\n Compute a rolling median\n\n Parameters\n ----------\n window_size\n Size of the rolling window\n \"\"\"\n return self.to_frame().select(pli.col(self.name).rolling_median(window_size))[\n self.name\n ]\n\n def rolling_quantile(\n self, window_size: int, quantile: float, interpolation: str = \"nearest\"\n ) -> \"Series\":\n \"\"\"\n Compute a rolling quantile\n\n Parameters\n ----------\n window_size\n Size of the rolling window\n quantile\n quantile to compute\n interpolation\n interpolation type, options: ['nearest', 'higher', 'lower', 'midpoint', 'linear']\n \"\"\"\n return self.to_frame().select(\n pli.col(self.name).rolling_quantile(window_size, quantile, interpolation)\n )[self.name]\n\n def rolling_skew(self, window_size: int, bias: bool = True) -> \"Series\":\n \"\"\"\n Compute a rolling skew\n\n Parameters\n ----------\n window_size\n Size of the rolling window\n bias\n If False, then the calculations are corrected 
for statistical bias.\n \"\"\"\n return self.to_frame().select(\n pli.col(self.name).rolling_skew(window_size, bias)\n )[self.name]\n\n def sample(\n self,\n n: Optional[int] = None,\n frac: Optional[float] = None,\n with_replacement: bool = False,\n seed: int = 0,\n ) -> \"Series\":\n \"\"\"\n Sample from this Series by setting either `n` or `frac`.\n\n Parameters\n ----------\n n\n Number of samples < self.len().\n frac\n Fraction between 0.0 and 1.0 .\n with_replacement\n sample with replacement.\n seed\n Initialization seed\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3, 4, 5])\n >>> s.sample(2) # doctest: +IGNORE_RESULT\n shape: (2,)\n Series: 'a' [i64]\n [\n 1\n 5\n ]\n\n \"\"\"\n if n is not None:\n return wrap_s(self._s.sample_n(n, with_replacement, seed))\n return wrap_s(self._s.sample_frac(frac, with_replacement, seed))\n\n def peak_max(self) -> \"Series\":\n \"\"\"\n Get a boolean mask of the local maximum peaks.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3, 4, 5])\n >>> s.peak_max()\n shape: (5,)\n Series: '' [bool]\n [\n false\n false\n false\n false\n true\n ]\n\n \"\"\"\n return wrap_s(self._s.peak_max())\n\n def peak_min(self) -> \"Series\":\n \"\"\"\n Get a boolean mask of the local minimum peaks.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [4, 1, 3, 2, 5])\n >>> s.peak_min()\n shape: (5,)\n Series: '' [bool]\n [\n false\n true\n false\n true\n false\n ]\n\n \"\"\"\n return wrap_s(self._s.peak_min())\n\n def n_unique(self) -> int:\n \"\"\"\n Count the number of unique values in this Series.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 2, 3])\n >>> s.n_unique()\n 3\n\n \"\"\"\n return self._s.n_unique()\n\n @overload\n def shrink_to_fit(self, in_place: Literal[False] = ...) -> \"Series\":\n ...\n\n @overload\n def shrink_to_fit(self, in_place: Literal[True]) -> None:\n ...\n\n @overload\n def shrink_to_fit(self, in_place: bool = False) -> Optional[\"Series\"]:\n ...\n\n def shrink_to_fit(self, in_place: bool = False) -> Optional[\"Series\"]:\n \"\"\"\n Shrink memory usage of this Series to fit the exact capacity needed to hold the data.\n \"\"\"\n if in_place:\n self._s.shrink_to_fit()\n return None\n else:\n series = self.clone()\n series._s.shrink_to_fit()\n return series\n\n def hash(self, k0: int = 0, k1: int = 1, k2: int = 2, k3: int = 3) -> \"pli.Series\":\n \"\"\"\n Hash the Series.\n\n The hash value is of type `UInt64`\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, 3])\n >>> s.hash(k0=42) # doctest: +IGNORE_RESULT\n shape: (3,)\n Series: 'a' [u64]\n [\n 18040498172617206516\n 5352755651785478209\n 3939059409923356085\n ]\n\n Parameters\n ----------\n k0\n seed parameter\n k1\n seed parameter\n k2\n seed parameter\n k3\n seed parameter\n \"\"\"\n return wrap_s(self._s.hash(k0, k1, k2, k3))\n\n def reinterpret(self, signed: bool = True) -> \"Series\":\n \"\"\"\n Reinterpret the underlying bits as a signed/unsigned integer.\n This operation is only allowed for 64bit integers. For lower bits integers,\n you can safely use that cast operation.\n\n Parameters\n ----------\n signed\n True -> pl.Int64\n False -> pl.UInt64\n \"\"\"\n return wrap_s(self._s.reinterpret(signed))\n\n def interpolate(self) -> \"Series\":\n \"\"\"\n Interpolate intermediate values. 
The interpolation method is linear.\n\n Examples\n --------\n >>> s = pl.Series(\"a\", [1, 2, None, None, 5])\n >>> s.interpolate()\n shape: (5,)\n Series: 'a' [i64]\n [\n 1\n 2\n 3\n 4\n 5\n ]\n\n \"\"\"\n return wrap_s(self._s.interpolate())\n\n def abs(self) -> \"Series\":\n \"\"\"\n Take absolute values\n \"\"\"\n return wrap_s(self._s.abs())\n\n def rank(self, method: str = \"average\", reverse: bool = False) -> \"Series\":\n \"\"\"\n Assign ranks to data, dealing with ties appropriately.\n\n Parameters\n ----------\n method\n {'average', 'min', 'max', 'dense', 'ordinal', 'random'}, optional\n The method used to assign ranks to tied elements.\n The following methods are available (default is 'average'):\n - 'average': The average of the ranks that would have been assigned to\n all the tied values is assigned to each value.\n - 'min': The minimum of the ranks that would have been assigned to all\n the tied values is assigned to each value. (This is also\n referred to as \"competition\" ranking.)\n - 'max': The maximum of the ranks that would have been assigned to all\n the tied values is assigned to each value.\n - 'dense': Like 'min', but the rank of the next highest element is\n assigned the rank immediately after those assigned to the tied\n elements.\n - 'ordinal': All values are given a distinct rank, corresponding to\n the order that the values occur in `a`.\n - 'random': Like 'ordinal', but the rank for ties is not dependent\n on the order that the values occur in `a`.\n reverse\n reverse the operation\n \"\"\"\n return wrap_s(self._s.rank(method, reverse))\n\n def diff(self, n: int = 1, null_behavior: str = \"ignore\") -> \"Series\":\n \"\"\"\n Calculate the n-th discrete difference.\n\n Parameters\n ----------\n n\n number of slots to shift\n null_behavior\n {'ignore', 'drop'}\n \"\"\"\n return wrap_s(self._s.diff(n, null_behavior))\n\n def skew(self, bias: bool = True) -> Optional[float]:\n r\"\"\"Compute the sample skewness of a data set.\n For normally distributed data, the skewness should be about zero. For\n unimodal continuous distributions, a skewness value greater than zero means\n that there is more weight in the right tail of the distribution. The\n function `skewtest` can be used to determine if the skewness value\n is close enough to zero, statistically speaking.\n\n\n See scipy.stats for more information.\n\n Parameters\n ----------\n bias : bool, optional\n If False, then the calculations are corrected for statistical bias.\n\n Notes\n -----\n The sample skewness is computed as the Fisher-Pearson coefficient\n of skewness, i.e.\n\n .. math:: g_1=\\frac{m_3}{m_2^{3/2}}\n\n where\n\n .. math:: m_i=\\frac{1}{N}\\sum_{n=1}^N(x[n]-\\bar{x})^i\n\n is the biased sample :math:`i\\texttt{th}` central moment, and\n :math:`\\bar{x}` is\n the sample mean. If ``bias`` is False, the calculations are\n corrected for bias and the value computed is the adjusted\n Fisher-Pearson standardized moment coefficient, i.e.\n\n .. math:: G_1=\\frac{k_3}{k_2^{3/2}}=\\frac{\\sqrt{N(N-1)}}{N-2}\\frac{m_3}{m_2^{3/2}}\n\n \"\"\"\n return self._s.skew(bias)\n\n def kurtosis(self, fisher: bool = True, bias: bool = True) -> Optional[float]:\n \"\"\"Compute the kurtosis (Fisher or Pearson) of a dataset.\n Kurtosis is the fourth central moment divided by the square of the\n variance. 
If Fisher's definition is used, then 3.0 is subtracted from\n the result to give 0.0 for a normal distribution.\n If bias is False then the kurtosis is calculated using k statistics to\n eliminate bias coming from biased moment estimators\n\n See scipy.stats for more information\n\n Parameters\n ----------\n fisher : bool, optional\n If True, Fisher's definition is used (normal ==> 0.0). If False,\n Pearson's definition is used (normal ==> 3.0).\n bias : bool, optional\n If False, then the calculations are corrected for statistical bias.\n \"\"\"\n return self._s.kurtosis(fisher, bias)\n\n def clip(self, min_val: Union[int, float], max_val: Union[int, float]) -> \"Series\":\n \"\"\"\n Clip (limit) the values in an array.\n\n Parameters\n ----------\n min_val, max_val\n Minimum and maximum value.\n \"\"\"\n return self.to_frame().select(pli.col(self.name).clip(min_val, max_val))[\n self.name\n ]\n\n def str_concat(self, delimiter: str = \"-\") -> \"Series\":\n \"\"\"\n Vertically concat the values in the Series to a single string value.\n\n Returns\n -------\n Series of dtype Utf8\n\n Examples\n --------\n >>> pl.Series([1, None, 2]).str_concat(\"-\")[0]\n '1-null-2'\n\n \"\"\"\n return self.to_frame().select(pli.col(self.name).str_concat(delimiter))[\n self.name\n ]\n\n def reshape(self, dims: Tuple[int, ...]) -> \"Series\":\n \"\"\"\n Reshape this Series to a flat series, shape: (len,)\n or a List series, shape: (rows, cols)\n\n if a -1 is used in any of the dimensions, that dimension is inferred.\n\n Parameters\n ----------\n dims\n Tuple of the dimension sizes\n\n Returns\n -------\n Series\n \"\"\"\n return wrap_s(self._s.reshape(dims))\n\n def shuffle(self, seed: int = 0) -> \"Series\":\n \"\"\"\n Shuffle the contents of this Series.\n\n Parameters\n ----------\n seed\n Seed initialization\n \"\"\"\n return wrap_s(self._s.shuffle(seed))\n\n def ewm_mean(\n self,\n com: Optional[float] = None,\n span: Optional[float] = None,\n half_life: Optional[float] = None,\n alpha: Optional[float] = None,\n adjust: bool = True,\n min_periods: int = 1,\n ) -> \"Series\":\n r\"\"\"\n Exponential moving average.\n\n Parameters\n ----------\n com\n Specify decay in terms of center of mass, :math:`alpha = 1/(1 + com) \\;for\\; com >= 0`.\n span\n Specify decay in terms of span, :math:`alpha = 2/(span + 1) \\;for\\; span >= 1`\n half_life\n Specify decay in terms of half-life, :math:`alpha = 1 - exp(-ln(2) / halflife) \\;for\\; halflife > 0`\n alpha\n Specify smoothing factor alpha directly, :math:`0 < alpha < 1`.\n adjust\n Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings\n\n - When adjust = True the EW function is calculated using weights :math:`w_i = (1 - alpha)^i`\n - When adjust = False the EW function is calculated recursively.\n min_periods\n Minimum number of observations in window required to have a value (otherwise result is Null).\n\n \"\"\"\n return (\n self.to_frame()\n .select(\n pli.col(self.name).ewm_mean(\n com, span, half_life, alpha, adjust, min_periods\n )\n )\n .to_series()\n )\n\n def ewm_std(\n self,\n com: Optional[float] = None,\n span: Optional[float] = None,\n half_life: Optional[float] = None,\n alpha: Optional[float] = None,\n adjust: bool = True,\n min_periods: int = 1,\n ) -> \"Series\":\n r\"\"\"\n Exponential moving standard deviation.\n\n Parameters\n ----------\n com\n Specify decay in terms of center of mass, :math:`alpha = 1/(1 + com) \\;for\\; com >= 0`.\n span\n Specify decay in terms of span, :math:`alpha = 
2/(span + 1) \\;for\\; span >= 1`\n half_life\n Specify decay in terms of half-life, :math:`alpha = 1 - exp(-ln(2) / halflife) \\;for\\; halflife > 0`\n alpha\n Specify smoothing factor alpha directly, :math:`0 < alpha < 1`.\n adjust\n Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings\n\n - When adjust = True the EW function is calculated using weights :math:`w_i = (1 - alpha)^i`\n - When adjust = False the EW function is calculated recursively.\n min_periods\n Minimum number of observations in window required to have a value (otherwise result is Null).\n\n \"\"\"\n return (\n self.to_frame()\n .select(\n pli.col(self.name).ewm_std(\n com, span, half_life, alpha, adjust, min_periods\n )\n )\n .to_series()\n )\n\n def ewm_var(\n self,\n com: Optional[float] = None,\n span: Optional[float] = None,\n half_life: Optional[float] = None,\n alpha: Optional[float] = None,\n adjust: bool = True,\n min_periods: int = 1,\n ) -> \"Series\":\n r\"\"\"\n Exponential moving variance.\n\n Parameters\n ----------\n com\n Specify decay in terms of center of mass, :math:`alpha = 1/(1 + com) \\;for\\; com >= 0`.\n span\n Specify decay in terms of span, :math:`alpha = 2/(span + 1) \\;for\\; span >= 1`\n half_life\n Specify decay in terms of half-life, :math:`alpha = 1 - exp(-ln(2) / halflife) \\;for\\; halflife > 0`\n alpha\n Specify smoothing factor alpha directly, :math:`0 < alpha < 1`.\n adjust\n Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings\n\n - When adjust = True the EW function is calculated using weights :math:`w_i = (1 - alpha)^i`\n - When adjust = False the EW function is calculated recursively.\n min_periods\n Minimum number of observations in window required to have a value (otherwise result is Null).\n\n \"\"\"\n return (\n self.to_frame()\n .select(\n pli.col(self.name).ewm_var(\n com, span, half_life, alpha, adjust, min_periods\n )\n )\n .to_series()\n )\n\n def extend(self, value: Optional[Union[int, float, str, bool]], n: int) -> \"Series\":\n \"\"\"\n Extend the Series with a given number of values.\n\n Parameters\n ----------\n value\n The value to extend the Series with. This value may be None to fill with nulls.\n n\n The number of values to extend.\n \"\"\"\n return wrap_s(self._s.extend(value, n))\n\n # Below are the namespaces defined. Do not move these up in the definition of Series, as it confuses mypy between the\n # type annotation `str` and the namespace \"str\"\n\n @property\n def dt(self) -> \"DateTimeNameSpace\":\n \"\"\"\n Create an object namespace of all datetime related methods.\n \"\"\"\n return DateTimeNameSpace(self)\n\n @property\n def arr(self) -> \"ListNameSpace\":\n \"\"\"\n Create an object namespace of all list related methods.\n \"\"\"\n return ListNameSpace(self)\n\n @property\n def str(self) -> \"StringNameSpace\":\n \"\"\"\n Create an object namespace of all string related methods.\n \"\"\"\n return StringNameSpace(self)\n\n\nclass StringNameSpace:\n \"\"\"\n Series.str namespace.\n \"\"\"\n\n def __init__(self, series: \"Series\"):\n self._s = series._s\n\n def strptime(self, datatype: Type[DataType], fmt: Optional[str] = None) -> Series:\n \"\"\"\n Parse a Series of dtype Utf8 to a Date/Datetime Series.\n\n Parameters\n ----------\n datatype\n Date or Datetime.\n fmt\n formatting syntax. 
[Read more](https://docs.rs/chrono/0.4.19/chrono/format/strftime/index.html)\n\n Returns\n -------\n A Date/Datetime Series\n \"\"\"\n if datatype == Date:\n return wrap_s(self._s.str_parse_date(fmt))\n if datatype == Datetime:\n return wrap_s(self._s.str_parse_datetime(fmt))\n raise NotImplementedError # pragma: no cover\n\n def lengths(self) -> Series:\n \"\"\"\n Get length of the string values in the Series.\n\n Returns\n -------\n Series[u32]\n \"\"\"\n return wrap_s(self._s.str_lengths())\n\n def contains(self, pattern: str) -> Series:\n \"\"\"\n Check if strings in Series contain regex pattern.\n\n Parameters\n ----------\n pattern\n A valid regex pattern.\n\n Returns\n -------\n Boolean mask\n \"\"\"\n return wrap_s(self._s.str_contains(pattern))\n\n def json_path_match(self, json_path: str) -> Series:\n \"\"\"\n Extract the first match of json string with provided JSONPath expression.\n Throws errors if it encounters invalid JSON strings.\n All return values will be cast to Utf8 regardless of the original value.\n Documentation on JSONPath standard: https://goessner.net/articles/JsonPath/\n\n Parameters\n ----------\n json_path\n A valid JSON path query string\n\n Returns\n -------\n Utf8 array. Contains null if the original value is null or the json_path returns nothing.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\"json_val\": ['{\"a\":\"1\"}', None, '{\"a\":2}', '{\"a\":2.1}', '{\"a\":true}']}\n ... )\n >>> df.select(pl.col(\"json_val\").str.json_path_match(\"$.a\"))[:, 0]\n shape: (5,)\n Series: 'json_val' [str]\n [\n \"1\"\n null\n \"2\"\n \"2.1\"\n \"true\"\n ]\n\n \"\"\"\n return wrap_s(self._s.str_json_path_match(json_path))\n\n def extract(self, pattern: str, group_index: int = 1) -> Series:\n r\"\"\"\n Extract the target capture group from provided patterns.\n\n Parameters\n ----------\n pattern\n A valid regex pattern\n group_index\n Index of the targeted capture group.\n Group 0 means the whole pattern, the first group begins at index 1.\n Defaults to the first capture group.\n\n Returns\n -------\n Utf8 array. Contains null if the original value is null or the regex captures nothing.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"a\": [\n ... \"http://vote.com/ballon_dor?candidate=messi&ref=polars\",\n ... \"http://vote.com/ballon_dor?candidat=jorginho&ref=polars\",\n ... \"http://vote.com/ballon_dor?candidate=ronaldo&ref=polars\",\n ... ]\n ... }\n ... 
)\n >>> df.select([pl.col(\"a\").str.extract(r\"candidate=(\\w+)\", 1)])\n shape: (3, 1)\n ┌─────────┐\n │ a │\n │ --- │\n │ str │\n ╞═════════╡\n │ messi │\n ├╌╌╌╌╌╌╌╌╌┤\n │ null │\n ├╌╌╌╌╌╌╌╌╌┤\n │ ronaldo │\n └─────────┘\n\n \"\"\"\n return wrap_s(self._s.str_extract(pattern, group_index))\n\n def replace(self, pattern: str, value: str) -> Series:\n \"\"\"\n Replace first regex match with a string value.\n\n Parameters\n ----------\n pattern\n A valid regex pattern.\n value\n Substring to replace.\n \"\"\"\n return wrap_s(self._s.str_replace(pattern, value))\n\n def replace_all(self, pattern: str, value: str) -> Series:\n \"\"\"\n Replace all regex matches with a string value.\n\n Parameters\n ----------\n pattern\n A valid regex pattern.\n value\n Substring to replace.\n \"\"\"\n return wrap_s(self._s.str_replace_all(pattern, value))\n\n def to_lowercase(self) -> Series:\n \"\"\"\n Modify the strings to their lowercase equivalent.\n \"\"\"\n return wrap_s(self._s.str_to_lowercase())\n\n def to_uppercase(self) -> Series:\n \"\"\"\n Modify the strings to their uppercase equivalent.\n \"\"\"\n return wrap_s(self._s.str_to_uppercase())\n\n def rstrip(self) -> Series:\n \"\"\"\n Remove trailing whitespace.\n \"\"\"\n return self.replace(r\"[ \\t]+$\", \"\")\n\n def lstrip(self) -> Series:\n \"\"\"\n Remove leading whitespace.\n \"\"\"\n return self.replace(r\"^\\s*\", \"\")\n\n def slice(self, start: int, length: Optional[int] = None) -> Series:\n \"\"\"\n Create subslices of the string values of a Utf8 Series.\n\n Parameters\n ----------\n start\n Start of the slice (negative indexing may be used).\n length\n Optional length of the slice.\n\n Returns\n -------\n Series of Utf8 type\n \"\"\"\n return wrap_s(self._s.str_slice(start, length))\n\n\nclass ListNameSpace:\n \"\"\"\n Series.arr namespace.\n \"\"\"\n\n def __init__(self, series: Series):\n self._s = series._s\n\n def lengths(self) -> Series:\n \"\"\"\n Get the length of the arrays as UInt32.\n \"\"\"\n return wrap_s(self._s.arr_lengths())\n\n def sum(self) -> Series:\n \"\"\"\n Sum all the arrays in the list\n \"\"\"\n return pli.select(pli.lit(wrap_s(self._s)).arr.sum()).to_series()\n\n def max(self) -> Series:\n \"\"\"\n Compute the max value of the arrays in the list\n \"\"\"\n return pli.select(pli.lit(wrap_s(self._s)).arr.max()).to_series()\n\n def min(self) -> Series:\n \"\"\"\n Compute the min value of the arrays in the list\n \"\"\"\n return pli.select(pli.lit(wrap_s(self._s)).arr.min()).to_series()\n\n def mean(self) -> Series:\n \"\"\"\n Compute the mean value of the arrays in the list\n \"\"\"\n return pli.select(pli.lit(wrap_s(self._s)).arr.mean()).to_series()\n\n def sort(self, reverse: bool = False) -> Series:\n \"\"\"\n Sort the arrays in the list\n \"\"\"\n return pli.select(pli.lit(wrap_s(self._s)).arr.sort(reverse)).to_series()\n\n def reverse(self) -> Series:\n \"\"\"\n Reverse the arrays in the list\n \"\"\"\n return pli.select(pli.lit(wrap_s(self._s)).arr.reverse()).to_series()\n\n def unique(self) -> Series:\n \"\"\"\n Get the unique/distinct values in the list\n \"\"\"\n return pli.select(pli.lit(wrap_s(self._s)).arr.unique()).to_series()\n\n def concat(self, other: Union[List[Series], Series]) -> \"Series\":\n \"\"\"\n Concat the arrays in a Series dtype List in linear time.\n\n Parameters\n ----------\n other\n Columns to concat into a List Series\n \"\"\"\n if not isinstance(other, list):\n other = [other]\n s = wrap_s(self._s)\n names = [s.name for s in other]\n names.insert(0, s.name)\n df = 
pli.DataFrame(other)\n df.insert_at_idx(0, s)\n return df.select(pli.concat_list(names))[s.name] # type: ignore\n\n def get(self, index: int) -> \"Series\":\n \"\"\"\n Get the value by index in the sublists.\n So index `0` would return the first item of every sublist\n and index `-1` would return the last item of every sublist\n if an index is out of bounds, it will return a `None`.\n\n Parameters\n ----------\n index\n Index to return per sublist\n \"\"\"\n return pli.select(pli.lit(wrap_s(self._s)).arr.get(index)).to_series()\n\n def first(self) -> \"Series\":\n \"\"\"\n Get the first value of the sublists.\n \"\"\"\n return self.get(0)\n\n def last(self) -> \"Series\":\n \"\"\"\n Get the last value of the sublists.\n \"\"\"\n return self.get(-1)\n\n def contains(self, item: Union[float, str, bool, int, date, datetime]) -> \"Series\":\n \"\"\"\n Check if sublists contain the given item.\n\n Parameters\n ----------\n item\n Item that will be checked for membership\n\n Returns\n -------\n Boolean mask\n \"\"\"\n s = pli.Series(\"\", [item])\n s_list = wrap_s(self._s)\n out = s.is_in(s_list)\n return out.rename(s_list.name)\n\n\nclass DateTimeNameSpace:\n \"\"\"\n Series.dt namespace.\n \"\"\"\n\n def __init__(self, series: Series):\n self._s = series._s\n\n def truncate(\n self,\n every: Union[str, timedelta],\n offset: Optional[Union[str, timedelta]] = None,\n ) -> Series:\n \"\"\"\n .. warning::\n This API is experimental and may change without it being considered a breaking change.\n\n Divide the date/ datetime range into buckets.\n Data must be sorted, if not the output does not make sense.\n\n The `every` and `offset` argument are created with the\n the following string language:\n\n 1ns # 1 nanosecond\n 1us # 1 microsecond\n 1ms # 1 millisecond\n 1s # 1 second\n 1m # 1 minute\n 1h # 1 hour\n 1d # 1 day\n 1w # 1 week\n 1mo # 1 calendar month\n 1y # 1 calendar year\n\n 3d12h4m25s # 3 days, 12 hours, 4 minutes, and 25 seconds\n\n Parameters\n ----------\n every\n Every interval start and period length\n offset\n Offset the window\n\n Returns\n -------\n Date/Datetime series\n\n Examples\n --------\n\n >>> from datetime import timedelta, datetime\n >>> start = datetime(2001, 1, 1)\n >>> stop = datetime(2001, 1, 2)\n >>> s = pl.date_range(start, stop, timedelta(minutes=30), name=\"dates\")\n >>> s\n shape: (49,)\n Series: 'dates' [datetime]\n [\n 2001-01-01 00:00:00\n 2001-01-01 00:30:00\n 2001-01-01 01:00:00\n 2001-01-01 01:30:00\n 2001-01-01 02:00:00\n 2001-01-01 02:30:00\n 2001-01-01 03:00:00\n 2001-01-01 03:30:00\n 2001-01-01 04:00:00\n 2001-01-01 04:30:00\n 2001-01-01 05:00:00\n 2001-01-01 05:30:00\n ...\n 2001-01-01 18:30:00\n 2001-01-01 19:00:00\n 2001-01-01 19:30:00\n 2001-01-01 20:00:00\n 2001-01-01 20:30:00\n 2001-01-01 21:00:00\n 2001-01-01 21:30:00\n 2001-01-01 22:00:00\n 2001-01-01 22:30:00\n 2001-01-01 23:00:00\n 2001-01-01 23:30:00\n 2001-01-02 00:00:00\n ]\n >>> s.dt.truncate(\"1h\")\n shape: (49,)\n Series: 'dates' [datetime]\n [\n 2001-01-01 00:00:00\n 2001-01-01 00:00:00\n 2001-01-01 01:00:00\n 2001-01-01 01:00:00\n 2001-01-01 02:00:00\n 2001-01-01 02:00:00\n 2001-01-01 03:00:00\n 2001-01-01 03:00:00\n 2001-01-01 04:00:00\n 2001-01-01 04:00:00\n 2001-01-01 05:00:00\n 2001-01-01 05:00:00\n ...\n 2001-01-01 18:00:00\n 2001-01-01 19:00:00\n 2001-01-01 19:00:00\n 2001-01-01 20:00:00\n 2001-01-01 20:00:00\n 2001-01-01 21:00:00\n 2001-01-01 21:00:00\n 2001-01-01 22:00:00\n 2001-01-01 22:00:00\n 2001-01-01 23:00:00\n 2001-01-01 23:00:00\n 2001-01-02 00:00:00\n ]\n >>> assert 
s.dt.truncate(\"1h\") == s.dt.truncate(timedelta(hours=1))\n\n \"\"\"\n return pli.select(\n pli.lit(wrap_s(self._s)).dt.truncate(every, offset)\n ).to_series()\n\n def __getitem__(self, item: int) -> Union[date, datetime]:\n s = wrap_s(self._s)\n out = wrap_s(self._s)[item]\n return _to_python_datetime(out, s.dtype)\n\n def strftime(self, fmt: str) -> Series:\n \"\"\"\n Format Date/datetime with a formatting rule: See `chrono strftime/strptime <https://docs.rs/chrono/0.4.19/chrono/format/strftime/index.html>`_.\n\n Returns\n -------\n Utf8 Series\n \"\"\"\n return wrap_s(self._s.strftime(fmt))\n\n def year(self) -> Series:\n \"\"\"\n Extract the year from the underlying date representation.\n Can be performed on Date and Datetime.\n\n Returns the year number in the calendar date.\n\n Returns\n -------\n Year as Int32\n \"\"\"\n return wrap_s(self._s.year())\n\n def month(self) -> Series:\n \"\"\"\n Extract the month from the underlying date representation.\n Can be performed on Date and Datetime\n\n Returns the month number starting from 1.\n The return value ranges from 1 to 12.\n\n Returns\n -------\n Month as UInt32\n \"\"\"\n return wrap_s(self._s.month())\n\n def week(self) -> Series:\n \"\"\"\n Extract the week from the underlying date representation.\n Can be performed on Date and Datetime\n\n Returns the ISO week number starting from 1.\n The return value ranges from 1 to 53. (The last week of year differs by years.)\n\n Returns\n -------\n Week number as UInt32\n \"\"\"\n return wrap_s(self._s.week())\n\n def weekday(self) -> Series:\n \"\"\"\n Extract the week day from the underlying date representation.\n Can be performed on Date and Datetime.\n\n Returns the weekday number where monday = 0 and sunday = 6\n\n Returns\n -------\n Week day as UInt32\n \"\"\"\n return wrap_s(self._s.weekday())\n\n def day(self) -> Series:\n \"\"\"\n Extract the day from the underlying date representation.\n Can be performed on Date and Datetime.\n\n Returns the day of month starting from 1.\n The return value ranges from 1 to 31. (The last day of month differs by months.)\n\n Returns\n -------\n Day as UInt32\n \"\"\"\n return wrap_s(self._s.day())\n\n def ordinal_day(self) -> Series:\n \"\"\"\n Extract ordinal day from underlying date representation.\n Can be performed on Date and Datetime.\n\n Returns the day of year starting from 1.\n The return value ranges from 1 to 366. 
(The last day of year differs by years.)\n\n Returns\n -------\n Day as UInt32\n \"\"\"\n return wrap_s(self._s.ordinal_day())\n\n def hour(self) -> Series:\n \"\"\"\n Extract the hour from the underlying DateTime representation.\n Can be performed on Datetime.\n\n Returns the hour number from 0 to 23.\n\n Returns\n -------\n Hour as UInt32\n \"\"\"\n return wrap_s(self._s.hour())\n\n def minute(self) -> Series:\n \"\"\"\n Extract the minutes from the underlying DateTime representation.\n Can be performed on Datetime.\n\n Returns the minute number from 0 to 59.\n\n Returns\n -------\n Minute as UInt32\n \"\"\"\n return wrap_s(self._s.minute())\n\n def second(self) -> Series:\n \"\"\"\n Extract the seconds from the underlying DateTime representation.\n Can be performed on Datetime.\n\n Returns the second number from 0 to 59.\n\n Returns\n -------\n Second as UInt32\n \"\"\"\n return wrap_s(self._s.second())\n\n def nanosecond(self) -> Series:\n \"\"\"\n Extract the nanoseconds from the underlying DateTime representation.\n Can be performed on Datetime.\n\n Returns the number of nanoseconds since the whole non-leap second.\n The range from 1,000,000,000 to 1,999,999,999 represents the leap second.\n\n Returns\n -------\n Nanosecond as UInt32\n \"\"\"\n return wrap_s(self._s.nanosecond())\n\n def timestamp(self) -> Series:\n \"\"\"\n Return timestamp in ms as Int64 type.\n \"\"\"\n return wrap_s(self._s.timestamp())\n\n def to_python_datetime(self) -> Series:\n \"\"\"\n Go from Date/Datetime to python DateTime objects\n \"\"\"\n return (self.timestamp() / 1000).apply(\n lambda ts: datetime.utcfromtimestamp(ts), Object\n )\n\n def min(self) -> Union[date, datetime]:\n \"\"\"\n Return minimum as python DateTime\n \"\"\"\n s = wrap_s(self._s)\n out = s.min()\n return _to_python_datetime(out, s.dtype)\n\n def max(self) -> Union[date, datetime]:\n \"\"\"\n Return maximum as python DateTime\n \"\"\"\n s = wrap_s(self._s)\n out = s.max()\n return _to_python_datetime(out, s.dtype)\n\n def median(self) -> Union[date, datetime]:\n \"\"\"\n Return median as python DateTime\n \"\"\"\n s = wrap_s(self._s)\n out = int(s.median())\n return _to_python_datetime(out, s.dtype)\n\n def mean(self) -> Union[date, datetime]:\n \"\"\"\n Return mean as python DateTime\n \"\"\"\n s = wrap_s(self._s)\n out = int(s.mean())\n return _to_python_datetime(out, s.dtype)\n\n def epoch_days(self) -> Series:\n \"\"\"\n Get the number of days since the unix EPOCH.\n If the date is before the unix EPOCH, the number of days will be negative.\n\n Returns\n -------\n Days as Int32\n \"\"\"\n return wrap_s(self._s).cast(Date).cast(Int32)\n\n def epoch_milliseconds(self) -> Series:\n \"\"\"\n Get the number of milliseconds since the unix EPOCH\n If the date is before the unix EPOCH, the number of milliseconds will be negative.\n\n Returns\n -------\n Milliseconds as Int64\n \"\"\"\n return self.timestamp()\n\n def epoch_seconds(self) -> Series:\n \"\"\"\n Get the number of seconds since the unix EPOCH\n If the date is before the unix EPOCH, the number of seconds will be negative.\n\n Returns\n -------\n Seconds as Int64\n \"\"\"\n return wrap_s(self._s.dt_epoch_seconds())\n\n\ndef _to_python_datetime(\n value: Union[int, float], dtype: Type[DataType]\n) -> Union[date, datetime]:\n if dtype == Date:\n # days to seconds\n # important to create from utc. 
Not doing this leads\n # to inconsistencies dependent on the timezone you are in.\n return datetime.utcfromtimestamp(value * 3600 * 24).date()\n elif dtype == Datetime:\n # nanoseconds to seconds\n return datetime.utcfromtimestamp(value / 1_000_000_000)\n else:\n raise NotImplementedError # pragma: no cover\n\n\nclass SeriesIter:\n \"\"\"\n Utility class that allows slow iteration over a `Series`.\n \"\"\"\n\n def __init__(self, length: int, s: Series):\n self.len = length\n self.i = 0\n self.s = s\n\n def __iter__(self) -> \"SeriesIter\":\n return self\n\n def __next__(self) -> Any:\n if self.i < self.len:\n i = self.i\n self.i += 1\n return self.s[i]\n else:\n raise StopIteration\n"
] |
[
[
"numpy.log",
"numpy.arctan",
"numpy.power",
"numpy.arcsin",
"numpy.ascontiguousarray",
"numpy.cos",
"numpy.arccos",
"numpy.sin",
"numpy.tan",
"numpy.argwhere",
"numpy.log10",
"numpy.array",
"numpy.exp"
]
] |
dumpmemory/deduplicate-text-datasets
|
[
"ad86c7f65ac626581fe3a4277106309bc6b50c23"
] |
[
"scripts/make_suffix_array.py"
] |
[
"# Copyright 2021 Google LLC\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport time\nimport sys\nimport multiprocessing as mp\nimport numpy as np\n\ndata_size = os.path.getsize(sys.argv[1])\n\nHACK = 100000\n\n\nstarted = []\n\nif data_size > 10e9:\n total_jobs = 100\n jobs_at_once = 20\nelif data_size > 1e9:\n total_jobs = 96\n jobs_at_once = 96\nelif data_size > 10e6:\n total_jobs = 4\n jobs_at_once = 4\nelse:\n total_jobs = 1\n jobs_at_once = 1\n\nS = data_size//total_jobs\n\n\nfor jobstart in range(0, total_jobs, jobs_at_once):\n wait = []\n for i in range(jobstart,jobstart+jobs_at_once):\n s, e = i*S, min((i+1)*S+HACK, data_size)\n cmd = \"./target/debug/dedup_dataset make-part --data-file %s --start-byte %d --end-byte %d\"%(sys.argv[1], s, e)\n started.append((s, e))\n print(cmd)\n wait.append(os.popen(cmd))\n \n if e == data_size:\n break\n\n print(\"Waiting for jobs to finish\")\n [x.read() for x in wait]\n\nprint(\"Checking all wrote correctly\")\n\nwhile True:\n files = [\"%s.part.%d-%d\"%(sys.argv[1],s, e) for s,e in started]\n \n wait = []\n for x,(s,e) in zip(files,started):\n size_data = os.path.getsize(x)\n FACT = np.ceil(np.log(size_data)/np.log(2)/8)\n size_table = os.path.getsize(x+\".table.bin\")\n if not os.path.exists(x) or not os.path.exists(x+\".table.bin\") or size_table == 0 or size_data*FACT != size_table:\n cmd = \"./target/debug/dedup_dataset make-part --data-file %s --start-byte %d --end-byte %d\"%(sys.argv[1], s, e)\n print(cmd)\n wait.append(os.popen(cmd))\n print(\"Rerunning\", len(wait), \"jobs because they failed.\")\n [x.read() for x in wait]\n time.sleep(1)\n if len(wait) == 0:\n break\n \n\nprint(\"Merging suffix trees\")\n\nos.popen(\"rm tmp/out.table.bin.*\").read()\n\ntorun = \" --suffix-path \".join(files)\nprint(\"./target/debug/dedup_dataset merge --output-file %s --suffix-path %s --num-threads %d\"%(\"tmp/out.table.bin\", torun, mp.cpu_count()))\npipe = os.popen(\"./target/debug/dedup_dataset merge --output-file %s --suffix-path %s --num-threads %d\"%(\"tmp/out.table.bin\", torun, mp.cpu_count()))\noutput = pipe.read()\nif pipe.close() is not None:\n print(\"Something went wrong with merging.\")\n print(\"Please check that you ran with ulimit -Sn 100000\")\n exit(1)\n#exit(0)\nprint(\"Now merging individual tables\")\nos.popen(\"cat tmp/out.table.bin.* > tmp/out.table.bin\").read()\nprint(\"Cleaning up\")\nos.popen(\"mv tmp/out.table.bin %s.table.bin\"%sys.argv[1]).read()\n\nif os.path.exists(sys.argv[1]+\".table.bin\"):\n if os.path.getsize(sys.argv[1]+\".table.bin\")%os.path.getsize(sys.argv[1]) != 0:\n print(\"File size is wrong\")\n exit(1)\nelse:\n print(\"Failed to create table\")\n exit(1)\n"
] |
[
[
"numpy.log"
]
] |
WGW101/habitat_sim2real
|
[
"04be5215af53122f166d6dfdfb131af3619d10a0"
] |
[
"test/test_habitat_lab_citi.py"
] |
[
"#!/usr/bin/env python3\n\nimport habitat\nimport cv2\nimport quaternion\nimport numpy\n\ncfg = habitat.get_config(\"configs/locobot_pointnav_citi_sim.yaml\")\nsim_cls = habitat.registry.get_simulator(cfg.SIMULATOR.TYPE)\nsim = sim_cls(cfg.SIMULATOR)\n\nobs = sim.reset()\nwhile True:\n state = sim.get_agent_state()\n print(\"Pos:\", state.position)\n _, th, _ = quaternion.as_euler_angles(state.rotation)\n print(\"Rot:\", state.rotation, \"(theta: \", int(numpy.rad2deg(th)), \")\")\n cv2.imshow(\"Color\", obs[\"rgb\"][:, :, ::-1])\n cv2.imshow(\"Depth\", obs[\"depth\"] / 5)\n c = cv2.waitKey()\n if c == ord(\"w\"):\n obs = sim.step(1)\n elif c == ord(\"a\"):\n obs = sim.step(2)\n elif c == ord(\"d\"):\n obs = sim.step(3)\n elif c == ord(\"r\"):\n obs = sim.step(4)\n elif c == ord(\"f\"):\n obs = sim.step(5)\n else:\n break\ncv2.destroyAllWindows()\n"
] |
[
[
"numpy.rad2deg"
]
] |
AIRI-Institute/DeepCT
|
[
"8e23fda101bd4a2bce2c98c5a73d97072a3892de"
] |
[
"src/deepct_model_multi_ct_q_mpi.py"
] |
[
"\"\"\"\nDeepCT architecture without Sigmoid layer \nfor multiple cell type per position computation at once\nwith quantitative features (TODO: Add our names).\n\"\"\"\nimport os\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch import mean\nfrom torch.nn import MSELoss\nfrom torch.utils.tensorboard import SummaryWriter\n\n\nclass qDeepCT(nn.Module):\n def __init__(\n self,\n sequence_length,\n n_cell_types,\n sequence_embedding_length,\n cell_type_embedding_length,\n final_embedding_length,\n n_genomic_features,\n dropout_rate=0.2,\n ):\n \"\"\"\n Based on a DeepSEA architecture (see https://github.com/FunctionLab/selene/blob/0.4.8/models/deepsea.py)\n\n Parameters\n ----------\n sequence_length : int\n Length of input sequence.\n n_cell_types : int\n Number of cell types.\n sequence_embedding_length : int\n cell_type_embedding_length : int\n final_embedding_length : int\n n_genomic_features : int\n Number of target features.\n \"\"\"\n super(qDeepCT, self).__init__()\n self._n_cell_types = n_cell_types\n self.n_genomic_features = n_genomic_features\n conv_kernel_size = 8\n pool_kernel_size = 4\n\n self.conv_net = nn.Sequential(\n nn.Conv1d(4, 320, kernel_size=conv_kernel_size),\n nn.ReLU(inplace=True),\n nn.Conv1d(320, 320, kernel_size=conv_kernel_size),\n nn.ReLU(inplace=True),\n nn.MaxPool1d(kernel_size=pool_kernel_size, stride=pool_kernel_size),\n nn.BatchNorm1d(320),\n nn.Conv1d(320, 480, kernel_size=conv_kernel_size),\n nn.ReLU(inplace=True),\n nn.Conv1d(480, 480, kernel_size=conv_kernel_size),\n nn.ReLU(inplace=True),\n nn.MaxPool1d(kernel_size=pool_kernel_size, stride=pool_kernel_size),\n nn.BatchNorm1d(480),\n nn.Dropout(p=dropout_rate),\n nn.Conv1d(480, 960, kernel_size=conv_kernel_size),\n nn.ReLU(inplace=True),\n nn.Conv1d(960, 960, kernel_size=conv_kernel_size),\n nn.ReLU(inplace=True),\n nn.BatchNorm1d(960),\n nn.Dropout(p=dropout_rate),\n )\n\n reduce_by = 2 * (conv_kernel_size - 1)\n pool_kernel_size = float(pool_kernel_size)\n self._n_channels = int(\n np.floor(\n (np.floor((sequence_length - reduce_by) / pool_kernel_size) - reduce_by)\n / pool_kernel_size\n )\n - reduce_by\n )\n\n self.sequence_net = nn.Sequential(\n nn.Linear(960 * self._n_channels, sequence_embedding_length),\n nn.ReLU(inplace=True),\n )\n\n self.cell_type_net = nn.Sequential(\n nn.Linear(n_cell_types, cell_type_embedding_length),\n )\n\n self.seq_regressor = nn.Sequential(\n nn.Linear(sequence_embedding_length, sequence_embedding_length),\n nn.ReLU(inplace=True),\n nn.BatchNorm1d(sequence_embedding_length),\n nn.Linear(sequence_embedding_length, sequence_embedding_length),\n nn.ReLU(inplace=True),\n nn.Linear(sequence_embedding_length, sequence_embedding_length),\n nn.ReLU(inplace=True),\n nn.Linear(sequence_embedding_length, sequence_embedding_length),\n nn.ReLU(inplace=True),\n nn.BatchNorm1d(sequence_embedding_length),\n nn.Linear(sequence_embedding_length, sequence_embedding_length),\n nn.ReLU(inplace=True),\n nn.Linear(sequence_embedding_length, sequence_embedding_length),\n nn.ReLU(inplace=True),\n nn.Linear(sequence_embedding_length, sequence_embedding_length),\n nn.ReLU(inplace=True),\n nn.BatchNorm1d(sequence_embedding_length),\n nn.Linear(sequence_embedding_length, n_genomic_features),\n # sigmoid turned off for loss numerical stability\n # nn.Sigmoid(),\n )\n\n self.ct_regressor = nn.Sequential(\n nn.Linear(\n sequence_embedding_length + cell_type_embedding_length,\n final_embedding_length,\n ),\n nn.ReLU(inplace=True),\n nn.BatchNorm1d(final_embedding_length),\n 
nn.Linear(final_embedding_length, final_embedding_length),\n nn.ReLU(inplace=True),\n nn.Linear(final_embedding_length, final_embedding_length),\n nn.ReLU(inplace=True),\n nn.Linear(final_embedding_length, final_embedding_length),\n nn.ReLU(inplace=True),\n nn.BatchNorm1d(final_embedding_length),\n nn.Linear(final_embedding_length, final_embedding_length),\n nn.ReLU(inplace=True),\n nn.Linear(final_embedding_length, final_embedding_length),\n nn.ReLU(inplace=True),\n nn.Linear(final_embedding_length, final_embedding_length),\n nn.ReLU(inplace=True),\n nn.BatchNorm1d(final_embedding_length),\n nn.Linear(final_embedding_length, n_genomic_features),\n # sigmoid turned off for loss numerical stability\n # nn.Sigmoid(),\n )\n\n def get_cell_type_embeddings(self):\n \"\"\"Retrieve cell type embeddings learned by the model.\"\"\"\n device = next(self.parameters()).device\n with torch.no_grad():\n all_cell_types = torch.eye(self._n_cell_types).to(device)\n embeddings = self.cell_type_net(all_cell_types)\n return embeddings.detach().cpu()\n\n \"\"\"\n # Doesn't take linear layer bias into account\n def log_cell_type_embeddings_to_tensorboard(self, cell_type_labels, output_dir):\n writer = SummaryWriter(output_dir)\n\n writer.add_embedding(\n self.cell_type_net[0].weight.transpose(0, 1), cell_type_labels\n )\n writer.flush()\n writer.close()\n \"\"\"\n\n def forward(self, sequence_batch, cell_type_batch):\n \"\"\"Forward propagation of a batch.\n\n Parameters:\n -----------\n sequence_batch : torch.Tensor\n A batch of encoded sequences.\n cell_type_batch: torch.Tensor\n A batch of one-hot cell type encodings.\n\n \"\"\"\n batch_size = sequence_batch.size(0)\n\n cell_type_one_hots = torch.eye(self._n_cell_types).to(sequence_batch.device)\n\n sequence_out = self.conv_net(sequence_batch)\n reshaped_sequence_out = sequence_out.view(\n sequence_out.size(0), 960 * self._n_channels\n )\n sequence_embedding = self.sequence_net(reshaped_sequence_out)\n\n # Repeat each sequence embedding to fit cell type embeddings.\n # E.g., with 2 cell types, [seq0_emb, seq1_emb, seq2_emb] becomes\n # [seq0_emb, seq0_emb, seq1_emb, seq1_emb, seq2_emb, seq2_emb]\n repeated_sequence_embedding = sequence_embedding.repeat_interleave(\n repeats=self._n_cell_types, dim=0\n )\n\n # Repeat cell type embeddings to fit sequence embeddings.\n # E.g., with batch size of 3, 2 cell types, and [ct0_emb, ct1_emb] cell type\n # embeddings, the embeddings will be converted to\n # [ct0_emb, ct1_emb, ct0_emb, ct1_emb, ct0_emb, ct1_emb].\n cell_type_embeddings = self.cell_type_net(cell_type_one_hots).repeat(\n batch_size, 1\n )\n sequence_and_cell_type_embeddings = torch.cat(\n (repeated_sequence_embedding, cell_type_embeddings), 1\n )\n\n # view mean_positional_prediction to shape it as [batch_size, 1, n_genomic_features]\n mean_positional_prediction = self.seq_regressor(sequence_embedding).view(\n batch_size, 1, self.n_genomic_features\n )\n # view ct_deviations_prediction to shape it as [batch_size, _n_cell_types, n_genomic_features]\n ct_deviations_prediction = self.ct_regressor(\n sequence_and_cell_type_embeddings\n ).view(batch_size, self._n_cell_types, -1)\n predict = torch.cat((ct_deviations_prediction, mean_positional_prediction), 1)\n return predict\n\n\ndef get_optimizer(lr):\n \"\"\"\n The optimizer and the parameters with which to initialize the optimizer.\n At a later time, we initialize the optimizer by also passing in the model\n parameters (`model.parameters()`). 
We cannot initialize the optimizer\n until the model has been initialized.\n \"\"\"\n # Option 1:\n # return (torch.optim.SGD, {\"lr\": lr, \"weight_decay\": 1e-6, \"momentum\": 0.9})\n\n # Option 2:\n return (torch.optim.Adam, {\"lr\": lr, \"weight_decay\": 1e-6})\n"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.cat",
"torch.eye",
"torch.nn.MaxPool1d",
"torch.nn.Linear",
"torch.no_grad",
"numpy.floor",
"torch.nn.Conv1d",
"torch.nn.ReLU"
]
] |
anthonyivol/muzero-general
|
[
"ddeb263d7ca1653122fbaec5ab54824215280b2a"
] |
[
"games/spiel.py"
] |
[
"import datetime\nimport os\n\nimport numpy\nimport torch\n\nfrom .abstract_game import AbstractGame\n\n\n# This is a Game wrapper for open_spiel games. It allows you to run any game in the open_spiel library.\n\ntry:\n import pyspiel\n\nexcept ImportError:\n import sys\n sys.exit(\"You need to install open_spiel by running pip install open_spiel. For a full documentation, see: https://github.com/deepmind/open_spiel/blob/master/docs/install.md\")\n\n# The game you want to run. See https://github.com/deepmind/open_spiel/blob/master/docs/games.md for a list of games\ngame = pyspiel.load_game(\"tic_tac_toe\")\n\nclass MuZeroConfig:\n def __init__(self):\n # More information is available here: https://github.com/werner-duvaud/muzero-general/wiki/Hyperparameter-Optimization\n\n self.game = game\n\n self.seed = 0 # Seed for numpy, torch and the game\n self.max_num_gpus = None # Fix the maximum number of GPUs to use. It's usually faster to use a single GPU (set it to 1) if it has enough memory. None will use every GPUs available\n\n\n\n ### Game\n self.observation_shape = tuple(self.game.observation_tensor_shape()) # Dimensions of the game observation, must be 3D (channel, height, width). For a 1D array, please reshape it to (1, 1, length of array)\n self.action_space = list(range(self.game.policy_tensor_shape()[0])) # Fixed list of all possible actions. You should only edit the length\n self.players = list(range(self.game.num_players())) # List of players. You should only edit the length\n self.stacked_observations = 0 # Number of previous observations and previous actions to add to the current observation\n\n # Evaluate\n self.muzero_player = 0 # Turn Muzero begins to play (0: MuZero plays first, 1: MuZero plays second)\n self.opponent = \"self\" # Hard coded agent that MuZero faces to assess his progress in multiplayer games. It doesn't influence training. None, \"random\" or \"expert\" if implemented in the Game class\n\n\n\n ### Self-Play\n self.num_workers = 1 # Number of simultaneous threads/workers self-playing to feed the replay buffer\n self.selfplay_on_gpu = False\n self.max_moves = self.game.max_game_length() # Maximum number of moves if game is not finished before\n self.num_simulations = 25 # Number of future moves self-simulated\n self.discount = 0.1 # Chronological discount of the reward\n self.temperature_threshold = None # Number of moves before dropping the temperature given by visit_softmax_temperature_fn to 0 (ie selecting the best action). If None, visit_softmax_temperature_fn is used every time\n\n # Root prior exploration noise\n self.root_dirichlet_alpha = 0.1\n self.root_exploration_fraction = 0.25\n\n # UCB formula\n self.pb_c_base = 19652\n self.pb_c_init = 1.25\n\n\n\n ### Network\n self.network = \"resnet\" # \"resnet\" / \"fullyconnected\"\n self.support_size = 10 # Value and reward are scaled (with almost sqrt) and encoded on a vector with a range of -support_size to support_size. 
Choose it so that support_size <= sqrt(max(abs(discounted reward)))\n\n # Residual Network\n self.downsample = False # Downsample observations before representation network, False / \"CNN\" (lighter) / \"resnet\" (See paper appendix Network Architecture)\n self.blocks = 2 # Number of blocks in the ResNet\n self.channels = 16 # Number of channels in the ResNet\n self.reduced_channels_reward = 16 # Number of channels in reward head\n self.reduced_channels_value = 16 # Number of channels in value head\n self.reduced_channels_policy = 16 # Number of channels in policy head\n self.resnet_fc_reward_layers = [8] # Define the hidden layers in the reward head of the dynamic network\n self.resnet_fc_value_layers = [8] # Define the hidden layers in the value head of the prediction network\n self.resnet_fc_policy_layers = [8] # Define the hidden layers in the policy head of the prediction network\n\n # Fully Connected Network\n self.encoding_size = 32\n self.fc_representation_layers = [] # Define the hidden layers in the representation network\n self.fc_dynamics_layers = [16] # Define the hidden layers in the dynamics network\n self.fc_reward_layers = [16] # Define the hidden layers in the reward network\n self.fc_value_layers = [] # Define the hidden layers in the value network\n self.fc_policy_layers = [] # Define the hidden layers in the policy network\n\n\n\n ### Training\n self.results_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"../results\", os.path.basename(__file__)[:-3], datetime.datetime.now().strftime(\"%Y-%m-%d--%H-%M-%S\")) # Path to store the model weights and TensorBoard logs\n self.save_model = True # Save the checkpoint in results_path as model.checkpoint\n self.training_steps = 1000000 # Total number of training steps (ie weights update according to a batch)\n self.batch_size = 64 # Number of parts of games to train on at each training step\n self.checkpoint_interval = 10 # Number of training steps before using the model for self-playing\n self.value_loss_weight = 0.25 # Scale the value loss to avoid overfitting of the value function, paper recommends 0.25 (See paper appendix Reanalyze)\n self.train_on_gpu = torch.cuda.is_available() # Train on GPU if available\n\n self.optimizer = \"Adam\" # \"Adam\" or \"SGD\". 
Paper uses SGD\n self.weight_decay = 1e-4 # L2 weights regularization\n self.momentum = 0.9 # Used only if optimizer is SGD\n\n # Exponential learning rate schedule\n self.lr_init = 0.003 # Initial learning rate\n self.lr_decay_rate = 1 # Set it to 1 to use a constant learning rate\n self.lr_decay_steps = 10000\n\n\n\n ### Replay Buffer\n self.replay_buffer_size = 3000 # Number of self-play games to keep in the replay buffer\n self.num_unroll_steps = 20 # Number of game moves to keep for every batch element\n self.td_steps = 20 # Number of steps in the future to take into account for calculating the target value\n self.PER = True # Prioritized Replay (See paper appendix Training), select in priority the elements in the replay buffer which are unexpected for the network\n self.PER_alpha = 0.5 # How much prioritization is used, 0 corresponding to the uniform case, paper suggests 1\n\n # Reanalyze (See paper appendix Reanalyse)\n self.use_last_model_value = True # Use the last model to provide a fresher, stable n-step value (See paper appendix Reanalyze)\n self.reanalyse_on_gpu = False\n\n\n\n ### Adjust the self play / training ratio to avoid over/underfitting\n self.self_play_delay = 0 # Number of seconds to wait after each played game\n self.training_delay = 0 # Number of seconds to wait after each training step\n self.ratio = None # Desired training steps per self played step ratio. Equivalent to a synchronous version, training can take much longer. Set it to None to disable it\n\n\n def visit_softmax_temperature_fn(self, trained_steps):\n \"\"\"\n Parameter to alter the visit count distribution to ensure that the action selection becomes greedier as training progresses.\n The smaller it is, the more likely the best action (ie with the highest visit count) is chosen.\n\n Returns:\n Positive float.\n \"\"\"\n return 1\n\n\nclass Game(AbstractGame):\n \"\"\"\n Game wrapper.\n \"\"\"\n\n def __init__(self, seed=None):\n self.env = Spiel()\n\n def step(self, action):\n \"\"\"\n Apply action to the game.\n \n Args:\n action : action of the action_space to take.\n\n Returns:\n The new observation, the reward and a boolean if the game has ended.\n \"\"\"\n observation, reward, done = self.env.step(action)\n return observation, reward * 20, done\n\n def to_play(self):\n \"\"\"\n Return the current player.\n\n Returns:\n The current player, it should be an element of the players list in the config. \n \"\"\"\n return self.env.to_play()\n\n def legal_actions(self):\n \"\"\"\n Should return the legal actions at each turn, if it is not available, it can return\n the whole action space. 
At each turn, the game have to be able to handle one of returned actions.\n \n For complex game where calculating legal moves is too long, the idea is to define the legal actions\n equal to the action space but to return a negative reward if the action is illegal.\n \n Returns:\n An array of integers, subset of the action space.\n \"\"\"\n return self.env.legal_actions()\n\n def reset(self):\n \"\"\"\n Reset the game for a new game.\n \n Returns:\n Initial observation of the game.\n \"\"\"\n return self.env.reset()\n\n def render(self):\n \"\"\"\n Display the game observation.\n \"\"\"\n self.env.render()\n # input(\"Press enter to take a step \")\n\n def legal_actions_human(self):\n return self.env.human_legal_actions()\n\n def human_to_action(self):\n \"\"\"\n For multiplayer games, ask the user for a legal action\n and return the corresponding action number.\n\n Returns:\n An integer from the action space.\n \"\"\"\n while True:\n try:\n print(\"Legal Actions: \", self.legal_actions_human())\n choice = input(\"Enter your move: \")\n if (choice in self.legal_actions_human()):\n break\n except:\n pass\n print(\"Wrong input, try again\")\n\n return self.env.board.string_to_action(choice)\n\n\n def action_to_string(self, action_number):\n \"\"\"\n Convert an action number to a string representing the action.\n \n Args:\n action_number: an integer from the action space.\n\n Returns:\n String representing the action.\n \"\"\"\n row = action_number // 3 + 1\n col = action_number % 3 + 1\n return f\"Play row {row}, column {col}\"\n\n\nclass Spiel:\n def __init__(self):\n self.game = game\n self.board = self.game.new_initial_state()\n self.player = 1\n\n def to_play(self):\n return 0 if self.player == 1 else 1\n\n def reset(self):\n self.board = self.game.new_initial_state()\n self.player = 1\n return self.get_observation()\n\n def step(self, action):\n self.board = self.board.child(action)\n\n done = self.board.is_terminal()\n\n reward = 1 if self.have_winner() else 0\n\n observation = self.get_observation()\n\n self.player *= -1\n\n return observation, reward, done\n\n def get_observation(self):\n if self.player == 1:\n current_player = 1\n else:\n current_player = 0\n return numpy.array(self.board.observation_tensor(current_player)).reshape(self.game.observation_tensor_shape())\n\n def legal_actions(self):\n return self.board.legal_actions()\n\n def have_winner(self):\n rewards = self.board.rewards()\n \n if (self.player == 1):\n\n if (rewards[0] == 1.0):\n return True\n\n elif (self.player == -1):\n if (rewards[1] == 1.0):\n return True\n\n return False\n\n def human_legal_actions(self):\n return [self.board.action_to_string(x) for x in self.board.legal_actions()]\n\n def render(self):\n print(self.board)\n"
] |
[
[
"torch.cuda.is_available"
]
] |
annahung31/REMI_PyTorch
|
[
"8ad15ff57f0a94633fc14b92ce533607c693569a"
] |
[
"src/data.py"
] |
[
"import os\r\nfrom collections import Counter\r\nfrom glob import glob\r\nimport numpy as np\r\nimport pickle\r\nimport utils\r\nfrom tqdm import tqdm\r\nimport string\r\nimport random\r\ndef extract_event(file_path):\r\n note_items, ticks_per_beat = utils.read_midi(file_path)\r\n note_items = utils.quantize_items(note_items, ticks_per_beat/4)\r\n groups = utils.group_items(note_items, ticks_per_beat*4)\r\n events = utils.item2event(groups, ticks_per_beat)\r\n return events\r\n\r\n\r\ndef create_folders(ROOT):\r\n folder_list = ['event', 'word']\r\n for fo in folder_list:\r\n if not os.path.exists(os.path.join(ROOT, fo)):\r\n os.mkdir(os.path.join(ROOT, fo))\r\n\r\n\r\nif __name__ == '__main__':\r\n ROOT = 'dataset/775_subset'\r\n create_folders(ROOT)\r\n # list file\r\n files = sorted(glob(os.path.join(ROOT,'midi','*.midi')))\r\n print(len(files))\r\n\r\n print('='*5, 'extract events', '='*5)\r\n # extract events\r\n for i in tqdm(range(len(files))):\r\n events = extract_event(files[i])\r\n folder = files[i].split('/')[-2]\r\n #name = files[i].split('/')[-1][:5] +'_'+ files[i].split('/')[-1][-10:-4] + '_'+''.join(random.sample(string.ascii_letters+string.digits, 10))\r\n name = files[i].split('/')[-1][:-5]\r\n path = os.path.join(ROOT,'event','{}...{}.pkl'.format(folder, name))\r\n pickle.dump(events, open(path, 'wb'))\r\n \r\n # count for dictionary\r\n print('='*5, 'count for dictionary', '='*5)\r\n event_files = glob(os.path.join(ROOT,'event','*.pkl'))\r\n print(len(event_files))\r\n data = []\r\n for file in tqdm(event_files):\r\n for event in pickle.load(open(file, 'rb')):\r\n data.append('{}_{}'.format(event.name, event.value))\r\n counts = Counter(data)\r\n event2word = {key: i for i, key in enumerate(counts.keys())}\r\n word2event = {i: key for i, key in enumerate(counts.keys())}\r\n path = os.path.join(ROOT,'dictionary.pkl')\r\n pickle.dump((event2word, word2event), open(path, 'wb'))\r\n \r\n # convert to word\r\n print('='*5, 'convert to word', '='*5)\r\n event_files = glob(os.path.join(ROOT,'event','*.pkl'))\r\n event2word, word2event = pickle.load(open(os.path.join(ROOT,'dictionary.pkl'), 'rb'))\r\n for file in tqdm(event_files):\r\n events = pickle.load(open(file, 'rb'))\r\n words = []\r\n for e in events:\r\n word = event2word['{}_{}'.format(e.name, e.value)]\r\n words.append(word)\r\n name = file.split('/')[-1]\r\n path = os.path.join(ROOT,'word','{}.npy'.format(name))\r\n np.save(path, words)\r\n\r\n # create training data\r\n print('='*5, 'create training data', '='*5)\r\n WINDOW_SIZE = 512\r\n GROUP_SIZE = 5\r\n INTERVAL = GROUP_SIZE * 2\r\n word_files = sorted(glob(os.path.join(ROOT,'word','*.npy')))\r\n print(len(word_files))\r\n segments = []\r\n for file in tqdm(word_files):\r\n words = np.load(file)\r\n pairs = []\r\n for i in range(0, len(words)-WINDOW_SIZE-1, WINDOW_SIZE):\r\n x = words[i:i+WINDOW_SIZE]\r\n y = words[i+1:i+WINDOW_SIZE+1]\r\n pairs.append([x, y])\r\n pairs = np.array(pairs)\r\n # abandon the last\r\n for i in np.arange(0, len(pairs)-GROUP_SIZE, INTERVAL):\r\n data = pairs[i:i+GROUP_SIZE]\r\n if len(data) == GROUP_SIZE:\r\n segments.append(data)\r\n training_data = np.array(segments)\r\n print(training_data.shape)\r\n np.save(os.path.join(ROOT,'train_data.npy'), segments)\r\n"
] |
[
[
"numpy.load",
"numpy.array",
"numpy.save"
]
] |
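The `data.py` above does two things worth isolating: it builds an event-to-word vocabulary from `Counter` key order, then slices the word sequence into shifted (x, y) next-token pairs. A runnable sketch of just those two steps, with toy strings standing in for the MIDI-derived REMI events and a shrunken `WINDOW_SIZE` so the output is visible (the later `GROUP_SIZE` batching step is omitted):

```python
from collections import Counter
import numpy as np

events = ['Bar_None', 'Position_1/16', 'Note-On_60', 'Note-Duration_4',
          'Position_5/16', 'Note-On_64', 'Note-Duration_4', 'Bar_None']

# Vocabulary: insertion order of Counter keys, exactly as in the record.
counts = Counter(events)
event2word = {key: i for i, key in enumerate(counts.keys())}
word2event = {i: key for i, key in enumerate(counts.keys())}

words = np.array([event2word[e] for e in events])

# Next-token training pairs: y is x shifted forward by one position.
WINDOW_SIZE = 4
pairs = []
for i in range(0, len(words) - WINDOW_SIZE - 1, WINDOW_SIZE):
    x = words[i:i + WINDOW_SIZE]
    y = words[i + 1:i + WINDOW_SIZE + 1]
    pairs.append([x, y])
pairs = np.array(pairs)
print(pairs.shape)  # (n_pairs, 2, WINDOW_SIZE) -> here (1, 2, 4)
```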
trpreston5/EXtra-data
|
[
"ca3b59894ae5ea519725e2834ec818ad3b859922"
] |
[
"extra_data/tests/test_stacking.py"
] |
[
"import numpy as np\nimport pytest\n\nfrom extra_data import RunDirectory, stack_data, stack_detector_data\nfrom extra_data.stacking import StackView\n\ndef test_stack_data(mock_fxe_raw_run):\n test_run = RunDirectory(mock_fxe_raw_run)\n tid, data = test_run.train_from_id(10000, devices=[('*/DET/*', 'image.data')])\n\n comb = stack_data(data, 'image.data')\n assert comb.shape == (128, 1, 16, 256, 256)\n\n\ndef test_stack_detector_data(mock_fxe_raw_run):\n test_run = RunDirectory(mock_fxe_raw_run)\n tid, data = test_run.train_from_id(10000, devices=[('*/DET/*', 'image.data')])\n\n comb = stack_detector_data(data, 'image.data')\n assert comb.shape == (128, 1, 16, 256, 256)\n\n\ndef test_stack_detector_data_missing(mock_fxe_raw_run):\n test_run = RunDirectory(mock_fxe_raw_run)\n tid, data = test_run.train_from_id(10000, devices=[('*/DET/*', 'image.data')])\n\n # Three variants of missing data:\n # 1. Source missing\n del data['FXE_DET_LPD1M-1/DET/3CH0:xtdf']\n # 2. Key missing\n del data['FXE_DET_LPD1M-1/DET/7CH0:xtdf']['image.data']\n # 3. Empty array\n missing = ['FXE_DET_LPD1M-1/DET/{}CH0:xtdf'.format(m) for m in (1, 5, 9, 15)]\n for module in missing:\n data[module]['image.data'] = np.zeros((0, 1, 256, 256), dtype=np.uint16)\n\n comb = stack_detector_data(data, 'image.data', fillvalue=22)\n assert comb.shape == (128, 1, 16, 256, 256)\n\n assert not (comb[:, :, 0] == 22).any() # Control\n assert (comb[:, :, 3] == 22).all() # Source missing\n assert (comb[:, :, 7] == 22).all() # Key missing\n assert (comb[:, :, 5] == 22).all() # Empty array\n\n\ndef test_stack_detector_data_stackview(mock_fxe_raw_run):\n test_run = RunDirectory(mock_fxe_raw_run)\n tid, data = test_run.train_from_id(10000, devices=[('*/DET/*', 'image.data')])\n\n # Three variants of missing data:\n # 1. Source missing\n del data['FXE_DET_LPD1M-1/DET/3CH0:xtdf']\n # 2. Key missing\n del data['FXE_DET_LPD1M-1/DET/7CH0:xtdf']['image.data']\n # 3. 
Empty array\n missing = ['FXE_DET_LPD1M-1/DET/{}CH0:xtdf'.format(m) for m in (1, 5, 9, 15)]\n for module in missing:\n data[module]['image.data'] = np.zeros((0, 1, 256, 256), dtype=np.uint16)\n\n comb = stack_detector_data(data, 'image.data', fillvalue=22, real_array=False)\n assert comb.shape == (128, 1, 16, 256, 256)\n\n assert not (comb[:, :, 0] == 22).any() # Control\n assert (comb[:, :, 3] == 22).all() # Source missing\n assert (comb[:, :, 7] == 22).all() # Key missing\n assert (comb[:, :, 5] == 22).all() # Empty array\n\n # Slice across all modules\n pulse = comb[0, 0]\n assert pulse.shape == (16, 256, 256)\n assert not (pulse[0] == 22).any()\n assert (pulse[3] == 22).all()\n assert (pulse[7] == 22).all()\n assert (pulse[5] == 22).all()\n\n pulse_arr = pulse.asarray()\n assert pulse_arr.shape == (16, 256, 256)\n assert pulse_arr.max() == 22\n assert pulse_arr.min() == 0\n\n\ndef test_stack_detector_data_wrong_pulses(mock_fxe_raw_run):\n test_run = RunDirectory(mock_fxe_raw_run)\n tid, data = test_run.train_from_id(10000, devices=[('*/DET/*', 'image.data')])\n\n misshaped = ['FXE_DET_LPD1M-1/DET/{}CH0:xtdf'.format(m) for m in (12, 13)]\n for module in misshaped:\n data[module]['image.data'] = np.zeros((64, 1, 256, 256), dtype=np.uint16)\n\n with pytest.raises(ValueError) as excinfo:\n comb = stack_detector_data(data, 'image.data')\n assert '(64, 1, 256, 256)' in str(excinfo.value)\n\n\ndef test_stack_detector_data_wrong_shape(mock_fxe_raw_run):\n test_run = RunDirectory(mock_fxe_raw_run)\n tid, data = test_run.train_from_id(10000, devices=[('*/DET/*', 'image.data')])\n\n misshaped = ['FXE_DET_LPD1M-1/DET/{}CH0:xtdf'.format(m) for m in (0, 15)]\n for module in misshaped:\n data[module]['image.data'] = np.zeros((128, 1, 512, 128), dtype=np.uint16)\n\n with pytest.raises(ValueError) as excinfo:\n comb = stack_detector_data(data, 'image.data')\n assert '(128, 1, 512, 128)' in str(excinfo.value)\n\n\ndef test_stack_detector_data_type_error(mock_fxe_raw_run):\n test_run = RunDirectory(mock_fxe_raw_run)\n tid, data = test_run.train_from_id(10000, devices=[('*/DET/*', 'image.data')])\n\n module = 'FXE_DET_LPD1M-1/DET/3CH0:xtdf'\n data[module]['image.data'] = data[module]['image.data'].astype(np.float32)\n\n with pytest.raises(ValueError) as excinfo:\n comb = stack_detector_data(data, 'image.data')\n assert \"dtype('float32')\" in str(excinfo.value)\n\n\ndef test_stack_detector_data_extra_mods(mock_fxe_raw_run):\n test_run = RunDirectory(mock_fxe_raw_run)\n tid, data = test_run.train_from_id(10000, devices=[('*/DET/*', 'image.data')])\n\n data.setdefault(\n 'FXE_DET_LPD1M-1/DET/16CH0:xtdf',\n {'image.data': np.zeros((128, 1, 256, 256), dtype=np.uint16)},\n )\n\n with pytest.raises(IndexError) as excinfo:\n comb = stack_detector_data(data, 'image.data')\n assert \"16\" in str(excinfo.value)\n\ndef test_stackview_squeeze():\n # Squeeze not dropping stacking dim\n data = {0: np.zeros((1, 4)), 1: np.zeros((1, 4))}\n sv = StackView(data, 2, (1, 4), data[0], 0, stack_axis=0)\n assert sv.shape == (2, 1, 4)\n assert sv.squeeze().shape == (2, 4)\n\n # Squeeze dropping stacking dim\n data = {0: np.zeros((1, 4))}\n sv = StackView(data, 1, (1, 4), data[0].dtype, 0, stack_axis=0)\n assert sv.shape == (1, 1, 4)\n assert sv.squeeze().shape == (4,)\n\n assert sv.squeeze(axis=0).shape == (1, 4)\n assert sv.squeeze(axis=-2).shape == (1, 4)\n\n with pytest.raises(np.AxisError):\n sv.squeeze(axis=4)\n"
] |
[
[
"numpy.zeros"
]
] |
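The tests above exercise three missing-data cases in `stack_detector_data`: source missing, key missing, and an empty array, all padded with `fillvalue`. As a plain-numpy sketch of that behaviour (not the real `extra_data` function, whose axis conventions and checks are richer), the following stacks per-module arrays along a new module axis and fills the gaps:

```python
import numpy as np

def stack_modules(data, key, n_modules, fillvalue=0):
    """Stack per-module arrays along a new axis 1, padding gaps with fillvalue."""
    # Take the reference shape/dtype from any module that actually has data.
    ref = next(arr for arr in (mod.get(key) for mod in data.values())
               if arr is not None and arr.shape[0] > 0)
    out = np.full((ref.shape[0], n_modules) + ref.shape[1:], fillvalue,
                  dtype=ref.dtype)
    for modno, mod in data.items():
        arr = mod.get(key)
        if arr is not None and arr.shape[0] > 0:
            out[:, modno] = arr
    return out

data = {
    0: {'image.data': np.ones((128, 8, 8), dtype=np.uint16)},
    1: {'image.data': np.zeros((0, 8, 8), dtype=np.uint16)},  # empty array
    2: {},                                                    # key missing
}
comb = stack_modules(data, 'image.data', n_modules=3, fillvalue=22)
print(comb.shape)  # (128, 3, 8, 8)
print((comb[:, 0] == 1).all(), (comb[:, 1] == 22).all(), (comb[:, 2] == 22).all())
```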
ahmad-sulehari/LabEngine
|
[
"9444dce96954c546333d5aecc92a06c3bfd19aa5"
] |
[
"venv/Lib/site-packages/numpy/testing/_private/noseclasses.py"
] |
[
"# These classes implement a doctest runner plugin for nose, a \"known failure\"\n# error class, and a customized TestProgram for NumPy.\n\n# Because this module imports nose directly, it should not\n# be used except by nosetester.py to avoid a general NumPy\n# dependency on nose.\nfrom __future__ import division, absolute_import, print_function\n\nimport os\nimport sys\nimport doctest\nimport inspect\n\nimport numpy\nimport nose\nfrom nose.plugins import doctests as npd\nfrom nose.plugins.errorclass import ErrorClass, ErrorClassPlugin\nfrom nose.plugins.base import Plugin\nfrom nose.util import src\nfrom .nosetester import get_package_name\nfrom .utils import KnownFailureException, KnownFailureTest\n\n\n# Some of the classes in this module begin with 'Numpy' to clearly distinguish\n# them from the plethora of very similar names from nose/unittest/doctest\n\n#-----------------------------------------------------------------------------\n# Modified version of the one in the stdlib, that fixes a python bug (doctests\n# not found in extension modules, http://bugs.python.org/issue3158)\nclass NumpyDocTestFinder(doctest.DocTestFinder):\n\n def _from_module(self, module, object):\n \"\"\"\n Return true if the given object is defined in the given\n module.\n \"\"\"\n if module is None:\n return True\n elif inspect.isfunction(object):\n return module.__dict__ is object.__globals__\n elif inspect.isbuiltin(object):\n return module.__name__ == object.__module__\n elif inspect.isclass(object):\n return module.__name__ == object.__module__\n elif inspect.ismethod(object):\n # This one may be a bug in cython that fails to correctly set the\n # __module__ attribute of methods, but since the same error is easy\n # to make by extension code writers, having this safety in place\n # isn't such a bad idea\n return module.__name__ == object.__self__.__class__.__module__\n elif inspect.getmodule(object) is not None:\n return module is inspect.getmodule(object)\n elif hasattr(object, '__module__'):\n return module.__name__ == object.__module__\n elif isinstance(object, property):\n return True # [XX] no way not be sure.\n else:\n raise ValueError(\"object must be a class or function\")\n\n def _find(self, tests, obj, name, module, source_lines, globs, seen):\n \"\"\"\n Find tests for the given object and any contained objects, and\n add them to `tests`.\n \"\"\"\n\n doctest.DocTestFinder._find(self, tests, obj, name, module,\n source_lines, globs, seen)\n\n # Below we re-run pieces of the above method with manual modifications,\n # because the original code is buggy and fails to correctly identify\n # doctests in extension modules.\n\n # Local shorthands\n from inspect import (\n isroutine, isclass, ismodule, isfunction, ismethod\n )\n\n # Look for tests in a module's contained objects.\n if ismodule(obj) and self._recurse:\n for valname, val in obj.__dict__.items():\n valname1 = '%s.%s' % (name, valname)\n if ( (isroutine(val) or isclass(val))\n and self._from_module(module, val)):\n\n self._find(tests, val, valname1, module, source_lines,\n globs, seen)\n\n # Look for tests in a class's contained objects.\n if isclass(obj) and self._recurse:\n for valname, val in obj.__dict__.items():\n # Special handling for staticmethod/classmethod.\n if isinstance(val, staticmethod):\n val = getattr(obj, valname)\n if isinstance(val, classmethod):\n val = getattr(obj, valname).__func__\n\n # Recurse to methods, properties, and nested classes.\n if ((isfunction(val) or isclass(val) or\n ismethod(val) or isinstance(val, 
property)) and\n self._from_module(module, val)):\n valname = '%s.%s' % (name, valname)\n self._find(tests, val, valname, module, source_lines,\n globs, seen)\n\n\n# second-chance checker; if the default comparison doesn't\n# pass, then see if the expected output string contains flags that\n# tell us to ignore the output\nclass NumpyOutputChecker(doctest.OutputChecker):\n def check_output(self, want, got, optionflags):\n ret = doctest.OutputChecker.check_output(self, want, got,\n optionflags)\n if not ret:\n if \"#random\" in want:\n return True\n\n # it would be useful to normalize endianness so that\n # bigendian machines don't fail all the tests (and there are\n # actually some bigendian examples in the doctests). Let's try\n # making them all little endian\n got = got.replace(\"'>\", \"'<\")\n want = want.replace(\"'>\", \"'<\")\n\n # try to normalize out 32 and 64 bit default int sizes\n for sz in [4, 8]:\n got = got.replace(\"'<i%d'\" % sz, \"int\")\n want = want.replace(\"'<i%d'\" % sz, \"int\")\n\n ret = doctest.OutputChecker.check_output(self, want,\n got, optionflags)\n\n return ret\n\n\n# Subclass nose.plugins.doctests.DocTestCase to work around a bug in\n# its constructor that blocks non-default arguments from being passed\n# down into doctest.DocTestCase\nclass NumpyDocTestCase(npd.DocTestCase):\n def __init__(self, test, optionflags=0, setUp=None, tearDown=None,\n checker=None, obj=None, result_var='_'):\n self._result_var = result_var\n self._nose_obj = obj\n doctest.DocTestCase.__init__(self, test,\n optionflags=optionflags,\n setUp=setUp, tearDown=tearDown,\n checker=checker)\n\n\nprint_state = numpy.get_printoptions()\n\nclass NumpyDoctest(npd.Doctest):\n name = 'numpydoctest' # call nosetests with --with-numpydoctest\n score = 1000 # load late, after doctest builtin\n\n # always use whitespace and ellipsis options for doctests\n doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS\n\n # files that should be ignored for doctests\n doctest_ignore = ['generate_numpy_api.py',\n 'setup.py']\n\n # Custom classes; class variables to allow subclassing\n doctest_case_class = NumpyDocTestCase\n out_check_class = NumpyOutputChecker\n test_finder_class = NumpyDocTestFinder\n\n # Don't use the standard doctest option handler; hard-code the option values\n def options(self, parser, env=os.environ):\n Plugin.options(self, parser, env)\n # Test doctests in 'test' files / directories. Standard plugin default\n # is False\n self.doctest_tests = True\n # Variable name; if defined, doctest results stored in this variable in\n # the top-level namespace. None is the standard default\n self.doctest_result_var = None\n\n def configure(self, options, config):\n # parent method sets enabled flag from command line --with-numpydoctest\n Plugin.configure(self, options, config)\n self.finder = self.test_finder_class()\n self.parser = doctest.DocTestParser()\n if self.enabled:\n # Pull standard doctest out of plugin list; there's no reason to run\n # both. 
In practice the Unplugger plugin above would cover us when\n # run from a standard numpy.test() call; this is just in case\n # someone wants to run our plugin outside the numpy.test() machinery\n config.plugins.plugins = [p for p in config.plugins.plugins\n if p.name != 'doctest']\n\n def set_test_context(self, test):\n \"\"\" Configure `test` object to set test context\n\n We set the numpy / scipy standard doctest namespace\n\n Parameters\n ----------\n test : test object\n with ``globs`` dictionary defining namespace\n\n Returns\n -------\n None\n\n Notes\n -----\n `test` object modified in place\n \"\"\"\n # set the namespace for tests\n pkg_name = get_package_name(os.path.dirname(test.filename))\n\n # Each doctest should execute in an environment equivalent to\n # starting Python and executing \"import numpy as np\", and,\n # for SciPy packages, an additional import of the local\n # package (so that scipy.linalg.basic.py's doctests have an\n # implicit \"from scipy import linalg\" as well.\n #\n # Note: __file__ allows the doctest in NoseTester to run\n # without producing an error\n test.globs = {'__builtins__':__builtins__,\n '__file__':'__main__',\n '__name__':'__main__',\n 'np':numpy}\n # add appropriate scipy import for SciPy tests\n if 'scipy' in pkg_name:\n p = pkg_name.split('.')\n p2 = p[-1]\n test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])\n\n # Override test loading to customize test context (with set_test_context\n # method), set standard docstring options, and install our own test output\n # checker\n def loadTestsFromModule(self, module):\n if not self.matches(module.__name__):\n npd.log.debug(\"Doctest doesn't want module %s\", module)\n return\n try:\n tests = self.finder.find(module)\n except AttributeError:\n # nose allows module.__test__ = False; doctest does not and\n # throws AttributeError\n return\n if not tests:\n return\n tests.sort()\n module_file = src(module.__file__)\n for test in tests:\n if not test.examples:\n continue\n if not test.filename:\n test.filename = module_file\n # Set test namespace; test altered in place\n self.set_test_context(test)\n yield self.doctest_case_class(test,\n optionflags=self.doctest_optflags,\n checker=self.out_check_class(),\n result_var=self.doctest_result_var)\n\n # Add an afterContext method to nose.plugins.doctests.Doctest in order\n # to restore print options to the original state after each doctest\n def afterContext(self):\n numpy.set_printoptions(**print_state)\n\n # Ignore NumPy-specific build files that shouldn't be searched for tests\n def wantFile(self, file):\n bn = os.path.basename(file)\n if bn in self.doctest_ignore:\n return False\n return npd.Doctest.wantFile(self, file)\n\n\nclass Unplugger(object):\n \"\"\" Nose plugin to remove named plugin late in loading\n\n By default it removes the \"doctest\" plugin.\n \"\"\"\n name = 'unplugger'\n enabled = True # always enabled\n score = 4000 # load late in order to be after builtins\n\n def __init__(self, to_unplug='doctest'):\n self.to_unplug = to_unplug\n\n def options(self, parser, env):\n pass\n\n def configure(self, options, config):\n # Pull named plugin out of plugins list\n config.plugins.plugins = [p for p in config.plugins.plugins\n if p.name != self.to_unplug]\n\n\nclass KnownFailurePlugin(ErrorClassPlugin):\n '''Plugin that installs a KNOWNFAIL error class for the\n KnownFailureClass exception. 
When KnownFailure is raised,\n the exception will be logged in the knownfail attribute of the\n result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the\n exception will not be counted as an error or failure.'''\n enabled = True\n knownfail = ErrorClass(KnownFailureException,\n label='KNOWNFAIL',\n isfailure=False)\n\n def options(self, parser, env=os.environ):\n env_opt = 'NOSE_WITHOUT_KNOWNFAIL'\n parser.add_option('--no-knownfail', action='store_true',\n dest='noKnownFail', default=env.get(env_opt, False),\n help='Disable special handling of KnownFailure '\n 'exceptions')\n\n def configure(self, options, conf):\n if not self.can_configure:\n return\n self.conf = conf\n disable = getattr(options, 'noKnownFail', False)\n if disable:\n self.enabled = False\n\nKnownFailure = KnownFailurePlugin # backwards compat\n\n\nclass FPUModeCheckPlugin(Plugin):\n \"\"\"\n Plugin that checks the FPU mode before and after each test,\n raising failures if the test changed the mode.\n \"\"\"\n\n def prepareTestCase(self, test):\n from numpy.core._multiarray_tests import get_fpu_mode\n\n def run(result):\n old_mode = get_fpu_mode()\n test.test(result)\n new_mode = get_fpu_mode()\n\n if old_mode != new_mode:\n try:\n raise AssertionError(\n \"FPU mode changed from {0:#x} to {1:#x} during the \"\n \"test\".format(old_mode, new_mode))\n except AssertionError:\n result.addFailure(test, sys.exc_info())\n\n return run\n\n\n# Class allows us to save the results of the tests in runTests - see runTests\n# method docstring for details\nclass NumpyTestProgram(nose.core.TestProgram):\n def runTests(self):\n \"\"\"Run Tests. Returns true on success, false on failure, and\n sets self.success to the same value.\n\n Because nose currently discards the test result object, but we need\n to return it to the user, override TestProgram.runTests to retain\n the result\n \"\"\"\n if self.testRunner is None:\n self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,\n verbosity=self.config.verbosity,\n config=self.config)\n plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)\n if plug_runner is not None:\n self.testRunner = plug_runner\n self.result = self.testRunner.run(self.test)\n self.success = self.result.wasSuccessful()\n return self.success\n"
] |
[
[
"numpy.set_printoptions",
"numpy.core._multiarray_tests.get_fpu_mode",
"numpy.get_printoptions"
]
] |
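One detail in the plugin above is easy to miss: it snapshots numpy's print options at import time (`print_state`) and restores them after every doctest (`afterContext`) so one test's formatting cannot leak into the next. A minimal standalone sketch of the same save/restore pattern, written as a context manager:

```python
import numpy as np
from contextlib import contextmanager

@contextmanager
def preserved_printoptions():
    saved = np.get_printoptions()          # snapshot, like print_state above
    try:
        yield
    finally:
        np.set_printoptions(**saved)       # restore, like afterContext above

with preserved_printoptions():
    np.set_printoptions(precision=2, suppress=True)
    print(np.array([1 / 3, 2 / 3]))        # printed with the temporary options
print(np.get_printoptions()['precision'])  # back to the previous value (8)
```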
wx-b/NerfingMVS
|
[
"8c8f96244146b929a1495caf2719c090b48ac082"
] |
[
"utils/depth_priors_utils.py"
] |
[
"import torch\n\ndef compute_depth_loss(depth_pred, depth_gt, mask_gt):\n loss_list = []\n for pred, gt, mask in zip(depth_pred, depth_gt, mask_gt):\n log_pred = torch.log(pred[mask])\n log_target = torch.log(gt[mask])\n alpha = (log_target - log_pred).sum()/mask.sum()\n log_diff = torch.abs((log_pred - log_target + alpha))\n d = 0.05*0.2*(log_diff.sum()/mask.sum())\n loss_list.append(d)\n\n return torch.stack(loss_list, 0).mean()"
] |
[
[
"torch.stack",
"torch.abs",
"torch.log"
]
] |
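The loss in the record above is a scale-invariant L1 in log-depth: `alpha` is the mean log-residual, which aligns each prediction to the ground truth up to a global scale factor (a shift in log space) before the absolute difference is taken. A runnable sketch with toy tensors; the `0.05 * 0.2` weighting is kept verbatim from the record, though why it is split into two factors is not documented there.

```python
import torch

def compute_depth_loss(depth_pred, depth_gt, mask_gt):
    loss_list = []
    for pred, gt, mask in zip(depth_pred, depth_gt, mask_gt):
        log_pred = torch.log(pred[mask])
        log_target = torch.log(gt[mask])
        alpha = (log_target - log_pred).sum() / mask.sum()  # best log-shift
        log_diff = torch.abs(log_pred - log_target + alpha)
        loss_list.append(0.05 * 0.2 * (log_diff.sum() / mask.sum()))
    return torch.stack(loss_list, 0).mean()

pred = torch.rand(2, 8, 8) + 0.1
gt = 2.0 * pred                              # same depths up to a global scale,
mask = torch.ones(2, 8, 8, dtype=torch.bool)
print(compute_depth_loss(pred, gt, mask))    # so the loss is ~0
```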
hoangtuanvu/rad_chestxray
|
[
"b29c2bf98ae41d85258b21674e8826847a0cc647"
] |
[
"models/feature_extraction/inceptionresnetv2.py"
] |
[
"from __future__ import print_function, division, absolute_import\nimport torch\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nfrom models.attention_map import BamBlock\n\n__all__ = ['InceptionResNetV2', 'inceptionresnetv2']\n\npretrained_settings = {\n 'inceptionresnetv2': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 299, 299],\n 'input_range': [0, 1],\n 'mean': [0.5, 0.5, 0.5],\n 'std': [0.5, 0.5, 0.5],\n 'num_classes': 1000\n },\n 'imagenet+background': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 299, 299],\n 'input_range': [0, 1],\n 'mean': [0.5, 0.5, 0.5],\n 'std': [0.5, 0.5, 0.5],\n 'num_classes': 1001\n }\n }\n}\n\n\nclass BasicConv2d(nn.Module):\n\n def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_planes, out_planes,\n kernel_size=kernel_size, stride=stride,\n padding=padding, bias=False) # verify bias false\n self.bn = nn.BatchNorm2d(out_planes,\n eps=0.001, # value found in tensorflow\n momentum=0.1, # default pytorch value\n affine=True)\n self.relu = nn.ReLU(inplace=False)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n return x\n\n\nclass Mixed_5b(nn.Module):\n\n def __init__(self):\n super(Mixed_5b, self).__init__()\n\n self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1)\n\n self.branch1 = nn.Sequential(\n BasicConv2d(192, 48, kernel_size=1, stride=1),\n BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2)\n )\n\n self.branch2 = nn.Sequential(\n BasicConv2d(192, 64, kernel_size=1, stride=1),\n BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),\n BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1)\n )\n\n self.branch3 = nn.Sequential(\n nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),\n BasicConv2d(192, 64, kernel_size=1, stride=1)\n )\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n x2 = self.branch2(x)\n x3 = self.branch3(x)\n out = torch.cat((x0, x1, x2, x3), 1)\n return out\n\n\nclass Block35(nn.Module):\n\n def __init__(self, scale=1.0):\n super(Block35, self).__init__()\n\n self.scale = scale\n\n self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1)\n\n self.branch1 = nn.Sequential(\n BasicConv2d(320, 32, kernel_size=1, stride=1),\n BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)\n )\n\n self.branch2 = nn.Sequential(\n BasicConv2d(320, 32, kernel_size=1, stride=1),\n BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1),\n BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1)\n )\n\n self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1)\n self.relu = nn.ReLU(inplace=False)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n x2 = self.branch2(x)\n out = torch.cat((x0, x1, x2), 1)\n out = self.conv2d(out)\n out = out * self.scale + x\n out = self.relu(out)\n return out\n\n\nclass Mixed_6a(nn.Module):\n\n def __init__(self):\n super(Mixed_6a, self).__init__()\n\n self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2)\n\n self.branch1 = nn.Sequential(\n BasicConv2d(320, 256, kernel_size=1, stride=1),\n BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1),\n BasicConv2d(256, 384, kernel_size=3, stride=2)\n )\n\n self.branch2 = nn.MaxPool2d(3, stride=2)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n 
x2 = self.branch2(x)\n out = torch.cat((x0, x1, x2), 1)\n return out\n\n\nclass Block17(nn.Module):\n\n def __init__(self, scale=1.0):\n super(Block17, self).__init__()\n\n self.scale = scale\n\n self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1)\n\n self.branch1 = nn.Sequential(\n BasicConv2d(1088, 128, kernel_size=1, stride=1),\n BasicConv2d(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)),\n BasicConv2d(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0))\n )\n\n self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1)\n self.relu = nn.ReLU(inplace=False)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n out = torch.cat((x0, x1), 1)\n out = self.conv2d(out)\n out = out * self.scale + x\n out = self.relu(out)\n return out\n\n\nclass Mixed_7a(nn.Module):\n\n def __init__(self):\n super(Mixed_7a, self).__init__()\n\n self.branch0 = nn.Sequential(\n BasicConv2d(1088, 256, kernel_size=1, stride=1),\n BasicConv2d(256, 384, kernel_size=3, stride=2)\n )\n\n self.branch1 = nn.Sequential(\n BasicConv2d(1088, 256, kernel_size=1, stride=1),\n BasicConv2d(256, 288, kernel_size=3, stride=2)\n )\n\n self.branch2 = nn.Sequential(\n BasicConv2d(1088, 256, kernel_size=1, stride=1),\n BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1),\n BasicConv2d(288, 320, kernel_size=3, stride=2)\n )\n\n self.branch3 = nn.MaxPool2d(3, stride=2)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n x2 = self.branch2(x)\n x3 = self.branch3(x)\n out = torch.cat((x0, x1, x2, x3), 1)\n return out\n\n\nclass Block8(nn.Module):\n\n def __init__(self, scale=1.0, noReLU=False):\n super(Block8, self).__init__()\n\n self.scale = scale\n self.noReLU = noReLU\n\n self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1)\n\n self.branch1 = nn.Sequential(\n BasicConv2d(2080, 192, kernel_size=1, stride=1),\n BasicConv2d(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)),\n BasicConv2d(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))\n )\n\n self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1)\n if not self.noReLU:\n self.relu = nn.ReLU(inplace=False)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n out = torch.cat((x0, x1), 1)\n out = self.conv2d(out)\n out = out * self.scale + x\n if not self.noReLU:\n out = self.relu(out)\n return out\n\n\nclass InceptionResNetV2(nn.Module):\n\n def __init__(self, num_classes=1001, gray=None, img_size=224, use_bam=True):\n super(InceptionResNetV2, self).__init__()\n # Special attributs\n self.input_space = None\n # self.input_size = (299, 299, 3)\n self.gray = gray\n self.use_bam = use_bam\n if gray:\n self.input_size = (img_size, img_size, 1)\n self.gconv2d_1a = BasicConv2d(1, 32, kernel_size=3, stride=2)\n else:\n self.input_size = (img_size, img_size, 3)\n self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)\n self.mean = None\n self.std = None\n # Modules\n # self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)\n if use_bam:\n self.bam_2a = BamBlock(channels=32)\n self.bam_3b = BamBlock(channels=64)\n self.bam_4a = BamBlock(channels=80)\n self.bam_5b = BamBlock(channels=192)\n self.bam_6a = BamBlock(channels=320)\n self.bam_7a = BamBlock(channels=1088)\n self.bam_7b = BamBlock(channels=2080)\n\n self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)\n self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1)\n self.maxpool_3a = nn.MaxPool2d(3, stride=2)\n self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)\n self.conv2d_4a = 
BasicConv2d(80, 192, kernel_size=3, stride=1)\n self.maxpool_5a = nn.MaxPool2d(3, stride=2)\n self.mixed_5b = Mixed_5b()\n self.repeat = nn.Sequential(\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17)\n )\n self.mixed_6a = Mixed_6a()\n self.repeat_1 = nn.Sequential(\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10)\n )\n self.mixed_7a = Mixed_7a()\n self.repeat_2 = nn.Sequential(\n Block8(scale=0.20),\n Block8(scale=0.20),\n Block8(scale=0.20),\n Block8(scale=0.20),\n Block8(scale=0.20),\n Block8(scale=0.20),\n Block8(scale=0.20),\n Block8(scale=0.20),\n Block8(scale=0.20)\n )\n self.block8 = Block8(noReLU=True)\n self.conv2d_7b = BasicConv2d(2080, 1536, kernel_size=1, stride=1)\n self.avgpool_1a = nn.AvgPool2d(8, count_include_pad=False)\n self.last_linear = nn.Linear(1536, num_classes)\n # self.Sigmoid = nn.Sigmoid()\n\n def features(self, input):\n if self.gray:\n x = self.gconv2d_1a(input)\n else:\n x = self.conv2d_1a(input)\n\n # print('conv2d_2a', x.size())\n if self.use_bam:\n x = self.bam_2a(x)\n\n x = self.conv2d_2a(x)\n x = self.conv2d_2b(x)\n # print('conv2d_3b', x.size())\n if self.use_bam:\n x = self.bam_3b(x)\n\n x = self.maxpool_3a(x)\n x = self.conv2d_3b(x)\n # print('conv2d_4a', x.size())\n if self.use_bam:\n x = self.bam_4a(x)\n\n x = self.conv2d_4a(x)\n # print('mixed_5b', x.size())\n if self.use_bam:\n x = self.bam_5b(x)\n\n x = self.maxpool_5a(x)\n x = self.mixed_5b(x)\n # print('mixed_6a', x.size())\n if self.use_bam:\n x = self.bam_6a(x)\n\n x = self.repeat(x)\n x = self.mixed_6a(x)\n # print('mixed_7a', x.size())\n if self.use_bam:\n x = self.bam_7a(x)\n\n x = self.repeat_1(x)\n x = self.mixed_7a(x)\n x = self.repeat_2(x)\n x = self.block8(x)\n # print('conv2d_7b', x.size())\n if self.use_bam:\n x = self.bam_7b(x)\n\n x = self.conv2d_7b(x)\n return x\n\n def logits(self, features):\n x = self.avgpool_1a(features)\n x = x.view(x.size(0), -1)\n x = self.last_linear(x)\n return x\n\n def forward(self, input):\n x = self.features(input)\n # x = self.logits(x)\n return x\n\n\ndef inceptionresnetv2(cfg, **kwargs):\n r\"\"\"InceptionResNetV2 model architecture from the\n `\"InceptionV4, Inception-ResNet...\" <https://arxiv.org/abs/1602.07261>`_ paper.\n \"\"\"\n if cfg.pretrained:\n settings = pretrained_settings['inceptionresnetv2']['imagenet']\n model = InceptionResNetV2(num_classes=1001, **kwargs)\n\n pretrained_state = model_zoo.load_url(settings['url'])\n ori_state = model.state_dict()\n for k in pretrained_state:\n if k in ori_state:\n ori_state[k] = pretrained_state[k]\n model.load_state_dict(ori_state)\n else:\n model = InceptionResNetV2(num_classes=1001, **kwargs)\n\n return model\n\n# class ATInceptionresnetv2(nn.Module):\n# def __init__(self, num_classes, pretrained, attention_type):\n# super(ATInceptionresnetv2, self).__init__()\n# self.inceptionresnetv2 = inceptionresnetv2(pretrained=pretrained)\n# self.attention = AttentionMap(type=attention_type, num_channels=1536)\n# 
self.avgpool_1a = nn.AvgPool2d(8, count_include_pad=False)\n# self.last_linear = nn.Linear(1536, num_classes)\n#\n# def logits(self, features):\n# x = self.avgpool_1a(features)\n# x = x.view(x.size(0), -1)\n# x = self.last_linear(x)\n# return x\n#\n# def forward(self, x):\n# x = self.inceptionresnetv2.features(x)\n# x = self.attention(x)\n# x = self.logits(x)\n# return x\n#\n#\n# if __name__ == \"__main__\":\n# model = inceptionresnetv2(pretrained=True)\n# inp = torch.randn([16, 3, 299, 299])\n# out = model(inp)\n# print(out.size())\n"
] |
[
[
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url"
]
] |
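The `inceptionresnetv2` constructor above loads ImageNet weights into a model whose layer set differs from the checkpoint (extra BAM blocks, an optional grayscale stem) by copying only the keys both state dicts share. Here is a minimal sketch of that partial-loading pattern, with two toy modules standing in for the checkpoint and the modified model:

```python
import torch
import torch.nn as nn

class Pretrained(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, 3)

class Modified(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, 3)   # shared with the checkpoint
        self.extra = nn.Conv2d(8, 8, 1)  # new layer, keeps its random init

pretrained_state = Pretrained().state_dict()
model = Modified()
ori_state = model.state_dict()
for k in pretrained_state:               # same loop as in the record
    if k in ori_state:
        ori_state[k] = pretrained_state[k]
model.load_state_dict(ori_state)
print(torch.equal(model.conv.weight, pretrained_state['conv.weight']))  # True
```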
cokezrr/transformers
|
[
"794889f0aea28df30efe9d35358ed8cf335cc2f3"
] |
[
"src/normalization/MPN.py"
] |
[
"'''\n@file: MPNCOV.py\n@author: Jiangtao Xie\n@author: Peihua Li\nPlease cite the paper below if you use the code:\n\nPeihua Li, Jiangtao Xie, Qilong Wang and Zilin Gao. Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization. IEEE Int. Conf. on Computer Vision and Pattern Recognition (CVPR), pp. 947-955, 2018.\n\nPeihua Li, Jiangtao Xie, Qilong Wang and Wangmeng Zuo. Is Second-order Information Helpful for Large-scale Visual Recognition? IEEE Int. Conf. on Computer Vision (ICCV), pp. 2070-2078, 2017.\n\nCopyright (C) 2018 Peihua Li and Jiangtao Xie\n\nAll rights reserved.\n'''\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Function\n\nclass MPN(nn.Module):\n \"\"\"Matrix power normalized Covariance pooling (MPNCOV)\n implementation of fast MPN-COV (i.e.,iSQRT-COV)\n https://arxiv.org/abs/1712.01034\n\n Args:\n iterNum: #iteration of Newton-schulz method\n is_sqrt: whether perform matrix square root or not\n is_vec: whether the output is a vector or not\n input_dim: the #channel of input feature\n dimension_reduction: if None, it will not use 1x1 conv to\n reduce the #channel of feature.\n if 256 or others, the #channel of feature\n will be reduced to 256 or others.\n \"\"\"\n def __init__(self, iterNum=3, input_dim=256):\n\n super(MPN, self).__init__()\n self.iterNum=iterNum\n self.output_dim = int(input_dim*(input_dim+1)/2)\n\n def _sqrtm(self, x):\n return Sqrtm.apply(x, self.iterNum)\n def _triuvec(self, x):\n return Triuvec.apply(x)\n\n def forward(self, x):\n x = self._sqrtm(x)\n x = self._triuvec(x)\n x = x.view(x.shape[0], -1)\n return x\n\n\nclass Covpool(Function):\n @staticmethod\n def forward(ctx, input):\n x = input\n dtype = x.dtype\n batchSize = x.data.shape[0]\n dim = x.data.shape[1]\n h = x.data.shape[2]\n w = x.data.shape[3]\n M = h*w\n x = x.reshape(batchSize,dim,M)\n I_hat = (-1./M/M)*torch.ones(M,M,device = x.device, dtype=dtype) \\\n + (1./M)*torch.eye(M,M,device = x.device, dtype=dtype)\n I_hat = I_hat.view(1,M,M).repeat(batchSize,1,1).type(x.dtype)\n y = x.bmm(I_hat).bmm(x.transpose(1,2))\n ctx.save_for_backward(input,I_hat)\n return y\n @staticmethod\n def backward(ctx, grad_output):\n input,I_hat = ctx.saved_tensors\n x = input\n grad_output = grad_output.type(x.dtype)\n I_hat = I_hat.type(x.dtype)\n batchSize = x.data.shape[0]\n dim = x.data.shape[1]\n h = x.data.shape[2]\n w = x.data.shape[3]\n M = h*w\n x = x.reshape(batchSize,dim,M)\n grad_input = grad_output + grad_output.transpose(1,2)\n grad_input = grad_input.bmm(x).bmm(I_hat)\n grad_input = grad_input.reshape(batchSize,dim,h,w)\n return grad_input\n\nclass Sqrtm(Function):\n @staticmethod\n def forward(ctx, input, iterN):\n x = input\n batchSize = x.data.shape[0]\n dim = x.data.shape[1]\n dtype = x.dtype\n I3 = 3.0*torch.eye(dim,dim,device = x.device).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)\n normA = (1.0/3.0)*x.mul(I3).sum(dim=1).sum(dim=1)\n A = x.div(normA.view(batchSize,1,1).expand_as(x))\n Y = torch.zeros(batchSize, iterN, dim, dim, requires_grad = False, device = x.device).type(dtype)\n Z = torch.eye(dim,dim,device = x.device).view(1,dim,dim).repeat(batchSize,iterN,1,1).type(dtype)\n if iterN < 2:\n ZY = 0.5*(I3 - A)\n YZY = A.bmm(ZY)\n else:\n ZY = 0.5*(I3 - A)\n Y[:,0,:,:] = A.bmm(ZY)\n Z[:,0,:,:] = ZY\n for i in range(1, iterN-1):\n ZY = 0.5*(I3 - Z[:,i-1,:,:].bmm(Y[:,i-1,:,:]))\n Y[:,i,:,:] = Y[:,i-1,:,:].bmm(ZY)\n Z[:,i,:,:] = ZY.bmm(Z[:,i-1,:,:])\n YZY = 0.5*Y[:,iterN-2,:,:].bmm(I3 - 
Z[:,iterN-2,:,:].bmm(Y[:,iterN-2,:,:]))\n y = YZY*torch.sqrt(normA).view(batchSize, 1, 1).expand_as(x)\n ctx.save_for_backward(input, A, YZY, normA, Y, Z)\n ctx.iterN = iterN\n return y\n @staticmethod\n def backward(ctx, grad_output):\n input, A, ZY, normA, Y, Z = ctx.saved_tensors\n iterN = ctx.iterN\n x = input\n grad_output = grad_output.type(x.dtype)\n A = A.type(x.dtype)\n ZY = ZY.type(x.dtype)\n normA = normA.type(x.dtype)\n Y = Y.type(x.dtype)\n X = Z.type(x.dtype)\n batchSize = x.data.shape[0]\n dim = x.data.shape[1]\n dtype = grad_output.dtype\n der_postCom = grad_output*torch.sqrt(normA).view(batchSize, 1, 1).expand_as(x)\n der_postComAux = (grad_output*ZY).sum(dim=1).sum(dim=1).div(2*torch.sqrt(normA))\n I3 = 3.0*torch.eye(dim,dim,device = x.device).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)\n if iterN < 2:\n der_NSiter = 0.5*(der_postCom.bmm(I3 - A) - A.bmm(der_postCom))\n else:\n dldY = 0.5*(der_postCom.bmm(I3 - Y[:,iterN-2,:,:].bmm(Z[:,iterN-2,:,:])) -\n Z[:,iterN-2,:,:].bmm(Y[:,iterN-2,:,:]).bmm(der_postCom))\n dldZ = -0.5*Y[:,iterN-2,:,:].bmm(der_postCom).bmm(Y[:,iterN-2,:,:])\n for i in range(iterN-3, -1, -1):\n YZ = I3 - Y[:,i,:,:].bmm(Z[:,i,:,:])\n ZY = Z[:,i,:,:].bmm(Y[:,i,:,:])\n dldY_ = 0.5*(dldY.bmm(YZ) -\n Z[:,i,:,:].bmm(dldZ).bmm(Z[:,i,:,:]) -\n ZY.bmm(dldY))\n dldZ_ = 0.5*(YZ.bmm(dldZ) -\n Y[:,i,:,:].bmm(dldY).bmm(Y[:,i,:,:]) -\n dldZ.bmm(ZY))\n dldY = dldY_\n dldZ = dldZ_\n der_NSiter = 0.5*(dldY.bmm(I3 - A) - dldZ - A.bmm(dldY))\n der_NSiter = der_NSiter.transpose(1, 2)\n grad_input = der_NSiter.div(normA.view(batchSize,1,1).expand_as(x))\n grad_aux = der_NSiter.mul(x).sum(dim=1).sum(dim=1)\n for i in range(batchSize):\n grad_input[i,:,:] += (der_postComAux[i] \\\n - grad_aux[i] / (normA[i] * normA[i])) \\\n *torch.ones(dim,device = x.device).diag().type(dtype)\n return grad_input, None\n\nclass Triuvec(Function):\n @staticmethod\n def forward(ctx, input):\n x = input\n batchSize = x.data.shape[0]\n dim = x.data.shape[1]\n dtype = x.dtype\n x = x.reshape(batchSize, dim*dim)\n I = torch.ones(dim,dim).triu().reshape(dim*dim)\n index = I.nonzero(as_tuple=False)\n y = torch.zeros(batchSize,int(dim*(dim+1)/2),device = x.device).type(dtype)\n y = x[:,index]\n ctx.save_for_backward(input,index)\n return y\n @staticmethod\n def backward(ctx, grad_output):\n input,index = ctx.saved_tensors\n x = input\n batchSize = x.data.shape[0]\n dim = x.data.shape[1]\n dtype = grad_output.dtype\n grad_input = torch.zeros(batchSize,dim*dim,device = x.device,requires_grad=False).type(dtype)\n grad_input[:,index] = grad_output\n grad_input = grad_input.reshape(batchSize,dim,dim)\n return grad_input\n\ndef CovpoolLayer(var):\n return Covpool.apply(var)\n\ndef SqrtmLayer(var, iterN):\n return Sqrtm.apply(var, iterN)\n\ndef TriuvecLayer(var):\n return Triuvec.apply(var)\n\n\ndef cov_forward(x):\n batchsize = x.data.shape[0]\n dim = x.data.shape[1]\n h = x.data.shape[2]\n w = x.data.shape[3]\n M = h*w\n x = x.reshape(batchsize,dim,M)\n I_hat = (-1./M/M)*torch.ones(M,M,device = x.device) + (1./M/M)*torch.eye(M,M,device = x.device)\n I_hat = I_hat.view(1,M,M).repeat(batchsize,1,1).type(x.dtype)\n y = x.bmm(I_hat).bmm(x.transpose(1,2))\n return y\n\n\ndef sqrt_forward(x,numIters=5):\n batchSize = x.data.shape[0]\n dim = x.data.shape[1]\n dtype = x.dtype\n I = torch.eye(dim,dim,device = x.device).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)\n normA = x.mul(I).sum(dim=1).sum(dim=1)\n Y = x.div(normA.view(batchSize, 1, 1).expand_as(x))\n Z = torch.eye(dim,dim,device = 
x.device).view(1,dim,dim).repeat(batchSize,1,1).type(dtype)\n for i in range(numIters):\n T = 0.5*(3.0*I - Z.bmm(Y))\n Y = Y.bmm(T)\n Z = T.bmm(Z)\n y = Y*torch.sqrt(normA).view(batchSize, 1, 1).expand_as(x)\n return y\n\n\ndef triu_forward(x):\n batchSize = x.data.shape[0]\n dim = x.data.shape[1]\n dtype = x.dtype\n x = x.reshape(batchSize, dim*dim)\n I = torch.ones(dim,dim).triu().t().reshape(dim*dim)\n index = I.nonzero(as_tuple=False)\n y = torch.zeros(batchSize,int(dim*(dim+1)/2),device = x.device).type(dtype)\n y = x[:,index]\n return y\n"
] |
[
[
"torch.sqrt",
"torch.eye",
"torch.ones",
"torch.zeros"
]
] |
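`sqrt_forward` above computes a matrix square root with coupled Newton-Schulz iterations: normalise A by its trace so the iteration converges, iterate Y &lt;- Y T and Z &lt;- T Z with T = (3I - Z Y) / 2, then undo the normalisation with sqrt(trace). A single-matrix sketch of the same iteration that checks Y @ Y recovers A (the batched record version also does all of this per batch element):

```python
import torch

def newton_schulz_sqrt(A, num_iters=10):
    dim = A.shape[0]
    I = torch.eye(dim, dtype=A.dtype)
    norm_a = torch.trace(A)            # the record computes this as x.mul(I).sum()
    Y = A / norm_a                     # eigenvalues now lie in (0, 1]
    Z = I.clone()
    for _ in range(num_iters):
        T = 0.5 * (3.0 * I - Z @ Y)
        Y = Y @ T
        Z = T @ Z
    return Y * torch.sqrt(norm_a)      # sqrt(A) = sqrt(c) * sqrt(A / c)

M = torch.randn(6, 6, dtype=torch.float64)
A = M @ M.T + 1e-3 * torch.eye(6, dtype=torch.float64)  # SPD input
S = newton_schulz_sqrt(A)
print(torch.dist(S @ S, A))            # ~0 for a well-conditioned A
```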
zhaox1n/spark
|
[
"1f150b9392706293946278dd35e8f5a5016ed6df"
] |
[
"python/pyspark/pandas/indexes/numeric.py"
] |
[
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport pandas as pd\nfrom pandas.api.types import is_hashable\n\nfrom pyspark import pandas as ps\nfrom pyspark.pandas.indexes.base import Index\nfrom pyspark.pandas.series import Series\n\n\nclass NumericIndex(Index):\n \"\"\"\n Provide numeric type operations.\n This is an abstract class.\n \"\"\"\n\n pass\n\n\nclass IntegerIndex(NumericIndex):\n \"\"\"\n This is an abstract class for Int64Index.\n \"\"\"\n\n pass\n\n\nclass Int64Index(IntegerIndex):\n \"\"\"\n Immutable sequence used for indexing and alignment. The basic object\n storing axis labels for all pandas objects. Int64Index is a special case\n of `Index` with purely integer labels.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype (default: int64)\n copy : bool\n Make a copy of input ndarray.\n name : object\n Name to be stored in the index.\n\n See Also\n --------\n Index : The base pandas-on-Spark Index type.\n Float64Index : A special case of :class:`Index` with purely float labels.\n\n Notes\n -----\n An Index instance can **only** contain hashable objects.\n\n Examples\n --------\n >>> ps.Int64Index([1, 2, 3])\n Int64Index([1, 2, 3], dtype='int64')\n\n From a Series:\n\n >>> s = ps.Series([1, 2, 3], index=[10, 20, 30])\n >>> ps.Int64Index(s)\n Int64Index([1, 2, 3], dtype='int64')\n\n From an Index:\n\n >>> idx = ps.Index([1, 2, 3])\n >>> ps.Int64Index(idx)\n Int64Index([1, 2, 3], dtype='int64')\n \"\"\"\n\n def __new__(cls, data=None, dtype=None, copy=False, name=None):\n if not is_hashable(name):\n raise TypeError(\"Index.name must be a hashable type\")\n\n if isinstance(data, (Series, Index)):\n if dtype is None:\n dtype = \"int64\"\n return Index(data, dtype=dtype, copy=copy, name=name)\n\n return ps.from_pandas(pd.Int64Index(data=data, dtype=dtype, copy=copy, name=name))\n\n\nclass Float64Index(NumericIndex):\n \"\"\"\n Immutable sequence used for indexing and alignment. The basic object\n storing axis labels for all pandas objects. 
Float64Index is a special case\n of `Index` with purely float labels.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype (default: float64)\n copy : bool\n Make a copy of input ndarray.\n name : object\n Name to be stored in the index.\n\n See Also\n --------\n Index : The base pandas-on-Spark Index type.\n Int64Index : A special case of :class:`Index` with purely integer labels.\n\n Notes\n -----\n An Index instance can **only** contain hashable objects.\n\n Examples\n --------\n >>> ps.Float64Index([1.0, 2.0, 3.0])\n Float64Index([1.0, 2.0, 3.0], dtype='float64')\n\n From a Series:\n\n >>> s = ps.Series([1, 2, 3], index=[10, 20, 30])\n >>> ps.Float64Index(s)\n Float64Index([1.0, 2.0, 3.0], dtype='float64')\n\n From an Index:\n\n >>> idx = ps.Index([1, 2, 3])\n >>> ps.Float64Index(idx)\n Float64Index([1.0, 2.0, 3.0], dtype='float64')\n \"\"\"\n\n def __new__(cls, data=None, dtype=None, copy=False, name=None):\n if not is_hashable(name):\n raise TypeError(\"Index.name must be a hashable type\")\n\n if isinstance(data, (Series, Index)):\n if dtype is None:\n dtype = \"float64\"\n return Index(data, dtype=dtype, copy=copy, name=name)\n\n return ps.from_pandas(pd.Float64Index(data=data, dtype=dtype, copy=copy, name=name))\n\n\ndef _test():\n import os\n import doctest\n import sys\n from pyspark.sql import SparkSession\n import pyspark.pandas.indexes.numeric\n\n os.chdir(os.environ[\"SPARK_HOME\"])\n\n globs = pyspark.pandas.indexes.numeric.__dict__.copy()\n globs[\"ps\"] = pyspark.pandas\n spark = (\n SparkSession.builder.master(\"local[4]\")\n .appName(\"pyspark.pandas.indexes.numeric tests\")\n .getOrCreate()\n )\n (failure_count, test_count) = doctest.testmod(\n pyspark.pandas.indexes.numeric,\n globs=globs,\n optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,\n )\n spark.stop()\n if failure_count:\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n"
] |
[
[
"pandas.api.types.is_hashable",
"pandas.Float64Index",
"pandas.Int64Index"
]
] |
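Both `__new__` constructors above share one dispatch: handed an existing Series/Index, they return a plain `Index` with the dtype forced, which means their own `__init__` never runs on the result; only otherwise do they defer to pandas. A dependency-free sketch of that "constructor that may return a different class" pattern (names mirror the record, but nothing here is the real pyspark implementation):

```python
class Index:
    def __init__(self, data, dtype=None, name=None):
        self.data = list(data)
        self.dtype = dtype
        self.name = name

class Int64Index(Index):
    def __new__(cls, data=None, dtype=None, name=None):
        if isinstance(data, Index):
            # Returning an instance that is not an Int64Index means Python
            # skips Int64Index.__init__ entirely -- the record relies on this.
            return Index(data.data, dtype=dtype or "int64", name=name)
        return super().__new__(cls)

    def __init__(self, data=None, dtype=None, name=None):
        super().__init__(data, dtype or "int64", name)

idx = Int64Index([1, 2, 3])
print(type(idx).__name__, idx.dtype)   # Int64Index int64
wrapped = Int64Index(idx)
print(type(wrapped).__name__)          # Index
```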
KelSolaar/colour-science
|
[
"6c68f40544a0214614c4d01c2c3290e86406d9ad"
] |
[
"colour/appearance/ciecam02.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCIECAM02 Colour Appearance Model\n================================\n\nDefines *CIECAM02* colour appearance model objects:\n\n- :class:`colour.appearance.InductionFactors_CIECAM02`\n- :attr:`colour.VIEWING_CONDITIONS_CIECAM02`\n- :class:`colour.CAM_Specification_CIECAM02`\n- :func:`colour.XYZ_to_CIECAM02`\n- :func:`colour.CIECAM02_to_XYZ`\n\nReferences\n----------\n- :cite:`Fairchild2004c` : Fairchild, M. D. (2004). CIECAM02. In Color\n Appearance Models (2nd ed., pp. 289-301). Wiley. ISBN:978-0-470-01216-1\n- :cite:`Luo2013` : Luo, Ming Ronnier, & Li, C. (2013). CIECAM02 and Its\n Recent Developments. In C. Fernandez-Maloigne (Ed.), Advanced Color Image\n Processing and Analysis (pp. 19-58). Springer New York.\n doi:10.1007/978-1-4419-6190-7\n- :cite:`Moroneya` : Moroney, N., Fairchild, M. D., Hunt, R. W. G., Li, C.,\n Luo, M. R., & Newman, T. (2002). The CIECAM02 color appearance model. Color\n and Imaging Conference, 1, 23-27.\n- :cite:`Wikipedia2007a` : Fairchild, M. D. (2004). CIECAM02. In Color\n Appearance Models (2nd ed., pp. 289-301). Wiley. ISBN:978-0-470-01216-1\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\nfrom collections import namedtuple\n\nfrom colour.algebra import spow\nfrom colour.adaptation import CAT_CAT02\nfrom colour.appearance.hunt import (MATRIX_HPE_TO_XYZ, MATRIX_XYZ_TO_HPE,\n luminance_level_adaptation_factor)\nfrom colour.constants import EPSILON\nfrom colour.utilities import (\n CaseInsensitiveMapping, as_float_array, as_int_array, as_namedtuple,\n as_float, from_range_degrees, matrix_dot, vector_dot, from_range_100, ones,\n to_domain_100, to_domain_degrees, tsplit, tstack, zeros)\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'CAT02_INVERSE_CAT', 'InductionFactors_CIECAM02',\n 'VIEWING_CONDITIONS_CIECAM02', 'HUE_DATA_FOR_HUE_QUADRATURE',\n 'CAM_Specification_CIECAM02', 'XYZ_to_CIECAM02', 'CIECAM02_to_XYZ',\n 'chromatic_induction_factors', 'base_exponential_non_linearity',\n 'viewing_condition_dependent_parameters', 'degree_of_adaptation',\n 'full_chromatic_adaptation_forward', 'full_chromatic_adaptation_inverse',\n 'RGB_to_rgb', 'rgb_to_RGB',\n 'post_adaptation_non_linear_response_compression_forward',\n 'post_adaptation_non_linear_response_compression_inverse',\n 'opponent_colour_dimensions_forward', 'opponent_colour_dimensions_inverse',\n 'hue_angle', 'hue_quadrature', 'eccentricity_factor',\n 'achromatic_response_forward', 'achromatic_response_inverse',\n 'lightness_correlate', 'brightness_correlate',\n 'temporary_magnitude_quantity_forward',\n 'temporary_magnitude_quantity_inverse', 'chroma_correlate',\n 'colourfulness_correlate', 'saturation_correlate', 'P',\n 'matrix_post_adaptation_non_linear_response_compression'\n]\n\nCAT02_INVERSE_CAT = np.linalg.inv(CAT_CAT02)\n\"\"\"\nInverse CAT02 chromatic adaptation transform.\n\nCAT02_INVERSE_CAT : array_like, (3, 3)\n\"\"\"\n\n\nclass InductionFactors_CIECAM02(\n namedtuple('InductionFactors_CIECAM02', ('F', 'c', 'N_c'))):\n \"\"\"\n *CIECAM02* colour appearance model induction factors.\n\n Parameters\n ----------\n F : numeric or array_like\n Maximum degree of adaptation :math:`F`.\n c : numeric or array_like\n Exponential non linearity :math:`c`.\n N_c : numeric or array_like\n Chromatic 
induction factor :math:`N_c`.\n\n References\n ----------\n :cite:`Fairchild2004c`, :cite:`Luo2013`, :cite:`Moroneya`,\n :cite:`Wikipedia2007a`\n \"\"\"\n\n\nVIEWING_CONDITIONS_CIECAM02 = CaseInsensitiveMapping({\n 'Average': InductionFactors_CIECAM02(1, 0.69, 1),\n 'Dim': InductionFactors_CIECAM02(0.9, 0.59, 0.9),\n 'Dark': InductionFactors_CIECAM02(0.8, 0.525, 0.8)\n})\nVIEWING_CONDITIONS_CIECAM02.__doc__ = \"\"\"\nReference *CIECAM02* colour appearance model viewing conditions.\n\nReferences\n----------\n:cite:`Fairchild2004c`, :cite:`Luo2013`, :cite:`Moroneya`,\n:cite:`Wikipedia2007a`\n\nVIEWING_CONDITIONS_CIECAM02 : CaseInsensitiveMapping\n **{'Average', 'Dim', 'Dark'}**\n\"\"\"\n\nHUE_DATA_FOR_HUE_QUADRATURE = {\n 'h_i': np.array([20.14, 90.00, 164.25, 237.53, 380.14]),\n 'e_i': np.array([0.8, 0.7, 1.0, 1.2, 0.8]),\n 'H_i': np.array([0.0, 100.0, 200.0, 300.0, 400.0])\n}\n\n\nclass CAM_Specification_CIECAM02(\n namedtuple('CAM_Specification_CIECAM02',\n ('J', 'C', 'h', 's', 'Q', 'M', 'H', 'HC'))):\n \"\"\"\n Defines the *CIECAM02* colour appearance model specification.\n\n Parameters\n ----------\n J : numeric or array_like\n Correlate of *Lightness* :math:`J`.\n C : numeric or array_like\n Correlate of *chroma* :math:`C`.\n h : numeric or array_like\n *Hue* angle :math:`h` in degrees.\n s : numeric or array_like\n Correlate of *saturation* :math:`s`.\n Q : numeric or array_like\n Correlate of *brightness* :math:`Q`.\n M : numeric or array_like\n Correlate of *colourfulness* :math:`M`.\n H : numeric or array_like\n *Hue* :math:`h` quadrature :math:`H`.\n HC : numeric or array_like\n *Hue* :math:`h` composition :math:`H^C`.\n\n References\n ----------\n :cite:`Fairchild2004c`, :cite:`Luo2013`, :cite:`Moroneya`,\n :cite:`Wikipedia2007a`\n \"\"\"\n\n def __new__(cls,\n J=None,\n C=None,\n h=None,\n s=None,\n Q=None,\n M=None,\n H=None,\n HC=None):\n \"\"\"\n Returns a new instance of the :class:`colour.\\\nCAM_Specification_CIECAM02` class.\n \"\"\"\n\n return super(CAM_Specification_CIECAM02, cls).__new__(\n cls, J, C, h, s, Q, M, H, HC)\n\n\ndef XYZ_to_CIECAM02(XYZ,\n XYZ_w,\n L_A,\n Y_b,\n surround=VIEWING_CONDITIONS_CIECAM02['Average'],\n discount_illuminant=False):\n \"\"\"\n Computes the *CIECAM02* colour appearance model correlates from given\n *CIE XYZ* tristimulus values.\n\n This is the *forward* implementation.\n\n Parameters\n ----------\n XYZ : array_like\n *CIE XYZ* tristimulus values of test sample / stimulus.\n XYZ_w : array_like\n *CIE XYZ* tristimulus values of reference white.\n L_A : numeric or array_like\n Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often taken\n to be 20% of the luminance of a white object in the scene).\n Y_b : numeric or array_like\n Relative luminance of background :math:`Y_b` in :math:`cd/m^2`.\n surround : InductionFactors_CIECAM02, optional\n Surround viewing conditions induction factors.\n discount_illuminant : bool, optional\n Truth value indicating if the illuminant should be discounted.\n\n Returns\n -------\n CAM_Specification_CIECAM02\n *CIECAM02* colour appearance model specification.\n\n Notes\n -----\n\n +------------------------------+-----------------------+---------------+\n | **Domain** | **Scale - Reference** | **Scale - 1** |\n +==============================+=======================+===============+\n | ``XYZ`` | [0, 100] | [0, 1] |\n +------------------------------+-----------------------+---------------+\n | ``XYZ_w`` | [0, 100] | [0, 1] |\n 
+------------------------------+-----------------------+---------------+\n\n +----------------------------------+-----------------------\\\n+---------------+\n | **Range** | **Scale - Reference** \\\n| **Scale - 1** |\n +==================================+=======================\\\n+===============+\n | ``CAM_Specification_CIECAM02.J`` | [0, 100] \\\n| [0, 1] |\n +----------------------------------+-----------------------\\\n+---------------+\n | ``CAM_Specification_CIECAM02.C`` | [0, 100] \\\n| [0, 1] |\n +----------------------------------+-----------------------\\\n+---------------+\n | ``CAM_Specification_CIECAM02.h`` | [0, 360] \\\n| [0, 1] |\n +----------------------------------+-----------------------\\\n+---------------+\n | ``CAM_Specification_CIECAM02.s`` | [0, 100] \\\n| [0, 1] |\n +----------------------------------+-----------------------\\\n+---------------+\n | ``CAM_Specification_CIECAM02.Q`` | [0, 100] \\\n| [0, 1] |\n +----------------------------------+-----------------------\\\n+---------------+\n | ``CAM_Specification_CIECAM02.M`` | [0, 100] \\\n| [0, 1] |\n +----------------------------------+-----------------------\\\n+---------------+\n | ``CAM_Specification_CIECAM02.H`` | [0, 360] \\\n| [0, 1] |\n +----------------------------------+-----------------------\\\n+---------------+\n\n References\n ----------\n :cite:`Fairchild2004c`, :cite:`Luo2013`, :cite:`Moroneya`,\n :cite:`Wikipedia2007a`\n\n Examples\n --------\n >>> XYZ = np.array([19.01, 20.00, 21.78])\n >>> XYZ_w = np.array([95.05, 100.00, 108.88])\n >>> L_A = 318.31\n >>> Y_b = 20.0\n >>> surround = VIEWING_CONDITIONS_CIECAM02['Average']\n >>> XYZ_to_CIECAM02(XYZ, XYZ_w, L_A, Y_b, surround) # doctest: +ELLIPSIS\n CAM_Specification_CIECAM02(J=41.7310911..., C=0.1047077..., \\\nh=219.0484326..., s=2.3603053..., Q=195.3713259..., M=0.1088421..., \\\nH=278.0607358..., HC=None)\n \"\"\"\n\n XYZ = to_domain_100(XYZ)\n XYZ_w = to_domain_100(XYZ_w)\n _X_w, Y_w, _Z_w = tsplit(XYZ_w)\n L_A = as_float_array(L_A)\n Y_b = as_float_array(Y_b)\n\n n, F_L, N_bb, N_cb, z = tsplit(\n viewing_condition_dependent_parameters(Y_b, Y_w, L_A))\n\n # Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform\n # sharpened *RGB* values.\n RGB = vector_dot(CAT_CAT02, XYZ)\n RGB_w = vector_dot(CAT_CAT02, XYZ_w)\n\n # Computing degree of adaptation :math:`D`.\n D = (degree_of_adaptation(surround.F, L_A)\n if not discount_illuminant else ones(L_A.shape))\n\n # Computing full chromatic adaptation.\n RGB_c = full_chromatic_adaptation_forward(RGB, RGB_w, Y_w, D)\n RGB_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D)\n\n # Converting to *Hunt-Pointer-Estevez* colourspace.\n RGB_p = RGB_to_rgb(RGB_c)\n RGB_pw = RGB_to_rgb(RGB_wc)\n\n # Applying forward post-adaptation non linear response compression.\n RGB_a = post_adaptation_non_linear_response_compression_forward(RGB_p, F_L)\n RGB_aw = post_adaptation_non_linear_response_compression_forward(\n RGB_pw, F_L)\n\n # Converting to preliminary cartesian coordinates.\n a, b = tsplit(opponent_colour_dimensions_forward(RGB_a))\n\n # Computing the *hue* angle :math:`h`.\n h = hue_angle(a, b)\n\n # Computing hue :math:`h` quadrature :math:`H`.\n H = hue_quadrature(h)\n # TODO: Compute hue composition.\n\n # Computing eccentricity factor *e_t*.\n e_t = eccentricity_factor(h)\n\n # Computing achromatic responses for the stimulus and the whitepoint.\n A = achromatic_response_forward(RGB_a, N_bb)\n A_w = achromatic_response_forward(RGB_aw, N_bb)\n\n # Computing the correlate of 
*Lightness* :math:`J`.\n J = lightness_correlate(A, A_w, surround.c, z)\n\n # Computing the correlate of *brightness* :math:`Q`.\n Q = brightness_correlate(surround.c, J, A_w, F_L)\n\n # Computing the correlate of *chroma* :math:`C`.\n C = chroma_correlate(J, n, surround.N_c, N_cb, e_t, a, b, RGB_a)\n\n # Computing the correlate of *colourfulness* :math:`M`.\n M = colourfulness_correlate(C, F_L)\n\n # Computing the correlate of *saturation* :math:`s`.\n s = saturation_correlate(M, Q)\n\n return CAM_Specification_CIECAM02(\n from_range_100(J), from_range_100(C), from_range_degrees(h),\n from_range_100(s), from_range_100(Q), from_range_100(M),\n from_range_degrees(H), None)\n\n\ndef CIECAM02_to_XYZ(specification,\n XYZ_w,\n L_A,\n Y_b,\n surround=VIEWING_CONDITIONS_CIECAM02['Average'],\n discount_illuminant=False):\n \"\"\"\n Converts from *CIECAM02* specification to *CIE XYZ* tristimulus values.\n\n This is the *inverse* implementation.\n\n Parameters\n ----------\n specification : CAM_Specification_CIECAM02\n *CIECAM02* colour appearance model specification. Correlate of\n *Lightness* :math:`J`, correlate of *chroma* :math:`C` or correlate of\n *colourfulness* :math:`M` and *hue* angle :math:`h` in degrees must be\n specified, e.g. :math:`JCh` or :math:`JMh`.\n XYZ_w : array_like\n *CIE XYZ* tristimulus values of reference white.\n L_A : numeric or array_like\n Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`, (often taken\n to be 20% of the luminance of a white object in the scene).\n Y_b : numeric or array_like\n Relative luminance of background :math:`Y_b` in :math:`cd/m^2`.\n surround : InductionFactors_CIECAM02, optional\n Surround viewing conditions.\n discount_illuminant : bool, optional\n Discount the illuminant.\n\n Returns\n -------\n XYZ : ndarray\n *CIE XYZ* tristimulus values.\n\n Raises\n ------\n ValueError\n If neither *C* or *M* correlates have been defined in the\n ``CAM_Specification_CIECAM02`` argument.\n\n Warnings\n --------\n The output range of that definition is non standard!\n\n Notes\n -----\n\n +----------------------------------+-----------------------\\\n+---------------+\n | **Domain** | **Scale - Reference** \\\n| **Scale - 1** |\n +==================================+=======================\\\n+===============+\n | ``CAM_Specification_CIECAM02.J`` | [0, 100] \\\n| [0, 1] |\n +----------------------------------+-----------------------\\\n+---------------+\n | ``CAM_Specification_CIECAM02.C`` | [0, 100] \\\n| [0, 1] |\n +----------------------------------+-----------------------\\\n+---------------+\n | ``CAM_Specification_CIECAM02.h`` | [0, 360] \\\n| [0, 1] |\n +----------------------------------+-----------------------\\\n+---------------+\n | ``CAM_Specification_CIECAM02.s`` | [0, 100] \\\n| [0, 1] |\n +----------------------------------+-----------------------\\\n+---------------+\n | ``CAM_Specification_CIECAM02.Q`` | [0, 100] \\\n| [0, 1] |\n +----------------------------------+-----------------------\\\n+---------------+\n | ``CAM_Specification_CIECAM02.M`` | [0, 100] \\\n| [0, 1] |\n +----------------------------------+-----------------------\\\n+---------------+\n | ``CAM_Specification_CIECAM02.H`` | [0, 360] \\\n| [0, 1] |\n +----------------------------------+-----------------------\\\n+---------------+\n | ``XYZ_w`` | [0, 100] \\\n| [0, 1] |\n +----------------------------------+-----------------------\\\n+---------------+\n\n +------------------------------+-----------------------+---------------+\n | **Range** | **Scale - Reference** | 
**Scale - 1** |\n +==============================+=======================+===============+\n | ``XYZ`` | [0, 100] | [0, 1] |\n +------------------------------+-----------------------+---------------+\n\n - ``CAM_Specification_CIECAM02`` can also be passed as a compatible\n argument to :func:`colour.utilities.as_namedtuple` definition.\n\n References\n ----------\n :cite:`Fairchild2004c`, :cite:`Luo2013`, :cite:`Moroneya`,\n :cite:`Wikipedia2007a`\n\n Examples\n --------\n >>> specification = CAM_Specification_CIECAM02(J=41.731091132513917,\n ... C=0.104707757171031,\n ... h=219.048432658311780)\n >>> XYZ_w = np.array([95.05, 100.00, 108.88])\n >>> L_A = 318.31\n >>> Y_b = 20.0\n >>> CIECAM02_to_XYZ(specification, XYZ_w, L_A, Y_b) # doctest: +ELLIPSIS\n array([ 19.01..., 20... , 21.78...])\n \"\"\"\n\n J, C, h, _s, _Q, M, _H, _HC = as_namedtuple(specification,\n CAM_Specification_CIECAM02)\n J = to_domain_100(J)\n C = to_domain_100(C) if C is not None else C\n h = to_domain_degrees(h)\n M = to_domain_100(M) if M is not None else M\n L_A = as_float_array(L_A)\n XYZ_w = to_domain_100(XYZ_w)\n _X_w, Y_w, _Z_w = tsplit(XYZ_w)\n\n n, F_L, N_bb, N_cb, z = tsplit(\n viewing_condition_dependent_parameters(Y_b, Y_w, L_A))\n\n if C is None and M is not None:\n C = M / spow(F_L, 0.25)\n elif C is None:\n raise ValueError('Either \"C\" or \"M\" correlate must be defined in '\n 'the \"CAM_Specification_CIECAM02\" argument!')\n\n # Converting *CIE XYZ* tristimulus values to *CMCCAT2000* transform\n # sharpened *RGB* values.\n RGB_w = vector_dot(CAT_CAT02, XYZ_w)\n\n # Computing degree of adaptation :math:`D`.\n D = (degree_of_adaptation(surround.F, L_A)\n if not discount_illuminant else ones(L_A.shape))\n\n # Computing full chromatic adaptation.\n RGB_wc = full_chromatic_adaptation_forward(RGB_w, RGB_w, Y_w, D)\n\n # Converting to *Hunt-Pointer-Estevez* colourspace.\n RGB_pw = RGB_to_rgb(RGB_wc)\n\n # Applying post-adaptation non linear response compression.\n RGB_aw = post_adaptation_non_linear_response_compression_forward(\n RGB_pw, F_L)\n\n # Computing achromatic response for the whitepoint.\n A_w = achromatic_response_forward(RGB_aw, N_bb)\n\n # Computing temporary magnitude quantity :math:`t`.\n t = temporary_magnitude_quantity_inverse(C, J, n)\n\n # Computing eccentricity factor *e_t*.\n e_t = eccentricity_factor(h)\n\n # Computing achromatic response :math:`A` for the stimulus.\n A = achromatic_response_inverse(A_w, J, surround.c, z)\n\n # Computing *P_1* to *P_3*.\n P_n = P(surround.N_c, N_cb, e_t, t, A, N_bb)\n _P_1, P_2, _P_3 = tsplit(P_n)\n\n # Computing opponent colour dimensions :math:`a` and :math:`b`.\n a, b = tsplit(opponent_colour_dimensions_inverse(P_n, h))\n\n # Computing post-adaptation non linear response compression matrix.\n RGB_a = matrix_post_adaptation_non_linear_response_compression(P_2, a, b)\n\n # Applying inverse post-adaptation non linear response compression.\n RGB_p = post_adaptation_non_linear_response_compression_inverse(RGB_a, F_L)\n\n # Converting to *Hunt-Pointer-Estevez* colourspace.\n RGB_c = rgb_to_RGB(RGB_p)\n\n # Applying inverse full chromatic adaptation.\n RGB = full_chromatic_adaptation_inverse(RGB_c, RGB_w, Y_w, D)\n\n # Converting *CMCCAT2000* transform sharpened *RGB* values to *CIE XYZ*\n # tristimulus values.\n XYZ = vector_dot(CAT02_INVERSE_CAT, RGB)\n\n return from_range_100(XYZ)\n\n\ndef chromatic_induction_factors(n):\n \"\"\"\n Returns the chromatic induction factors :math:`N_{bb}` and :math:`N_{cb}`.\n\n Parameters\n ----------\n n : numeric or 
array_like\n Function of the luminance factor of the background :math:`n`.\n\n Returns\n -------\n ndarray\n Chromatic induction factors :math:`N_{bb}` and :math:`N_{cb}`.\n\n Examples\n --------\n >>> chromatic_induction_factors(0.2) # doctest: +ELLIPSIS\n array([ 1.000304, 1.000304])\n \"\"\"\n\n n = as_float_array(n)\n\n N_bb = N_cb = 0.725 * spow(1 / n, 0.2)\n N_bbcb = tstack([N_bb, N_cb])\n\n return N_bbcb\n\n\ndef base_exponential_non_linearity(n):\n \"\"\"\n Returns the base exponential non linearity :math:`n`.\n\n Parameters\n ----------\n n : numeric or array_like\n Function of the luminance factor of the background :math:`n`.\n\n Returns\n -------\n numeric or ndarray\n Base exponential non linearity :math:`z`.\n\n Examples\n --------\n >>> base_exponential_non_linearity(0.2) # doctest: +ELLIPSIS\n 1.9272135...\n \"\"\"\n\n n = as_float_array(n)\n\n z = 1.48 + np.sqrt(n)\n\n return z\n\n\ndef viewing_condition_dependent_parameters(Y_b, Y_w, L_A):\n \"\"\"\n Returns the viewing condition dependent parameters.\n\n Parameters\n ----------\n Y_b : numeric or array_like\n Adapting field *Y* tristimulus value :math:`Y_b`.\n Y_w : numeric or array_like\n Whitepoint *Y* tristimulus value :math:`Y_w`.\n L_A : numeric or array_like\n Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`.\n\n Returns\n -------\n ndarray\n Viewing condition dependent parameters.\n\n Examples\n --------\n >>> viewing_condition_dependent_parameters(20.0, 100.0, 318.31)\n ... # doctest: +ELLIPSIS\n array([ 0.2..., 1.1675444..., 1.000304 , 1.000304 , 1.9272136...])\n \"\"\"\n\n Y_b = as_float_array(Y_b)\n Y_w = as_float_array(Y_w)\n\n n = Y_b / Y_w\n\n F_L = luminance_level_adaptation_factor(L_A)\n N_bb, N_cb = tsplit(chromatic_induction_factors(n))\n z = base_exponential_non_linearity(n)\n\n return tstack([n, F_L, N_bb, N_cb, z])\n\n\ndef degree_of_adaptation(F, L_A):\n \"\"\"\n Returns the degree of adaptation :math:`D` from given surround maximum\n degree of adaptation :math:`F` and Adapting field *luminance* :math:`L_A`\n in :math:`cd/m^2`.\n\n Parameters\n ----------\n F : numeric or array_like\n Surround maximum degree of adaptation :math:`F`.\n L_A : numeric or array_like\n Adapting field *luminance* :math:`L_A` in :math:`cd/m^2`.\n\n Returns\n -------\n numeric or ndarray\n Degree of adaptation :math:`D`.\n\n Examples\n --------\n >>> degree_of_adaptation(1.0, 318.31) # doctest: +ELLIPSIS\n 0.9944687...\n \"\"\"\n\n F = as_float_array(F)\n L_A = as_float_array(L_A)\n\n D = F * (1 - (1 / 3.6) * np.exp((-L_A - 42) / 92))\n\n return D\n\n\ndef full_chromatic_adaptation_forward(RGB, RGB_w, Y_w, D):\n \"\"\"\n Applies full chromatic adaptation to given *CMCCAT2000* transform sharpened\n *RGB* array using given *CMCCAT2000* transform sharpened whitepoint\n *RGB_w* array.\n\n Parameters\n ----------\n RGB : array_like\n *CMCCAT2000* transform sharpened *RGB* array.\n RGB_w : array_like\n *CMCCAT2000* transform sharpened whitepoint *RGB_w* array.\n Y_w : numeric or array_like\n Whitepoint *Y* tristimulus value :math:`Y_w`.\n D : numeric or array_like\n Degree of adaptation :math:`D`.\n\n Returns\n -------\n ndarray\n Adapted *RGB* array.\n\n Examples\n --------\n >>> RGB = np.array([18.985456, 20.707422, 21.747482])\n >>> RGB_w = np.array([94.930528, 103.536988, 108.717742])\n >>> Y_w = 100.0\n >>> D = 0.994468780088\n >>> full_chromatic_adaptation_forward(RGB, RGB_w, Y_w, D)\n ... 
# doctest: +ELLIPSIS\n array([ 19.9937078..., 20.0039363..., 20.0132638...])\n \"\"\"\n\n RGB = as_float_array(RGB)\n RGB_w = as_float_array(RGB_w)\n Y_w = as_float_array(Y_w)\n D = as_float_array(D)\n\n RGB_c = (((Y_w[..., np.newaxis] * D[..., np.newaxis] / RGB_w) + 1 -\n D[..., np.newaxis]) * RGB)\n\n return RGB_c\n\n\ndef full_chromatic_adaptation_inverse(RGB, RGB_w, Y_w, D):\n \"\"\"\n Reverts full chromatic adaptation of given *CMCCAT2000* transform sharpened\n *RGB* array using given *CMCCAT2000* transform sharpened whitepoint\n *RGB_w* array.\n\n Parameters\n ----------\n RGB : array_like\n *CMCCAT2000* transform sharpened *RGB* array.\n RGB_w : array_like\n *CMCCAT2000* transform sharpened whitepoint *RGB_w* array.\n Y_w : numeric or array_like\n Whitepoint *Y* tristimulus value :math:`Y_w`.\n D : numeric or array_like\n Degree of adaptation :math:`D`.\n\n Returns\n -------\n ndarray\n Adapted *RGB* array.\n\n Examples\n --------\n >>> RGB = np.array([19.99370783, 20.00393634, 20.01326387])\n >>> RGB_w = np.array([94.930528, 103.536988, 108.717742])\n >>> Y_w = 100.0\n >>> D = 0.994468780088\n >>> full_chromatic_adaptation_inverse(RGB, RGB_w, Y_w, D)\n array([ 18.985456, 20.707422, 21.747482])\n \"\"\"\n\n RGB = as_float_array(RGB)\n RGB_w = as_float_array(RGB_w)\n Y_w = as_float_array(Y_w)\n D = as_float_array(D)\n\n RGB_c = (RGB / (Y_w[..., np.newaxis] *\n (D[..., np.newaxis] / RGB_w) + 1 - D[..., np.newaxis]))\n\n return RGB_c\n\n\ndef RGB_to_rgb(RGB):\n \"\"\"\n Converts given *RGB* array to *Hunt-Pointer-Estevez*\n :math:`\\\\rho\\\\gamma\\\\beta` colourspace.\n\n Parameters\n ----------\n RGB : array_like\n *RGB* array.\n\n Returns\n -------\n ndarray\n *Hunt-Pointer-Estevez* :math:`\\\\rho\\\\gamma\\\\beta` colourspace array.\n\n Examples\n --------\n >>> RGB = np.array([19.99370783, 20.00393634, 20.01326387])\n >>> RGB_to_rgb(RGB) # doctest: +ELLIPSIS\n array([ 19.9969397..., 20.0018612..., 20.0135053...])\n \"\"\"\n\n rgb = vector_dot(matrix_dot(MATRIX_XYZ_TO_HPE, CAT02_INVERSE_CAT), RGB)\n\n return rgb\n\n\ndef rgb_to_RGB(rgb):\n \"\"\"\n Converts given *Hunt-Pointer-Estevez* :math:`\\\\rho\\\\gamma\\\\beta`\n colourspace array to *RGB* array.\n\n Parameters\n ----------\n rgb : array_like\n *Hunt-Pointer-Estevez* :math:`\\\\rho\\\\gamma\\\\beta` colourspace array.\n\n Returns\n -------\n ndarray\n *RGB* array.\n\n Examples\n --------\n >>> rgb = np.array([19.99693975, 20.00186123, 20.01350530])\n >>> rgb_to_RGB(rgb) # doctest: +ELLIPSIS\n array([ 19.9937078..., 20.0039363..., 20.0132638...])\n \"\"\"\n\n RGB = vector_dot(matrix_dot(CAT_CAT02, MATRIX_HPE_TO_XYZ), rgb)\n\n return RGB\n\n\ndef post_adaptation_non_linear_response_compression_forward(RGB, F_L):\n \"\"\"\n Returns given *CMCCAT2000* transform sharpened *RGB* array with post\n adaptation non linear response compression.\n\n Parameters\n ----------\n RGB : array_like\n *CMCCAT2000* transform sharpened *RGB* array.\n F_L : array_like\n *Luminance* level adaptation factor :math:`F_L`.\n\n Returns\n -------\n ndarray\n Compressed *CMCCAT2000* transform sharpened *RGB* array.\n\n Notes\n -----\n - This definition implements negative values handling as per\n :cite:`Luo2013`.\n\n Examples\n --------\n >>> RGB = np.array([19.99693975, 20.00186123, 20.01350530])\n >>> F_L = 1.16754446415\n >>> post_adaptation_non_linear_response_compression_forward(RGB, F_L)\n ... 
# doctest: +ELLIPSIS\n array([ 7.9463202..., 7.9471152..., 7.9489959...])\n \"\"\"\n\n RGB = as_float_array(RGB)\n F_L = as_float_array(F_L)\n\n F_L_RGB = spow(F_L[..., np.newaxis] * np.absolute(RGB) / 100, 0.42)\n RGB_c = (400 * np.sign(RGB) * F_L_RGB) / (27.13 + F_L_RGB) + 0.1\n\n return RGB_c\n\n\ndef post_adaptation_non_linear_response_compression_inverse(RGB, F_L):\n \"\"\"\n Returns given *CMCCAT2000* transform sharpened *RGB* array without post\n adaptation non linear response compression.\n\n Parameters\n ----------\n RGB : array_like\n *CMCCAT2000* transform sharpened *RGB* array.\n F_L : array_like\n *Luminance* level adaptation factor :math:`F_L`.\n\n Returns\n -------\n ndarray\n Uncompressed *CMCCAT2000* transform sharpened *RGB* array.\n\n Examples\n --------\n >>> RGB = np.array([7.94632020, 7.94711528, 7.94899595])\n >>> F_L = 1.16754446415\n >>> post_adaptation_non_linear_response_compression_inverse(RGB, F_L)\n ... # doctest: +ELLIPSIS\n array([ 19.9969397..., 20.0018612..., 20.0135052...])\n \"\"\"\n\n RGB = as_float_array(RGB)\n F_L = as_float_array(F_L)\n\n RGB_p = ((np.sign(RGB - 0.1) * (100 / F_L[..., np.newaxis]) * spow(\n (27.13 * np.absolute(RGB - 0.1)) / (400 - np.absolute(RGB - 0.1)),\n 1 / 0.42)))\n\n return RGB_p\n\n\ndef opponent_colour_dimensions_forward(RGB):\n \"\"\"\n Returns opponent colour dimensions from given compressed *CMCCAT2000*\n transform sharpened *RGB* array for forward *CIECAM02* implementation.\n\n Parameters\n ----------\n RGB : array_like\n Compressed *CMCCAT2000* transform sharpened *RGB* array.\n\n Returns\n -------\n ndarray\n Opponent colour dimensions.\n\n Examples\n --------\n >>> RGB = np.array([7.94632020, 7.94711528, 7.94899595])\n >>> opponent_colour_dimensions_forward(RGB) # doctest: +ELLIPSIS\n array([-0.0006241..., -0.0005062...])\n \"\"\"\n\n R, G, B = tsplit(RGB)\n\n a = R - 12 * G / 11 + B / 11\n b = (R + G - 2 * B) / 9\n\n ab = tstack([a, b])\n\n return ab\n\n\ndef opponent_colour_dimensions_inverse(P_n, h):\n \"\"\"\n Returns opponent colour dimensions from given points :math:`P_n` and hue\n :math:`h` in degrees for inverse *CIECAM02* implementation.\n\n Parameters\n ----------\n P_n : array_like\n Points :math:`P_n`.\n h : numeric or array_like\n Hue :math:`h` in degrees.\n\n Returns\n -------\n ndarray\n Opponent colour dimensions.\n\n Notes\n -----\n - This definition implements negative values handling as per\n :cite:`Luo2013`.\n\n Examples\n --------\n >>> P_n = np.array([30162.89081534, 24.23720547, 1.05000000])\n >>> h = -140.95156734\n >>> opponent_colour_dimensions_inverse(P_n, h) # doctest: +ELLIPSIS\n array([-0.0006241..., -0.0005062...])\n \"\"\"\n\n P_1, P_2, P_3 = tsplit(P_n)\n hr = np.radians(h)\n\n sin_hr = np.sin(hr)\n cos_hr = np.cos(hr)\n\n P_4 = P_1 / sin_hr\n P_5 = P_1 / cos_hr\n n = P_2 * (2 + P_3) * (460 / 1403)\n\n a = zeros(hr.shape)\n b = zeros(hr.shape)\n\n b = np.where(\n np.isfinite(P_1) * np.abs(sin_hr) >= np.abs(cos_hr),\n (n / (P_4 + (2 + P_3) * (220 / 1403) * (cos_hr / sin_hr) -\n (27 / 1403) + P_3 * (6300 / 1403))),\n b,\n )\n\n a = np.where(\n np.isfinite(P_1) * np.abs(sin_hr) >= np.abs(cos_hr),\n b * (cos_hr / sin_hr),\n a,\n )\n\n a = np.where(\n np.isfinite(P_1) * np.abs(sin_hr) < np.abs(cos_hr),\n (n / (P_5 + (2 + P_3) * (220 / 1403) - (\n (27 / 1403) - P_3 * (6300 / 1403)) * (sin_hr / cos_hr))),\n a,\n )\n\n b = np.where(\n np.isfinite(P_1) * np.abs(sin_hr) < np.abs(cos_hr),\n a * (sin_hr / cos_hr),\n b,\n )\n\n ab = tstack([a, b])\n\n return ab\n\n\ndef hue_angle(a, b):\n \"\"\"\n 
Returns the *hue* angle :math:`h` in degrees.\n\n Parameters\n ----------\n a : numeric or array_like\n Opponent colour dimension :math:`a`.\n b : numeric or array_like\n Opponent colour dimension :math:`b`.\n\n Returns\n -------\n numeric or ndarray\n *Hue* angle :math:`h` in degrees.\n\n Examples\n --------\n >>> a = -0.000624112068243\n >>> b = -0.000506270106773\n >>> hue_angle(a, b) # doctest: +ELLIPSIS\n 219.0484326...\n \"\"\"\n\n a = as_float_array(a)\n b = as_float_array(b)\n\n h = np.degrees(np.arctan2(b, a)) % 360\n\n return h\n\n\ndef hue_quadrature(h):\n \"\"\"\n Returns the hue quadrature from given hue :math:`h` angle in degrees.\n\n Parameters\n ----------\n h : numeric or array_like\n Hue :math:`h` angle in degrees.\n\n Returns\n -------\n numeric or ndarray\n Hue quadrature.\n\n Examples\n --------\n >>> hue_quadrature(219.0484326582719) # doctest: +ELLIPSIS\n 278.0607358...\n \"\"\"\n\n h = as_float_array(h)\n\n h_i = HUE_DATA_FOR_HUE_QUADRATURE['h_i']\n e_i = HUE_DATA_FOR_HUE_QUADRATURE['e_i']\n H_i = HUE_DATA_FOR_HUE_QUADRATURE['H_i']\n\n # *np.searchsorted* returns an erroneous index if a *nan* is used as input.\n h[np.asarray(np.isnan(h))] = 0\n i = as_int_array(np.searchsorted(h_i, h, side='left') - 1)\n\n h_ii = h_i[i]\n e_ii = e_i[i]\n H_ii = H_i[i]\n h_ii1 = h_i[i + 1]\n e_ii1 = e_i[i + 1]\n\n H = H_ii + ((100 * (h - h_ii) / e_ii) / (\n (h - h_ii) / e_ii + (h_ii1 - h) / e_ii1))\n H = np.where(\n h < 20.14,\n 385.9 + (14.1 * h / 0.856) / (h / 0.856 + (20.14 - h) / 0.8),\n H,\n )\n H = np.where(\n h >= 237.53,\n H_ii + ((85.9 * (h - h_ii) / e_ii) / (\n (h - h_ii) / e_ii + (360 - h) / 0.856)),\n H,\n )\n return as_float(H)\n\n\ndef eccentricity_factor(h):\n \"\"\"\n Returns the eccentricity factor :math:`e_t` from given hue :math:`h` angle\n in degrees for forward *CIECAM02* implementation.\n\n Parameters\n ----------\n h : numeric or array_like\n Hue :math:`h` angle in degrees.\n\n Returns\n -------\n numeric or ndarray\n Eccentricity factor :math:`e_t`.\n\n Examples\n --------\n >>> eccentricity_factor(-140.951567342) # doctest: +ELLIPSIS\n 1.1740054...\n \"\"\"\n\n h = as_float_array(h)\n\n e_t = 1 / 4 * (np.cos(2 + h * np.pi / 180) + 3.8)\n\n return e_t\n\n\ndef achromatic_response_forward(RGB, N_bb):\n \"\"\"\n Returns the achromatic response :math:`A` from given compressed\n *CMCCAT2000* transform sharpened *RGB* array and :math:`N_{bb}` chromatic\n induction factor for forward *CIECAM02* implementation.\n\n Parameters\n ----------\n RGB : array_like\n Compressed *CMCCAT2000* transform sharpened *RGB* array.\n N_bb : numeric or array_like\n Chromatic induction factor :math:`N_{bb}`.\n\n Returns\n -------\n numeric or ndarray\n Achromatic response :math:`A`.\n\n Examples\n --------\n >>> RGB = np.array([7.94632020, 7.94711528, 7.94899595])\n >>> N_bb = 1.000304004559381\n >>> achromatic_response_forward(RGB, N_bb) # doctest: +ELLIPSIS\n 23.9394809...\n \"\"\"\n\n R, G, B = tsplit(RGB)\n\n A = (2 * R + G + (1 / 20) * B - 0.305) * N_bb\n\n return A\n\n\ndef achromatic_response_inverse(A_w, J, c, z):\n \"\"\"\n Returns the achromatic response :math:`A` from given achromatic response\n :math:`A_w` for the whitepoint, *Lightness* correlate :math:`J`, surround\n exponential non linearity :math:`c` and base exponential non linearity\n :math:`z` for inverse *CIECAM02* implementation.\n\n Parameters\n ----------\n A_w : numeric or array_like\n Achromatic response :math:`A_w` for the whitepoint.\n J : numeric or array_like\n *Lightness* correlate :math:`J`.\n c : numeric 
or array_like\n Surround exponential non linearity :math:`c`.\n z : numeric or array_like\n Base exponential non linearity :math:`z`.\n\n Returns\n -------\n numeric or ndarray\n Achromatic response :math:`A`.\n\n Examples\n --------\n >>> A_w = 46.1882087914\n >>> J = 41.73109113251392\n >>> c = 0.69\n >>> z = 1.927213595499958\n >>> achromatic_response_inverse(A_w, J, c, z) # doctest: +ELLIPSIS\n 23.9394809...\n \"\"\"\n\n A_w = as_float_array(A_w)\n J = as_float_array(J)\n c = as_float_array(c)\n z = as_float_array(z)\n\n A = A_w * spow(J / 100, 1 / (c * z))\n\n return A\n\n\ndef lightness_correlate(A, A_w, c, z):\n \"\"\"\n Returns the *Lightness* correlate :math:`J`.\n\n Parameters\n ----------\n A : numeric or array_like\n Achromatic response :math:`A` for the stimulus.\n A_w : numeric or array_like\n Achromatic response :math:`A_w` for the whitepoint.\n c : numeric or array_like\n Surround exponential non linearity :math:`c`.\n z : numeric or array_like\n Base exponential non linearity :math:`z`.\n\n Returns\n -------\n numeric or ndarray\n *Lightness* correlate :math:`J`.\n\n Examples\n --------\n >>> A = 23.9394809667\n >>> A_w = 46.1882087914\n >>> c = 0.69\n >>> z = 1.9272135955\n >>> lightness_correlate(A, A_w, c, z) # doctest: +ELLIPSIS\n 41.7310911...\n \"\"\"\n\n A = as_float_array(A)\n A_w = as_float_array(A_w)\n c = as_float_array(c)\n z = as_float_array(z)\n\n J = 100 * spow(A / A_w, c * z)\n\n return J\n\n\ndef brightness_correlate(c, J, A_w, F_L):\n \"\"\"\n Returns the *brightness* correlate :math:`Q`.\n\n Parameters\n ----------\n c : numeric or array_like\n Surround exponential non linearity :math:`c`.\n J : numeric or array_like\n *Lightness* correlate :math:`J`.\n A_w : numeric or array_like\n Achromatic response :math:`A_w` for the whitepoint.\n F_L : numeric or array_like\n *Luminance* level adaptation factor :math:`F_L`.\n\n Returns\n -------\n numeric or ndarray\n *Brightness* correlate :math:`Q`.\n\n Examples\n --------\n >>> c = 0.69\n >>> J = 41.7310911325\n >>> A_w = 46.1882087914\n >>> F_L = 1.16754446415\n >>> brightness_correlate(c, J, A_w, F_L) # doctest: +ELLIPSIS\n 195.3713259...\n \"\"\"\n\n c = as_float_array(c)\n J = as_float_array(J)\n A_w = as_float_array(A_w)\n F_L = as_float_array(F_L)\n\n Q = (4 / c) * np.sqrt(J / 100) * (A_w + 4) * spow(F_L, 0.25)\n\n return Q\n\n\ndef temporary_magnitude_quantity_forward(N_c, N_cb, e_t, a, b, RGB_a):\n \"\"\"\n Returns the temporary magnitude quantity :math:`t`. for forward *CIECAM02*\n implementation.\n\n Parameters\n ----------\n N_c : numeric or array_like\n Surround chromatic induction factor :math:`N_{c}`.\n N_cb : numeric or array_like\n Chromatic induction factor :math:`N_{cb}`.\n e_t : numeric or array_like\n Eccentricity factor :math:`e_t`.\n a : numeric or array_like\n Opponent colour dimension :math:`a`.\n b : numeric or array_like\n Opponent colour dimension :math:`b`.\n RGB_a : array_like\n Compressed stimulus *CMCCAT2000* transform sharpened *RGB* array.\n\n Returns\n -------\n numeric or ndarray\n Temporary magnitude quantity :math:`t`.\n\n Examples\n --------\n >>> N_c = 1.0\n >>> N_cb = 1.00030400456\n >>> e_t = 1.174005472851914\n >>> a = -0.000624112068243\n >>> b = -0.000506270106773\n >>> RGB_a = np.array([7.94632020, 7.94711528, 7.94899595])\n >>> temporary_magnitude_quantity_forward(N_c, N_cb, e_t, a, b, RGB_a)\n ... 
# doctest: +ELLIPSIS\n 0.1497462...\n \"\"\"\n\n N_c = as_float_array(N_c)\n N_cb = as_float_array(N_cb)\n e_t = as_float_array(e_t)\n a = as_float_array(a)\n b = as_float_array(b)\n Ra, Ga, Ba = tsplit(RGB_a)\n\n t = (((50000 / 13) * N_c * N_cb) * (e_t * spow(a ** 2 + b ** 2, 0.5)) /\n (Ra + Ga + 21 * Ba / 20))\n\n return t\n\n\ndef temporary_magnitude_quantity_inverse(C, J, n):\n \"\"\"\n Returns the temporary magnitude quantity :math:`t`. for inverse *CIECAM02*\n implementation.\n\n Parameters\n ----------\n C : numeric or array_like\n *Chroma* correlate :math:`C`.\n J : numeric or array_like\n *Lightness* correlate :math:`J`.\n n : numeric or array_like\n Function of the luminance factor of the background :math:`n`.\n\n Returns\n -------\n numeric or ndarray\n Temporary magnitude quantity :math:`t`.\n\n Notes\n -----\n - This definition implements negative values handling as per\n :cite:`Luo2013`.\n\n Examples\n --------\n >>> C = 68.8364136888275\n >>> J = 41.749268505999\n >>> n = 0.2\n >>> temporary_magnitude_quantity_inverse(C, J, n) # doctest: +ELLIPSIS\n 202.3873619...\n \"\"\"\n\n C = as_float_array(C)\n J = np.maximum(J, EPSILON)\n n = as_float_array(n)\n\n t = spow(C / (np.sqrt(J / 100) * spow(1.64 - 0.29 ** n, 0.73)), 1 / 0.9)\n\n return t\n\n\ndef chroma_correlate(J, n, N_c, N_cb, e_t, a, b, RGB_a):\n \"\"\"\n Returns the *chroma* correlate :math:`C`.\n\n Parameters\n ----------\n J : numeric or array_like\n *Lightness* correlate :math:`J`.\n n : numeric or array_like\n Function of the luminance factor of the background :math:`n`.\n N_c : numeric or array_like\n Surround chromatic induction factor :math:`N_{c}`.\n N_cb : numeric or array_like\n Chromatic induction factor :math:`N_{cb}`.\n e_t : numeric or array_like\n Eccentricity factor :math:`e_t`.\n a : numeric or array_like\n Opponent colour dimension :math:`a`.\n b : numeric or array_like\n Opponent colour dimension :math:`b`.\n RGB_a : array_like\n Compressed stimulus *CMCCAT2000* transform sharpened *RGB* array.\n\n Returns\n -------\n numeric or ndarray\n *Chroma* correlate :math:`C`.\n\n Examples\n --------\n >>> J = 41.7310911325\n >>> n = 0.2\n >>> N_c = 1.0\n >>> N_cb = 1.00030400456\n >>> e_t = 1.17400547285\n >>> a = -0.000624112068243\n >>> b = -0.000506270106773\n >>> RGB_a = np.array([7.94632020, 7.94711528, 7.94899595])\n >>> chroma_correlate(J, n, N_c, N_cb, e_t, a, b, RGB_a)\n ... 
# doctest: +ELLIPSIS\n 0.1047077...\n \"\"\"\n\n J = as_float_array(J)\n n = as_float_array(n)\n\n t = temporary_magnitude_quantity_forward(N_c, N_cb, e_t, a, b, RGB_a)\n C = spow(t, 0.9) * spow(J / 100, 0.5) * spow(1.64 - 0.29 ** n, 0.73)\n\n return C\n\n\ndef colourfulness_correlate(C, F_L):\n \"\"\"\n Returns the *colourfulness* correlate :math:`M`.\n\n Parameters\n ----------\n C : numeric or array_like\n *Chroma* correlate :math:`C`.\n F_L : numeric or array_like\n *Luminance* level adaptation factor :math:`F_L`.\n\n Returns\n -------\n numeric or ndarray\n *Colourfulness* correlate :math:`M`.\n\n Examples\n --------\n >>> C = 0.104707757171\n >>> F_L = 1.16754446415\n >>> colourfulness_correlate(C, F_L) # doctest: +ELLIPSIS\n 0.1088421...\n \"\"\"\n\n C = as_float_array(C)\n F_L = as_float_array(F_L)\n\n M = C * spow(F_L, 0.25)\n\n return M\n\n\ndef saturation_correlate(M, Q):\n \"\"\"\n Returns the *saturation* correlate :math:`s`.\n\n Parameters\n ----------\n M : numeric or array_like\n *Colourfulness* correlate :math:`M`.\n Q : numeric or array_like\n *Brightness* correlate :math:`C`.\n\n Returns\n -------\n numeric or ndarray\n *Saturation* correlate :math:`s`.\n\n Examples\n --------\n >>> M = 0.108842175669\n >>> Q = 195.371325966\n >>> saturation_correlate(M, Q) # doctest: +ELLIPSIS\n 2.3603053...\n \"\"\"\n\n M = as_float_array(M)\n Q = as_float_array(Q)\n\n s = 100 * spow(M / Q, 0.5)\n\n return s\n\n\ndef P(N_c, N_cb, e_t, t, A, N_bb):\n \"\"\"\n Returns the points :math:`P_1`, :math:`P_2` and :math:`P_3`.\n\n Parameters\n ----------\n N_c : numeric or array_like\n Surround chromatic induction factor :math:`N_{c}`.\n N_cb : numeric or array_like\n Chromatic induction factor :math:`N_{cb}`.\n e_t : numeric or array_like\n Eccentricity factor :math:`e_t`.\n t : numeric or array_like\n Temporary magnitude quantity :math:`t`.\n A : numeric or array_like\n Achromatic response :math:`A` for the stimulus.\n N_bb : numeric or array_like\n Chromatic induction factor :math:`N_{bb}`.\n\n Returns\n -------\n ndarray\n Points :math:`P`.\n\n Examples\n --------\n >>> N_c = 1.0\n >>> N_cb = 1.00030400456\n >>> e_t = 1.174005472851914\n >>> t = 0.149746202921\n >>> A = 23.9394809667\n >>> N_bb = 1.00030400456\n >>> P(N_c, N_cb, e_t, t, A, N_bb) # doctest: +ELLIPSIS\n array([ 3.0162890...e+04, 2.4237205...e+01, 1.0500000...e+00])\n \"\"\"\n\n N_c = as_float_array(N_c)\n N_cb = as_float_array(N_cb)\n e_t = as_float_array(e_t)\n t = as_float_array(t)\n A = as_float_array(A)\n N_bb = as_float_array(N_bb)\n\n P_1 = ((50000 / 13) * N_c * N_cb * e_t) / t\n P_2 = A / N_bb + 0.305\n P_3 = ones(P_1.shape) * (21 / 20)\n\n P_n = tstack([P_1, P_2, P_3])\n\n return P_n\n\n\ndef matrix_post_adaptation_non_linear_response_compression(P_2, a, b):\n \"\"\"\n Returns the post-adaptation non-linear-response compression matrix.\n\n Parameters\n ----------\n P_2 : numeric or array_like\n Point :math:`P_2`.\n a : numeric or array_like\n Opponent colour dimension :math:`a`.\n b : numeric or array_like\n Opponent colour dimension :math:`b`.\n\n Returns\n -------\n ndarray\n Points :math:`P`.\n\n Examples\n --------\n >>> P_2 = 24.2372054671\n >>> a = -0.000624112068243\n >>> b = -0.000506270106773\n >>> matrix_post_adaptation_non_linear_response_compression(P_2, a, b)\n ... 
# doctest: +ELLIPSIS\n array([ 7.9463202..., 7.9471152..., 7.9489959...])\n \"\"\"\n\n P_2 = as_float_array(P_2)\n a = as_float_array(a)\n b = as_float_array(b)\n\n R_a = (460 * P_2 + 451 * a + 288 * b) / 1403\n G_a = (460 * P_2 - 891 * a - 261 * b) / 1403\n B_a = (460 * P_2 - 220 * a - 6300 * b) / 1403\n\n RGB_a = tstack([R_a, G_a, B_a])\n\n return RGB_a\n"
] |
[
[
"numpy.radians",
"numpy.maximum",
"numpy.sqrt",
"numpy.abs",
"numpy.absolute",
"numpy.linalg.inv",
"numpy.isfinite",
"numpy.isnan",
"numpy.cos",
"numpy.sin",
"numpy.sign",
"numpy.arctan2",
"numpy.searchsorted",
"numpy.exp",
"numpy.array",
"numpy.where"
]
] |
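The colour-appearance entry above pairs a forward and an inverse transform that are meant to round-trip. A minimal sketch of that property, assuming the colour-science package that ships this module is installed and re-exports `XYZ_to_CIECAM02`, `CIECAM02_to_XYZ` and `CAM_Specification_CIECAM02` at the top level:

```
import numpy as np
import colour  # assumption: the colour-science package is installed

XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
L_A, Y_b = 318.31, 20.0

# Forward model: tristimulus values -> appearance correlates (J, C, h, ...).
spec = colour.XYZ_to_CIECAM02(XYZ, XYZ_w, L_A, Y_b)

# Inverse model: per the docstring, a JCh (or JMh) specification suffices.
JCh = colour.CAM_Specification_CIECAM02(J=spec.J, C=spec.C, h=spec.h)
XYZ_r = colour.CIECAM02_to_XYZ(JCh, XYZ_w, L_A, Y_b)

np.testing.assert_allclose(XYZ_r, XYZ, atol=1e-7)  # round-trip recovers XYZ
```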
EricaXia/privacy
|
[
"a74561c929dfd01bc5f417ca42f8c7a100b349dc"
] |
[
"tensorflow_privacy/privacy/analysis/privacy_ledger.py"
] |
[
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PrivacyLedger class for keeping a record of private queries.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\nfrom tensorflow_privacy.privacy.analysis import tensor_buffer\nfrom tensorflow_privacy.privacy.dp_query import dp_query\n\nSampleEntry = collections.namedtuple( # pylint: disable=invalid-name\n 'SampleEntry', ['population_size', 'selection_probability', 'queries'])\n\nGaussianSumQueryEntry = collections.namedtuple( # pylint: disable=invalid-name\n 'GaussianSumQueryEntry', ['l2_norm_bound', 'noise_stddev'])\n\n\ndef format_ledger(sample_array, query_array):\n \"\"\"Converts array representation into a list of SampleEntries.\"\"\"\n samples = []\n query_pos = 0\n sample_pos = 0\n for sample in sample_array:\n population_size, selection_probability, num_queries = sample\n queries = []\n for _ in range(int(num_queries)):\n query = query_array[query_pos]\n assert int(query[0]) == sample_pos\n queries.append(GaussianSumQueryEntry(*query[1:]))\n query_pos += 1\n samples.append(SampleEntry(population_size, selection_probability, queries))\n sample_pos += 1\n return samples\n\n\nclass PrivacyLedger(object):\n \"\"\"Class for keeping a record of private queries.\n\n The PrivacyLedger keeps a record of all queries executed over a given dataset\n for the purpose of computing privacy guarantees. To use it, it must be\n associated with a `DPQuery` object via a `QueryWithLedger`.\n\n The current implementation works only with DPQueries that consist of composing\n Gaussian sum mechanism with Poisson subsampling.\n\n Example usage:\n\n ```\n import tensorflow_privacy as tfp\n\n dp_query = tfp.QueryWithLedger(\n tensorflow_privacy.GaussianSumQuery(\n l2_norm_clip=1.0, stddev=1.0),\n population_size=10000,\n selection_probability=0.01)\n\n # Use dp_query here in training loop.\n\n formatted_ledger = dp_query.ledger.get_formatted_ledger_eager()\n orders = ([1.25, 1.5, 1.75, 2., 2.25, 2.5, 3., 3.5, 4., 4.5] +\n list(range(5, 64)) + [128, 256, 512])\n total_rdp = tfp.compute_rdp_from_ledger(formatted_ledger, orders)\n epsilon = tfp.get_privacy_spent(orders, total_rdp, target_delta=1e-5)\n ```\n \"\"\"\n\n def __init__(self,\n population_size,\n selection_probability):\n \"\"\"Initializes the PrivacyLedger.\n\n Args:\n population_size: An integer (may be variable) specifying the size of the\n population, i.e. 
size of the training data used in each epoch.\n selection_probability: A floating point value (may be variable) specifying\n the probability each record is included in a sample.\n\n Raises:\n ValueError: If `selection_probability` is 0.\n \"\"\"\n self._population_size = population_size\n self._selection_probability = selection_probability\n\n if tf.executing_eagerly():\n if tf.equal(selection_probability, 0):\n raise ValueError('Selection probability cannot be 0.')\n init_capacity = tf.cast(tf.math.ceil(1 / selection_probability), tf.int32)\n else:\n if selection_probability == 0:\n raise ValueError('Selection probability cannot be 0.')\n init_capacity = np.int(np.ceil(1 / selection_probability))\n\n # The query buffer stores rows corresponding to GaussianSumQueryEntries.\n self._query_buffer = tensor_buffer.TensorBuffer(\n init_capacity, [3], tf.float32, 'query')\n self._sample_var = tf.Variable(\n initial_value=tf.zeros([3]), trainable=False, name='sample')\n\n # The sample buffer stores rows corresponding to SampleEntries.\n self._sample_buffer = tensor_buffer.TensorBuffer(\n init_capacity, [3], tf.float32, 'sample')\n self._sample_count = tf.Variable(\n initial_value=0.0, trainable=False, name='sample_count')\n self._query_count = tf.Variable(\n initial_value=0.0, trainable=False, name='query_count')\n self._cs = tf.CriticalSection()\n\n def record_sum_query(self, l2_norm_bound, noise_stddev):\n \"\"\"Records that a query was issued.\n\n Args:\n l2_norm_bound: The maximum l2 norm of the tensor group in the query.\n noise_stddev: The standard deviation of the noise applied to the sum.\n\n Returns:\n An operation recording the sum query to the ledger. This should be called\n for every Gaussian sum query that is issued on a sample.\n \"\"\"\n\n def _do_record_query():\n with tf.control_dependencies(\n [tf.assign(self._query_count, self._query_count + 1)]):\n return self._query_buffer.append(\n [self._sample_count, l2_norm_bound, noise_stddev])\n\n return self._cs.execute(_do_record_query)\n\n def finalize_sample(self):\n \"\"\"Finalizes sample and records sample ledger entry.\n\n This should be called once per application of the mechanism on a sample,\n after all sum queries have been recorded.\n\n Returns:\n An operation recording the complete mechanism (sampling and sum\n estimation) to the ledger.\n \"\"\"\n with tf.control_dependencies([\n tf.assign(self._sample_var, [\n self._population_size, self._selection_probability,\n self._query_count\n ])\n ]):\n with tf.control_dependencies([\n tf.assign(self._sample_count, self._sample_count + 1),\n tf.assign(self._query_count, 0)\n ]):\n return self._sample_buffer.append(self._sample_var)\n\n def get_unformatted_ledger(self):\n \"\"\"Returns the raw sample and query values.\"\"\"\n return self._sample_buffer.values, self._query_buffer.values\n\n def get_formatted_ledger(self, sess):\n \"\"\"Gets the formatted query ledger.\n\n Args:\n sess: The tensorflow session in which the ledger was created.\n\n Returns:\n The query ledger as a list of `SampleEntry` instances.\n \"\"\"\n sample_array = sess.run(self._sample_buffer.values)\n query_array = sess.run(self._query_buffer.values)\n\n return format_ledger(sample_array, query_array)\n\n def get_formatted_ledger_eager(self):\n \"\"\"Gets the formatted query ledger.\n\n Returns:\n The query ledger as a list of `SampleEntry` instances.\n \"\"\"\n sample_array = self._sample_buffer.values.numpy()\n query_array = self._query_buffer.values.numpy()\n\n return format_ledger(sample_array, 
query_array)\n\n\nclass QueryWithLedger(dp_query.DPQuery):\n \"\"\"A class for DP queries that record events to a `PrivacyLedger`.\n\n `QueryWithLedger` should be the top-level query in a structure of queries that\n may include sum queries, nested queries, etc. It should simply wrap another\n query and contain a reference to the ledger. Any contained queries (including\n those contained in the leaves of a nested query) should also contain a\n reference to the same ledger object.\n\n Only composed Gaussian sum queries with Poisson subsampling are supported.\n This includes `GaussianSumQuery`, `QuantileEstimatorQuery`, and\n `QuantileAdaptiveClipSumQuery`, as well as `NestedQuery` or `NormalizedQuery`\n objects that contain the previous mentioned query types.\n \"\"\"\n\n def __init__(self, query,\n population_size=None, selection_probability=None,\n ledger=None):\n \"\"\"Initializes the `QueryWithLedger`.\n\n Args:\n query: The query whose events should be recorded to the ledger. Any\n subqueries (including those in the leaves of a nested query) should also\n contain a reference to the same ledger given here.\n population_size: An integer (may be variable) specifying the size of the\n population, i.e. size of the training data used in each epoch. May be\n `None` if `ledger` is specified.\n selection_probability: A floating point value (may be variable) specifying\n the probability each record is included in a sample under Poisson\n subsampling. May be `None` if `ledger` is specified.\n ledger: A `PrivacyLedger` to use. Must be specified if either of\n `population_size` or `selection_probability` is `None`.\n \"\"\"\n self._query = query\n if population_size is not None and selection_probability is not None:\n self.set_ledger(PrivacyLedger(population_size, selection_probability))\n elif ledger is not None:\n self.set_ledger(ledger)\n else:\n raise ValueError('One of (population_size, selection_probability) or '\n 'ledger must be specified.')\n\n @property\n def ledger(self):\n \"\"\"Gets the ledger that all inner queries record to.\"\"\"\n return self._ledger\n\n def set_ledger(self, ledger):\n \"\"\"Sets a new ledger.\"\"\"\n self._ledger = ledger\n self._query.set_ledger(ledger)\n\n def initial_global_state(self):\n \"\"\"Implements `tensorflow_privacy.DPQuery.initial_global_state`.\"\"\"\n return self._query.initial_global_state()\n\n def derive_sample_params(self, global_state):\n \"\"\"Implements `tensorflow_privacy.DPQuery.derive_sample_params`.\"\"\"\n return self._query.derive_sample_params(global_state)\n\n def initial_sample_state(self, template):\n \"\"\"Implements `tensorflow_privacy.DPQuery.initial_sample_state`.\"\"\"\n return self._query.initial_sample_state(template)\n\n def preprocess_record(self, params, record):\n \"\"\"Implements `tensorflow_privacy.DPQuery.preprocess_record`.\"\"\"\n return self._query.preprocess_record(params, record)\n\n def accumulate_preprocessed_record(self, sample_state, preprocessed_record):\n \"\"\"Implements `tensorflow_privacy.DPQuery.accumulate_preprocessed_record`.\"\"\"\n return self._query.accumulate_preprocessed_record(\n sample_state, preprocessed_record)\n\n def merge_sample_states(self, sample_state_1, sample_state_2):\n \"\"\"Implements `tensorflow_privacy.DPQuery.merge_sample_states`.\"\"\"\n return self._query.merge_sample_states(sample_state_1, sample_state_2)\n\n def get_noised_result(self, sample_state, global_state):\n \"\"\"Implements `tensorflow_privacy.DPQuery.derive_metrics`.\n\n Besides noising and returning the result of 
the inner query, ensures that\n the sample is recorded to the ledger.\n\n Args:\n sample_state: The sample state after all records have been accumulated.\n global_state: The global state, storing long-term privacy bookkeeping.\n\n Returns:\n A tuple (result, new_global_state) where \"result\" is the result of the\n query and \"new_global_state\" is the updated global state.\n \"\"\"\n # Ensure sample_state is fully aggregated before calling get_noised_result.\n with tf.control_dependencies(tf.nest.flatten(sample_state)):\n result, new_global_state = self._query.get_noised_result(\n sample_state, global_state)\n\n # Ensure inner queries have recorded before finalizing.\n with tf.control_dependencies(tf.nest.flatten(result)):\n finalize = self._ledger.finalize_sample()\n\n # Ensure finalizing happens.\n with tf.control_dependencies([finalize]):\n return tf.nest.map_structure(tf.identity, result), new_global_state\n"
] |
[
[
"tensorflow.compat.v1.Variable",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.math.ceil",
"tensorflow.compat.v1.assign",
"tensorflow.compat.v1.nest.map_structure",
"tensorflow.compat.v1.control_dependencies",
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.executing_eagerly",
"numpy.ceil",
"tensorflow.compat.v1.CriticalSection",
"tensorflow.compat.v1.nest.flatten"
]
] |
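Mirroring the `PrivacyLedger` docstring above, a minimal eager-mode sketch that wires a Gaussian sum query to a ledger; the `gaussian_query` module path is an assumption about this codebase's layout:

```
import tensorflow.compat.v1 as tf
from tensorflow_privacy.privacy.analysis import privacy_ledger
from tensorflow_privacy.privacy.dp_query import gaussian_query

if not tf.executing_eagerly():
    tf.enable_eager_execution()

# Top-level query: records every Gaussian sum it executes to the ledger.
query = privacy_ledger.QueryWithLedger(
    gaussian_query.GaussianSumQuery(l2_norm_clip=1.0, stddev=1.0),
    population_size=10000,
    selection_probability=0.01)

# ... use `query` in a training loop, then read the ledger back:
formatted_ledger = query.ledger.get_formatted_ledger_eager()
```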
dnyaneshb25/dnyaneshbhonde
|
[
"f182448af374732e5b13c58fbcd62867310959ef"
] |
[
"Code/YOLO/darkflow/darkflow/dark/layer.py"
] |
[
"from ..utils import loader\nimport numpy as np\n\nclass Layer(object):\n\n def __init__(self, *args):\n self._signature = list(args)\n self.type = list(args)[0]\n self.number = list(args)[1]\n\n self.w = dict() # weights\n self.h = dict() # placeholders\n self.wshape = dict() # weight shape\n self.wsize = dict() # weight size\n self.setup(*args[2:]) # set attr up\n self.present()\n for var in self.wshape:\n shp = self.wshape[var]\n size = np.prod(shp)\n self.wsize[var] = size\n\n def load(self, src_loader):\n var_lay = src_loader.VAR_LAYER\n if self.type not in var_lay: return\n\n src_type = type(src_loader)\n if src_type is loader.weights_loader:\n wdict = self.load_weights(src_loader)\n else: \n wdict = self.load_ckpt(src_loader)\n if wdict is not None:\n self.recollect(wdict)\n\n def load_weights(self, src_loader):\n val = src_loader([self.presenter])\n if val is None: return None\n else: return val.w\n\n def load_ckpt(self, src_loader):\n result = dict()\n presenter = self.presenter\n for var in presenter.wshape:\n name = presenter.varsig(var)\n shape = presenter.wshape[var]\n key = [name, shape]\n val = src_loader(key)\n result[var] = val\n return result\n\n @property\n def signature(self):\n return self._signature\n\n # For comparing two layers\n def __eq__(self, other):\n return self.signature == other.signature\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def varsig(self, var):\n if var not in self.wshape:\n return None\n sig = str(self.number)\n sig += '-' + self.type\n sig += '/' + var\n return sig\n\n def recollect(self, w): self.w = w\n def present(self): self.presenter = self\n def setup(self, *args): pass\n def finalize(self): pass "
] |
[
[
"numpy.prod"
]
] |
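A hypothetical subclass sketch of the `Layer` contract above: `__init__` forwards everything after the `(type, number)` pair to `setup`, which is expected to fill `wshape`; `wsize` and `varsig` then behave as shown. `ConvLayer` and its arguments are illustrative, not darkflow names:

```
class ConvLayer(Layer):  # assumes Layer from the module above is in scope
    def setup(self, ksize, c_in, c_out):
        # Declare weight shapes; __init__ derives wsize via np.prod.
        self.wshape = {'kernel': [ksize, ksize, c_in, c_out],
                       'biases': [c_out]}

lay = ConvLayer('convolutional', 0, 3, 16, 32)
assert lay.wsize['kernel'] == 3 * 3 * 16 * 32
assert lay.varsig('kernel') == '0-convolutional/kernel'
```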
ENSYSTRA/EU-SES
|
[
"174b61aa9b3e0ffdcb843fa3c16c47c0c1175afd"
] |
[
"euses/model.py"
] |
[
"import geopandas as gpd\nimport pandas as pd\nimport sys\nimport ruamel.yaml\nfrom geopy import distance\nyaml = ruamel.yaml.YAML()\nfrom . import parameters as pr\nimport os\n\nvre_dic = {'Wind':['onshore_wind',5],'Solar':['rooftop_pv',170],'Wind Offshore':['offshore_wind',5.36]}\n\ndc_links = pd.read_csv('data/links/dc_links.csv')\n\ndef export_timeseries(regions_geo, ds_regions,data_name,sign):\n df = pd.DataFrame(index= ds_regions.time.values)\n for i,rows in regions_geo.iterrows():\n if len(ds_regions[data_name].loc[rows.nuts_2s].values) != 0:\n df[rows.id] = ds_regions[data_name].loc[rows.nuts_2s].values\n df = df * sign\n df.to_csv('calliope_model/timeseries_data/{}.csv'.format(data_name))\n\ndef create_timeseries_csv(regions_geo, ds_regions):\n data_list = [{'power':-1}, {'heat':-1}, {'pv_cf':1}, {'wind_cf':1},\n {'wind_offshore_cf':1}, {'hydro_inflow':1},\n {'cop_air':1}]\n\n for series in data_list:\n v, k = series.popitem()\n export_timeseries(regions_geo, ds_regions,v,k)\n\ndef create_location_yaml(regions_geo, ds_regions, sectors):\n ds_regions[\"power_plants\"] = ds_regions[\"power_plants\"].groupby('tech').sum('fuel')\n yaml = ruamel.yaml.YAML()\n\n if len(regions_geo) > 1 :\n dict_file = {'locations': {},'links': {}}\n else:\n dict_file = {'locations': {}}\n\n line_lenght = [0]\n for i, rows in regions_geo.iterrows():\n dict_file['locations'][rows.id]= {}\n coords = rows.geometry.centroid\n dict_file['locations'][rows.id]['coordinates'] = {'lat':round(coords.y,2),'lon':round(coords.x,2)}\n dict_file['locations'][rows.id]['techs'] = {}\n dict_file['locations'][rows.id]['techs']['demand_electricity'] = {'constraints':{'resource':'file=power.csv'}}\n\n\n if 'heat' in sectors:\n dict_file['locations'][rows.id]['techs']['demand_heat'] = {'constraints':{'resource':'file=heat.csv'}}\n for add_tech in ['supply_gas','supply_biogas', 'heat_pump_air']:\n dict_file['locations'][rows.id]['techs'][add_tech] = None\n\n for tech_dic in [{'tech':'power_plants'}, {'hydro_tech':'hydro_capacity'}]:\n tech_coords, tech_var = tech_dic.popitem()\n for tech in ds_regions.coords[tech_coords].values:\n installed_capacity = ds_regions[tech_var].loc[rows.nuts_2s,tech].values.item()\n if tech != 'Hydro':\n dict_file['locations'][rows.id]['techs'][tech.lower().replace(' ','_')] = None\n if tech in vre_dic.keys():\n dict_file['locations'][rows.id]['techs'][tech.lower().replace(' ','_')] = {'constraints':{'energy_cap_min':installed_capacity}}\n area_max = ds_regions[vre_dic.get(tech)[0]].loc[rows.nuts_2s].values.item()\n if tech == 'Solar':\n area_max = area_max + ds_regions['utility_pv'].loc[rows.nuts_2s].values.item()\n if area_max*vre_dic.get(tech)[1] < installed_capacity:\n area_max = (installed_capacity / vre_dic.get(tech)[1])+1\n dict_file['locations'][rows.id]['techs'][tech.lower().replace(' ','_')]['constraints']['resource_area_max'] = area_max\n if tech in ['HPHS', 'HDAM']:\n storage_capacity = ds_regions['hydro_storage'].loc[rows.nuts_2s,tech].values.item()\n if storage_capacity == 0:\n storage_capacity = 6*installed_capacity\n dict_file['locations'][rows.id]['techs'][tech.lower().replace(' ','_')] = {'constraints':{'energy_cap_equals':installed_capacity}}\n dict_file['locations'][rows.id]['techs'][tech.lower().replace(' ','_')]['constraints']['storage_cap_equals'] = storage_capacity\n if tech in ['Combined cycle']:\n dict_file['locations'][rows.id]['techs'][tech.lower().replace(' ','_')] = {'constraints':{'energy_cap_min':installed_capacity}}\n if tech in ['HROR']:\n 
dict_file['locations'][rows.id]['techs'][tech.lower().replace(' ','_')] = {'constraints':{'energy_cap_equals':installed_capacity}}\n\n\n for techs in ['battery', 'hydrogen']:\n dict_file['locations'][rows.id]['techs'][techs] = None\n\n for j, rows_2 in regions_geo.iterrows():\n g1_geo = rows.geometry.buffer(0.0001)\n g2_geo = rows_2.geometry.buffer(0.0001)\n fr = rows.geometry.centroid\n to = rows_2.geometry.centroid\n length = int(distance.distance((fr.y,fr.x), (to.y,to.x)).km*1.25)\n if g1_geo.intersects(g2_geo) == True and length not in line_lenght:\n line_lenght.append(length)\n trans_dic = {'techs':{'ac_transmission': {'distance':length/1e2} }}\n dict_file['links']['{},{}'.format(rows.id, rows_2.id)] = trans_dic\n\n for i,rows in dc_links.iterrows():\n rows_filtr_from = regions_geo[regions_geo.nuts_2s.astype(str).str.contains(rows['from'],regex=True)]\n rows_filtr_to = regions_geo[regions_geo.nuts_2s.astype(str).str.contains(rows['to'],regex=True)]\n if (rows_filtr_to.empty or rows_filtr_from.empty) == False:\n fr_index = rows_filtr_from.id.values[0]\n to_index = rows_filtr_to.id.values[0]\n if rows_filtr_from.iloc[0].id != rows_filtr_to.iloc[0].id:\n trans_dic = {'techs':{'dc_transmission': {'constraints':{'energy_cap_equals':rows.capacity},'distance':rows.length/1e2} }}\n dict_file['links']['{},{}'.format(fr_index, to_index)] = trans_dic\n\n\n with open(r'calliope_model/model_config/locations.yaml', 'w') as file:\n documents = yaml.dump(dict_file, file)\n\ndef create_model_yaml(self, regions_geo, sectors, op_mode, co2_cap_factor):\n ds_regions = self.ds_regions\n pop_factor = ds_regions[\"population\"].sum()/500.9e6\n year = self.year\n\n dict_file = {'import': {}, 'model': {}, 'run': {}}\n dict_file['import'] = ['model_config/techs_elec.yaml','model_config/locations.yaml', 'scenarios.yaml']\n\n dict_file['model']['name'] = 'ESES model'\n dict_file['model']['calliope_version'] = '0.6.5'\n dict_file['model']['timeseries_data_path'] = 'timeseries_data'\n dict_file['model']['subset_time'] = ['{}-01-01'.format(year), '{}-12-31'.format(year)]\n # dict_file['model']['time'] = {'function':'resample','function_options':{'resolution': '3H'}}\n\n dict_file['run']['solver'] = 'cbc'\n dict_file['run']['ensure_feasibility'] = 'false'\n dict_file['run']['bigM'] = 1e9\n dict_file['run']['zero_threshold'] = 1e-15\n dict_file['run']['mode'] = op_mode\n dict_file['run']['objective_options.cost_class'] = {'monetary': 1}\n\n dict_file['group_constraints'] = {}\n if op_mode == 'plan':\n # for i,rows in regions_geo.iterrows():\n # dict_file['group_constraints']['{}_land_area_cap'.format(rows.id)] = {}\n # dict_file['group_constraints']['{}_land_area_cap'.format(rows.id)]['techs'] =['wind','solar']\n # dict_file['group_constraints']['{}_land_area_cap'.format(rows.id)]['locs'] = [rows.id]\n # area_max = ds_regions['land_area'].loc[rows.nuts_2s].values.item()\n # wind_solar_area = (ds_regions['power_plants'].loc[rows.nuts_2s,'Solar'].values.item() / tech_area.get('Solar')) + (ds_regions['power_plants'].loc[rows.nuts_2s,'Wind'].values.item() / tech_area.get('Wind'))\n # if area_max < wind_solar_area:\n # area_max = wind_solar_area + 1\n # dict_file['group_constraints']['{}_land_area_cap'.format(rows.id)]['resource_area_max'] = area_max\n\n # CO2 emissions constraint\n if co2_cap_factor!=None:\n c02_vol = sum([pr.get_metadata(c,'co_2_1990') for c in self.countries])*1e6\n dict_file['group_constraints']['systemwide_co2_cap'] = {'cost_max':{'co2':co2_cap_factor*c02_vol}}\n\n else:\n dict_file['import'] = 
['model_config/techs_elec.yaml','model_config/locations.yaml']\n dict_file['run']['operation'] = {'horizon': 48, 'window': 24}\n\n\n # biogas cap\n biogas_cap = float(pop_factor) * 116.4e6\n constraint = {'techs':['supply_biogas'],'carrier_prod_max':{'gas':biogas_cap}}\n dict_file['group_constraints']['systemwide_biogas_cap'] = constraint\n\n if 'heat' in sectors:\n dict_file['import'] = ['model_config/techs_elec_heat.yaml','model_config/locations.yaml', 'scenarios.yaml']\n\n with open(r'calliope_model/model.yaml', 'w') as file:\n documents = yaml.dump(dict_file, file)\n"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
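The AC-link length rule buried in `create_location_yaml` is simply geodesic centroid distance scaled by a 1.25 routing factor and truncated to an integer; a standalone sketch with hypothetical coordinates:

```
from geopy import distance

fr = (52.37, 4.90)  # hypothetical centroid of region A (lat, lon)
to = (48.86, 2.35)  # hypothetical centroid of region B (lat, lon)

# Same computation as create_location_yaml: geodesic km * 1.25, truncated.
length = int(distance.distance(fr, to).km * 1.25)
```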
vatsalag99/mapping_self-harm_risk_twitter
|
[
"262c36f994c909714a738686b025633d832bc596"
] |
[
"app/Data Processing .py"
] |
[
"\n# coding: utf-8\n\n# In[1]:\n\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport ftfy\nimport matplotlib.pyplot as plt\nimport nltk\nimport numpy as np\nimport pandas as pd\nimport re\nimport time\n\nfrom math import exp\nfrom numpy import sign\n\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\nfrom gensim.models import KeyedVectors\nfrom nltk.corpus import stopwords\nfrom nltk import PorterStemmer\n\nfrom keras.models import Model, Sequential\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.layers import Conv1D, Dense, Input, LSTM, Embedding, Dropout, Activation, MaxPooling1D\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\n\n\n# In[69]:\nEMBEDDING_FILE = 'GoogleNews-vectors-negative300.bin.gz'\nprint(\"Loading embedding file..\")\nword2vec = KeyedVectors.load_word2vec_format(EMBEDDING_FILE, binary=True)\n\nwhile(True):\n df = pd.read_csv(\"twitter2.csv\")\n df = df[['tweet', 'location']]\n df\n\n\n # In[70]:\n\n\n df = df.dropna()\n df\n\n\n # In[118]:\n\n\n import re\n\n # Expand Contraction\n cList = {\n \"ain't\": \"am not\",\n \"aren't\": \"are not\",\n \"can't\": \"cannot\",\n \"can't've\": \"cannot have\",\n \"'cause\": \"because\",\n \"could've\": \"could have\",\n \"couldn't\": \"could not\",\n \"couldn't've\": \"could not have\",\n \"didn't\": \"did not\",\n \"doesn't\": \"does not\",\n \"don't\": \"do not\",\n \"hadn't\": \"had not\",\n \"hadn't've\": \"had not have\",\n \"hasn't\": \"has not\",\n \"haven't\": \"have not\",\n \"he'd\": \"he would\",\n \"he'd've\": \"he would have\",\n \"he'll\": \"he will\",\n \"he'll've\": \"he will have\",\n \"he's\": \"he is\",\n \"how'd\": \"how did\",\n \"how'd'y\": \"how do you\",\n \"how'll\": \"how will\",\n \"how's\": \"how is\",\n \"I'd\": \"I would\",\n \"I'd've\": \"I would have\",\n \"I'll\": \"I will\",\n \"I'll've\": \"I will have\",\n \"I'm\": \"I am\",\n \"I've\": \"I have\",\n \"isn't\": \"is not\",\n \"it'd\": \"it had\",\n \"it'd've\": \"it would have\",\n \"it'll\": \"it will\",\n \"it'll've\": \"it will have\",\n \"it's\": \"it is\",\n \"let's\": \"let us\",\n \"ma'am\": \"madam\",\n \"mayn't\": \"may not\",\n \"might've\": \"might have\",\n \"mightn't\": \"might not\",\n \"mightn't've\": \"might not have\",\n \"must've\": \"must have\",\n \"mustn't\": \"must not\",\n \"mustn't've\": \"must not have\",\n \"needn't\": \"need not\",\n \"needn't've\": \"need not have\",\n \"o'clock\": \"of the clock\",\n \"oughtn't\": \"ought not\",\n \"oughtn't've\": \"ought not have\",\n \"shan't\": \"shall not\",\n \"sha'n't\": \"shall not\",\n \"shan't've\": \"shall not have\",\n \"she'd\": \"she would\",\n \"she'd've\": \"she would have\",\n \"she'll\": \"she will\",\n \"she'll've\": \"she will have\",\n \"she's\": \"she is\",\n \"should've\": \"should have\",\n \"shouldn't\": \"should not\",\n \"shouldn't've\": \"should not have\",\n \"so've\": \"so have\",\n \"so's\": \"so is\",\n \"that'd\": \"that would\",\n \"that'd've\": \"that would have\",\n \"that's\": \"that is\",\n \"there'd\": \"there had\",\n \"there'd've\": \"there would have\",\n \"there's\": \"there is\",\n \"they'd\": \"they would\",\n \"they'd've\": \"they would have\",\n \"they'll\": \"they will\",\n \"they'll've\": \"they will have\",\n \"they're\": \"they are\",\n \"they've\": \"they have\",\n \"to've\": \"to have\",\n \"wasn't\": \"was not\",\n \"we'd\": \"we had\",\n \"we'd've\": \"we would have\",\n \"we'll\": \"we will\",\n 
\"we'll've\": \"we will have\",\n \"we're\": \"we are\",\n \"we've\": \"we have\",\n \"weren't\": \"were not\",\n \"what'll\": \"what will\",\n \"what'll've\": \"what will have\",\n \"what're\": \"what are\",\n \"what's\": \"what is\",\n \"what've\": \"what have\",\n \"when's\": \"when is\",\n \"when've\": \"when have\",\n \"where'd\": \"where did\",\n \"where's\": \"where is\",\n \"where've\": \"where have\",\n \"who'll\": \"who will\",\n \"who'll've\": \"who will have\",\n \"who's\": \"who is\",\n \"who've\": \"who have\",\n \"why's\": \"why is\",\n \"why've\": \"why have\",\n \"will've\": \"will have\",\n \"won't\": \"will not\",\n \"won't've\": \"will not have\",\n \"would've\": \"would have\",\n \"wouldn't\": \"would not\",\n \"wouldn't've\": \"would not have\",\n \"y'all\": \"you all\",\n \"y'alls\": \"you alls\",\n \"y'all'd\": \"you all would\",\n \"y'all'd've\": \"you all would have\",\n \"y'all're\": \"you all are\",\n \"y'all've\": \"you all have\",\n \"you'd\": \"you had\",\n \"you'd've\": \"you would have\",\n \"you'll\": \"you you will\",\n \"you'll've\": \"you you will have\",\n \"you're\": \"you are\",\n \"you've\": \"you have\"\n }\n\n c_re = re.compile('(%s)' % '|'.join(cList.keys()))\n\n def expandContractions(text, c_re=c_re):\n def replace(match):\n return cList[match.group(0)]\n return c_re.sub(replace, text)\n\n def clean_tweets(tweets, df):\n cleaned_tweets = []\n i = 0\n for tweet in tweets:\n new_tweet = str(tweet)\n # if url links then dont append to avoid news articles\n # also check tweet length, save those > 10 (length of word \"depression\")\n if re.match(\"(\\w+:\\/\\/\\S+)\", tweet) == None and len(tweet) > 10:\n #remove hashtag, @mention, emoji and image URLs\n new_tweet = ' '.join(re.sub(\"(@[A-Za-z0-9]+)|(\\#[A-Za-z0-9]+)|(<Emoji:.*>)|(pic\\.twitter\\.com\\/.*)\", \" \", tweet).split())\n \n #fix weirdly encoded texts\n new_tweet = ftfy.fix_text(new_tweet)\n \n #expand contraction\n new_tweet = expandContractions(new_tweet)\n\n #remove punctuation\n new_tweet = ' '.join(re.sub(\"([^0-9A-Za-z \\t])\", \" \", new_tweet).split())\n\n #stop words\n stop_words = set(stopwords.words('english'))\n word_tokens = nltk.word_tokenize(new_tweet) \n filtered_sentence = [w for w in word_tokens if not w in stop_words]\n new_tweet = ' '.join(filtered_sentence)\n\n #stemming words\n new_tweet = PorterStemmer().stem(new_tweet) \n cleaned_tweets.append(new_tweet)\n i += 1\n else:\n df = df.drop(df.index[i])\n \n return cleaned_tweets, df\n\n\n # In[120]:\n\n\n import nltk\n nltk.download('stopwords')\n nltk.download('punkt')\n\n df_arr = [x for x in df[\"tweet\"]]\n X, df = clean_tweets(df_arr, df)\n print(len(X))\n df\n\n\n # In[73]:\n\n\n MAX_NB_WORDS = 20000\n tokenizer = Tokenizer(num_words=MAX_NB_WORDS)\n tokenizer.fit_on_texts(X)\n\n\n # In[74]:\n\n\n sequence = tokenizer.texts_to_sequences(X)\n\n\n # In[75]:\n\n\n word_index = tokenizer.word_index\n print('Found %s unique tokens' % len(word_index))\n\n\n # In[76]:\n\n\n MAX_SEQUENCE_LENGTH = 140\n data = pad_sequences(sequence, maxlen=MAX_SEQUENCE_LENGTH)\n print('Shape of data_d tensor:', data.shape)\n\n\n\n # In[77]:\n\n\n nb_words = min(MAX_NB_WORDS, len(word_index))\n EMBEDDING_DIM = 300\n embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))\n\n for (word, idx) in word_index.items():\n print(word, idx)\n if word in word2vec.vocab and idx < MAX_NB_WORDS:\n embedding_matrix[idx] = word2vec.word_vec(word)\n\n\n # In[78]:\n\n\n from keras.models import model_from_json\n\n # Model reconstruction from JSON file\n 
with open('model_architecture.json', 'r') as f:\n model = model_from_json(f.read())\n\n # Load weights into the new model\n model.load_weights('model_weights.h5')\n\n\n # In[79]:\n\n\n labels_pred = model.predict(data)\n labels_pred = np.round(labels_pred.flatten())\n\n\n # In[80]:\n\n\n print(X)\n labels_pred\n\n\n # In[81]:\n\n\n i = 0\n locations = [] \n\n for x in np.nditer(labels_pred):\n if(x == 1):\n locations.append(df.iloc[i][\"location\"])\n i+=1\n\n locations\n print(len(locations))\n\n\n # In[82]:\n\n\n top = 49.3457868 # north lat\n left = -124.7844079 # west long\n right = -66.9513812 # east long\n bottom = 24.7433195 # south lat\n\n def cull(lat, lng):\n if bottom <= lat <= top and left <= lng <= right:\n return True \n return False \n\n\n # In[83]:\n\n\n from geopy.geocoders import Nominatim, ArcGIS\n from geopy.extra.rate_limiter import RateLimiter\n\n import pycountry\n\n df_coord = pd.DataFrame(columns=('Latitude', 'Longitude'))\n\n geolocator = ArcGIS(timeout=10)\n\n i = 0\n for location in locations:\n if location:\n loc = geolocator.geocode(location, exactly_one=True)\n if loc:\n print(loc.address)\n if(cull(loc.latitude, loc.longitude)):\n df_coord.loc[i] = (loc.latitude, loc.longitude)\n print(loc.latitude, loc.longitude)\n i+=1\n\n\n # In[84]:\n\n\n df_coord\n\n\n # In[57]:\n\n\n i = 0\n demo_locations = [] \n\n for x in np.nditer(labels_pred):\n demo_locations.append(df.iloc[i][\"location\"])\n i+=1\n\n demo_locations\n print(len(demo_locations))\n with open('coords.csv', 'a', encoding='utf-8') as f:\n df_coord.to_csv(f, header=False, encoding='utf-8')\n\n\n"
] |
[
[
"pandas.read_csv",
"numpy.zeros",
"numpy.nditer",
"pandas.DataFrame"
]
] |
cschupbach/tmdb
|
[
"8ccf72f8d2e83b5605eff81a06a12961819b40c7"
] |
[
"src/data/processed/process.py"
] |
[
"import pandas as pd\nimport ast\nfrom datetime import datetime as dt\nfrom utils import parse\nfrom utils import edit_tools as tools\nfrom utils import networks\nfrom utils import finalize\n\n\ndef process_series_data():\n df = pd.read_csv('../../../data/raw/series.csv')\n df['network_id'] = parse.dict_column(df['networks'], key='id')\n df['network_name'] = parse.dict_column(df['networks'], key='name')\n df['genre_id'] = parse.dict_column(df['genres'], key='id')\n df['genre_name'] = parse.dict_column(df['genres'], key='name')\n df['content_rating'] = parse.US_content_rating(df['content_ratings'])\n df['in_production'] = [int(s) for s in df['in_production']]\n df.loc[df.content_rating.isnull(), ['content_rating']] = 'NR'\n df = df.rename(columns={'id':'series_id','name':'series_name'})\n\n cols = ['series_id','series_name','first_air_date','last_air_date','in_production',\n 'network_id','episode_run_time','origin_country','original_language','popularity',\n 'status','type','vote_average','vote_count','content_rating','genre_id','genre_name']\n\n return df[cols]\n\n\ndef process_episode_data():\n series_data = process_series_data()\n data = pd.read_csv('../../../data/raw/episodes.csv')\n\n df = pd.concat([pd.DataFrame(ast.literal_eval(d)) for d in data['episodes']], sort=False)\n df['air_year'] = pd.to_datetime(df['air_date']).dt.year\n df = df[(df.air_year>=2010)&(df.air_year<=2019)]\n df = df.rename(columns={'vote_average':'ep_vote_average','vote_count':'ep_vote_count','show_id':'series_id'})\n df = df.merge(series_data, on='series_id', how='left')\n\n df = tools.edit_network_ids(df)\n df = tools.edit_runtimes(df)\n df = tools.edit_origin_countries(df)\n df = tools.edit_first_air_dates(df)\n df = tools.edit_genre_ids_names(df)\n\n network_ids = [int(nwk) for nwk in networks.get_network_ids()]\n df = df[(df.runtime.notnull())&(df.network_id.isin(network_ids))]\n df.index = range(len(df))\n\n df = finalize.finalize(df)\n\n print('Writing: ../../../data/processed/data.csv')\n df.to_csv('../../../data/processed/data.csv', index=False)\n\n return None\n\n\nif __name__ == '__main__':\n process_episode_data()\n"
] |
[
[
"pandas.read_csv",
"pandas.to_datetime"
]
] |
filmackay/flypy
|
[
"d64e70959c5c8af9e914dcc3ce1068fb99859c3a"
] |
[
"flypy/lib/arrays/tests/test_array.py"
] |
[
"# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division, absolute_import\n\nimport unittest\nfrom flypy import jit\n\nimport numpy as np\n\nclass TestArrayAttributes(unittest.TestCase):\n\n def test_array_create(self):\n @jit\n def identity(a):\n return a\n\n a = np.arange(10)\n result = identity(a)\n self.assertTrue(np.all(a == result))\n\n def test_array_length(self):\n @jit\n def length(a):\n return len(a)\n\n self.assertEqual(length(np.arange(10)), 10)\n self.assertEqual(length(np.empty((12, 8))), 12)\n\nclass TestArrayIndexing(unittest.TestCase):\n\n def test_1d_array_index(self):\n @jit\n def index(a):\n return a[6]\n\n a = np.arange(10)\n self.assertEqual(a[6], index(a))\n\n def test_2d_array_index(self):\n @jit\n def index(a):\n return a[6, 9]\n\n a = np.arange(8 * 12).reshape(8, 12)\n self.assertEqual(a[6, 9], index(a))\n\n def test_nd_array_index(self):\n @jit\n def index(a, t):\n return a[t]\n\n def test(t, dtype=np.float64):\n shape = tuple(np.array(t) + 5)\n a = np.empty(shape, dtype=dtype)\n a[t] = 6.4\n\n self.assertEqual(6.4, index(a, t))\n\n test((2,))\n test((2, 6))\n test((2, 6, 9))\n test((2, 6, 9, 4))\n test((2, 6, 9, 4, 3))\n\n def test_1d_array_setitem(self):\n @jit\n def index(a):\n a[6] = 14\n\n a = np.arange(10)\n index(a)\n self.assertEqual(a[6], 14)\n\n def test_1d_array_setitem_x(self):\n @jit\n def index(a, i):\n a[i] = 14\n\n for i in [0, 1, 8, 9]:\n a = np.arange(10)\n index(a, i)\n self.assertEqual(a[i], 14, \"1D array getitem(%d)\" % i)\n\n def test_2d_array_setitem(self):\n @jit\n def index(a):\n a[6, 9] = 14\n\n a = np.arange(8 * 12).reshape(8, 12)\n index(a)\n self.assertEqual(a[6, 9], 14)\n\n def test_2d_array_setitem_x(self):\n @jit\n def index(a, i, j):\n a[i, j] = 14\n\n x = [0, 1, 6, 7]\n y = [0, 1, 10, 11]\n for i in x:\n for j in y:\n a = np.arange(8 * 12).reshape(8, 12)\n index(a, i, j)\n self.assertEqual(a[i, j], 14, \"2D array getitem(%d, %d)\" %\n (i, j))\n\n def test_2d_array_setitem_0index(self):\n @jit\n def index(a):\n a[0, 0] = 14\n\n a = np.arange(8 * 12).reshape(8, 12)\n index(a)\n self.assertEqual(a[0, 0], 14)\n\n def test_nd_array_setitem(self):\n @jit\n def index(a, t):\n a[t] = 14\n\n def test(t, dtype=np.float64):\n shape = tuple(np.array(t) + 5)\n a = np.empty(shape, dtype=dtype)\n index(a, t)\n self.assertEqual(a[t], 14)\n\n test((2,))\n test((2, 6))\n test((2, 6, 9))\n test((2, 6, 9, 4))\n test((2, 6, 9, 4, 3))\n\n def test_partial_getitem(self):\n @jit\n def index(a):\n return a[6]\n\n a = np.arange(8 * 12).reshape(8, 12)\n result = index(a)\n self.assertEqual(len(result), 12)\n self.assertTrue(np.all(result == a[6]))\n\n def test_partial_setitem(self):\n @jit\n def index(a):\n a[6] = 4\n\n a = np.arange(8 * 12).reshape(8, 12)\n index(a)\n self.assertTrue(np.all(a[6] == 4))\n\n\nclass TestArraySlicing(unittest.TestCase):\n\n def test_1d_array_slice(self):\n @jit\n def index(a):\n return a[:]\n\n a = np.arange(10)\n self.assertTrue(np.all(a == index(a)))\n\n def test_1d_array_slice_bounds(self):\n @jit\n def index(a, start, stop, step):\n return a[start:stop:step]\n\n def test(start=0, stop=10, step=1):\n a = np.arange(10)\n result = index(a, start, stop, step)\n expected = a[start:stop:step]\n self.assertTrue(np.all(result == expected), (result, expected))\n\n # Ascending\n test(1)\n test(3)\n test(2, 8, 3)\n test(2, 9, 3)\n\n # Descending (wrap-around)\n test(-2)\n test(-2, -3)\n test(-2, -3, -1)\n\n # Wrap around and adjust\n test(-12, 3, 1)\n test(12, 4, -1)\n test(12, -3, -1)\n test(8, -12, -1)\n\n\n def 
test_2d_array_slice(self):\n @jit\n def index(a):\n return a[:, 5]\n\n a = np.arange(8 * 12).reshape(8, 12)\n result = index(a)\n self.assertTrue(np.all(a[:, 5] == result))\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=3)"
] |
[
[
"numpy.all",
"numpy.arange",
"numpy.array",
"numpy.empty"
]
] |
YoungjuNa-KR/Gaze_estimator_implementation
|
[
"c7b84189c263456c648829bc399a5edb2ec17bb8"
] |
[
"utils/utils_image.py"
] |
[
"import os\nimport math\nimport random\nimport numpy as np\nimport torch\nimport cv2\nfrom torchvision.utils import make_grid\nfrom datetime import datetime\n# import torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nfrom skimage.measure import compare_ssim\n\n'''\nmodified by Kai Zhang (github: https://github.com/cszn)\n03/03/2019\nhttps://github.com/twhui/SRGAN-pyTorch\nhttps://github.com/xinntao/BasicSR\n'''\n\nIMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP']\n\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\n\ndef get_timestamp():\n return datetime.now().strftime('%y%m%d-%H%M%S')\n\n\ndef imshow(x, title=None, cbar=False, figsize=None):\n plt.figure(figsize=figsize)\n plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray')\n if title:\n plt.title(title)\n if cbar:\n plt.colorbar()\n plt.show()\n\n\ndef surf(Z):\n from mpl_toolkits.mplot3d import Axes3D\n fig = plt.figure()\n ax = Axes3D(fig)\n X = np.arange(0, 25, 1)\n Y = np.arange(0, 25, 1)\n \n ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='rainbow')\n # ax3.contour(X, Y, Z, zdim='z', offset=-2, cmap='rainbow)\n# ax.view_init(elev=45, azim=45)\n# ax.set_xlabel(\"x\")\n# plt.title(\" \")\n plt.tight_layout(0.9)\n plt.show()\n\n\n\n\n'''\n# =======================================\n# get image pathes of files\n# =======================================\n'''\n\n\ndef get_image_paths(dataroot):\n paths = None # return None if dataroot is None\n if dataroot is not None:\n paths = sorted(_get_paths_from_images(dataroot))\n return paths\n\n\ndef _get_paths_from_images(path):\n assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)\n images = []\n for dirpath, _, fnames in sorted(os.walk(path)):\n for fname in sorted(fnames):\n if is_image_file(fname):\n img_path = os.path.join(dirpath, fname)\n images.append(img_path)\n assert images, '{:s} has no valid image file'.format(path)\n return images\n\n\n'''\n# =======================================\n# makedir\n# =======================================\n'''\n\n\ndef mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef mkdirs(paths):\n if isinstance(paths, str):\n mkdir(paths)\n else:\n for path in paths:\n mkdir(path)\n\n\ndef mkdir_and_rename(path):\n if os.path.exists(path):\n new_name = path + '_archived_' + get_timestamp()\n print('Path already exists. 
Rename it to [{:s}]'.format(new_name))\n os.rename(path, new_name)\n os.makedirs(path)\n\n\n'''\n# =======================================\n# read image from path\n# Note: opencv is fast\n# but read BGR numpy image\n# =======================================\n'''\n\n\n# ----------------------------------------\n# get single image of size HxWxn_channles (BGR)\n# ----------------------------------------\ndef read_img(path):\n # read image by cv2\n # return: Numpy float32, HWC, BGR, [0,1]\n img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # cv2.IMREAD_GRAYSCALE\n img = img.astype(np.float32) / 255.\n if img.ndim == 2:\n img = np.expand_dims(img, axis=2)\n # some images have 4 channels\n if img.shape[2] > 3:\n img = img[:, :, :3]\n return img\n\n\n# ----------------------------------------\n# get uint8 image of size HxWxn_channles (RGB)\n# ----------------------------------------\ndef imread_uint(path, n_channels=3):\n # input: path\n # output: HxWx3(RGB or GGG), or HxWx1 (G)\n if n_channels == 1:\n img = cv2.imread(path, 0) # cv2.IMREAD_GRAYSCALE\n img = np.expand_dims(img, axis=2) # HxWx1\n elif n_channels == 3:\n img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # BGR or G\n if img.ndim == 2:\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) # GGG\n else:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # RGB\n return img\n\n\ndef imsave(img, img_path):\n if img.ndim == 3:\n img = img[:, :, [2, 1, 0]]\n cv2.imwrite(img_path, img)\n\n\n'''\n# =======================================\n# numpy(single) <---> numpy(unit)\n# numpy(single) <---> tensor\n# numpy(unit) <---> tensor\n# =======================================\n'''\n\n\n# --------------------------------\n# numpy(single) <---> numpy(unit)\n# --------------------------------\n\n\ndef uint2single(img):\n\n return np.float32(img/255.)\n\ndef unit2single(img):\n\n return np.float32(img/255.)\n\ndef single2uint(img):\n\n return np.uint8((img.clip(0, 1)*255.).round())\n\n\ndef unit162single(img):\n\n return np.float32(img/65535.)\n\n\ndef single2uint16(img):\n\n return np.uint8((img.clip(0, 1)*65535.).round())\n\n\n# --------------------------------\n# numpy(unit) <---> tensor\n# uint (HxWxn_channels (RGB) or G)\n# --------------------------------\n\n\n# convert uint (HxWxn_channels) to 4-dimensional torch tensor\ndef uint2tensor4(img):\n if img.ndim == 2:\n img = np.expand_dims(img, axis=2)\n return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.).unsqueeze(0)\n\n\n# convert uint (HxWxn_channels) to 3-dimensional torch tensor\ndef uint2tensor3(img):\n if img.ndim == 2:\n img = np.expand_dims(img, axis=2)\n return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.)\n\n\n# convert torch tensor to uint\ndef tensor2uint(img):\n img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()\n if img.ndim == 3:\n img = np.transpose(img, (1, 2, 0))\n return np.uint8((img*255.0).round())\n\n\n# --------------------------------\n# numpy(single) <---> tensor\n# single (HxWxn_channels (RGB) or G)\n# --------------------------------\n\n\n# convert single (HxWxn_channels) to 4-dimensional torch tensor\ndef single2tensor4(img):\n return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0)\n\n\ndef single2tensor5(img):\n return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0)\n\n\ndef single42tensor4(img):\n return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float()\n\n# convert single (HxWxn_channels) to 3-dimensional torch tensor\ndef 
single2tensor3(img):\n return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float()\n\n\n# convert torch tensor to single\ndef tensor2single(img):\n img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()\n if img.ndim == 3:\n img = np.transpose(img, (1, 2, 0))\n\n return img\n\ndef tensor2single3(img):\n img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()\n if img.ndim == 3:\n img = np.transpose(img, (1, 2, 0))\n elif img.ndim == 2:\n img = np.expand_dims(img, axis=2)\n return img\n\n\n# from skimage.io import imread, imsave\ndef tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):\n '''\n Converts a torch Tensor into an image Numpy array of BGR channel order\n Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order\n Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)\n '''\n tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp\n tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]\n n_dim = tensor.dim()\n if n_dim == 4:\n n_img = len(tensor)\n img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()\n img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR\n elif n_dim == 3:\n img_np = tensor.numpy()\n img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR\n elif n_dim == 2:\n img_np = tensor.numpy()\n else:\n raise TypeError(\n 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))\n if out_type == np.uint8:\n img_np = (img_np * 255.0).round()\n # Important. Unlike matlab, numpy.unit8() WILL NOT round by default.\n return img_np.astype(out_type)\n\n\n'''\n# =======================================\n# image processing process on numpy image\n# augment(img_list, hflip=True, rot=True):\n# =======================================\n'''\n\n\ndef augment_img(img, mode=0):\n if mode == 0:\n return img\n elif mode == 1:\n return np.flipud(np.rot90(img))\n elif mode == 2:\n return np.flipud(img)\n elif mode == 3:\n return np.rot90(img, k=3)\n elif mode == 4:\n return np.flipud(np.rot90(img, k=2))\n elif mode == 5:\n return np.rot90(img)\n elif mode == 6:\n return np.rot90(img, k=2)\n elif mode == 7:\n return np.flipud(np.rot90(img, k=3))\n\n\ndef augment_img_np3(img, mode=0):\n if mode == 0:\n return img\n elif mode == 1:\n return img.transpose(1, 0, 2)\n elif mode == 2:\n return img[::-1, :, :]\n elif mode == 3:\n img = img[::-1, :, :]\n img = img.transpose(1, 0, 2)\n return img\n elif mode == 4:\n return img[:, ::-1, :]\n elif mode == 5:\n img = img[:, ::-1, :]\n img = img.transpose(1, 0, 2)\n return img\n elif mode == 6:\n img = img[:, ::-1, :]\n img = img[::-1, :, :]\n return img\n elif mode == 7:\n img = img[:, ::-1, :]\n img = img[::-1, :, :]\n img = img.transpose(1, 0, 2)\n return img\n\n\ndef augment_img_tensor(img, mode=0):\n img_size = img.size()\n img_np = img.data.cpu().numpy()\n if len(img_size) == 3:\n img_np = np.transpose(img_np, (1, 2, 0))\n elif len(img_size) == 4:\n img_np = np.transpose(img_np, (2, 3, 1, 0))\n img_np = augment_img(img_np, mode=mode)\n img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))\n if len(img_size) == 3:\n img_tensor = img_tensor.permute(2, 0, 1)\n elif len(img_size) == 4:\n img_tensor = img_tensor.permute(3, 2, 0, 1)\n\n return img_tensor.type_as(img)\n\n\ndef augment_imgs(img_list, hflip=False, rot=False):\n # horizontal flip OR rotate\n hflip = hflip and random.random() < 0.5\n vflip = rot and random.random() < 0.5\n rot90 = rot and random.random() < 
0.5\n\n def _augment(img):\n if hflip:\n img = img[:, ::-1, :]\n if vflip:\n img = img[::-1, :, :]\n if rot90:\n img = img.transpose(1, 0, 2)\n return img\n\n return [_augment(img) for img in img_list]\n\n\n'''\n# =======================================\n# image processing process on numpy image\n# channel_convert(in_c, tar_type, img_list):\n# rgb2ycbcr(img, only_y=True):\n# bgr2ycbcr(img, only_y=True):\n# ycbcr2rgb(img):\n# modcrop(img_in, scale):\n# =======================================\n'''\n\n\ndef rgb2ycbcr(img, only_y=True):\n '''same as matlab rgb2ycbcr\n only_y: only return Y channel\n Input:\n uint8, [0, 255]\n float, [0, 1]\n '''\n in_img_type = img.dtype\n img.astype(np.float32)\n if in_img_type != np.uint8:\n img *= 255.\n # convert\n if only_y:\n rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0\n else:\n rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],\n [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]\n if in_img_type == np.uint8:\n rlt = rlt.round()\n else:\n rlt /= 255.\n return rlt.astype(in_img_type)\n\n\ndef ycbcr2rgb(img):\n '''same as matlab ycbcr2rgb\n Input:\n uint8, [0, 255]\n float, [0, 1]\n '''\n in_img_type = img.dtype\n img.astype(np.float32)\n if in_img_type != np.uint8:\n img *= 255.\n # convert\n rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],\n [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]\n if in_img_type == np.uint8:\n rlt = rlt.round()\n else:\n rlt /= 255.\n return rlt.astype(in_img_type)\n\n\ndef bgr2ycbcr(img, only_y=True):\n '''bgr version of rgb2ycbcr\n only_y: only return Y channel\n Input:\n uint8, [0, 255]\n float, [0, 1]\n '''\n in_img_type = img.dtype\n img.astype(np.float32)\n if in_img_type != np.uint8:\n img *= 255.\n # convert\n if only_y:\n rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0\n else:\n rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],\n [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]\n if in_img_type == np.uint8:\n rlt = rlt.round()\n else:\n rlt /= 255.\n return rlt.astype(in_img_type)\n\n\ndef modcrop(img_in, scale):\n # img_in: Numpy, HWC or HW\n img = np.copy(img_in)\n if img.ndim == 2:\n H, W = img.shape\n H_r, W_r = H % scale, W % scale\n img = img[:H - H_r, :W - W_r]\n elif img.ndim == 3:\n H, W, C = img.shape\n H_r, W_r = H % scale, W % scale\n img = img[:H - H_r, :W - W_r, :]\n else:\n raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))\n return img\n\n\ndef shave(img_in, border=0):\n # img_in: Numpy, HWC or HW\n img = np.copy(img_in)\n h, w = img.shape[:2]\n img = img[border:h-border, border:w-border]\n return img\n\n\ndef channel_convert(in_c, tar_type, img_list):\n # conversion among BGR, gray and y\n if in_c == 3 and tar_type == 'gray': # BGR to gray\n gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]\n return [np.expand_dims(img, axis=2) for img in gray_list]\n elif in_c == 3 and tar_type == 'y': # BGR to y\n y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]\n return [np.expand_dims(img, axis=2) for img in y_list]\n elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR\n return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]\n else:\n return img_list\n\n\n'''\n# =======================================\n# metric, PSNR and SSIM\n# =======================================\n'''\n\n\n# ----------\n# PSNR\n# ----------\ndef calculate_psnr(img1, img2, border=0):\n # img1 and img2 have range [0, 255]\n if not img1.shape 
== img2.shape:\n raise ValueError('Input images must have the same dimensions.')\n h, w = img1.shape[:2]\n img1 = img1[border:h-border, border:w-border]\n img2 = img2[border:h-border, border:w-border]\n\n img1 = img1.astype(np.float64)\n img2 = img2.astype(np.float64)\n mse = np.mean((img1 - img2)**2)\n if mse == 0:\n return float('inf')\n return 20 * math.log10(255.0 / math.sqrt(mse))\n\ndef PSNR1(original, compressed):\n\n #ori = original.astype(np.float64)/255\n #com = compressed.astype(np.float64)/255\n psnr = cv2.PSNR(original,compressed)\n return psnr\n\n\n# ----------\n# SSIM\n# ----------\ndef calculate_ssim(img1, img2, border=0):\n '''calculate SSIM\n the same outputs as MATLAB's\n img1, img2: [0, 255]\n '''\n if not img1.shape == img2.shape:\n raise ValueError('Input images must have the same dimensions.')\n h, w = img1.shape[:2]\n img1 = img1[border:h-border, border:w-border]\n img2 = img2[border:h-border, border:w-border]\n\n if img1.ndim == 2:\n return ssim(img1, img2)\n elif img1.ndim == 3:\n if img1.shape[2] == 3:\n ssims = []\n for i in range(3):\n ssims.append(ssim(img1, img2))\n return np.array(ssims).mean()\n elif img1.shape[2] == 1:\n return ssim(np.squeeze(img1), np.squeeze(img2))\n else:\n raise ValueError('Wrong input image dimensions.')\n\ndef SSIM(original, compressed):\n # Convert the images to grayscale\n grayA = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)\n grayB = cv2.cvtColor(compressed, cv2.COLOR_BGR2GRAY)\n\n # Compute the Structural Similarity Index (SSIM) between the two\n # images, ensuring that the difference image is returned\n (score, diff) = compare_ssim(grayA, grayB, full=True)\n diff = (diff * 255).astype(\"uint8\")\n # 6. You can print only the score if you want\n print(f\"SSIM value is {score}\")\n return score\n\ndef ssim(img1, img2):\n C1 = (0.01 * 255)**2\n C2 = (0.03 * 255)**2\n\n img1 = img1.astype(np.float64)\n img2 = img2.astype(np.float64)\n kernel = cv2.getGaussianKernel(11, 1.5)\n window = np.outer(kernel, kernel.transpose())\n\n mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid\n mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]\n mu1_sq = mu1**2\n mu2_sq = mu2**2\n mu1_mu2 = mu1 * mu2\n sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq\n sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq\n sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2\n\n ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *\n (sigma1_sq + sigma2_sq + C2))\n return ssim_map.mean()\n\n\n'''\n# =======================================\n# pytorch version of matlab imresize\n# =======================================\n'''\n\n\n# matlab 'imresize' function, now only support 'bicubic'\ndef cubic(x):\n absx = torch.abs(x)\n absx2 = absx**2\n absx3 = absx**3\n return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \\\n (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))\n\n\ndef calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):\n if (scale < 1) and (antialiasing):\n # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width\n kernel_width = kernel_width / scale\n\n # Output-space coordinates\n x = torch.linspace(1, out_length, out_length)\n\n # Input-space coordinates. 
Calculate the inverse mapping such that 0.5\n # in output space maps to 0.5 in input space, and 0.5+scale in output\n # space maps to 1.5 in input space.\n u = x / scale + 0.5 * (1 - 1 / scale)\n\n # What is the left-most pixel that can be involved in the computation?\n left = torch.floor(u - kernel_width / 2)\n\n # What is the maximum number of pixels that can be involved in the\n # computation? Note: it's OK to use an extra pixel here; if the\n # corresponding weights are all zero, it will be eliminated at the end\n # of this function.\n P = math.ceil(kernel_width) + 2\n\n # The indices of the input pixels involved in computing the k-th output\n # pixel are in row k of the indices matrix.\n indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(\n 1, P).expand(out_length, P)\n\n # The weights used to compute the k-th output pixel are in row k of the\n # weights matrix.\n distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices\n # apply cubic kernel\n if (scale < 1) and (antialiasing):\n weights = scale * cubic(distance_to_center * scale)\n else:\n weights = cubic(distance_to_center)\n # Normalize the weights matrix so that each row sums to 1.\n weights_sum = torch.sum(weights, 1).view(out_length, 1)\n weights = weights / weights_sum.expand(out_length, P)\n\n # If a column in weights is all zero, get rid of it. only consider the first and last column.\n weights_zero_tmp = torch.sum((weights == 0), 0)\n if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):\n indices = indices.narrow(1, 1, P - 2)\n weights = weights.narrow(1, 1, P - 2)\n if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):\n indices = indices.narrow(1, 0, P - 2)\n weights = weights.narrow(1, 0, P - 2)\n weights = weights.contiguous()\n indices = indices.contiguous()\n sym_len_s = -indices.min() + 1\n sym_len_e = indices.max() - in_length\n indices = indices + sym_len_s - 1\n return weights, indices, int(sym_len_s), int(sym_len_e)\n\n\n# --------------------------------\n# imresize for tensor image\n# --------------------------------\ndef imresize(img, scale, antialiasing=True):\n # Now the scale should be the same for H and W\n # input: img: pytorch tensor, CHW or HW [0,1]\n # output: CHW or HW [0,1] w/o round\n need_squeeze = True if img.dim() == 2 else False\n if need_squeeze:\n img.unsqueeze_(0)\n in_C, in_H, in_W = img.size()\n out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)\n kernel_width = 4\n kernel = 'cubic'\n\n # Return the desired dimension order for performing the resize. 
The\n # strategy is to perform the resize first along the dimension with the\n # smallest scale factor.\n # Now we do not support this.\n\n # get weights and indices\n weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(\n in_H, out_H, scale, kernel, kernel_width, antialiasing)\n weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(\n in_W, out_W, scale, kernel, kernel_width, antialiasing)\n # process H dimension\n # symmetric copying\n img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)\n img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)\n\n sym_patch = img[:, :sym_len_Hs, :]\n inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(1, inv_idx)\n img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)\n\n sym_patch = img[:, -sym_len_He:, :]\n inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(1, inv_idx)\n img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)\n\n out_1 = torch.FloatTensor(in_C, out_H, in_W)\n kernel_width = weights_H.size(1)\n for i in range(out_H):\n idx = int(indices_H[i][0])\n for j in range(out_C):\n out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])\n\n # process W dimension\n # symmetric copying\n out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)\n out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)\n\n sym_patch = out_1[:, :, :sym_len_Ws]\n inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(2, inv_idx)\n out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)\n\n sym_patch = out_1[:, :, -sym_len_We:]\n inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(2, inv_idx)\n out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)\n\n out_2 = torch.FloatTensor(in_C, out_H, out_W)\n kernel_width = weights_W.size(1)\n for i in range(out_W):\n idx = int(indices_W[i][0])\n for j in range(out_C):\n out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])\n if need_squeeze:\n out_2.squeeze_()\n return out_2\n\n\n# --------------------------------\n# imresize for numpy image\n# --------------------------------\ndef imresize_np(img, scale, antialiasing=True):\n # Now the scale should be the same for H and W\n # input: img: Numpy, HWC or HW [0,1]\n # output: HWC or HW [0,1] w/o round\n img = torch.from_numpy(img)\n need_squeeze = True if img.dim() == 2 else False\n if need_squeeze:\n img.unsqueeze_(2)\n\n in_H, in_W, in_C = img.size()\n out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)\n kernel_width = 4\n kernel = 'cubic'\n\n # Return the desired dimension order for performing the resize. 
The\n # strategy is to perform the resize first along the dimension with the\n # smallest scale factor.\n # Now we do not support this.\n\n # get weights and indices\n weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(\n in_H, out_H, scale, kernel, kernel_width, antialiasing)\n weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(\n in_W, out_W, scale, kernel, kernel_width, antialiasing)\n # process H dimension\n # symmetric copying\n img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)\n img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)\n\n sym_patch = img[:sym_len_Hs, :, :]\n inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(0, inv_idx)\n img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)\n\n sym_patch = img[-sym_len_He:, :, :]\n inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(0, inv_idx)\n img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)\n\n out_1 = torch.FloatTensor(out_H, in_W, in_C)\n kernel_width = weights_H.size(1)\n for i in range(out_H):\n idx = int(indices_H[i][0])\n for j in range(out_C):\n out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])\n\n # process W dimension\n # symmetric copying\n out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)\n out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)\n\n sym_patch = out_1[:, :sym_len_Ws, :]\n inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(1, inv_idx)\n out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)\n\n sym_patch = out_1[:, -sym_len_We:, :]\n inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()\n sym_patch_inv = sym_patch.index_select(1, inv_idx)\n out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)\n\n out_2 = torch.FloatTensor(out_H, out_W, in_C)\n kernel_width = weights_W.size(1)\n for i in range(out_W):\n idx = int(indices_W[i][0])\n for j in range(out_C):\n out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])\n if need_squeeze:\n out_2.squeeze_()\n\n return out_2.numpy()\n\n\nif __name__ == '__main__':\n img = imread_uint('test.bmp',3)\n"
] |
[
[
"torch.abs",
"numpy.dot",
"numpy.expand_dims",
"numpy.squeeze",
"numpy.flipud",
"torch.sum",
"numpy.mean",
"torch.FloatTensor",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"numpy.matmul",
"torch.from_numpy",
"numpy.copy",
"numpy.float32",
"matplotlib.pyplot.figure",
"numpy.rot90",
"torch.linspace",
"torch.floor",
"matplotlib.pyplot.title",
"numpy.ascontiguousarray",
"numpy.transpose",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.colorbar"
]
] |
MuyanXiao/models
|
[
"819ea3479f0754b34cab9d23ed0a3f9242d85bf6"
] |
[
"research/deeplab/eval.py"
] |
[
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Evaluation script for the DeepLab model.\n\nSee model.py for more details and usage.\n\"\"\"\n\nfrom __future__ import print_function\nimport math\nimport six\nimport tensorflow as tf\nfrom deeplab import common\nfrom deeplab import model\nfrom deeplab.datasets import segmentation_dataset\nfrom deeplab.utils import input_generator\n\nslim = tf.contrib.slim\n\nflags = tf.app.flags\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('master', '', 'BNS name of the tensorflow server')\n\n# Settings for log directories.\n\nflags.DEFINE_string('eval_logdir', None, 'Where to write the event logs.')\n\nflags.DEFINE_string('checkpoint_dir', None, 'Directory of model checkpoints.')\n\n# Settings for evaluating the model.\n\nflags.DEFINE_integer('eval_batch_size', 1,\n 'The number of images in each batch during evaluation.')\n\nflags.DEFINE_multi_integer('eval_crop_size', [513, 513],\n 'Image crop size [height, width] for evaluation.')\n\nflags.DEFINE_integer('eval_interval_secs', 60 * 5,\n 'How often (in seconds) to run evaluation.')\n\n# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or\n# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note\n# one could use different atrous_rates/output_stride during training/evaluation.\nflags.DEFINE_multi_integer('atrous_rates', None,\n 'Atrous rates for atrous spatial pyramid pooling.')\n\nflags.DEFINE_integer('output_stride', 16,\n 'The ratio of input to output spatial resolution.')\n\n# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale test.\nflags.DEFINE_multi_float('eval_scales', [1.0],\n 'The scales to resize images for evaluation.')\n\n# Change to True for adding flipped images during test.\nflags.DEFINE_bool('add_flipped_images', False,\n 'Add flipped images for evaluation or not.')\n\n# Dataset settings.\n\nflags.DEFINE_string('dataset', 'pascal_voc_seg',\n 'Name of the segmentation dataset.')\n\nflags.DEFINE_string('eval_split', 'val',\n 'Which split of the dataset used for evaluation')\n\nflags.DEFINE_string('dataset_dir', None, 'Where the dataset reside.')\n\nflags.DEFINE_integer('max_number_of_evaluations', 1,\n 'Maximum number of eval iterations. 
Will loop '\n 'indefinitely upon nonpositive values.')\n\n\ndef main(unused_argv):\n tf.logging.set_verbosity(tf.logging.INFO)\n # Get dataset-dependent information.\n dataset = segmentation_dataset.get_dataset(\n FLAGS.dataset, FLAGS.eval_split, dataset_dir=FLAGS.dataset_dir)\n\n tf.gfile.MakeDirs(FLAGS.eval_logdir)\n tf.logging.info('Evaluating on %s set', FLAGS.eval_split)\n\n with tf.Graph().as_default():\n samples = input_generator.get(\n dataset,\n FLAGS.eval_crop_size,\n FLAGS.eval_batch_size,\n min_resize_value=FLAGS.min_resize_value,\n max_resize_value=FLAGS.max_resize_value,\n resize_factor=FLAGS.resize_factor,\n dataset_split=FLAGS.eval_split,\n is_training=False,\n model_variant=FLAGS.model_variant)\n\n model_options = common.ModelOptions(\n outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_classes},\n crop_size=FLAGS.eval_crop_size,\n atrous_rates=FLAGS.atrous_rates,\n output_stride=FLAGS.output_stride)\n\n if tuple(FLAGS.eval_scales) == (1.0,):\n tf.logging.info('Performing single-scale test.')\n predictions = model.predict_labels(samples[common.IMAGE], model_options,\n image_pyramid=FLAGS.image_pyramid)\n else:\n tf.logging.info('Performing multi-scale test.')\n predictions = model.predict_labels_multi_scale(\n samples[common.IMAGE],\n model_options=model_options,\n eval_scales=FLAGS.eval_scales,\n add_flipped_images=FLAGS.add_flipped_images)\n predictions = predictions[common.OUTPUT_TYPE]\n predictions = tf.reshape(predictions, shape=[-1])\n labels = tf.reshape(samples[common.LABEL], shape=[-1])\n weights = tf.to_float(tf.not_equal(labels, dataset.ignore_label))\n\n # Set ignore_label regions to label 0, because metrics.mean_iou requires\n # range of labels = [0, dataset.num_classes). Note the ignore_label regions\n # are not evaluated since the corresponding regions contain weights = 0.\n labels = tf.where(\n tf.equal(labels, dataset.ignore_label), tf.zeros_like(labels), labels)\n\n predictions_tag = 'miou'\n for eval_scale in FLAGS.eval_scales:\n predictions_tag += '_' + str(eval_scale)\n if FLAGS.add_flipped_images:\n predictions_tag += '_flipped'\n\n # Define the evaluation metric.\n metric_map = {}\n metric_map[predictions_tag] = tf.metrics.mean_iou(\n predictions, labels, dataset.num_classes, weights=weights)\n\n metrics_to_values, metrics_to_updates = (\n tf.contrib.metrics.aggregate_metric_map(metric_map))\n\n for metric_name, metric_value in six.iteritems(metrics_to_values):\n slim.summaries.add_scalar_summary(\n metric_value, metric_name, print_summary=True)\n\n num_batches = int(\n math.ceil(dataset.num_samples / float(FLAGS.eval_batch_size)))\n\n tf.logging.info('Eval num images %d', dataset.num_samples)\n tf.logging.info('Eval batch size %d and num batch %d',\n FLAGS.eval_batch_size, num_batches)\n\n num_eval_iters = None\n print(FLAGS.max_number_of_evaluations)\n if FLAGS.max_number_of_evaluations > 0:\n num_eval_iters = FLAGS.max_number_of_evaluations\n # slim.evaluation.evaluation_loop(\n # master=FLAGS.master,\n # checkpoint_dir=FLAGS.checkpoint_dir,\n # logdir=FLAGS.eval_logdir,\n # num_evals=num_batches,\n # eval_op=list(metrics_to_updates.values()),\n # max_number_of_evaluations=num_eval_iters,\n # eval_interval_secs=FLAGS.eval_interval_secs)\n print(\"eval\")\n slim.evaluation.evaluate_once(\n master=FLAGS.master,\n checkpoint_path=FLAGS.checkpoint_dir,\n logdir=FLAGS.eval_logdir,\n num_evals=num_batches\n )\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('checkpoint_dir')\n flags.mark_flag_as_required('eval_logdir')\n 
flags.mark_flag_as_required('dataset_dir')\n tf.app.run()\n"
] |
[
[
"tensorflow.metrics.mean_iou",
"tensorflow.not_equal",
"tensorflow.Graph",
"tensorflow.reshape",
"tensorflow.equal",
"tensorflow.contrib.metrics.aggregate_metric_map",
"tensorflow.zeros_like",
"tensorflow.gfile.MakeDirs",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.app.run"
]
] |
rlckd159/deep-graph-matching-consensus
|
[
"1656cdae27c705a0aa6d2912a24e566b8b86e1b0"
] |
[
"examples/pascal.py"
] |
[
"import os.path as osp\n\nimport argparse\nimport torch\nfrom torch_geometric.datasets import PascalVOCKeypoints as PascalVOC\nimport torch_geometric.transforms as T\nfrom torch_geometric.data import DataLoader\n\nfrom dgmc.utils import ValidPairDataset\nfrom dgmc.models import DGMC, SplineCNN\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--isotropic', action='store_true')\nparser.add_argument('--dim', type=int, default=256)\nparser.add_argument('--rnd_dim', type=int, default=128)\nparser.add_argument('--num_layers', type=int, default=2)\nparser.add_argument('--num_steps', type=int, default=10)\nparser.add_argument('--lr', type=float, default=0.001)\nparser.add_argument('--batch_size', type=int, default=512)\nparser.add_argument('--epochs', type=int, default=15)\nparser.add_argument('--test_samples', type=int, default=1000)\nargs = parser.parse_args()\n\npre_filter = lambda data: data.pos.size(0) > 0 # noqa\ntransform = T.Compose([\n T.Delaunay(),\n T.FaceToEdge(),\n T.Distance() if args.isotropic else T.Cartesian(),\n])\n\ntrain_datasets = []\ntest_datasets = []\npath = osp.join('..', 'data', 'PascalVOC')\nfor category in PascalVOC.categories:\n dataset = PascalVOC(path, category, train=True, transform=transform,\n pre_filter=pre_filter)\n train_datasets += [ValidPairDataset(dataset, dataset, sample=True)]\n dataset = PascalVOC(path, category, train=False, transform=transform,\n pre_filter=pre_filter)\n test_datasets += [ValidPairDataset(dataset, dataset, sample=True)]\ntrain_dataset = torch.utils.data.ConcatDataset(train_datasets)\ntrain_loader = DataLoader(train_dataset, args.batch_size, shuffle=True,\n follow_batch=['x_s', 'x_t'])\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\npsi_1 = SplineCNN(dataset.num_node_features, args.dim,\n dataset.num_edge_features, args.num_layers, cat=False,\n dropout=0.5)\npsi_2 = SplineCNN(args.rnd_dim, args.rnd_dim, dataset.num_edge_features,\n args.num_layers, cat=True, dropout=0.0)\nmodel = DGMC(psi_1, psi_2, num_steps=args.num_steps).to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n\n\ndef generate_y(y_col):\n y_row = torch.arange(y_col.size(0), device=device)\n return torch.stack([y_row, y_col], dim=0)\n\n\ndef train():\n model.train()\n\n total_loss = 0\n for data in train_loader:\n optimizer.zero_grad()\n data = data.to(device)\n S_0, S_L = model(data.x_s, data.edge_index_s, data.edge_attr_s,\n data.x_s_batch, data.x_t, data.edge_index_t,\n data.edge_attr_t, data.x_t_batch)\n y = generate_y(data.y)\n loss = model.loss(S_0, y)\n loss = model.loss(S_L, y) + loss if model.num_steps > 0 else loss\n loss.backward()\n optimizer.step()\n total_loss += loss.item() * (data.x_s_batch.max().item() + 1)\n\n return total_loss / len(train_loader.dataset)\n\n\[email protected]_grad()\ndef test(dataset):\n model.eval()\n\n loader = DataLoader(dataset, args.batch_size, shuffle=False,\n follow_batch=['x_s', 'x_t'])\n\n correct = num_examples = 0\n while (num_examples < args.test_samples):\n for data in loader:\n data = data.to(device)\n S_0, S_L = model(data.x_s, data.edge_index_s, data.edge_attr_s,\n data.x_s_batch, data.x_t, data.edge_index_t,\n data.edge_attr_t, data.x_t_batch)\n y = generate_y(data.y)\n correct += model.acc(S_L, y, reduction='sum')\n num_examples += y.size(1)\n\n if num_examples >= args.test_samples:\n return correct / num_examples\n\n\nfor epoch in range(1, args.epochs + 1):\n loss = train()\n print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}')\n\n accs = [100 * test(test_dataset) for test_dataset 
in test_datasets]\n accs += [sum(accs) / len(accs)]\n\n print(' '.join([c[:5].ljust(5) for c in PascalVOC.categories] + ['mean']))\n print(' '.join([f'{acc:.1f}'.ljust(5) for acc in accs]))\n"
] |
[
[
"torch.stack",
"torch.utils.data.ConcatDataset",
"torch.no_grad",
"torch.cuda.is_available"
]
] |
matthew-brett/THINGSvision
|
[
"f42aedec34bbe15f2d28e08f3c3666319d42bdec"
] |
[
"thingsvision/cornet/cornet_r.py"
] |
[
"from collections import OrderedDict\nimport torch\nfrom torch import nn\n\n\nHASH = '5930a990'\n\n\nclass Flatten(nn.Module):\n\n \"\"\"\n Helper module for flattening input tensor to 1-D for the use in Linear modules\n \"\"\"\n\n def forward(self, x):\n return x.view(x.size(0), -1)\n\n\nclass Identity(nn.Module):\n\n \"\"\"\n Helper module that stores the current tensor. Useful for accessing by name\n \"\"\"\n\n def forward(self, x):\n return x\n\n\nclass CORblock_R(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, out_shape=None):\n super().__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.out_shape = out_shape\n\n self.conv_input = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,\n stride=stride, padding=kernel_size // 2)\n self.norm_input = nn.GroupNorm(32, out_channels)\n self.nonlin_input = nn.ReLU(inplace=True)\n\n self.conv1 = nn.Conv2d(out_channels, out_channels,\n kernel_size=3, padding=1, bias=False)\n self.norm1 = nn.GroupNorm(32, out_channels)\n self.nonlin1 = nn.ReLU(inplace=True)\n\n self.output = Identity() # for an easy access to this block's output\n\n def forward(self, inp=None, state=None, batch_size=None):\n if inp is None: # at t=0, there is no input yet except to V1\n inp = torch.zeros([batch_size, self.out_channels, self.out_shape, self.out_shape])\n if self.conv_input.weight.is_cuda:\n inp = inp.cuda()\n else:\n inp = self.conv_input(inp)\n inp = self.norm_input(inp)\n inp = self.nonlin_input(inp)\n\n if state is None: # at t=0, state is initialized to 0\n state = 0\n skip = inp + state\n\n x = self.conv1(skip)\n x = self.norm1(x)\n x = self.nonlin1(x)\n\n state = self.output(x)\n output = state\n return output, state\n\n\nclass CORnet_R(nn.Module):\n\n def __init__(self, times=5):\n super().__init__()\n self.times = times\n\n self.V1 = CORblock_R(3, 64, kernel_size=7, stride=4, out_shape=56)\n self.V2 = CORblock_R(64, 128, stride=2, out_shape=28)\n self.V4 = CORblock_R(128, 256, stride=2, out_shape=14)\n self.IT = CORblock_R(256, 512, stride=2, out_shape=7)\n self.decoder = nn.Sequential(OrderedDict([\n ('avgpool', nn.AdaptiveAvgPool2d(1)),\n ('flatten', Flatten()),\n ('linear', nn.Linear(512, 1000))\n ]))\n\n def forward(self, inp):\n outputs = {'inp': inp}\n states = {}\n blocks = ['inp', 'V1', 'V2', 'V4', 'IT']\n\n for block in blocks[1:]:\n if block == 'V1': # at t=0 input to V1 is the image\n inp = outputs['inp']\n else: # at t=0 there is no input yet to V2 and up\n inp = None\n new_output, new_state = getattr(self, block)(inp, batch_size=outputs['inp'].shape[0])\n outputs[block] = new_output\n states[block] = new_state\n\n for t in range(1, self.times):\n for block in blocks[1:]:\n prev_block = blocks[blocks.index(block) - 1]\n prev_output = outputs[prev_block]\n prev_state = states[block]\n new_output, new_state = getattr(self, block)(prev_output, prev_state)\n outputs[block] = new_output\n states[block] = new_state\n\n out = self.decoder(outputs['IT'])\n return out\n"
] |
[
[
"torch.zeros",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.GroupNorm",
"torch.nn.ReLU"
]
] |
alxyok/kosmoss
|
[
"d471c8d7c9c171a99a1094f46a229e2aa415ab3b"
] |
[
"src/kosmoss/hyperopt/graphs.py"
] |
[
"import numpy as np\nimport os.path as osp\nfrom pytorch_lightning import LightningDataModule\nimport torch\nfrom torch_geometric.data import Dataset\nfrom torch_geometric.loader import DataLoader\nfrom typing import List, Tuple\n\nfrom kosmoss import CONFIG, DATA_PATH, METADATA\nfrom kosmoss.dataproc.flows import BuildGraphsFlow\n\n\nclass GNNDataset(Dataset):\n \n def __init__(self) -> None:\n \n self.timestep = str(CONFIG['timestep'])\n self.params = METADATA[str(self.timestep)]['features']\n self.num_shards = self.params['num_shards']\n \n super().__init__(DATA_PATH)\n\n @property\n def raw_file_names(self) -> list:\n return [\"\"]\n\n @property\n def processed_file_names(self) -> List[str]:\n return [osp.join(f\"graphs-{self.timestep}\", f\"data-{shard}.pt\") \n for shard in np.arange(self.num_shards)]\n \n \n def download(self) -> None:\n raise Exception(\"Execute the Notebooks in this Bootcamp following the order defined by the Readme.\")\n\n \n def process(self) -> None:\n BuildGraphsFlow()\n \n def len(self) -> int:\n return self.params['dataset_len']\n\n def get(self, idx: int) -> Tuple[torch.Tensor]:\n \n shard_size = self.len() // self.num_shards\n fileidx = idx // shard_size\n rowidx = idx % shard_size\n \n data_list = torch.load(osp.join(self.processed_dir, f\"graphs-{self.timestep}\", f'data-{fileidx}.pt'))\n data = data_list[rowidx]\n \n return data\n\n\nclass LitGNNDataModule(LightningDataModule):\n \n def __init__(self, batch_size: int) -> None:\n self.bs = batch_size\n super().__init__()\n \n def prepare_data(self) -> None:\n pass\n \n def setup(self, stage: str) -> None:\n dataset = GNNDataset().shuffle()\n length = len(dataset)\n \n self.testds = dataset[int(length * .9):]\n self.valds = dataset[int(length * .8):int(length * .9)]\n self.trainds = dataset[:int(length * .8)]\n \n \n def train_dataloader(self) -> DataLoader:\n return DataLoader(self.trainds, batch_size=self.bs, num_workers=4, shuffle=True)\n \n def val_dataloader(self) -> DataLoader:\n return DataLoader(self.valds, batch_size=self.bs, num_workers=4)\n \n def test_dataloader(self) -> DataLoader:\n return DataLoader(self.testds, batch_size=self.bs, num_workers=4)\n"
] |
[
[
"numpy.arange"
]
] |
hellozhaojian/transformers
|
[
"2355176f1ab306ada8956a06c79e9d07def57cad"
] |
[
"pre-train/bert_with_tpu.py"
] |
[
"# encoding: utf-8\n# Copyright 2019 The DeepNlp Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\"\"\"\n@file: bert_with_tpu.py\n@time: 2019/11/10 7:07 上午\n\"\"\"\n\nimport sys, os\nimport tensorflow as tf\nimport logging\nlog = logging.getLogger('tensorflow')\nlog.setLevel(logging.INFO)\n\nGOOGLE_CLOUD_PROJECT_NAME = \"pre-train-bert-sogou\" #@param {type: \"string\" }\nBUCKET_NAME = \"bert-sogou-pretrain\" #@param {type: \"string\"}\nBASE_MODEL_DIR = \"fine_tuning/base_model\" #@param {type: \"string\"}\nNEW_MODEL_DIR = \"fine_tuning/model\" #@param {type: \"string\"}\nMODEL_NAME = \"chinese_L-12_H-768_A-12\" #@param {type: \"string\"}\nINPUT_DATA_DIR = \"fine_tuning/data/zh_wiki_news_2016\" #@param {type: \"string\"}\n\nPROCESSES = 4 #@param {type: \"integer\"}\nDO_LOWER_CASE = True\nMAX_SEQ_LENGTH = 128 #@param {type : \"integer\"}\nMASKED_LM_PROB = 0.15 #@param {type: \"number\" }\n# xxxx\nMAX_PREDICTIONS = 20 #@param {type: \"integer\"\n\n\n\n#! gcloud config set project pre-train-bert-sogou\nbase_model_name = \"gs://{}/{}/{}\".format(BUCKET_NAME, BASE_MODEL_DIR, MODEL_NAME)\nfine_tuning_name = \"gs://{}/{}/{}\".format(BUCKET_NAME, NEW_MODEL_DIR, MODEL_NAME)\n#! gsutil rm -rf $fine_tuning_name\n#! gsutil cp -r $base_model_name $fine_tuning_name\n\n#\n# storage_client = storage.Client()\n# bucket = storage_client.get_bucket(BUCKET_NAME)\n\nVOC_FNAME = \"gs://{}/{}/{}/vocab.txt\".format(BUCKET_NAME, NEW_MODEL_DIR, MODEL_NAME)\nTF_RECORD_DIR = \"gs://{}/{}_tfrecord\".format(BUCKET_NAME, INPUT_DATA_DIR)\n\nfile_partitions = [[]]\nindex = 0\n\n\n# def list_files(bucketFolder):\n# \"\"\"List all files in GCP bucket.\"\"\"\n# files = bucket.list_blobs(prefix=bucketFolder, max_results=1000)\n# fileList = [file.name for file in files]\n# return fileList\n\n\nprint(INPUT_DATA_DIR)\nprint(TF_RECORD_DIR)\nFULL_INPUT_DATA_DIR = \"gs://{}/{}\".format(BUCKET_NAME, INPUT_DATA_DIR)\n# ! gsutil ls $FULL_INPUT_DATA_DIR\n\n\n# for filename in list_files(INPUT_DATA_DIR):\n# if filename.find(\"tf\") != -1 or filename.endswith(\"/\"):\n# continue\n# if len(file_partitions[index]) == PROCESSES:\n# file_partitions.append([])\n# index += 1\n# file_partitions[index].append(\"gs://{}/{}\".format(BUCKET_NAME, filename))\n\n#! gsutil\n#mkdir $TF_RECORD_DIR\n#! gsutil\n#mkdir\n#gs: // bert - sogou - pretrain / fine_tuning / data / zh_wiki_news_2016_tfrecord\n# gs://bert-sogou-pretrain/fine_tuning/data/zh_wiki_news_2016_tfrecord\n#! 
gsutil\n#ls $TF_RECORD_DIR\n\n# index = 0\n# for partition in file_partitions:\n#\n# for filename in partition:\n# print(filename, \"----\", index)\n# index += 1\n#\n# XARGS_CMD = (\"gsutil ls {} | \"\n# \"awk 'BEGIN{{FS=\\\"/\\\"}}{{print $NF}}' | \"\n# \"xargs -n 1 -P {} -I{} \"\n# \"python3 bert/create_pretraining_data.py \"\n# \"--input_file=gs://{}/{}/{} \"\n# \"--output_file={}/{}.tfrecord \"\n# \"--vocab_file={} \"\n# \"--do_lower_case={} \"\n# \"--max_predictions_per_seq={} \"\n# \"--max_seq_length={} \"\n# \"--masked_lm_prob={} \"\n# \"--random_seed=34 \"\n# \"--dupe_factor=5\")\n#\n# XARGS_CMD = XARGS_CMD.format(\" \".join(partition),\n# PROCESSES, '{}', BUCKET_NAME, INPUT_DATA_DIR, '{}',\n# TF_RECORD_DIR, '{}',\n# VOC_FNAME, DO_LOWER_CASE,\n# MAX_PREDICTIONS, MAX_SEQ_LENGTH, MASKED_LM_PROB)\n#\n# print(XARGS_CMD)\n#\n# # ! $XARGS_CMD\n#\n# if index == 2:\n# break\nfrom bert import modeling, optimization, tokenization\n\n# Input data pipeline config\nTRAIN_BATCH_SIZE = 128 #@param {type:\"integer\"}\nMAX_PREDICTIONS = 20 #@param {type:\"integer\"}\nMAX_SEQ_LENGTH = 128 #@param {type:\"integer\"}\nMASKED_LM_PROB = 0.15 #@param\n\n# Training procedure config\nEVAL_BATCH_SIZE = 64\nLEARNING_RATE = 2e-5\nTRAIN_STEPS = 1000000 #@param {type:\"integer\"}\nSAVE_CHECKPOINTS_STEPS = 250 #@param {type:\"integer\"}\nNUM_TPU_CORES = 8\n\n\n\nBERT_GCS_DIR = fine_tuning_name+\"_latest/\"\n#BERT_GCS_DIR = fine_tuning_name+\"_without_pretrain/\"\n\n#! gsutil mkdir $BERT_GCS_DIR\n\nDATA_GCS_DIR = TF_RECORD_DIR\n\nVOCAB_FILE = VOC_FNAME\n\nCONFIG_FILE = \"gs://{}/{}/{}/bert_config.json\".format(BUCKET_NAME, BASE_MODEL_DIR, MODEL_NAME)\n\n\n#! gsutil ls $BERT_GCS_DIR\n\nINIT_CHECKPOINT = \"{}/bert_model.ckpt\".format(base_model_name)\n#\"gs://bert-sogou-pretrain/fine_tuning/base_model/chinese_L-12_H-768_A-12/bert_model.ckpt\"\nTMP_INIT_CHECKPOINT = tf.train.latest_checkpoint(BERT_GCS_DIR)\nif TMP_INIT_CHECKPOINT is not None:\n INIT_CHECKPOINT = TMP_INIT_CHECKPOINT\n\n\nbert_config = modeling.BertConfig.from_json_file(CONFIG_FILE)\ninput_files = tf.gfile.Glob(os.path.join(DATA_GCS_DIR,'*tfrecord'))\n\nlog.info(\"Using checkpoint: {}\".format(INIT_CHECKPOINT))\n\nlog.info(\"Using {} data shards\".format(len(input_files)))\n\n#! 
gsutil ls $INIT_CHECKPOINT*\n\n#INIT_CHECKPOINT = None\n\nimport sys\n\nsys.path.append(\"bert\")\nfrom bert.run_pretraining import input_fn_builder, model_fn_builder\nfrom bert import modeling, optimization, tokenization\nUSE_TPU=True\nTPU_ADDRESS = \"taey2113\"\nmodel_fn = model_fn_builder(\n bert_config=bert_config,\n init_checkpoint=INIT_CHECKPOINT,\n learning_rate=LEARNING_RATE,\n num_train_steps=TRAIN_STEPS,\n num_warmup_steps=10,\n use_tpu=USE_TPU,\n use_one_hot_embeddings=True)\n\ntpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(TPU_ADDRESS)\n\nrun_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n model_dir=BERT_GCS_DIR,\n save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=SAVE_CHECKPOINTS_STEPS,\n num_shards=NUM_TPU_CORES,\n per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2))\n\nestimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=USE_TPU,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=TRAIN_BATCH_SIZE,\n eval_batch_size=EVAL_BATCH_SIZE)\n\ntrain_input_fn = input_fn_builder(\n input_files=input_files,\n max_seq_length=MAX_SEQ_LENGTH,\n max_predictions_per_seq=MAX_PREDICTIONS,\n is_training=True)\n\nestimator.train(input_fn=train_input_fn, max_steps=TRAIN_STEPS)\n\n\n\n\n"
] |
[
[
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.train.latest_checkpoint",
"tensorflow.contrib.tpu.TPUEstimator"
]
] |
jdowlingmedley/pyomeca
|
[
"6cedcb0350140094e50e76afd2fa60d13005a485"
] |
[
"tests/test_object_creation.py"
] |
[
"import numpy as np\nimport pytest\nimport xarray as xr\n\nfrom pyomeca import Analogs, Markers, Angles, Rototrans\nfrom ._constants import ANALOGS_DATA, MARKERS_DATA, EXPECTED_VALUES\nfrom .utils import is_expected_array\n\n\ndef test_analogs_creation():\n dims = (\"channel\", \"time\")\n array = Analogs()\n np.testing.assert_array_equal(x=array, y=xr.DataArray())\n assert array.dims == dims\n\n array = Analogs(ANALOGS_DATA.values)\n is_expected_array(array, **EXPECTED_VALUES[56])\n\n size = 10, 100\n array = Analogs.from_random_data(size=size)\n assert array.shape == size\n assert array.dims == dims\n\n with pytest.raises(ValueError):\n Analogs(MARKERS_DATA)\n\n\ndef test_markers_creation():\n dims = (\"axis\", \"channel\", \"time\")\n array = Markers()\n np.testing.assert_array_equal(x=array, y=xr.DataArray())\n assert array.dims == dims\n\n array = Markers(MARKERS_DATA.values)\n is_expected_array(array, **EXPECTED_VALUES[57])\n\n size = 3, 10, 100\n array = Markers.from_random_data(size=size)\n assert array.shape == (4, size[1], size[2])\n assert array.dims == dims\n\n with pytest.raises(ValueError):\n Markers(ANALOGS_DATA)\n\n\ndef test_angles_creation():\n dims = (\"axis\", \"channel\", \"time\")\n array = Angles()\n np.testing.assert_array_equal(x=array, y=xr.DataArray())\n assert array.dims == dims\n\n array = Angles(MARKERS_DATA.values, time=MARKERS_DATA.time)\n is_expected_array(array, **EXPECTED_VALUES[57])\n\n size = 10, 10, 100\n array = Angles.from_random_data(size=size)\n assert array.shape == size\n assert array.dims == dims\n\n with pytest.raises(ValueError):\n Angles(ANALOGS_DATA)\n\n\ndef test_rototrans_creation():\n dims = (\"row\", \"col\", \"time\")\n array = Rototrans()\n np.testing.assert_array_equal(x=array, y=xr.DataArray(np.eye(4)[..., np.newaxis]))\n assert array.dims == dims\n\n array = Rototrans(MARKERS_DATA.values, time=MARKERS_DATA.time)\n is_expected_array(array, **EXPECTED_VALUES[67])\n\n size = 4, 4, 100\n array = Rototrans.from_random_data(size=size)\n assert array.shape == size\n assert array.dims == dims\n\n with pytest.raises(ValueError):\n Angles(ANALOGS_DATA)\n"
] |
[
[
"numpy.eye"
]
] |
LDMDS/LDMDS-CME-TenYearFutures-CNN
|
[
"558d341750283e2867eda6cb2d473a197300bd31"
] |
[
"PredictModel.py"
] |
[
"from keras.models import Sequential\nfrom keras.layers import Convolution2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\nimport numpy as np\nfrom keras.preprocessing import image\nfrom keras.models import model_from_json\n#import pdb\n#from pprint import pprint\n#########################################\n#\n# This loads the model and asserts if its bullish or bearish\n#\n#########################################\n\n# load json and create model\njson_file = open('model.json', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nclassifier = model_from_json(loaded_model_json)\n\n# load weights into new model\nclassifier.load_weights(\"model.h5\")\nprint(\"Loaded model from disk\")\n\n# load model on test data\ntest_image=image.load_img('dataset/realtime_image/1.jpg', target_size=(64,64))\ntest_image = image.img_to_array(test_image)\ntest_image = np.expand_dims(test_image, axis =0)\n\n# evaluate loaded model on test data\nresult = classifier.predict(test_image)\nfrom keras.preprocessing.image import ImageDataGenerator\ntrain_datagen = ImageDataGenerator(rescale = 1./255,shear_range = 0.2,zoom_range = 0.2,horizontal_flip = True)\ntraining_set = train_datagen.flow_from_directory('dataset/training_set',target_size = (64, 64),batch_size = 32,class_mode = 'binary')\ntraining_set.class_indices\n#pdb.set_trace()\n \nprint(result[0][0])\n\nif result[0][0] == 1:\n prediction = 'bull'\nelse:\n prediction = 'bear'\nprint(prediction)\n\n\n# #plot the image for crosscheck\n# import matplotlib.pyplot as plt\n# import matplotlib.image as mpimg\n# img=mpimg.imread('dataset/realtime_image/1.jpg')\n# imgplot = plt.imshow(img)\n# plt.show()"
] |
[
[
"numpy.expand_dims"
]
] |
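`np.expand_dims` is the one numpy call tracked here: it turns a single (height, width, channels) image into a 1-element batch before `classifier.predict`. A shape-only sketch, no model needed:

import numpy as np

img = np.zeros((64, 64, 3), dtype=np.float32)  # stand-in for image.img_to_array output
batch = np.expand_dims(img, axis=0)
print(batch.shape)  # (1, 64, 64, 3), the (batch, h, w, c) layout Keras predict expects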
hyunwoongko/bert2bert-summarization
|
[
"69f90250cde5e66b132ad2f359d970c959f1a754"
] |
[
"test.py"
] |
[
"import json\nimport sys\n\nimport torch\nfrom tqdm import tqdm\n\nfrom transformers import EncoderDecoderConfig, BertConfig, EncoderDecoderModel\nfrom bert2bert import KoBertTokenizer\n\n\[email protected]_grad()\ndef inference():\n step = sys.argv[1]\n encoder_config = BertConfig.from_pretrained(\"monologg/kobert\")\n decoder_config = BertConfig.from_pretrained(\"monologg/kobert\")\n config = EncoderDecoderConfig.from_encoder_decoder_configs(\n encoder_config, decoder_config\n )\n\n tokenizer = KoBertTokenizer()\n model = EncoderDecoderModel(config=config)\n ckpt = \"model.pt\"\n device = \"cuda\"\n\n model.load_state_dict(\n torch.load(\n f\"saved/{ckpt}.{step}\", map_location=\"cuda\"\n ),\n strict=True,\n )\n\n model = model.half().eval().to(device)\n test_data = open(\"dataset/abstractive_test_v2.jsonl\", \"r\").read().splitlines()\n submission = open(f\"submission_{step}.csv\", \"w\")\n\n test_set = []\n for data in test_data:\n data = json.loads(data)\n article_original = data[\"article_original\"]\n article_original = \" \".join(article_original)\n news_id = data[\"id\"]\n test_set.append((news_id, article_original))\n\n for i, (news_id, text) in tqdm(enumerate(test_set)):\n tokens = tokenizer.encode_batch([text], max_length=512)\n generated = model.generate(\n input_ids=tokens[\"input_ids\"].to(device),\n attention_mask=tokens[\"attention_mask\"].to(device),\n use_cache=True,\n bos_token_id=tokenizer.token2idx[\"[CLS]\"],\n eos_token_id=tokenizer.token2idx[\"[SEP]\"],\n pad_token_id=tokenizer.token2idx[\"[PAD]\"],\n num_beams=12,\n do_sample=False,\n temperature=1.0,\n no_repeat_ngram_size=4,\n bad_words_ids=[[tokenizer.token2idx[\"[UNK]\"]]],\n length_penalty=1.5,\n max_length=512,\n )\n\n output = tokenizer.decode_batch(generated.tolist())[0]\n submission.write(f\"{news_id},{output}\" + \"\\n\")\n print(news_id, output)\n\n\nif __name__ == '__main__':\n inference()\n"
] |
[
[
"torch.no_grad",
"torch.load"
]
] |
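A self-contained sketch of the two torch calls this row tracks: checkpoint loading with `torch.load` and gradient-free inference with `torch.no_grad` (the tiny linear model and file name are illustrative):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
torch.save(model.state_dict(), "model.pt")  # stand-in for the trained checkpoint on disk

model.load_state_dict(torch.load("model.pt", map_location="cpu"), strict=True)
model.eval()

with torch.no_grad():  # same effect as the @torch.no_grad() decorator in the source
    out = model(torch.randn(1, 4))
print(out.shape)  # torch.Size([1, 2])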
raybellwaves/xESMF
|
[
"77e5d0839089ff0b9daa3c9e2d9fc82cfab2f07f"
] |
[
"xesmf/frontend.py"
] |
[
"\"\"\"\nFrontend for xESMF, exposed to users.\n\"\"\"\n\nimport warnings\n\nimport cf_xarray as cfxr\nimport numpy as np\nimport scipy.sparse as sps\nimport xarray as xr\nfrom xarray import DataArray, Dataset\n\nfrom .backend import Grid, LocStream, Mesh, add_corner, esmf_regrid_build, esmf_regrid_finalize\nfrom .smm import _combine_weight_multipoly, add_nans_to_weights, apply_weights, read_weights\nfrom .util import split_polygons_and_holes\n\ntry:\n import dask.array as da\n\n dask_array_type = (da.Array,) # for isinstance checks\nexcept ImportError:\n dask_array_type = ()\n\n\ndef as_2d_mesh(lon, lat):\n\n if (lon.ndim, lat.ndim) == (2, 2):\n assert lon.shape == lat.shape, 'lon and lat should have same shape'\n elif (lon.ndim, lat.ndim) == (1, 1):\n lon, lat = np.meshgrid(lon, lat)\n else:\n raise ValueError('lon and lat should be both 1D or 2D')\n\n return lon, lat\n\n\ndef _get_lon_lat(ds):\n \"\"\"Return lon and lat extracted from ds.\"\"\"\n if ('lat' in ds and 'lon' in ds) or ('lat' in ds.coords and 'lon' in ds.coords):\n # Old way.\n return ds['lon'], ds['lat']\n # else : cf-xarray way\n try:\n lon = ds.cf['longitude']\n lat = ds.cf['latitude']\n except (KeyError, AttributeError, ValueError):\n # KeyError if cfxr doesn't detect the coords\n # AttributeError if ds is a dict\n raise ValueError('dataset must include lon/lat or be CF-compliant')\n\n return lon, lat\n\n\ndef _get_lon_lat_bounds(ds):\n \"\"\"Return bounds of lon and lat extracted from ds.\"\"\"\n if 'lat_b' in ds and 'lon_b' in ds:\n # Old way.\n return ds['lon_b'], ds['lat_b']\n # else : cf-xarray way\n try:\n lon_bnds = ds.cf.get_bounds('longitude')\n lat_bnds = ds.cf.get_bounds('latitude')\n except KeyError: # bounds are not already present\n if ds.cf['longitude'].ndim > 1:\n # We cannot infer 2D bounds, raise KeyError as custom \"lon_b\" is missing.\n raise KeyError('lon_b')\n lon_name = ds.cf['longitude'].name\n lat_name = ds.cf['latitude'].name\n ds = ds.cf.add_bounds([lon_name, lat_name])\n lon_bnds = ds.cf.get_bounds('longitude')\n lat_bnds = ds.cf.get_bounds('latitude')\n\n # Convert from CF bounds to xESMF bounds.\n # order=None is because we don't want to assume the dimension order for 2D bounds.\n lon_b = cfxr.bounds_to_vertices(lon_bnds, ds.cf.get_bounds_dim_name('longitude'), order=None)\n lat_b = cfxr.bounds_to_vertices(lat_bnds, ds.cf.get_bounds_dim_name('latitude'), order=None)\n return lon_b, lat_b\n\n\ndef ds_to_ESMFgrid(ds, need_bounds=False, periodic=None, append=None):\n \"\"\"\n Convert xarray DataSet or dictionary to ESMF.Grid object.\n\n Parameters\n ----------\n ds : xarray DataSet or dictionary\n Contains variables ``lon``, ``lat``,\n and optionally ``lon_b``, ``lat_b`` if need_bounds=True.\n\n Shape should be ``(n_lat, n_lon)`` or ``(n_y, n_x)``,\n as normal C or Python ordering. 
Will be then tranposed to F-ordered.\n\n need_bounds : bool, optional\n Need cell boundary values?\n\n periodic : bool, optional\n Periodic in longitude?\n\n Returns\n -------\n grid : ESMF.Grid object\n\n \"\"\"\n\n # use np.asarray(dr) instead of dr.values, so it also works for dictionary\n\n lon, lat = _get_lon_lat(ds)\n if hasattr(lon, 'dims'):\n if lon.ndim == 1:\n dim_names = lat.dims + lon.dims\n else:\n dim_names = lon.dims\n else:\n dim_names = None\n lon, lat = as_2d_mesh(np.asarray(lon), np.asarray(lat))\n\n if 'mask' in ds:\n mask = np.asarray(ds['mask'])\n else:\n mask = None\n\n # tranpose the arrays so they become Fortran-ordered\n if mask is not None:\n grid = Grid.from_xarray(lon.T, lat.T, periodic=periodic, mask=mask.T)\n else:\n grid = Grid.from_xarray(lon.T, lat.T, periodic=periodic, mask=None)\n\n if need_bounds:\n lon_b, lat_b = _get_lon_lat_bounds(ds)\n lon_b, lat_b = as_2d_mesh(np.asarray(lon_b), np.asarray(lat_b))\n add_corner(grid, lon_b.T, lat_b.T)\n\n return grid, lon.shape, dim_names\n\n\ndef ds_to_ESMFlocstream(ds):\n \"\"\"\n Convert xarray DataSet or dictionary to ESMF.LocStream object.\n\n Parameters\n ----------\n ds : xarray DataSet or dictionary\n Contains variables ``lon``, ``lat``.\n\n Returns\n -------\n locstream : ESMF.LocStream object\n\n \"\"\"\n\n lon, lat = _get_lon_lat(ds)\n if hasattr(lon, 'dims'):\n dim_names = lon.dims\n else:\n dim_names = None\n lon, lat = np.asarray(lon), np.asarray(lat)\n\n if len(lon.shape) > 1:\n raise ValueError('lon can only be 1d')\n if len(lat.shape) > 1:\n raise ValueError('lat can only be 1d')\n\n assert lon.shape == lat.shape\n\n locstream = LocStream.from_xarray(lon, lat)\n\n return locstream, (1,) + lon.shape, dim_names\n\n\ndef polys_to_ESMFmesh(polys):\n \"\"\"\n Convert a sequence of shapely Polygons to a ESMF.Mesh object.\n\n MultiPolygons are split in their polygon parts and holes are ignored.\n\n Parameters\n ----------\n polys : sequence of shapely Polygon or MultiPolygon\n\n Returns\n -------\n exterior : ESMF.Mesh\n A mesh where elements are the exterior rings of the polygons\n tuple\n The shape of the mesh : (1, N_elements)\n\n \"\"\"\n ext, holes, _, _ = split_polygons_and_holes(polys)\n if len(holes) > 0:\n warnings.warn(\n 'Some passed polygons have holes, those are not represented in the returned Mesh.'\n )\n return Mesh.from_polygons(ext), (1, len(ext))\n\n\nclass BaseRegridder(object):\n def __init__(\n self,\n grid_in,\n grid_out,\n method,\n filename=None,\n reuse_weights=False,\n extrap_method=None,\n extrap_dist_exponent=None,\n extrap_num_src_pnts=None,\n weights=None,\n ignore_degenerate=None,\n input_dims=None,\n ):\n \"\"\"\n Base xESMF regridding class supporting ESMF objects: `Grid`, `Mesh` and `LocStream`.\n\n Create or use existing subclasses to support other types of input objects. See for example `Regridder`\n to regrid `xarray.DataArray` objects, or `SpatialAverager` to average grids over regions defined by polygons.\n\n Parameters\n ----------\n grid_in, grid_out : ESMF Grid or Locstream or Mesh\n Input and output grid structures as ESMFpy objects.\n\n method : str\n Regridding method. Options are\n\n - 'bilinear'\n - 'conservative', **need grid corner information**\n - 'conservative_normed', **need grid corner information**\n - 'patch'\n - 'nearest_s2d'\n - 'nearest_d2s'\n\n filename : str, optional\n Name for the weight file. The default naming scheme is::\n\n {method}_{Ny_in}x{Nx_in}_{Ny_out}x{Nx_out}.nc\n\n e.g. 
bilinear_400x600_300x400.nc\n\n reuse_weights : bool, optional\n Whether to read existing weight file to save computing time.\n False by default (i.e. re-compute, not reuse).\n\n extrap_method : str, optional\n Extrapolation method. Options are\n\n - 'inverse_dist'\n - 'nearest_s2d'\n\n extrap_dist_exponent : float, optional\n The exponent to raise the distance to when calculating weights for the\n extrapolation method. If none are specified, defaults to 2.0\n\n extrap_num_src_pnts : int, optional\n The number of source points to use for the extrapolation methods\n that use more than one source point. If none are specified, defaults to 8\n\n weights : None, coo_matrix, dict, str, Dataset, Path,\n Regridding weights, stored as\n - a scipy.sparse COO matrix,\n - a dictionary with keys `row_dst`, `col_src` and `weights`,\n - an xarray Dataset with data variables `col`, `row` and `S`,\n - or a path to a netCDF file created by ESMF.\n If None, compute the weights.\n\n ignore_degenerate : bool, optional\n If False (default), raise error if grids contain degenerated cells\n (i.e. triangles or lines, instead of quadrilaterals)\n\n input_dims : tuple of str, optional\n A tuple of dimension names to look for when regridding DataArrays or Datasets.\n If not given or if those are not found on the regridded object, regridding\n uses the two last dimensions of the object (or the last one for input LocStreams and Meshes).\n\n Returns\n -------\n baseregridder : xESMF BaseRegridder object\n\n \"\"\"\n self.grid_in = grid_in\n self.grid_out = grid_out\n self.method = method\n self.reuse_weights = reuse_weights\n self.extrap_method = extrap_method\n self.extrap_dist_exponent = extrap_dist_exponent\n self.extrap_num_src_pnts = extrap_num_src_pnts\n self.ignore_degenerate = ignore_degenerate\n self.periodic = getattr(self.grid_in, 'periodic_dim', None) is not None\n self.sequence_in = isinstance(self.grid_in, (LocStream, Mesh))\n self.sequence_out = isinstance(self.grid_out, (LocStream, Mesh))\n\n if input_dims is not None and len(input_dims) != int(not self.sequence_in) + 1:\n raise ValueError(f'Wrong number of dimension names in `input_dims` ({len(input_dims)}.')\n self.in_horiz_dims = input_dims\n\n # record grid shape information\n # We need to invert Grid shapes to respect xESMF's convention (y, x).\n self.shape_in = self.grid_in.get_shape()[::-1]\n self.shape_out = self.grid_out.get_shape()[::-1]\n self.n_in = self.shape_in[0] * self.shape_in[1]\n self.n_out = self.shape_out[0] * self.shape_out[1]\n\n # some logic about reusing weights with either filename or weights args\n if reuse_weights and (filename is None) and (weights is None):\n raise ValueError('To reuse weights, you need to provide either filename or weights.')\n\n if not reuse_weights and weights is None:\n weights = self._compute_weights() # Dictionary of weights\n else:\n weights = filename if filename is not None else weights\n\n assert weights is not None\n\n # Convert weights, whatever their format, to a sparse coo matrix\n self.weights = read_weights(weights, self.n_in, self.n_out)\n\n # replace zeros by NaN in mask\n if self.grid_out.mask is not None and self.grid_out.mask[0] is not None:\n self.weights = add_nans_to_weights(self.weights)\n\n # follows legacy logic of writing weights if filename is provided\n if filename is not None and not reuse_weights:\n self.to_netcdf(filename=filename)\n\n # set default weights filename if none given\n self.filename = self._get_default_filename() if filename is None else filename\n\n @property\n 
def A(self):\n message = (\n 'regridder.A is deprecated and will be removed in future versions. '\n 'Use regridder.weights instead.'\n )\n\n warnings.warn(message, DeprecationWarning)\n # DeprecationWarning seems to be ignored by certain Python environments\n # Also print to make sure users notice this.\n print(message)\n return self.weights\n\n def _get_default_filename(self):\n # e.g. bilinear_400x600_300x400.nc\n filename = '{0}_{1}x{2}_{3}x{4}'.format(\n self.method,\n self.shape_in[0],\n self.shape_in[1],\n self.shape_out[0],\n self.shape_out[1],\n )\n\n if self.periodic:\n filename += '_peri.nc'\n else:\n filename += '.nc'\n\n return filename\n\n def _compute_weights(self):\n regrid = esmf_regrid_build(\n self.grid_in,\n self.grid_out,\n self.method,\n extrap_method=self.extrap_method,\n extrap_dist_exponent=self.extrap_dist_exponent,\n extrap_num_src_pnts=self.extrap_num_src_pnts,\n ignore_degenerate=self.ignore_degenerate,\n )\n\n w = regrid.get_weights_dict(deep_copy=True)\n esmf_regrid_finalize(regrid) # only need weights, not regrid object\n return w\n\n def __call__(self, indata, keep_attrs=False):\n \"\"\"\n Apply regridding to input data.\n\n Parameters\n ----------\n indata : numpy array, dask array, xarray DataArray or Dataset.\n If not an xarray object or if `input_dìms` was not given in the init,\n the rightmost two dimensions must be the same as ``ds_in``.\n Can have arbitrary additional dimensions.\n\n Examples of valid shapes\n\n - (n_lat, n_lon), if ``ds_in`` has shape (n_lat, n_lon)\n - (n_time, n_lev, n_y, n_x), if ``ds_in`` has shape (Ny, n_x)\n\n Either give `input_dims` or transpose your input data\n if the horizontal dimensions are not the rightmost two dimensions\n\n Variables without the regridded dimensions are silently skipped when passing a Dataset.\n\n keep_attrs : bool, optional\n Keep attributes for xarray DataArrays or Datasets.\n Defaults to False.\n\n Returns\n -------\n outdata : Data type is the same as input data type.\n On the same horizontal grid as ``ds_out``,\n with extra dims in ``dr_in``.\n\n Assuming ``ds_out`` has the shape of (n_y_out, n_x_out),\n examples of returning shapes are\n\n - (n_y_out, n_x_out), if ``dr_in`` is 2D\n - (n_time, n_lev, n_y_out, n_x_out), if ``dr_in`` has shape\n (n_time, n_lev, n_y, n_x)\n\n \"\"\"\n if isinstance(indata, np.ndarray):\n return self.regrid_numpy(indata)\n elif isinstance(indata, dask_array_type):\n return self.regrid_dask(indata)\n elif isinstance(indata, xr.DataArray):\n return self.regrid_dataarray(indata, keep_attrs=keep_attrs)\n elif isinstance(indata, xr.Dataset):\n return self.regrid_dataset(indata, keep_attrs=keep_attrs)\n else:\n raise TypeError(\n 'input must be numpy array, dask array, ' 'xarray DataArray or Dataset!'\n )\n\n @staticmethod\n def _regrid_array(indata, *, weights, shape_in, shape_out, sequence_in):\n if sequence_in:\n indata = np.expand_dims(indata, axis=-2)\n return apply_weights(weights, indata, shape_in, shape_out)\n\n @property\n def _regrid_kwargs(self):\n return {\n 'weights': self.weights,\n 'sequence_in': self.sequence_in,\n 'shape_in': self.shape_in,\n 'shape_out': self.shape_out,\n }\n\n def regrid_numpy(self, indata):\n \"\"\"See __call__().\"\"\"\n outdata = self._regrid_array(indata, **self._regrid_kwargs)\n return outdata\n\n def regrid_dask(self, indata):\n \"\"\"See __call__().\"\"\"\n\n extra_chunk_shape = indata.chunksize[0:-2]\n\n output_chunk_shape = extra_chunk_shape + self.shape_out\n\n outdata = da.map_blocks(\n self._regrid_array,\n indata,\n 
dtype=float,\n chunks=output_chunk_shape,\n **self._regrid_kwargs,\n )\n\n return outdata\n\n def regrid_dataarray(self, dr_in, keep_attrs=False):\n \"\"\"See __call__().\"\"\"\n\n input_horiz_dims, temp_horiz_dims = self._parse_xrinput(dr_in)\n\n dr_out = xr.apply_ufunc(\n self._regrid_array,\n dr_in,\n kwargs=self._regrid_kwargs,\n input_core_dims=[input_horiz_dims],\n output_core_dims=[temp_horiz_dims],\n dask='parallelized',\n output_dtypes=[float],\n output_sizes={\n temp_horiz_dims[0]: self.shape_out[0],\n temp_horiz_dims[1]: self.shape_out[1],\n },\n keep_attrs=keep_attrs,\n )\n\n return self._format_xroutput(dr_out, temp_horiz_dims)\n\n def regrid_dataset(self, ds_in, keep_attrs=False):\n \"\"\"See __call__().\"\"\"\n\n # get the first data variable to infer input_core_dims\n input_horiz_dims, temp_horiz_dims = self._parse_xrinput(ds_in)\n\n non_regriddable = [\n name\n for name, data in ds_in.data_vars.items()\n if not set(input_horiz_dims).issubset(data.dims)\n ]\n ds_in = ds_in.drop_vars(non_regriddable)\n\n ds_out = xr.apply_ufunc(\n self._regrid_array,\n ds_in,\n kwargs=self._regrid_kwargs,\n input_core_dims=[input_horiz_dims],\n output_core_dims=[temp_horiz_dims],\n dask='parallelized',\n output_dtypes=[float],\n output_sizes={\n temp_horiz_dims[0]: self.shape_out[0],\n temp_horiz_dims[1]: self.shape_out[1],\n },\n keep_attrs=keep_attrs,\n )\n\n return self._format_xroutput(ds_out, temp_horiz_dims)\n\n def _parse_xrinput(self, dr_in):\n # dr could be a DataArray or a Dataset\n # Get input horiz dim names and set output horiz dim names\n if self.in_horiz_dims is not None and all(dim in dr_in.dims for dim in self.in_horiz_dims):\n input_horiz_dims = self.in_horiz_dims\n else:\n if isinstance(dr_in, Dataset):\n name, dr_in = next(iter(dr_in.items()))\n else:\n # For warning purposes\n name = dr_in.name\n\n if self.sequence_in:\n input_horiz_dims = dr_in.dims[-1:]\n else:\n input_horiz_dims = dr_in.dims[-2:]\n\n # help user debugging invalid horizontal dimensions\n warnings.warn(\n (\n f'Using dimensions {input_horiz_dims} from data variable {name} '\n 'as the horizontal dimensions for the regridding.'\n ),\n UserWarning,\n )\n\n if self.sequence_out:\n temp_horiz_dims = ['dummy', 'locations']\n else:\n temp_horiz_dims = [s + '_new' for s in input_horiz_dims]\n\n if self.sequence_in and not self.sequence_out:\n temp_horiz_dims = ['dummy_new'] + temp_horiz_dims\n return input_horiz_dims, temp_horiz_dims\n\n def _format_xroutput(self, out, new_dims=None):\n out.attrs['regrid_method'] = self.method\n return out\n\n def __repr__(self):\n info = (\n 'xESMF Regridder \\n'\n 'Regridding algorithm: {} \\n'\n 'Weight filename: {} \\n'\n 'Reuse pre-computed weights? {} \\n'\n 'Input grid shape: {} \\n'\n 'Output grid shape: {} \\n'\n 'Periodic in longitude? 
{}'.format(\n self.method,\n self.filename,\n self.reuse_weights,\n self.shape_in,\n self.shape_out,\n self.periodic,\n )\n )\n\n return info\n\n def to_netcdf(self, filename=None):\n \"\"\"Save weights to disk as a netCDF file.\"\"\"\n if filename is None:\n filename = self.filename\n w = self.weights\n dim = 'n_s'\n ds = xr.Dataset({'S': (dim, w.data), 'col': (dim, w.col + 1), 'row': (dim, w.row + 1)})\n ds.to_netcdf(filename)\n return filename\n\n\nclass Regridder(BaseRegridder):\n def __init__(\n self,\n ds_in,\n ds_out,\n method,\n locstream_in=False,\n locstream_out=False,\n periodic=False,\n **kwargs,\n ):\n \"\"\"\n Make xESMF regridder\n\n Parameters\n ----------\n ds_in, ds_out : xarray DataSet, or dictionary\n Contain input and output grid coordinates.\n All variables that the cf-xarray accessor understand are accepted.\n Otherwise, look for ``lon``, ``lat``,\n optionally ``lon_b``, ``lat_b`` for conservative methods,\n and ``mask``. Note that for `mask`, the ESMF convention is used,\n where masked values are identified by 0, and non-masked values by 1.\n\n For conservative methods, if bounds are not present, they will be\n computed using `cf-xarray` (only 1D coordinates are currently supported).\n\n Shape can be 1D (n_lon,) and (n_lat,) for rectilinear grids,\n or 2D (n_y, n_x) for general curvilinear grids.\n Shape of bounds should be (n+1,) or (n_y+1, n_x+1).\n CF-bounds (shape (n, 2) or (n, m, 4)) are also accepted if they are\n accessible through the cf-xarray accessor.\n\n If either dataset includes a 2d mask variable, that will also be\n used to inform the regridding.\n\n method : str\n Regridding method. Options are\n\n - 'bilinear'\n - 'conservative', **need grid corner information**\n - 'conservative_normed', **need grid corner information**\n - 'patch'\n - 'nearest_s2d'\n - 'nearest_d2s'\n\n periodic : bool, optional\n Periodic in longitude? Default to False.\n Only useful for global grids with non-conservative regridding.\n Will be forced to False for conservative regridding.\n\n filename : str, optional\n Name for the weight file. The default naming scheme is::\n\n {method}_{Ny_in}x{Nx_in}_{Ny_out}x{Nx_out}.nc\n\n e.g. bilinear_400x600_300x400.nc\n\n reuse_weights : bool, optional\n Whether to read existing weight file to save computing time.\n False by default (i.e. re-compute, not reuse).\n\n extrap_method : str, optional\n Extrapolation method. Options are\n\n - 'inverse_dist'\n - 'nearest_s2d'\n\n extrap_dist_exponent : float, optional\n The exponent to raise the distance to when calculating weights for the\n extrapolation method. If none are specified, defaults to 2.0\n\n extrap_num_src_pnts : int, optional\n The number of source points to use for the extrapolation methods\n that use more than one source point. If none are specified, defaults to 8\n\n weights : None, coo_matrix, dict, str, Dataset, Path,\n Regridding weights, stored as\n - a scipy.sparse COO matrix,\n - a dictionary with keys `row_dst`, `col_src` and `weights`,\n - an xarray Dataset with data variables `col`, `row` and `S`,\n - or a path to a netCDF file created by ESMF.\n If None, compute the weights.\n\n ignore_degenerate : bool, optional\n If False (default), raise error if grids contain degenerated cells\n (i.e. 
triangles or lines, instead of quadrilaterals)\n\n Returns\n -------\n regridder : xESMF regridder object\n \"\"\"\n methods_avail_ls_in = ['nearest_s2d', 'nearest_d2s']\n methods_avail_ls_out = ['bilinear', 'patch'] + methods_avail_ls_in\n\n if locstream_in and method not in methods_avail_ls_in:\n raise ValueError(\n f'locstream input is only available for method in {methods_avail_ls_in}'\n )\n if locstream_out and method not in methods_avail_ls_out:\n raise ValueError(\n f'locstream output is only available for method in {methods_avail_ls_out}'\n )\n\n # record basic switches\n if method in ['conservative', 'conservative_normed']:\n need_bounds = True\n periodic = False # bound shape will not be N+1 for periodic grid\n else:\n need_bounds = False\n\n # construct ESMF grid, with some shape checking\n if locstream_in:\n grid_in, shape_in, input_dims = ds_to_ESMFlocstream(ds_in)\n else:\n grid_in, shape_in, input_dims = ds_to_ESMFgrid(\n ds_in, need_bounds=need_bounds, periodic=periodic\n )\n if locstream_out:\n grid_out, shape_out, _ = ds_to_ESMFlocstream(ds_out)\n else:\n grid_out, shape_out, _ = ds_to_ESMFgrid(ds_out, need_bounds=need_bounds)\n\n # Create the BaseRegridder\n super().__init__(grid_in, grid_out, method, input_dims=input_dims, **kwargs)\n\n # record output grid and metadata\n lon_out, lat_out = _get_lon_lat(ds_out)\n self._lon_out, self._lat_out = np.asarray(lon_out), np.asarray(lat_out)\n self._coord_names = dict(\n lon=lon_out.name if isinstance(lon_out, DataArray) else 'lon',\n lat=lat_out.name if isinstance(lat_out, DataArray) else 'lat',\n )\n self._lon_out_attrs = lon_out.attrs if isinstance(lon_out, DataArray) else {}\n self._lat_out_attrs = lat_out.attrs if isinstance(lat_out, DataArray) else {}\n\n if self._lon_out.ndim == 2:\n try:\n self.lon_dim = self.lat_dim = lon_out.dims\n except Exception:\n self.lon_dim = self.lat_dim = ('y', 'x')\n\n self.out_horiz_dims = self.lon_dim\n\n elif self._lon_out.ndim == 1:\n try:\n (self.lon_dim,) = lon_out.dims\n (self.lat_dim,) = lat_out.dims\n except Exception:\n self.lon_dim = 'lon'\n self.lat_dim = 'lat'\n\n self.out_horiz_dims = (self.lat_dim, self.lon_dim)\n\n def _format_xroutput(self, out, new_dims=None):\n if not self.sequence_out and new_dims is not None:\n # rename dimension name to match output grid\n out = out.rename(\n {new_dims[0]: self.out_horiz_dims[0], new_dims[1]: self.out_horiz_dims[1]}\n )\n\n # append output horizontal coordinate values\n # extra coordinates are automatically tracked by apply_ufunc\n lon_args = dict(data=self._lon_out, attrs=self._lon_out_attrs)\n lat_args = dict(data=self._lat_out, attrs=self._lat_out_attrs)\n if self.sequence_out:\n out.coords['lon'] = xr.DataArray(**lon_args, dims=('locations',))\n out.coords['lat'] = xr.DataArray(**lat_args, dims=('locations',))\n else:\n out.coords['lon'] = xr.DataArray(**lon_args, dims=self.lon_dim)\n out.coords['lat'] = xr.DataArray(**lat_args, dims=self.lat_dim)\n\n out.attrs['regrid_method'] = self.method\n\n if self.sequence_out:\n out = out.squeeze(dim='dummy')\n if self.lon_dim == self.lat_dim:\n out = out.rename(locations=self.lon_dim)\n\n # Use ds_out coordinates\n out = out.rename(self._coord_names)\n\n return out\n\n\nclass SpatialAverager(BaseRegridder):\n def __init__(\n self,\n ds_in,\n polys,\n ignore_holes=False,\n periodic=False,\n filename=None,\n reuse_weights=False,\n weights=None,\n ignore_degenerate=False,\n geom_dim_name='geom',\n ):\n \"\"\"Compute the exact average of a gridded array over a geometry.\n\n This uses the ESMF 
`conservative` regridding method to compute and apply weights\n mapping a 2D field unto geometries defined by polygons. The `conservative` method\n preserves the areal average of the input field. That is, *the value at each output\n grid cell is the average input value over the output grid area*. Here, the output\n grid cells are not rectangles defined by four corners, but polygons defined by\n multiple vertices (`ESMF.Mesh` objects). The regridding weights thus compute the\n areal-average of the input grid over each polygon.\n\n For multi-parts geometries (shapely.MultiPolygon), weights are computed for each\n geometry, then added, to compute the average over all geometries.\n\n When polygons include holes, the weights over the holes can either be substracted,\n or ignored.\n\n Parameters\n ----------\n ds_in : xr.DataArray or xr.Dataset or dictionary\n Contain input and output grid coordinates. Look for variables\n ``lon``, ``lat``, ``lon_b`` and ``lat_b``.\n\n Optionaly looks for ``mask``, in which case the ESMF convention is used,\n where masked values are identified by 0, and non-masked values by 1.\n\n Shape can be 1D (n_lon,) and (n_lat,) for rectilinear grids,\n or 2D (n_y, n_x) for general curvilinear grids.\n Shape of bounds should be (n+1,) or (n_y+1, n_x+1).\n\n polys : sequence of shapely Polygons and MultiPolygons\n Sequence of polygons over which to average `ds_in`.\n\n ignore_holes : bool\n Whether to ignore holes in polygons.\n Default (True) is to substract the weight of holes from the weight of the polygon.\n\n filename : str, optional\n Name for the weight file. The default naming scheme is::\n\n spatialavg_{Ny_in}x{Nx_in}_{Npoly_out}.nc\n\n e.g. spatialavg_400x600_30.nc\n\n reuse_weights : bool, optional\n Whether to read existing weight file to save computing time.\n False by default (i.e. re-compute, not reuse).\n\n weights : None, coo_matrix, dict, str, Dataset, Path,\n Regridding weights, stored as\n - a scipy.sparse COO matrix,\n - a dictionary with keys `row_dst`, `col_src` and `weights`,\n - an xarray Dataset with data variables `col`, `row` and `S`,\n - or a path to a netCDF file created by ESMF.\n If None, compute the weights.\n\n ignore_degenerate : bool, optional\n If False (default), raise error if grids contain degenerated cells\n (i.e. triangles or lines, instead of quadrilaterals)\n\n self.geom_dim_name : str\n Name of dimension along which averages for each polygon are stored.\n\n Returns\n -------\n xarray.DataArray\n Average over polygons along `geom_dim_name` dimension. 
The `lon` and\n `lat` coordinates are the polygon centroid coordinates.\n\n References\n ----------\n This approach is inspired by `OCGIS <https://github.com/NCPP/ocgis>`_.\n \"\"\"\n self.ignore_holes = ignore_holes\n self.polys = polys\n self.geom_dim_name = geom_dim_name\n\n grid_in, shape_in, input_dims = ds_to_ESMFgrid(ds_in, need_bounds=True, periodic=periodic)\n\n # Create an output locstream so that the regridder knows the output shape and coords.\n # Latitude and longitude coordinates are the polygon centroid.\n lon_out, lat_out = _get_lon_lat(ds_in)\n if hasattr(lon_out, 'name'):\n self._lon_out_name = lon_out.name\n self._lat_out_name = lat_out.name\n else:\n self._lon_out_name = 'lon'\n self._lat_out_name = 'lat'\n\n poly_centers = [poly.centroid.xy for poly in polys]\n self._lon_out = np.asarray([c[0][0] for c in poly_centers])\n self._lat_out = np.asarray([c[1][0] for c in poly_centers])\n\n # We put names 'lon' and 'lat' so ds_to_ESMFlocstream finds them easily.\n # _lon_out_name and _lat_out_name are used on the output anyway.\n ds_out = {'lon': self._lon_out, 'lat': self._lat_out}\n locstream_out, shape_out, _ = ds_to_ESMFlocstream(ds_out)\n\n # BaseRegridder with custom-computed weights and dummy out grid\n super().__init__(\n grid_in,\n locstream_out,\n 'conservative',\n input_dims=input_dims,\n weights=weights,\n filename=filename,\n reuse_weights=reuse_weights,\n ignore_degenerate=ignore_degenerate,\n )\n\n def _compute_weights(self):\n \"\"\"Return weight sparse matrix.\"\"\"\n\n # Split all (multi-)polygons into single polygons and holes. Keep track of owners.\n exts, holes, i_ext, i_hol = split_polygons_and_holes(self.polys)\n owners = np.array(i_ext + i_hol)\n\n mesh_ext, shape_ext = polys_to_ESMFmesh(exts)\n\n # Get weights for single polygons and holes\n # Stack everything together\n reg_ext = BaseRegridder(\n mesh_ext, self.grid_in, 'conservative', ignore_degenerate=self.ignore_degenerate\n )\n if len(holes) > 0 and not self.ignore_holes:\n mesh_holes, shape_holes = polys_to_ESMFmesh(holes)\n reg_holes = BaseRegridder(\n mesh_holes, self.grid_in, 'conservative', ignore_degenerate=self.ignore_degenerate\n )\n w_all = sps.hstack((reg_ext.weights.tocsc(), -reg_holes.weights.tocsc()))\n else:\n w_all = reg_ext.weights.tocsc()\n\n # Combine weights of same owner and normalize\n weights = _combine_weight_multipoly(w_all, owners)\n weights = weights.multiply(1 / weights.sum(axis=0))\n return weights.tocoo().T\n\n def _get_default_filename(self):\n # e.g. bilinear_400x600_300x400.nc\n filename = 'spatialavg_{0}x{1}_{2}.nc'.format(\n self.shape_in[0], self.shape_in[1], self.n_out\n )\n\n return filename\n\n def __repr__(self):\n info = (\n 'xESMF SpatialAverager \\n'\n 'Weight filename: {} \\n'\n 'Reuse pre-computed weights? {} \\n'\n 'Input grid shape: {} \\n'\n 'Output list length: {} \\n'.format(\n self.filename, self.reuse_weights, self.shape_in, self.n_out\n )\n )\n\n return info\n\n def _format_xroutput(self, out, new_dims=None):\n out = out.squeeze(dim='dummy')\n\n # rename dimension name to match output grid\n out = out.rename(locations=self.geom_dim_name)\n\n # append output horizontal coordinate values\n # extra coordinates are automatically tracked by apply_ufunc\n out.coords[self._lon_out_name] = xr.DataArray(self._lon_out, dims=(self.geom_dim_name,))\n out.coords[self._lat_out_name] = xr.DataArray(self._lat_out, dims=(self.geom_dim_name,))\n out.attrs['regrid_method'] = self.method\n return out\n"
] |
[
[
"numpy.asarray",
"numpy.array",
"numpy.expand_dims",
"numpy.meshgrid"
]
] |
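Of the numpy calls listed for this file, `np.meshgrid` does the real work in `as_2d_mesh`: it promotes 1-D rectilinear lon/lat vectors to the 2-D curvilinear layout ESMF expects. A standalone sketch:

import numpy as np

lon = np.asarray([0.0, 10.0, 20.0])   # 1-D longitudes, shape (n_lon,)
lat = np.asarray([-5.0, 5.0])         # 1-D latitudes, shape (n_lat,)
lon2d, lat2d = np.meshgrid(lon, lat)  # both promoted to (n_lat, n_lon)
print(lon2d.shape, lat2d.shape)       # (2, 3) (2, 3)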
ai-se/parGALE
|
[
"2922e7e5f382c795b1f58d9e45be54b88d2ed1eb"
] |
[
"utils/lib.py"
] |
[
"\"\"\"\nStandard library files and operators\n\"\"\"\nfrom __future__ import print_function, division\nimport random\nimport sys, os\nsys.path.append(os.path.abspath(\".\"))\nimport math\nimport numpy as np\n\n# Constants\nEPS = 0.00001\nPI = math.pi\n\n\n\nclass O:\n \"\"\"\n Default class which everything extends.\n \"\"\"\n def __init__(self,**d): self.has().update(**d)\n def has(self): return self.__dict__\n def update(self,**d) : self.has().update(d); return self\n def __repr__(self) :\n show=[':%s %s' % (k,self.has()[k])\n for k in sorted(self.has().keys() )\n if k[0] is not \"_\"]\n txt = ' '.join(show)\n if len(txt) > 60:\n show=map(lambda x: '\\t'+x+'\\n',show)\n return '{'+' '.join(show)+'}'\n def __getitem__(self, item):\n return self.has().get(item)\n\ndef norm(x, low, high):\n \"\"\"\n Normalize Value\n :param x: Value to be normalized\n :param low: Minimum value\n :param high: Maximum value\n :return: Normalized value\n \"\"\"\n nor = (x - low)/(high - low + EPS)\n if nor > 1:\n return 1\n elif nor < 0:\n return 0\n return nor\n\n\ndef de_norm(x, low, high):\n \"\"\"\n De-normalize value\n :param x: Value to be denormalized\n :param low: Minimum value\n :param high: Maximum value\n :return:\n \"\"\"\n de_nor = x*(high-low) + low\n if de_nor > high:\n return high\n elif de_nor < low:\n return low\n return de_nor\n\ndef uniform(low, high):\n \"\"\"\n Uniform value between low and high\n :param low: minimum of distribution\n :param high: maximum of distribution\n :return: Uniform value in the uniform distribution\n \"\"\"\n return random.uniform(low, high)\n\ndef seed(val=None):\n random.seed(val)\n\ndef say(*lst):\n \"\"\"\n Print value on the same line\n :param lst:\n :return:\n \"\"\"\n print(*lst, end=\"\")\n sys.stdout.flush()\n\ndef choice(lst):\n \"\"\"\n Return random value from list\n :param lst: list to search in\n :return:\n \"\"\"\n return random.choice(lst)\n\ndef rand():\n \"\"\"\n Returns a random number.\n \"\"\"\n return random.random()\n\ndef more(x,y):\n \"\"\"\n Check if x > y\n :param x: Left Comparative Value\n :param y: Right Comparative Value\n :return: Boolean\n \"\"\"\n return x > y\n\ndef less(x,y):\n \"\"\"\n Check if x < y\n :param x: Left Comparative Value\n :param y: Right Comparative Value\n :return: Boolean\n \"\"\"\n return x < y\n\ndef avg(lst):\n \"\"\"\n Average of list\n :param lst:\n :return:\n \"\"\"\n return sum(lst)/float(len(lst))\n\ndef cos(val):\n \"\"\"\n Return cosine of a value\n :param val: Value in radians\n :return:\n \"\"\"\n return math.cos(val)\n\ndef sin(val):\n \"\"\"\n Return sine of a value\n :param val: Value in radians\n :return:\n \"\"\"\n return math.sin(val)\n\ndef clone(lst):\n if lst is None:\n return None\n return lst[:]\n\nclass Point(O):\n\n def __init__(self, decisions, problem=None):\n \"\"\"\n Represents a point in the frontier for NSGA\n :param decisions: Set of decisions\n :param problem: Instance of the problem\n \"\"\"\n O.__init__(self)\n self.decisions = clone(decisions)\n if problem:\n self.objectives = problem.evaluate(decisions)\n else:\n self.objectives = []\n self.rank = 0\n self.dominated = []\n self.dominating = 0\n self.crowd_dist = 0\n\n def __hash__(self):\n return hash(self.decisions)\n\n def __eq__(self, other):\n return cmp(self.decisions, other.decisions) == 0\n\n def clone(self):\n \"\"\"\n Method to clone a point\n :return:\n \"\"\"\n new = Point(self.decisions)\n new.objectives = self.objectives\n return new\n\n def evaluate(self, problem):\n \"\"\"\n Evaluate a point\n :param problem: 
Problem used to evaluate\n \"\"\"\n if not self.objectives:\n self.objectives = problem.evaluate(self.decisions)\n return self.objectives\n\n\ndef report(lst, name):\n print(\"*** \", str.upper(name), \" ***\")\n s_lst = sorted(lst)\n low = s_lst[0]\n high = s_lst[-1]\n med = s_lst[len(lst)//2] if len(lst) % 2 else (s_lst[len(lst)//2] + s_lst[len(lst)//2 - 1])/2\n print(\"LOW : \", low)\n print(\"HIGH : \", high)\n print(\"MED : \", med)\n\ndef mkdir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)\n return directory\n\ndef mean_iqr(lst):\n \"\"\"\n return mean and iqr of a list.\n :param lst: List to fetch mean and iqr\n :return: (mean, iqr)\n \"\"\"\n mean = np.mean(lst)\n q75, q25 = np.percentile(lst, [75, 25])\n return mean, q75 - q25\n\ndef write_objs(objs, file_name):\n with open(file_name+'.csv', 'w') as f:\n f.writelines(\",\".join(str(j) for j in i) + '\\n' for i in objs)\n"
] |
[
[
"numpy.mean",
"numpy.percentile"
]
] |
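A standalone sketch of the `mean_iqr` helper above, which fetches both quartiles from a single `np.percentile` call:

import numpy as np

lst = [1, 2, 3, 4, 5, 6, 7, 8, 9]
q75, q25 = np.percentile(lst, [75, 25])
print(np.mean(lst), q75 - q25)  # 5.0 4.0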
xwind-h/gluon-nlp
|
[
"01727fc5f6be9b3329571bd688a340067408ad88"
] |
[
"src/gluonnlp/data/utils.py"
] |
[
"# coding: utf-8\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Utility classes and functions. They help organize and keep statistics of datasets.\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport collections\nimport os\nimport sys\nimport tarfile\nimport zipfile\n\nimport numpy as np\nfrom mxnet.gluon.data import SimpleDataset\nfrom mxnet.gluon.utils import _get_repo_url, check_sha1, download\n\nfrom .. import _constants as C\nfrom ..base import get_home_dir\n\n__all__ = [\n 'Counter', 'count_tokens', 'concat_sequence', 'slice_sequence', 'train_valid_split',\n 'line_splitter', 'whitespace_splitter', 'Splitter'\n]\n\nclass Counter(collections.Counter): # pylint: disable=abstract-method\n \"\"\"Counter class for keeping token frequencies.\"\"\"\n\n def discard(self, min_freq, unknown_token):\n \"\"\"Discards tokens with frequency below min_frequency and represents them\n as `unknown_token`.\n\n Parameters\n ----------\n min_freq: int\n Tokens whose frequency is under min_freq is counted as `unknown_token` in\n the Counter returned.\n unknown_token: str\n The representation for any unknown token.\n\n Returns\n -------\n The Counter instance.\n\n Examples\n --------\n >>> a = gluonnlp.data.Counter({'a': 10, 'b': 1, 'c': 1})\n >>> a.discard(3, '<unk>')\n Counter({'a': 10, '<unk>': 2})\n \"\"\"\n freq = 0\n ret = Counter({})\n for token, count in self.items():\n if count < min_freq:\n freq += count\n else:\n ret[token] = count\n ret[unknown_token] = ret.get(unknown_token, 0) + freq\n return ret\n\n\nclass DefaultLookupDict(dict):\n \"\"\"Dictionary class with fall-back look-up with default value set in the constructor.\"\"\"\n\n def __init__(self, default, d=None):\n if d:\n super(DefaultLookupDict, self).__init__(d)\n else:\n super(DefaultLookupDict, self).__init__()\n self._default = default\n\n def __getitem__(self, k):\n return self.get(k, self._default)\n\n\ndef count_tokens(tokens, to_lower=False, counter=None):\n r\"\"\"Counts tokens in the specified string.\n\n For token_delim='(td)' and seq_delim='(sd)', a specified string of two sequences of tokens may\n look like::\n\n (td)token1(td)token2(td)token3(td)(sd)(td)token4(td)token5(td)(sd)\n\n\n Parameters\n ----------\n tokens : list of str\n A source list of tokens.\n to_lower : bool, default False\n Whether to convert the source source_str to the lower case.\n counter : Counter or None, default None\n The Counter instance to be updated with the counts of `tokens`. If\n None, return a new Counter instance counting tokens from `tokens`.\n\n Returns\n -------\n The `counter` Counter instance after being updated with the token\n counts of `source_str`. 
If `counter` is None, return a new Counter\n instance counting tokens from `source_str`.\n\n Examples\n --------\n >>> import re\n >>> source_str = ' Life is great ! \\n life is good . \\n'\n >>> source_str_tokens = filter(None, re.split(' |\\n', source_str))\n >>> gluonnlp.data.count_tokens(source_str_tokens)\n Counter({'is': 2, 'Life': 1, 'great': 1, '!': 1, 'life': 1, 'good': 1, '.': 1})\n\n \"\"\"\n if to_lower:\n tokens = [t.lower() for t in tokens]\n\n if counter is None:\n return Counter(tokens)\n else:\n counter.update(tokens)\n return counter\n\n\ndef concat_sequence(sequences):\n \"\"\"Concatenate sequences of tokens into a single flattened list of tokens.\n\n Parameters\n ----------\n sequences : list of list of object\n Sequences of tokens, each of which is an iterable of tokens.\n\n Returns\n -------\n Flattened list of tokens.\n\n \"\"\"\n return [token for seq in sequences for token in seq if token]\n\n\ndef slice_sequence(sequence, length, pad_last=False, pad_val=C.PAD_TOKEN, overlap=0):\n \"\"\"Slice a flat sequence of tokens into sequences tokens, with each\n inner sequence's length equal to the specified `length`, taking into account the requested\n sequence overlap.\n\n Parameters\n ----------\n sequence : list of object\n A flat list of tokens.\n length : int\n The length of each of the samples.\n pad_last : bool, default False\n Whether to pad the last sequence when its length doesn't align. If the last sequence's\n length doesn't align and ``pad_last`` is False, it will be dropped.\n pad_val : object, default\n The padding value to use when the padding of the last sequence is enabled. In general,\n the type of ``pad_val`` should be the same as the tokens.\n overlap : int, default 0\n The extra number of items in current sample that should overlap with the\n next sample.\n\n Returns\n -------\n List of list of tokens, with the length of each inner list equal to `length`.\n\n \"\"\"\n if length <= overlap:\n raise ValueError('length needs to be larger than overlap')\n\n if pad_last:\n pad_len = _slice_pad_length(len(sequence), length, overlap)\n sequence = sequence + [pad_val] * pad_len\n num_samples = (len(sequence) - length) // (length - overlap) + 1\n\n return [sequence[i * (length - overlap): ((i + 1) * length - i * overlap)]\n for i in range(num_samples)]\n\n\ndef _slice_pad_length(num_items, length, overlap=0):\n \"\"\"Calculate the padding length needed for sliced samples in order not to discard data.\n\n Parameters\n ----------\n num_items : int\n Number of items in dataset before collating.\n length : int\n The length of each of the samples.\n overlap : int, default 0\n The extra number of items in current sample that should overlap with the\n next sample.\n\n Returns\n -------\n Length of paddings.\n\n \"\"\"\n if length <= overlap:\n raise ValueError('length needs to be larger than overlap')\n\n step = length - overlap\n span = num_items - length\n residual = span % step\n if residual:\n return step - residual\n else:\n return 0\n\n\n_vocab_sha1 = {'wikitext-2': 'be36dc5238c2e7d69720881647ab72eb506d0131',\n 'gbw': 'ebb1a287ca14d8fa6f167c3a779e5e7ed63ac69f',\n 'WMT2014_src': '230ebb817b1d86950d71e2e765f192a4e4f34415',\n 'WMT2014_tgt': '230ebb817b1d86950d71e2e765f192a4e4f34415',\n 'book_corpus_wiki_en_cased': '2d62af22535ed51f35cc8e2abb607723c89c2636',\n 'book_corpus_wiki_en_uncased': 'a66073971aa0b1a262453fe51342e57166a8abcf',\n 'wiki_multilingual_cased': '0247cb442074237c38c62021f36b7a4dbd2e55f7',\n 'wiki_cn_cased': 
'ddebd8f3867bca5a61023f73326fb125cf12b4f5',\n 'wiki_cn': 'ddebd8f3867bca5a61023f73326fb125cf12b4f5',\n 'wiki_multilingual_uncased': '2b2514cc539047b9179e9d98a4e68c36db05c97a',\n 'wiki_multilingual': '2b2514cc539047b9179e9d98a4e68c36db05c97a'}\n\n\n_url_format = '{repo_url}gluon/dataset/vocab/{file_name}.zip'\n\n\ndef train_valid_split(dataset, valid_ratio=0.05):\n \"\"\"Split the dataset into training and validation sets.\n\n Parameters\n ----------\n dataset : list\n A list of training samples.\n valid_ratio : float, default 0.05\n Proportion of training samples to use for validation set\n range: [0, 1]\n\n Returns\n -------\n train : SimpleDataset\n valid : SimpleDataset\n \"\"\"\n if not 0.0 <= valid_ratio <= 1.0:\n raise ValueError('valid_ratio should be in [0, 1]')\n\n num_train = len(dataset)\n num_valid = np.ceil(num_train * valid_ratio).astype('int')\n indices = np.arange(num_train)\n\n np.random.shuffle(indices)\n valid = SimpleDataset([dataset[indices[i]] for i in range(num_valid)])\n train = SimpleDataset([dataset[indices[i + num_valid]] for i in range(num_train - num_valid)])\n return train, valid\n\n\ndef short_hash(name):\n if name not in _vocab_sha1:\n raise ValueError('Vocabulary for {name} is not available.'.format(name=name))\n return _vocab_sha1[name][:8]\n\n\ndef _load_pretrained_vocab(name, root=os.path.join(get_home_dir(), 'models'), cls=None):\n \"\"\"Load the accompanying vocabulary object for pre-trained model.\n\n Parameters\n ----------\n name : str\n Name of the vocabulary, usually the name of the dataset.\n root : str, default '$MXNET_HOME/models'\n Location for keeping the model parameters.\n MXNET_HOME defaults to '~/.mxnet'.\n cls : nlp.Vocab or nlp.vocab.BERTVocab, default nlp.Vocab\n\n Returns\n -------\n Vocab or nlp.vocab.BERTVocab\n Loaded vocabulary object for the pre-trained model.\n \"\"\"\n file_name = '{name}-{short_hash}'.format(name=name,\n short_hash=short_hash(name))\n root = os.path.expanduser(root)\n file_path = os.path.join(root, file_name + '.vocab')\n sha1_hash = _vocab_sha1[name]\n if os.path.exists(file_path):\n if check_sha1(file_path, sha1_hash):\n return _load_vocab_file(file_path, cls)\n else:\n print('Detected mismatch in the content of model vocab file. Downloading again.')\n else:\n print('Vocab file is not found. Downloading.')\n\n if not os.path.exists(root):\n os.makedirs(root)\n\n zip_file_path = os.path.join(root, file_name + '.zip')\n repo_url = _get_repo_url()\n if repo_url[-1] != '/':\n repo_url = repo_url + '/'\n download(_url_format.format(repo_url=repo_url, file_name=file_name),\n path=zip_file_path,\n overwrite=True)\n with zipfile.ZipFile(zip_file_path) as zf:\n zf.extractall(root)\n os.remove(zip_file_path)\n\n if check_sha1(file_path, sha1_hash):\n return _load_vocab_file(file_path, cls)\n else:\n raise ValueError('Downloaded file has different hash. 
Please try again.')\n\n\ndef _load_vocab_file(file_path, cls):\n with open(file_path, 'r') as f:\n if cls is None:\n from ..vocab import Vocab\n cls = Vocab\n\n return cls.from_json(f.read())\n\n\ndef _extract_archive(file, target_dir): # pylint: disable=redefined-builtin\n \"\"\"Extract archive file\n\n Parameters\n ----------\n file : str\n Absolute path of the archive file.\n target_dir : str\n Target directory of the archive to be uncompressed\n\n \"\"\"\n if file.endswith('.gz') or file.endswith('.tar') or file.endswith('.tgz'):\n archive = tarfile.open(file, 'r')\n elif file.endswith('.zip'):\n archive = zipfile.ZipFile(file, 'r')\n else:\n raise Exception('Unrecognized file type: ' + file)\n archive.extractall(path=target_dir)\n archive.close()\n\n\ndef line_splitter(s):\n \"\"\"Split a string at newlines.\n\n Parameters\n ----------\n s : str\n The string to be split\n\n Returns\n --------\n List[str]\n List of strings. Obtained by calling s.splitlines().\n\n \"\"\"\n return s.splitlines()\n\n\ndef whitespace_splitter(s):\n \"\"\"Split a string at whitespace (space, tab, newline, return, formfeed).\n\n Parameters\n ----------\n s : str\n The string to be split\n\n Returns\n --------\n List[str]\n List of strings. Obtained by calling s.split().\n \"\"\"\n return s.split()\n\n\nclass Splitter(object):\n \"\"\"Split a string based on a separator.\n\n Parameters\n ----------\n separator : str\n The separator based on which string is split.\n \"\"\"\n\n def __init__(self, separator=None):\n self._separator = separator\n\n def __call__(self, s):\n \"\"\"Split a string based on the separator.\n\n Parameters\n ----------\n s : str\n The string to be split\n\n Returns\n --------\n List[str]\n List of strings. Obtained by calling s.split(separator).\n \"\"\"\n return s.split(self._separator)\n\n\ndef _convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode.\n\n Parameters\n ----------\n text : str or bytes\n text to be converted to unicode\n\n Returns\n -------\n str\n unicode string\n \"\"\"\n py_version = sys.version_info[0]\n if py_version == 3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode('utf-8', 'ignore')\n else:\n raise ValueError('Unsupported string type: %s' % (type(text)))\n elif py_version == 2:\n if isinstance(text, str):\n return text.decode('utf-8', 'ignore')\n elif isinstance(text, unicode): # noqa: F821\n return text\n else:\n raise ValueError('Unsupported string type: %s' % (type(text)))\n else:\n raise ValueError('Not running on Python2 or Python 3?')\n"
] |
[
[
"numpy.arange",
"numpy.random.shuffle",
"numpy.ceil"
]
] |
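The shuffled-index logic in `train_valid_split` is pure numpy; a mxnet-free sketch of the same split (plain lists stand in for SimpleDataset):

import numpy as np

dataset = list(range(20))
valid_ratio = 0.25
num_valid = np.ceil(len(dataset) * valid_ratio).astype('int')  # 5
indices = np.arange(len(dataset))
np.random.shuffle(indices)  # in-place shuffle
valid = [dataset[i] for i in indices[:num_valid]]
train = [dataset[i] for i in indices[num_valid:]]
print(len(train), len(valid))  # 15 5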
jhunkeler/pysiaf
|
[
"5b6d00f43b438405924811ac0d76e030bed2803c"
] |
[
"pysiaf/tests/test_nirspec.py"
] |
[
"#!/usr/bin/env python\n\"\"\"Test NIRSpec transformations internally and against the test dataset delivered by IDT.\n\nAuthors\n-------\n\n Johannes Sahlmann\n\n\"\"\"\n\nimport os\n\nfrom astropy.io import fits\nfrom astropy.table import Table\nimport numpy as np\nimport pylab as pl\n# import pytest\n\n\n\nfrom ..constants import JWST_TEMPORARY_DATA_ROOT, TEST_DATA_ROOT\nfrom ..siaf import Siaf\n\n\ninstrument = 'NIRSpec'\n\ndef test_against_test_data():\n \"\"\"NIRSpec test data comparison.\n\n Mean and RMS difference between the IDT computations and the pysiaf computations are\n computed and compared against acceptable thresholds.\n\n \"\"\"\n siaf = Siaf(instrument)\n # directory that holds SIAF XML file\n # test_dir = os.path.join(JWST_TEMPORARY_DATA_ROOT, instrument, 'generate_test')\n # siaf_xml_file = os.path.join(test_dir, '{}_SIAF.xml'.format(instrument))\n # siaf = Siaf(instrument, filename=siaf_xml_file)\n\n test_data_dir = os.path.join(TEST_DATA_ROOT, instrument)\n\n\n include_tilt = False\n\n if include_tilt is False:\n ta_transform_data_dir = os.path.join(test_data_dir, 'testDataSet_TA', 'testDataNoTilt')\n\n filter_list = 'CLEAR F110W F140X'.split()\n sca_list = ['SCA491', 'SCA492']\n # filter_list = 'CLEAR'.split()\n # sca_list = ['SCA491']\n # sca_list = ['SCA492']\n\n difference_metrics = {}\n index = 0\n for sca_name in sca_list:\n for filter_name in filter_list:\n\n test_data_file = os.path.join(ta_transform_data_dir, 'testDataTA_{}{}.fits'.format(sca_name, filter_name))\n test_data = Table(fits.getdata(test_data_file))\n\n if sca_name == 'SCA491':\n AperName = 'NRS1_FULL_OSS'\n elif sca_name == 'SCA492':\n AperName = 'NRS2_FULL_OSS'\n\n aperture = siaf[AperName]\n\n if 0:\n pl.figure(figsize=(8, 8), facecolor='w', edgecolor='k'); pl.clf()\n aperture.plot(name_label=True)\n siaf['NRS2_FULL_OSS'].plot(name_label=True)\n pl.plot(test_data['XAN']*u.deg.to(u.arcsecond), test_data['YAN']*u.deg.to(u.arcsecond), 'b.')\n pl.show()\n\n 1/0\n\n\n # SCI to GWA detector side (Step 1. 
in Sections 2.3.3, 5.5.2 of JWST-STScI-005921 , see also Table 4.7.1)\n test_data['pysiaf_GWAout_X'], test_data['pysiaf_GWAout_Y'] = aperture.sci_to_gwa(test_data['SCA_X'], test_data['SCA_Y'])\n\n # effect of mirror, transform from GWA detector side to GWA skyward side\n if include_tilt is False:\n # last equation in Secion 5.5.2\n test_data['pysiaf_GWAin_X'] = -1 * test_data['pysiaf_GWAout_X']\n test_data['pysiaf_GWAin_Y'] = -1 * test_data['pysiaf_GWAout_Y']\n\n # transform to OTE frame (XAN, YAN)\n test_data['pysiaf_XAN'], test_data['pysiaf_YAN'] = aperture.gwa_to_ote(\n test_data['pysiaf_GWAin_X'], test_data['pysiaf_GWAin_Y'], filter_name)\n\n for axis_name in ['X', 'Y']:\n for parameter_name in ['{}AN'.format(axis_name)]:\n\n # compute differences between SIAF implementation and IDT test dataset\n test_data['difference_{}'.format(parameter_name)] = test_data['pysiaf_{}'.format(parameter_name)] - test_data['{}'.format(parameter_name)]\n\n for key_seed in ['mean', 'rms']:\n key_name = 'diff_{}_{}'.format(parameter_name, key_seed)\n if key_name not in difference_metrics.keys():\n difference_metrics[key_name] = []\n if key_seed == 'mean':\n difference_metrics[key_name].append(np.mean(test_data['difference_{}'.format(parameter_name)]))\n elif key_seed == 'rms':\n difference_metrics[key_name].append(np.std(test_data['difference_{}'.format(parameter_name)]))\n\n\n\n print('{} {} SCA_to_OTE transform comparison to {:>10} {:>10} MEAN={:+1.3e} RMS={:1.3e}'.format(sca_name, filter_name, AperName, parameter_name, difference_metrics['diff_{}_{}'.format(parameter_name, 'mean')][index], difference_metrics['diff_{}_{}'.format(parameter_name, 'rms')][index]))\n\n assert difference_metrics['diff_{}_{}'.format(parameter_name, 'mean')][index] < 1e-9\n assert difference_metrics['diff_{}_{}'.format(parameter_name, 'rms')][index] < 5e-9\n\n if 0:\n threshold = 1e-6\n if (difference_metrics['diff_{}_{}'.format(parameter_name, 'rms')][index] > threshold):\n pl.figure(figsize=(8, 8), facecolor='w', edgecolor='k'); pl.clf()\n pl.quiver(test_data['SCA_X'], test_data['SCA_Y'], test_data['difference_XAN'], test_data['difference_YAN'], angles='xy')\n pl.title('Difference IDT and pysiaf')\n pl.show()\n\n index += 1\n\n\ndef test_nirspec_aperture_transforms(verbose=False):\n \"\"\"Test transformations between frames.\n\n Transform back and forth between frames and verify that input==output.\n\n Parameters\n ----------\n verbose\n\n \"\"\"\n\n siaf = Siaf(instrument)\n\n labels = ['X', 'Y']\n threshold = 0.2\n\n from_frame = 'sci'\n to_frames = 'det gwa idl tel'.split()\n\n x_sci = np.linspace(-10, 10, 3)\n y_sci = np.linspace(10, -10, 3)\n\n for aper_name in siaf.apertures.keys():\n skip = False\n\n # aperture\n aperture = siaf[aper_name]\n\n if (aperture.AperType in ['COMPOUND', 'TRANSFORM', 'SLIT']) or ('_FULL' not in aper_name):\n skip = True\n # if (aperture.AperType in ['COMPOUND', 'TRANSFORM']) or (\n # siaf.instrument in ['NIRCam', 'MIRI', 'NIRSpec'] and\n # aperture.AperType == 'SLIT'):\n # skip = True\n\n if skip is False:\n # test transformations\n if verbose:\n print('testing {} {}'.format(siaf.instrument, aper_name))\n\n for to_frame in to_frames:\n forward_transform = getattr(aperture, '{}_to_{}'.format(from_frame, to_frame))\n backward_transform = getattr(aperture, '{}_to_{}'.format(to_frame, from_frame))\n\n x_out, y_out = backward_transform(*forward_transform(x_sci, y_sci))\n x_mean_error = np.mean(np.abs(x_sci - x_out))\n y_mean_error = np.mean(np.abs(y_sci - y_out))\n for i, error in 
enumerate([x_mean_error, y_mean_error]):\n if verbose:\n print('{} {}: Error in {}<->{} {}-transform is {:02.6f})'.format(\n siaf.instrument, aper_name, from_frame, to_frame, labels[i], error))\n assert error < threshold\n"
] |
[
[
"numpy.abs",
"numpy.linspace"
]
] |
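The round-trip check in `test_nirspec_aperture_transforms` reduces to: sample coordinates with `np.linspace`, push them through a forward/backward transform pair, and bound the mean `np.abs` error. A sketch with dummy transforms standing in for the SIAF aperture methods:

import numpy as np

def forward(x, y):   # stand-in for e.g. aperture.sci_to_tel
    return x * 2.0 + 1.0, y - 3.0

def backward(x, y):  # stand-in for the matching inverse transform
    return (x - 1.0) / 2.0, y + 3.0

x_sci = np.linspace(-10, 10, 3)
y_sci = np.linspace(10, -10, 3)
x_out, y_out = backward(*forward(x_sci, y_sci))
assert np.mean(np.abs(x_sci - x_out)) < 0.2  # same threshold as the test above
assert np.mean(np.abs(y_sci - y_out)) < 0.2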
vuducmanh1407/sign-language-translation
|
[
"8df83befcf2896ad23dbcf7f34eb22a6884495fb"
] |
[
"modules/embeddings.py"
] |
[
"import numpy as np\nfrom turtle import forward\nimport torch\nimport torch.nn as nn\n\nclass WordEmbedding(nn.Module):\n \n def __init__(\n self,\n embedding_size=300,\n hidden_size=512,\n pretrained_embedding=None,\n device=None\n ):\n\n super(WordEmbedding, self).__init__()\n\n self.pretrained_embedding = pretrained_embedding\n self.device = device\n self.linear = nn.Linear(int(embedding_size), int(hidden_size))\n self.batchnorm = nn.BatchNorm1d(num_features=int(hidden_size))\n self.activation = nn.Softmax(dim=2)\n\n \n def forward(self, tensor):\n if not isinstance(tensor, list):\n tensor = tensor.tolist()\n tensor = [[self.pretrained_embedding[int(x)] for x in row] for row in tensor]\n tensor = np.array(tensor)\n tensor = torch.Tensor(tensor)\n tensor = self.device.data_to_device(tensor)\n tensor = self.linear(tensor).transpose(1,2)\n if tensor.size(0) > 1:\n tensor = self.batchnorm(tensor).transpose(1,2)\n else:\n tensor = tensor.transpose(1,2)\n tensor = self.activation(tensor)\n return tensor\n\n\n\n\n \n\n\n"
] |
[
[
"torch.nn.Softmax",
"numpy.array",
"torch.Tensor"
]
] |
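A usage sketch for the `WordEmbedding` module above, assuming the class is in scope; the `CpuDevice` wrapper and the random `pretrained` matrix are placeholders for the repo's device helper and real pretrained vectors:

```python
import numpy as np
import torch

class CpuDevice:
    # stand-in for the repo's device wrapper; only data_to_device() is needed here
    def data_to_device(self, t):
        return t.cpu()

vocab_size, embedding_size = 100, 300
pretrained = np.random.randn(vocab_size, embedding_size).astype('float32')

model = WordEmbedding(embedding_size=embedding_size, hidden_size=512,
                      pretrained_embedding=pretrained, device=CpuDevice())
model.eval()  # use BatchNorm1d running statistics

token_ids = torch.randint(0, vocab_size, (2, 7))  # two sequences of 7 token ids
with torch.no_grad():
    out = model(token_ids)
print(out.shape)  # torch.Size([2, 7, 512]), softmax-normalized over the hidden dim
```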
abhijit810/Twitter-ETL-Airflow
|
[
"c9da4c33f82585609fc49885937620dfdea51bab"
] |
[
"airflow/dags/Twitter_DAG.py"
] |
[
"from datetime import datetime, timedelta\nfrom airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.contrib.hooks.cassandra_hook import CassandraHook\nimport pandas as pd\n\nfrom airflow.models import Variable\n\nfrom cassandra.cluster import Cluster\nfrom cassandra.auth import PlainTextAuthProvider\nfrom cassandra_sigv4.auth import SigV4AuthProvider\n\nimport tweepy\nimport json\n\nconsumer_key = Variable.get(\"consumer_key\")\nconsumer_secret = Variable.get(\"consumer_secret\")\naccess_token = Variable.get(\"access_token\")\naccess_token_secret = Variable.get(\"access_token_secret\")\nstate= 'Valid'\n\ndef put_into_cassandra(data_frame, table_name):\n cluster = Cluster(['127.0.0.1'], port=9042)\n session = cluster.connect('twitter_ks')\n columns = \" \"\n column_count = \" \"\n for column_name in data_frame.columns:\n formatted = (str(column_name) + ',').replace('.', '_')\n columns = columns + formatted\n column_count = column_count + \"?,\"\n columns = columns[:-1]\n column_count = column_count[:-1]\n query = \"INSERT INTO \"+table_name+\"(\"+columns+\") VALUES (\"+column_count+\")\"\n prepared = session.prepare(query)\n\n for item in data_frame.iterrows():\n row = []\n for cell in item[1]:\n row.append(str(cell))\n session.execute(prepared, row)\n\ndef get_tweets(username):\n\n # Authorization to consumer key and consumer secret\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n\n # Access to user's access key and access secret\n auth.set_access_token(access_token, access_token_secret)\n\n # Calling api\n api = tweepy.API(auth)\n\n tweets = api.user_timeline(screen_name=username)\n\n # create array of tweet information: username,\n # tweet id, date/time, text\n tweets_for_csv = [tweet.text for tweet in tweets] # CSV file created\n tweet_series = pd.Series(tweets_for_csv)\n \n return tweet_series\n\ndef fetch_followed_tweets(state, ti):\n users_list=ti.xcom_pull(key='users_list', task_ids='Fetch_users_from_Twitter')\n for user in users_list:\n frame = { 'screen_name': user, 'tweet': get_tweets(user) }\n tweets_df = pd.DataFrame(frame)\n print(tweets_df)\n put_into_cassandra(tweets_df, 'dim_tweets')\n\ndef get_users(state, ti):\n\n # Authorization to consumer key and consumer secret\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n\n # Access to user's access key and access secret\n auth.set_access_token(access_token, access_token_secret)\n\n # Calling api\n api = tweepy.API(auth)\n users = api.search_users(100)\n \n users_for_json = [user._json for user in users] # CSV file created\n users_df = pd.json_normalize(users_for_json)\n\n users_list = users_df['screen_name'].to_list()\n put_into_cassandra(users_df, 'dim_users')\n\n ti.xcom_push(key='users_list', value=users_list)\n\n #return users_df\n\n# Define the default dag arguments.\ndefault_args = {\n\t\t'owner' : 'Abhijit Patra',\n\t\t'depends_on_past' :False,\n\t\t'email' :['[email protected]'],\n\t\t'email_on_failure': False,\n\t\t'email_on_retry': False,\n\t\t'retries': 1,\n\t\t'retry_delay': timedelta(minutes=1)\n\t\t}\n\n\n# Define the dag, the start date and how frequently it runs.\ndag = DAG(\n\t\tdag_id='DAG_Twitter_ETL',\n\t\tdefault_args=default_args,\n\t\tstart_date=datetime(2022,1,1),\n\t\tschedule_interval=None,\n catchup=False\n )\n\nFetch_users = PythonOperator(\n task_id='Fetch_users_from_Twitter',\n python_callable=get_users,\n provide_context=True,\n op_kwargs={\n 'state':state\n },\n 
dag=dag\n)\n\nFetch_tweets = PythonOperator(\n task_id='Fetch_tweets_from_Twitter',\n python_callable=fetch_followed_tweets,\n provide_context=True,\n op_kwargs={\n 'state':state\n },\n dag=dag\n)\n\nFetch_users >> Fetch_tweets"
] |
[
[
"pandas.json_normalize",
"pandas.Series",
"pandas.DataFrame"
]
] |
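A standalone sketch of how `put_into_cassandra` above derives its prepared INSERT statement from a DataFrame's columns (dots become underscores because `pd.json_normalize` emits dotted column names); no Cassandra connection is needed to illustrate the string-building step, and `build_insert_query` is an illustrative name, not part of the repo:

```python
import pandas as pd

def build_insert_query(data_frame: pd.DataFrame, table_name: str) -> str:
    # mirror the column formatting in put_into_cassandra: '.' -> '_'
    columns = [str(c).replace('.', '_') for c in data_frame.columns]
    placeholders = ['?'] * len(columns)
    return 'INSERT INTO {}({}) VALUES ({})'.format(
        table_name, ','.join(columns), ','.join(placeholders))

df = pd.DataFrame({'screen_name': ['alice'], 'user.id': [42]})
print(build_insert_query(df, 'dim_users'))
# INSERT INTO dim_users(screen_name,user_id) VALUES (?,?)
```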
zhanghhong/efficientdet-tf2-zhh
|
[
"0ff4d5e2cc60746740ad4309fb90031867e84db9"
] |
[
"utils/utils.py"
] |
[
"import math\r\nimport warnings\r\n\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom tensorflow import keras\r\n\r\n\r\ndef letterbox_image(image, size):\r\n iw, ih = image.size\r\n w, h = size\r\n scale = min(w/iw, h/ih)\r\n nw = int(iw*scale)\r\n nh = int(ih*scale)\r\n\r\n image = image.resize((nw,nh), Image.BICUBIC)\r\n new_image = Image.new('RGB', size, (0,0,0))\r\n new_image.paste(image, ((w-nw)//2, (h-nh)//2))\r\n return new_image\r\n\r\ndef efficientdet_correct_boxes(top, left, bottom, right, input_shape, image_shape):\r\n new_shape = image_shape*np.min(input_shape/image_shape)\r\n\r\n offset = (input_shape-new_shape)/2./input_shape\r\n scale = input_shape/new_shape\r\n\r\n box_yx = np.concatenate(((top+bottom)/2,(left+right)/2),axis=-1)\r\n box_hw = np.concatenate((bottom-top,right-left),axis=-1)\r\n\r\n box_yx = (box_yx - offset) * scale\r\n box_hw *= scale\r\n\r\n box_mins = box_yx - (box_hw / 2.)\r\n box_maxes = box_yx + (box_hw / 2.)\r\n boxes = np.concatenate([\r\n box_mins[:, 0:1],\r\n box_mins[:, 1:2],\r\n box_maxes[:, 0:1],\r\n box_maxes[:, 1:2]\r\n ],axis=-1)\r\n boxes *= np.concatenate([image_shape, image_shape],axis=-1)\r\n return boxes\r\n\r\nclass PriorProbability(keras.initializers.Initializer):\r\n \"\"\" Apply a prior probability to the weights.\r\n \"\"\"\r\n\r\n def __init__(self, probability=0.01):\r\n self.probability = probability\r\n\r\n def get_config(self):\r\n return {\r\n 'probability': self.probability\r\n }\r\n\r\n def __call__(self, shape, dtype=None):\r\n # set bias to -log((1 - p)/p) for foreground\r\n result = np.ones(shape) * -math.log((1 - self.probability) / self.probability)\r\n\r\n return result\r\n\r\nclass BBoxUtility(object):\r\n def __init__(self, num_classes, priors=None, overlap_threshold=0.5,ignore_threshold=0.4,\r\n nms_thresh=0.3, top_k=400):\r\n self.num_classes = num_classes\r\n self.priors = priors\r\n self.num_priors = 0 if priors is None else len(priors)\r\n self.overlap_threshold = overlap_threshold\r\n self.ignore_threshold = ignore_threshold\r\n self._nms_thresh = nms_thresh\r\n self._top_k = top_k\r\n\r\n def iou(self, box):\r\n # 计算出每个真实框与所有的先验框的iou\r\n # 判断真实框与先验框的重合情况\r\n inter_upleft = np.maximum(self.priors[:, :2], box[:2])\r\n inter_botright = np.minimum(self.priors[:, 2:4], box[2:])\r\n\r\n inter_wh = inter_botright - inter_upleft\r\n inter_wh = np.maximum(inter_wh, 0)\r\n inter = inter_wh[:, 0] * inter_wh[:, 1]\r\n # 真实框的面积\r\n area_true = (box[2] - box[0]) * (box[3] - box[1])\r\n # 先验框的面积\r\n area_gt = (self.priors[:, 2] - self.priors[:, 0])*(self.priors[:, 3] - self.priors[:, 1])\r\n # 计算iou\r\n union = area_true + area_gt - inter\r\n\r\n iou = inter / union\r\n return iou\r\n\r\n def encode_ignore_box(self, box, return_iou=True):\r\n iou = self.iou(box)\r\n ignored_box = np.zeros((self.num_priors, 1))\r\n #---------------------------------------------------#\r\n # 找到处于忽略门限值范围内的先验框\r\n #---------------------------------------------------#\r\n assign_mask_ignore = (iou > self.ignore_threshold) & (iou < self.overlap_threshold)\r\n ignored_box[:, 0][assign_mask_ignore] = iou[assign_mask_ignore]\r\n\r\n encoded_box = np.zeros((self.num_priors, 4 + return_iou))\r\n #---------------------------------------------------#\r\n # 找到每一个真实框,重合程度较高的先验框\r\n #---------------------------------------------------#\r\n assign_mask = iou > self.overlap_threshold\r\n if not assign_mask.any():\r\n assign_mask[iou.argmax()] = True\r\n if return_iou:\r\n encoded_box[:, -1][assign_mask] = iou[assign_mask]\r\n \r\n assigned_priors = 
self.priors[assign_mask]\r\n #---------------------------------------------#\r\n # 逆向编码,将真实框转化为Efficientdet预测结果的格式\r\n # 先计算真实框的中心与长宽\r\n #---------------------------------------------#\r\n box_center = 0.5 * (box[:2] + box[2:])\r\n box_wh = box[2:] - box[:2]\r\n #---------------------------------------------#\r\n # 再计算重合度较高的先验框的中心与长宽\r\n #---------------------------------------------#\r\n assigned_priors_center = 0.5 * (assigned_priors[:, :2] +\r\n assigned_priors[:, 2:4])\r\n assigned_priors_wh = (assigned_priors[:, 2:4] -\r\n assigned_priors[:, :2])\r\n \r\n #------------------------------------------------#\r\n # 逆向求取efficientdet应该有的预测结果\r\n # 先求取中心的预测结果,再求取宽高的预测结果\r\n #------------------------------------------------#\r\n encoded_box[:, :2][assign_mask] = box_center - assigned_priors_center\r\n encoded_box[:, :2][assign_mask] /= assigned_priors_wh\r\n\r\n encoded_box[:, 2:4][assign_mask] = np.log(box_wh / assigned_priors_wh)\r\n\r\n return encoded_box.ravel(), ignored_box.ravel()\r\n\r\n def assign_boxes(self, boxes):\r\n #---------------------------------------------------#\r\n # assignment分为3个部分\r\n # :4 的内容为网络应该有的回归预测结果\r\n # 4 的内容为当前先验框是否包含目标\r\n # 5:-1 的内容为先验框所对应的种类\r\n # -1 的内容为当前先验框是否包含目标\r\n #---------------------------------------------------#\r\n assignment = np.zeros((self.num_priors, 4 + 1 + self.num_classes + 1))\r\n assignment[:, 4] = 0.0\r\n assignment[:, -1] = 0.0\r\n if len(boxes) == 0:\r\n return assignment\r\n\r\n #---------------------------------------------------#\r\n # 对每一个真实框都进行iou计算\r\n #---------------------------------------------------#\r\n apply_along_axis_boxes = np.apply_along_axis(self.encode_ignore_box, 1, boxes[:, :4])\r\n encoded_boxes = np.array([apply_along_axis_boxes[i, 0] for i in range(len(apply_along_axis_boxes))])\r\n ingored_boxes = np.array([apply_along_axis_boxes[i, 1] for i in range(len(apply_along_axis_boxes))])\r\n\r\n #---------------------------------------------------#\r\n # 在reshape后,获得的ingored_boxes的shape为:\r\n # [num_true_box, num_priors, 1] 其中1为iou\r\n #---------------------------------------------------#\r\n ingored_boxes = ingored_boxes.reshape(-1, self.num_priors, 1)\r\n ignore_iou = ingored_boxes[:, :, 0].max(axis=0)\r\n ignore_iou_mask = ignore_iou > 0\r\n\r\n assignment[:, 4][ignore_iou_mask] = -1\r\n assignment[:, -1][ignore_iou_mask] = -1\r\n\r\n #---------------------------------------------------#\r\n # 在reshape后,获得的encoded_boxes的shape为:\r\n # [num_true_box, num_priors, 4+1]\r\n # 4是编码后的结果,1为iou\r\n #---------------------------------------------------#\r\n encoded_boxes = encoded_boxes.reshape(-1, self.num_priors, 5)\r\n\r\n #---------------------------------------------------#\r\n # [num_priors]求取每一个先验框重合度最大的真实框\r\n #---------------------------------------------------#\r\n best_iou = encoded_boxes[:, :, -1].max(axis=0)\r\n best_iou_idx = encoded_boxes[:, :, -1].argmax(axis=0)\r\n best_iou_mask = best_iou > 0\r\n best_iou_idx = best_iou_idx[best_iou_mask]\r\n\r\n #---------------------------------------------------#\r\n # 计算一共有多少先验框满足需求\r\n #---------------------------------------------------#\r\n assign_num = len(best_iou_idx)\r\n\r\n # 将编码后的真实框取出\r\n encoded_boxes = encoded_boxes[:, best_iou_mask, :]\r\n assignment[:, :4][best_iou_mask] = encoded_boxes[best_iou_idx,np.arange(assign_num),:4]\r\n #----------------------------------------------------------#\r\n # 4和-1代表为当前先验框是否包含目标\r\n #----------------------------------------------------------#\r\n assignment[:, 4][best_iou_mask] = 1\r\n assignment[:, 
5:-1][best_iou_mask] = boxes[best_iou_idx, 4:]\r\n assignment[:, -1][best_iou_mask] = 1\r\n\r\n return assignment\r\n\r\n def decode_boxes(self, mbox_loc, mbox_priorbox):\r\n # 获得先验框的宽与高\r\n prior_width = mbox_priorbox[:, 2] - mbox_priorbox[:, 0]\r\n prior_height = mbox_priorbox[:, 3] - mbox_priorbox[:, 1]\r\n # 获得先验框的中心点\r\n prior_center_x = 0.5 * (mbox_priorbox[:, 2] + mbox_priorbox[:, 0])\r\n prior_center_y = 0.5 * (mbox_priorbox[:, 3] + mbox_priorbox[:, 1])\r\n\r\n # 真实框距离先验框中心的xy轴偏移情况\r\n decode_bbox_center_x = mbox_loc[:, 0] * prior_width\r\n decode_bbox_center_x += prior_center_x\r\n decode_bbox_center_y = mbox_loc[:, 1] * prior_height\r\n decode_bbox_center_y += prior_center_y\r\n \r\n # 真实框的宽与高的求取\r\n decode_bbox_width = np.exp(mbox_loc[:, 2])\r\n decode_bbox_width *= prior_width\r\n decode_bbox_height = np.exp(mbox_loc[:, 3])\r\n decode_bbox_height *= prior_height\r\n\r\n # 获取真实框的左上角与右下角\r\n decode_bbox_xmin = decode_bbox_center_x - 0.5 * decode_bbox_width\r\n decode_bbox_ymin = decode_bbox_center_y - 0.5 * decode_bbox_height\r\n decode_bbox_xmax = decode_bbox_center_x + 0.5 * decode_bbox_width\r\n decode_bbox_ymax = decode_bbox_center_y + 0.5 * decode_bbox_height\r\n\r\n # 真实框的左上角与右下角进行堆叠\r\n decode_bbox = np.concatenate((decode_bbox_xmin[:, None],\r\n decode_bbox_ymin[:, None],\r\n decode_bbox_xmax[:, None],\r\n decode_bbox_ymax[:, None]), axis=-1)\r\n # 防止超出0与1\r\n decode_bbox = np.minimum(np.maximum(decode_bbox, 0.0), 1.0)\r\n return decode_bbox\r\n\r\n def detection_out(self, predictions, mbox_priorbox, confidence_threshold=0.4):\r\n #---------------------------------------------------#\r\n # 预测结果分为两部分,0为回归预测结果\r\n # 1为分类预测结果\r\n #---------------------------------------------------#\r\n mbox_loc = predictions[0]\r\n mbox_conf = predictions[1]\r\n \r\n #------------------------#\r\n # 获得先验框\r\n #------------------------#\r\n mbox_priorbox = mbox_priorbox\r\n \r\n results = []\r\n # 对每一张图片进行处理,由于在predict.py的时候,我们只输入一张图片,所以for i in range(len(mbox_loc))只进行一次\r\n for i in range(len(mbox_loc)):\r\n #------------------------------------------------#\r\n # 非极大抑制过程与Efficientdet视频中不同\r\n # 具体过程可参考\r\n # https://www.bilibili.com/video/BV1Lz411B7nQ\r\n #------------------------------------------------#\r\n decode_bbox = self.decode_boxes(mbox_loc[i], mbox_priorbox)\r\n\r\n bs_class_conf = mbox_conf[i]\r\n \r\n class_conf = np.expand_dims(np.max(bs_class_conf, 1),-1)\r\n class_pred = np.expand_dims(np.argmax(bs_class_conf, 1),-1)\r\n #--------------------------------#\r\n # 判断置信度是否大于门限要求\r\n #--------------------------------#\r\n conf_mask = (class_conf >= confidence_threshold)[:,0]\r\n\r\n #--------------------------------#\r\n # 将预测结果进行堆叠\r\n #--------------------------------#\r\n detections = np.concatenate((decode_bbox[conf_mask], class_conf[conf_mask], class_pred[conf_mask]), 1)\r\n unique_class = np.unique(detections[:,-1])\r\n\r\n best_box = []\r\n if len(unique_class) == 0:\r\n results.append(best_box)\r\n continue\r\n #---------------------------------------------------------------#\r\n # 4、对种类进行循环,\r\n # 非极大抑制的作用是筛选出一定区域内属于同一种类得分最大的框,\r\n # 对种类进行循环可以帮助我们对每一个类分别进行非极大抑制。\r\n #---------------------------------------------------------------#\r\n for c in unique_class:\r\n cls_mask = detections[:,-1] == c\r\n detection = detections[cls_mask]\r\n scores = detection[:,4]\r\n #------------------------------------------#\r\n # 5、根据得分对该种类进行从大到小排序。\r\n #------------------------------------------#\r\n arg_sort = np.argsort(scores)[::-1]\r\n detection = detection[arg_sort]\r\n while 
np.shape(detection)[0]>0:\r\n #-------------------------------------------------------------------------------------#\r\n # 6、每次取出得分最大的框,计算其与其它所有预测框的重合程度,重合程度过大的则剔除。\r\n #-------------------------------------------------------------------------------------#\r\n best_box.append(detection[0])\r\n if len(detection) == 1:\r\n break\r\n ious = iou(best_box[-1],detection[1:])\r\n detection = detection[1:][ious<self._nms_thresh]\r\n results.append(best_box)\r\n #-----------------------------------------------------------------------------#\r\n # 获得,在所有预测结果里面,置信度比较高的框\r\n # 还有,利用先验框和efficientdet的预测结果,处理获得了预测框的位置\r\n #-----------------------------------------------------------------------------#\r\n return results\r\n \r\ndef iou(b1,b2):\r\n b1_x1, b1_y1, b1_x2, b1_y2 = b1[0], b1[1], b1[2], b1[3]\r\n b2_x1, b2_y1, b2_x2, b2_y2 = b2[:, 0], b2[:, 1], b2[:, 2], b2[:, 3]\r\n\r\n inter_rect_x1 = np.maximum(b1_x1, b2_x1)\r\n inter_rect_y1 = np.maximum(b1_y1, b2_y1)\r\n inter_rect_x2 = np.minimum(b1_x2, b2_x2)\r\n inter_rect_y2 = np.minimum(b1_y2, b2_y2)\r\n \r\n inter_area = np.maximum(inter_rect_x2 - inter_rect_x1, 0) * \\\r\n np.maximum(inter_rect_y2 - inter_rect_y1, 0)\r\n \r\n area_b1 = (b1_x2-b1_x1)*(b1_y2-b1_y1)\r\n area_b2 = (b2_x2-b2_x1)*(b2_y2-b2_y1)\r\n \r\n iou = inter_area/np.maximum((area_b1+area_b2-inter_area),1e-6)\r\n return iou\r\n\r\nclass ModelCheckpoint(keras.callbacks.Callback):\r\n def __init__(self, filepath, monitor='val_loss', verbose=0,\r\n save_best_only=False, save_weights_only=False,\r\n mode='auto', period=1):\r\n super(ModelCheckpoint, self).__init__()\r\n self.monitor = monitor\r\n self.verbose = verbose\r\n self.filepath = filepath\r\n self.save_best_only = save_best_only\r\n self.save_weights_only = save_weights_only\r\n self.period = period\r\n self.epochs_since_last_save = 0\r\n\r\n if mode not in ['auto', 'min', 'max']:\r\n warnings.warn('ModelCheckpoint mode %s is unknown, '\r\n 'fallback to auto mode.' % (mode),\r\n RuntimeWarning)\r\n mode = 'auto'\r\n\r\n if mode == 'min':\r\n self.monitor_op = np.less\r\n self.best = np.Inf\r\n elif mode == 'max':\r\n self.monitor_op = np.greater\r\n self.best = -np.Inf\r\n else:\r\n if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):\r\n self.monitor_op = np.greater\r\n self.best = -np.Inf\r\n else:\r\n self.monitor_op = np.less\r\n self.best = np.Inf\r\n\r\n def on_epoch_end(self, epoch, logs=None):\r\n logs = logs or {}\r\n self.epochs_since_last_save += 1\r\n if self.epochs_since_last_save >= self.period:\r\n self.epochs_since_last_save = 0\r\n filepath = self.filepath.format(epoch=epoch + 1, **logs)\r\n if self.save_best_only:\r\n current = logs.get(self.monitor)\r\n if current is None:\r\n warnings.warn('Can save best model only with %s available, '\r\n 'skipping.' 
% (self.monitor), RuntimeWarning)\r\n else:\r\n if self.monitor_op(current, self.best):\r\n if self.verbose > 0:\r\n print('\\nEpoch %05d: %s improved from %0.5f to %0.5f,'\r\n ' saving model to %s'\r\n % (epoch + 1, self.monitor, self.best,\r\n current, filepath))\r\n self.best = current\r\n if self.save_weights_only:\r\n self.model.save_weights(filepath, overwrite=True)\r\n else:\r\n self.model.save(filepath, overwrite=True)\r\n else:\r\n if self.verbose > 0:\r\n print('\\nEpoch %05d: %s did not improve' %\r\n (epoch + 1, self.monitor))\r\n else:\r\n if self.verbose > 0:\r\n print('\\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))\r\n if self.save_weights_only:\r\n self.model.save_weights(filepath, overwrite=True)\r\n else:\r\n self.model.save(filepath, overwrite=True)\r\n\r\n"
] |
[
[
"numpy.log",
"numpy.maximum",
"numpy.minimum",
"numpy.min",
"numpy.unique",
"numpy.arange",
"numpy.ones",
"numpy.concatenate",
"numpy.max",
"numpy.apply_along_axis",
"numpy.argmax",
"numpy.shape",
"numpy.argsort",
"numpy.exp",
"numpy.zeros"
]
] |
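A small, runnable check of the module-level `iou` helper defined in the entry above (assuming it is in scope), with hand-computed expected values:

```python
import numpy as np

b1 = np.array([0.0, 0.0, 2.0, 2.0])            # single box: (x1, y1, x2, y2)
b2 = np.array([[1.0, 1.0, 3.0, 3.0],            # overlaps b1 in a 1x1 patch
               [2.0, 2.0, 4.0, 4.0]])           # only touches b1 at a corner

# intersection 1.0 over union 4 + 4 - 1 = 7 -> 1/7; corner contact -> 0
print(iou(b1, b2))  # [0.14285714 0.        ]
```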
cefect/myCanFlood
|
[
"84dfcee10d86d6fb3edbe5e3ce452f3f15ca6919"
] |
[
"canflood/hlpr/plug.py"
] |
[
"'''\nCreated on Feb. 25, 2020\n\n@author: cefect\n\n\nhelper functions for use in plugins\n'''\n\n\n\n#==============================================================================\n# imports------------\n#==============================================================================\n#python\nimport logging, configparser, datetime, sys, os, types\nimport pandas as pd\nimport numpy as np\n\n#Qgis imports\nfrom qgis.core import QgsVectorLayer, Qgis, QgsProject, QgsLogger, QgsMessageLog, QgsMapLayer\nfrom qgis.gui import QgisInterface\n\n#pyQt\nfrom PyQt5.QtWidgets import QFileDialog, QGroupBox, QComboBox, QTableWidgetItem\nfrom PyQt5.QtGui import QStandardItem, QStandardItemModel\nfrom PyQt5.QtCore import Qt, QAbstractTableModel, QObject \nfrom PyQt5 import QtCore\n\n\n#==============================================================================\n# custom imports\n#==============================================================================\n\nfrom hlpr.exceptions import QError as Error\nfrom hlpr.Q import MyFeedBackQ, Qcoms\nimport hlpr.Q\nfrom hlpr.basic import force_open_dir, view, ComWrkr\nfrom hlpr.plt_qt import PltWindow\n \n\n#==============================================================================\n# classes-----------\n#==============================================================================\nclass QMenuAction(Qcoms): #base class for actions assigned to Q menus\n \n \n groupName = 'CanFlood' #default group for loading layers to canvas\n #action parameters\n icon_fn = 'help-circle.svg'\n icon_name = 'SomeAction'\n icon_location = 'menu'\n \n \n def __init__(self,\n iface = None,\n session=None, #main CanFlood.CanFlood session worker \n logger=None,\n ):\n \n \"\"\"WARNING: make sure QprojPlug children dont call this\"\"\"\n \n \n self.qproj_setup(plogger=logger, iface=iface, session=session)\n \n \"\"\"dont execut super cascade\n super().__init__(logger=self.logger,\n **kwargs) #initilzie teh baseclass\"\"\"\n \n \n \n \n self.connect_slots()\n \n def qproj_setup(self,\n iface = None,\n plogger=None, #alternate logger for standalone tests\n session=None, #main CanFlood.CanFlood session worker \n \n ): #project inits for Dialog Classes\n \n \"\"\"\n called by QprojPlug during custom __init__\n called by QMenuAction during default __init__\n \"\"\"\n\n #=======================================================================\n # attacyhments\n #=======================================================================\n self.session=session #used for passing between windows\n \n self.launch_actions = dict() #container of actions to execute when 'launch' is pressed\n\n \n #=======================================================================\n # interface\n #=======================================================================\n if not iface is None:\n \"\"\"only checking real iface for compatabilitgy\"\"\"\n assert isinstance(iface, QgisInterface), 'got bad iface type: %s'%type(iface)\n self.iface = iface\n \n #=======================================================================\n # logger\n #=======================================================================\n \"\"\"for debug runs... 
pass a plugLogger like class\n see dial_coms\"\"\"\n if plogger is None: \n \"\"\"this needs iface to be set\"\"\"\n plogger = plugLogger(self) \n \n self.logger = plogger\n \n \n #=======================================================================\n # Qsetupts\n #=======================================================================\n self.qproj = QgsProject.instance()\n\n \n \n self.crs = self.qproj.crs()\n \n self.layerTree = QgsProject.instance().layerTreeRoot() #for groups\n \n \"\"\"connect to UI's progress bar\n expects 'progressBar' as the widget name\n start feedback instance\"\"\"\n \n self.setup_feedback(progressBar = self.progressBar,\n feedback = MyFeedBackQ())\n \n\n self.set_vdrivers()\n \n #set some dummies for children\n self.qap = None #I dont think this ever gets referenced\n self.algo_init=True\n \n #=======================================================================\n # default directories\n #=======================================================================\n\n self.pars_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), '_pars')\n assert os.path.exists(self.pars_dir)\n \n def connect_slots(self): #placeholder for connection slots (for consistency)\n pass\n def launch(self):\n raise Error('overwrite with your own method')\n \n\nclass QprojPlug(QMenuAction): #baseclass for plugin dialogs\n \n \n \n tag='scenario1'\n overwrite=True\n wd = ''\n progress = 0\n \n \n loadRes = False #whether to load layers to canvas\n \n plt_window = False #control whether to launch the plot window\n \n first_launch=True\n \n dev=False #handle for development code\n \n \n \n \"\"\"not a great way to init this one\n Plugin classes are only initilaizing the first baseclass\n def __init__(self):\n self.logger = logger()\"\"\"\n \n\n\n def launch(self): #placeholder for launching the dialog\n \"\"\"allows children to customize what happens when called\"\"\"\n log = self.logger.getChild('launch')\n \n #=======================================================================\n # launch setup\n #=======================================================================\n if self.first_launch:\n self.connect_slots()\n #=======================================================================\n # #customs\n #=======================================================================\n \"\"\"\n lets each dialog attach custom functions when they are launched\n useful for automatically setting some dialog boxes\n \n prioritizinmg inheritanve over customs\n \"\"\"\n for fName, f in self.launch_actions.items():\n log.debug('%s: %s'%(fName, f))\n try:\n f()\n except Exception as e:\n log.warning('failed to execute \\'%s\\' w/ \\n %s'%(fName, e))\n \n #=======================================================================\n # inherit from other tools\n #=======================================================================\n \"\"\"for dialogs with control files\"\"\"\n \n #my control file path\n if hasattr(self, 'lineEdit_cf_fp'):\n #try and set the control file path from the session if there\n if os.path.exists(self.session.cf_fp):\n #set the control file path\n self.lineEdit_cf_fp.setText(self.session.cf_fp)\n \n #woking directory\n if hasattr(self, 'lineEdit_wdir'):\n \n #from session control file\n if os.path.exists(self.session.cf_fp):\n newdir = os.path.join(os.path.dirname(self.session.cf_fp))\n assert os.path.exists(newdir), 'this should exist...%s'%newdir\n self.lineEdit_wdir.setText(newdir)\n \n \n #default catch for working directory\n if self.lineEdit_wdir.text() == '':\n 
newdir = os.path.join(os.getcwd(), 'CanFlood')\n                if not os.path.exists(newdir): os.makedirs(newdir)\n                self.lineEdit_wdir.setText(newdir)\n                \n            \n        #inventory vector layer\n        if isinstance(self.session.finv_vlay, QgsVectorLayer):\n            if hasattr(self, 'comboBox_JGfinv'): #should just skip the Build\n                self.comboBox_JGfinv.setLayer(self.session.finv_vlay)\n            \n            \n\n        self.first_launch=False\n        self.show()\n\n\n\n    \n    def _load_toCanvas(self,  #helper to load layers to canvas w/ some reporting\n                       layers, \n                       \n                       groupName=None, #optional group name to load to\n                       style_fn = None, #optional qml style file name to apply\n                       logger=None, \n                       ):\n        \n        #=======================================================================\n        # defaults\n        #=======================================================================\n        \"\"\"forcing layers into a group\"\"\"\n        if logger is None: logger=self.logger\n        log=logger.getChild('load_toCanvas')\n        if groupName is None: groupName = self.groupName\n        if style_fn == '': style_fn=None\n        log.debug('loading \\'%s\\': %s'%(type(layers), layers))\n        #=======================================================================\n        # precheck\n        #=======================================================================\n        if not self.loadRes: log.warning('load results to canvas control mismatch!')\n        \n        #=======================================================================\n        # groups\n        #=======================================================================\n        if not groupName is None:\n            group = self.layerTree.findGroup(groupName) #search\n            if group is None: #nothing found.. add the group\n                group = self.layerTree.addGroup(groupName)\n                log.debug('group not found.. added \\'%s\\''%groupName)\n        else:\n            group = None\n        \n        def add_layer(lay):\n            assert isinstance(lay, QgsMapLayer), 'passed bad layer\\'%s\\''%lay\n            if not group is None:\n                group.addLayer(lay)\n                self.qproj.addMapLayer(lay, False) #add to the project, but hide\n            else:\n                self.qproj.addMapLayer(lay, True) #just add to the selected group\n        \n        \n        #=======================================================================\n        # #load\n        #=======================================================================\n        if isinstance(layers, list):\n            for layer in layers:\n                add_layer(layer)\n\n            #report\n            layNames = [lay.name() for lay in layers]\n            log.info('loaded %i layers: %s'%(len(layNames), layNames))\n            \n        elif isinstance(layers, QgsMapLayer):\n            add_layer(layers)\n            log.info('loaded \\'%s\\' to project'%layers.name())\n            layers = [layers] #throw it into a list for below\n        \n        else:\n            raise Error('unrecognized layer container type: %s'%type(layers))\n        \n        #=======================================================================\n        # stylize\n        #=======================================================================\n        if not style_fn is None:\n\n            style_fp = os.path.join(self.pars_dir, 'qmls', style_fn)\n            assert os.path.exists(style_fp)\n            for layer in layers:\n                layer.loadNamedStyle(style_fp)\n                layer.triggerRepaint()\n        \n        return\n        \n        \n    #===========================================================================\n    # widget setup----------\n    #===========================================================================\n    \n    def browse_button(self, #browse to a directory\n                      lineEdit, #text bar where selected directory should be displayed\n                      prompt = 'Select Directory', #title of box\n                      qfd = QFileDialog.getExistingDirectory, #dialog to launch\n                      ):\n        \"\"\"\n        TODO: migrate to standalone function?\n        see fileSelect_button() for browsing to a file\n        \"\"\"\n        \n        \n        #get the currently 
displayed filepath\n fp_old = lineEdit.text()\n \n #change to default if nothing useful is there\n if not os.path.exists(fp_old):\n fp_old = os.getcwd()\n \n #launch the dialog and get the new fp from the user\n fp = qfd(self, prompt, fp_old)\n \n #just take the first\n if len(fp) == 2:\n fp = fp[0]\n \n #see if they picked something\n if fp == '':\n self.logger.debug('user failed to make a selection. skipping')\n return \n \n #update the bar\n lineEdit.setText(fp)\n \n self.logger.debug('user selected: %s'%fp)\n \n def fileSelect_button(self, #\n lineEdit, #text bar where selected directory should be displayed\n caption = 'Select File', #title of box\n path = None,\n filters = \"All Files (*)\",\n qfd = QFileDialog.getOpenFileName, #dialog to launch\n ):\n #=======================================================================\n # defaults\n #=======================================================================\n if path is None:\n path = os.getcwd()\n \n if not os.path.exists(path):\n path = os.getcwd()\n \n #ask the user for the path\n \"\"\"\n using the Dialog instance as the QWidge parent\n \"\"\"\n self.logger.info(filters)\n \n fp = qfd(self, caption, path, filters)\n \n #just take the first\n if len(fp) == 2:\n fp = fp[0]\n \n #see if they picked something\n if fp == '':\n self.logger.warning('user failed to make a selection. skipping')\n return \n \n #update the bar\n lineEdit.setText(fp)\n \n self.logger.info('user selected: \\n %s'%fp)\n \n def newFileSelect_button(self,\n lineEdit, #text bar where selected file should be displayed\n caption = 'Specify new file name', #title of box\n path = None,\n filters = \"All Files (*)\",\n qfd = QFileDialog.getSaveFileName, #dialog to launch\n ):\n \n #=======================================================================\n # defaults\n #=======================================================================\n if path is None:\n path = os.getcwd()\n \n if not os.path.exists(path):\n path = os.getcwd()\n \n #ask the user for the path\n \"\"\"\n using the Dialog instance as the QWidge parent\n \"\"\"\n \n fp = qfd(self, caption, path, filters)\n \n #just take the first\n if len(fp) == 2:\n fp = fp[0]\n \n #see if they picked something\n if fp == '':\n self.logger.warning('user failed to make a selection. 
skipping')\n            return \n        \n        #update the bar\n        lineEdit.setText(fp)\n        \n        self.logger.info('user selected: \\n    %s'%fp)\n        \n        \n    def mfcb_connect(self, #helper to update a field combo box\n                     mfcb, #mFieldComboBox\n                     layer, #layer to set in the combo box\n                     fn_str = None, #optional field name for auto setting\n                     fn_no_str = None, #optional field name to EXCLUDE from auto setting\n                     ):\n        \"\"\"\n        TODO: migrate to bind function\n        \"\"\"\n        log = self.logger.getChild('mfcb_connect')\n        mfcb.clear()\n        if isinstance(layer, QgsVectorLayer):\n            try:\n                mfcb.setLayer(layer)\n                \n                #try and match\n                for field in layer.fields():\n                    if not fn_no_str is None:\n                        if field.name()==fn_no_str: continue #keep looking\n                    \n                    if fn_str in field.name():\n                        break\n                \n                mfcb.setField(field.name())\n                \n            except Exception as e:\n                log.warning('failed to set current layer w/ \\n    %s'%e)\n        else:\n            log.debug('passed layer w/ unrecognized type: %s'%(type(layer)))\n        \n        return \n    \n\n\n    \n\n    \n    def setup_comboBox(self, #helper for setting up a combo box with a default selection\n                       comboBox,\n                       selection_l, #list of values to set as selectable options\n                       default = None, #default selection string to set\n                       \n                       ):\n        \"\"\"\n        TODO: change this to bind to the combo box\n        \"\"\"\n        \n        assert isinstance(selection_l, list)\n        \n\n        \n        comboBox.clear()\n        #set the selection\n        comboBox.addItems(selection_l)\n        \n        #set the default\n        if not default is None:\n            index = comboBox.findText(default, Qt.MatchFixedString)\n            if index >= 0:\n                comboBox.setCurrentIndex(index)\n        \n    def _connect_wdir(self, #connect the working directory buttons\n                      browseButton, openButton, lineEdit,\n                      default_wdir = None\n                      ):\n        #=======================================================================\n        # connect buttons\n        #=======================================================================\n        #Working Directory browse \n        browseButton.clicked.connect(\n                lambda: self.browse_button(lineEdit, \n                                           prompt='Select Working Directory',\n                                           qfd = QFileDialog.getExistingDirectory)\n                )\n\n        #WD Open\n        openButton.clicked.connect(\n                lambda: force_open_dir(lineEdit.text()))\n        \n        #=======================================================================\n        # set default\n        #=======================================================================\n        if not default_wdir is None:\n            lineEdit.setText(default_wdir)\n            \n            if not os.path.exists(default_wdir): os.makedirs(default_wdir)\n        \n    #===========================================================================\n    # run function helpers------\n    #===========================================================================\n    def get_cf_fp(self):\n        \"\"\"\n        TODO: migrate all of these to _set_setup\n        \"\"\"\n        cf_fp = self.lineEdit_cf_fp.text()\n        \n        if cf_fp is None or cf_fp == '':\n            raise Error('need to specify a control file path')\n        if not os.path.exists(cf_fp):\n            raise Error('need to specify a valid control file path')\n        \n        if not os.path.splitext(cf_fp)[1] == '.txt':\n            raise Error('unexpected extension on Control File')\n        \n        return cf_fp\n\n    def get_cf_par(self, #load a parameter value from a controlFile path\n                   cf_fp, #control file path\n                   sectName='results_fps',\n                   varName = 'r_ttl',\n                   varType = str,\n                   logger=None,\n                   ):\n        \"\"\"\n        wrapper for _get_from_cpar()\n        but loads the control file each time and \n        \"\"\"\n        #handle the default empty selection\n        if varName=='':return 'no selection'\n        if cf_fp == '':return 'no selection'\n        #=======================================================================\n        # defaults\n        
#=======================================================================\n if logger is None: logger=self.logger\n log=logger.getChild('get_cf_par')\n \n #=======================================================================\n # load the control file\n #=======================================================================\n\n assert os.path.exists(cf_fp), 'provided parameter file path does not exist \\n %s'%cf_fp\n\n pars = configparser.ConfigParser(inline_comment_prefixes='#')\n log.debug('reading parameters from \\n %s'%pars.read(cf_fp))\n \n #=======================================================================\n # get the value\n #=======================================================================\n \"\"\"\n see _get_from_cpar() for fancy typesetting\n seems like we should only be pulling strings here...\n \"\"\"\n return varType(pars[sectName][varName])\n \n def _set_setup(self, set_cf_fp=True,): #attach parameters from setup tab\n \n inherit_fieldNames = ['logger', 'out_dir','tag', 'overwrite', 'absolute_fp', 'feedback']\n \n #secssion controls\n self.tag = self.linEdit_ScenTag.text()\n self.out_dir = self.lineEdit_wdir.text()\n \n assert not self.out_dir == '', 'must specify a working directory!'\n if not os.path.exists(self.out_dir): os.makedirs(self.out_dir)\n \n if set_cf_fp:\n\n #pull from the line\n self.cf_fp = self.lineEdit_cf_fp.text()\n assert not self.cf_fp=='', 'must specify a control file!'\n assert os.path.exists(self.cf_fp), 'got invalid controlFile path: %s'%self.cf_fp\n \n inherit_fieldNames.append('cf_fp')\n \n self.session.cf_fp = self.cf_fp #set for other dialogs\n\n \n #file behavior\n self.overwrite=self.checkBox_SSoverwrite.isChecked()\n self.absolute_fp = self.radioButton_SS_fpAbs.isChecked()\n \n #layer loading\n self.groupName = 'CanFlood.%s'%self.tag\n self.inherit_fieldNames = inherit_fieldNames\n \n #plot window\n if hasattr(self, 'radioButton_s_pltW'):\n self.plt_window = self.radioButton_s_pltW.isChecked()\n \n #qgis handles\n self.crs = self.qproj.crs()\n self.crsid = self.crs.authid()\n \n self._upd_qd() #update the q handles\n \n \n def _check_finv(self, logger=None): #check the finv and some paramter logic\n \"\"\"\n see also Model.check_finv() for data level checks\n \"\"\"\n if logger is None: logger=self.logger\n log=logger.getChild('_check_finv')\n #=======================================================================\n # selection checks\n #=======================================================================\n assert not self.cid is None, 'must specify a valid cid'\n assert isinstance(self.finv_vlay, QgsVectorLayer), 'must select a VectorLayer for the finv'\n \n \n #=======================================================================\n # data checks\n #=======================================================================\n #CRS\n assert self.finv_vlay.crs()==self.qproj.crs(), 'finv CRS (%s) does not match projects (%s)'%(\n self.finv_vlay.crs(), self.qproj.crs())\n \n \n \n #cid in the fields\n fields_d = {f.name():f for f in self.finv_vlay.fields()}\n assert self.cid in fields_d, 'specified cid not found on finv'\n \n #field type\n assert 'int' in fields_d[self.cid].typeName().lower(), \\\n 'cid field \\'%s\\' must be integer type not \\'%s\\''%(\n self.cid, fields_d[self.cid].typeName())\n \n #unique values\n cid_ser = hlpr.Q.vlay_get_fdata(self.finv_vlay, fieldn=self.cid, fmt='df', logger=log)\n boolidx = cid_ser.duplicated(keep=False)\n if boolidx.any():\n log.debug('duplicated values \\n%s'%cid_ser[boolidx])\n \n raise 
Error('passed finv cid=\\'%s\\' values contain %i duplicates... see logger'%(\n self.cid, boolidx.sum()))\n \n\n\n \n \n \n \n #===========================================================================\n # OUTPUTS------------\n #===========================================================================\n def output_fig(self, fig,\n \n plt_window=None, #whether to launch the matplotlib window\n \n #file controls\n out_dir = None, \n overwrite=None,\n fname = None, #filename\n \n #figure write controls\n fmt='svg', \n transparent=True, \n dpi = 150,\n logger=None,\n ):\n #======================================================================\n # defaults\n #======================================================================\n if out_dir is None: out_dir = self.out_dir\n if overwrite is None: overwrite = self.overwrite\n if plt_window is None: plt_window=self.plt_window\n if logger is None: logger=self.logger\n log = logger.getChild('output_fig')\n \n #=======================================================================\n # precheck\n #=======================================================================\n \"\"\"avoiding importing matplotlib here\"\"\"\n #assert isinstance(fig, self.matplotlib.figure.Figure)\n log.debug('plt_window=%s on %s'%(plt_window, fig))\n #======================================================================\n # save file\n #======================================================================\n if not plt_window:\n #file setup\n if fname is None:\n try:\n fname = fig._suptitle.get_text()\n except:\n fname = self.name\n \n out_fp = os.path.join(out_dir, '%s.%s'%(fname, fmt))\n \n if os.path.exists(out_fp): assert overwrite\n \n \n #write the file\n try: \n fig.savefig(out_fp, dpi = dpi, format = fmt, transparent=transparent)\n log.info('saved figure to file: %s'%out_fp)\n except Exception as e:\n raise Error('failed to write figure to file w/ \\n %s'%e)\n \n return out_fp\n \n #=======================================================================\n # launch window\n #=======================================================================\n else:\n \"\"\"not working\"\"\"\n app = PltWindow(fig, out_dir=out_dir)\n app.show()\n log.info('launched matplotlib window on %s'%fig._suptitle.get_text())\n app.activateWindow()\n app.raise_()\n \n \n \n \n\nclass plugLogger(object): #workaround for qgis logging pythonic\n \"\"\"\n plugin logging\n \n\n 0.4.1\n log messages sent to 2 places based on level\n \n TODO: allow directly calling (e.g., logger('msg')\n \"\"\"\n log_tabnm = 'CanFlood' # qgis logging panel tab name\n \n log_nm = 'cf' #logger name\n \n def __init__(self, \n parent,\n statusQlab = None, #Qlabel widget to duplicate push messages\n log_nm = None,\n ):\n \"\"\"called by session, then again by each getChild\"\"\"\n #attach\n self.parent = parent\n \n #nest the name\n \"\"\"theres probably a more elegant way to do this...\"\"\"\n if log_nm is None: #normal calls\n self.log_nm = '%s.%s'%(self.log_nm, self.parent.__class__.__name__)\n else: #getChild calls\n self.log_nm = log_nm\n \n \n \n self.iface = parent.iface\n \n self.statusQlab = statusQlab\n \n \"\"\"dont want to call this during getChild\n self.debug('logger initilized for %s at %s'%(parent.__class__.__name__, datetime.datetime.now()))\"\"\"\n \n def getChild(self, new_childnm):\n \n if hasattr(self.parent, 'logger'):\n log_nm = '%s.%s'%(self.parent.logger.log_nm, new_childnm)\n else:\n log_nm = new_childnm\n \n #build a new logger\n child_log = plugLogger(self.parent, \n 
statusQlab=self.statusQlab,\n log_nm=log_nm)\n \n\n \n return child_log\n \n def setLevel(self,*args):\n \"\"\"\n todo: have this behave more like a real python logger\n \"\"\"\n pass \n \n def info(self, msg):\n self._loghlp(msg, Qgis.Info, push=False, status=True)\n\n\n def debug(self, msg):\n self._loghlp(msg, -1, push=False, status=False)\n \"\"\"\n msg = '%s: %s'%(self.log_nm, msg_raw)\n QgsLogger.debug(msg)\n \"\"\"\n \n def warning(self, msg):\n self._loghlp(msg, Qgis.Warning, push=False)\n\n def push(self, msg):\n self._loghlp(msg, Qgis.Info, push=True)\n\n def error(self, msg):\n \"\"\"similar behavior to raising a QError.. but without throwing the execption\"\"\"\n self._loghlp(msg, Qgis.Critical, push=True)\n \n def _loghlp(self, #helper function for generalized logging\n msg_raw, qlevel, \n push=False, #treat as a push message on Qgis' bar\n status=False, #whether to send to the status widget\n ):\n \"\"\"\n QgsMessageLog writes to the message panel\n optionally, users can enable file logging\n this file logger \n \"\"\"\n\n #=======================================================================\n # send message based on qlevel\n #=======================================================================\n msgDebug = '%s %s: %s'%(datetime.datetime.now().strftime('%d-%H.%M.%S'), self.log_nm, msg_raw)\n if qlevel < 0: #file logger only\n \n QgsLogger.debug('D_%s'%msgDebug)\n push, status = False, False #should never trip\n else:#console logger\n msg = '%s: %s'%(self.log_nm, msg_raw)\n QgsMessageLog.logMessage(msg, self.log_tabnm, level=qlevel)\n QgsLogger.debug('%i_%s'%(qlevel, msgDebug)) #also send to file\n \n #Qgis bar\n if push:\n try:\n self.iface.messageBar().pushMessage(self.log_tabnm, msg_raw, level=qlevel)\n except:\n QgsLogger.debug('failed to push to interface') #used for standalone tests\n \n #Optional widget\n if status or push:\n if not self.statusQlab is None:\n self.statusQlab.setText(msg_raw)\n\n\n \nclass pandasModel(QAbstractTableModel):\n \"\"\"from here:\n https://learndataanalysis.org/display-pandas-dataframe-with-pyqt5-qtableview-widget/\n \n this is handy for displaying with a QTableView\n NOTE: QTableView wont display the df.index\n \"\"\"\n def __init__(self, data):\n assert isinstance(data, pd.DataFrame)\n QAbstractTableModel.__init__(self)\n self._data = data\n\n def rowCount(self, parent=None):\n return self._data.shape[0]\n\n def columnCount(self, parnet=None):\n return self._data.shape[1]\n\n def data(self, index, role=Qt.DisplayRole):\n if index.isValid():\n if role == Qt.DisplayRole:\n return str(self._data.iloc[index.row(), index.column()])\n if role == Qt.ToolTipRole:\n return str(self._data.iloc[index.row(), index.column()])\n return None\n\n def headerData(self, col, orientation, role):\n if orientation == Qt.Horizontal and role == Qt.DisplayRole:\n \"\"\"expanded to handle empty dataframes\"\"\"\n try:\n return self._data.columns[col]\n except:\n return None\n return None\n \n#===============================================================================\n# widget binds-------------\n#===============================================================================\nclass ListModel(QStandardItemModel): #wrapper for list functions with check boxes\n \n def add_checkable_data(self, data_l):\n \n for item in data_l:\n item.setCheckable(True)\n item.setCheckState(Qt.Unchecked)\n self.appendRow(item)\n \n def get_items(self):\n return [self.item(i) for i in range(self.rowCount())]\n def get_checked(self, state=Qt.Checked): #retrieve all items taht 
are checked\n return [i for i in self.get_items() if i.checkState()==state]\n\n def set_checked_byVal(self, val_l): #assign check state to items based on those matching the values\n for item in self.get_items():\n if item.text() in val_l:\n item.setCheckState(Qt.Checked)\n else:\n item.setCheckState(Qt.Unchecked)\n \n def set_checked_all(self, state=Qt.Unchecked):\n for item in self.get_items():\n item.setCheckState(state)\n\n\n\n#===============================================================================\n# WIDGET CUSTOM BINDINGS------------\n#===============================================================================\ndef bind_layersListWidget(widget, #instanced widget\n log,\n layerType=None, #optional layertype to enforce\n iface=None,\n \n ):\n \"\"\"\n because Qgis passes instanced widgets, need to bind any new methods programatically\n \"\"\"\n #assert not iface is None\n \n widget.iface = iface\n widget.layerType = layerType\n widget.setModel(ListModel())\n \n #===========================================================================\n # populating and setting selection\n #===========================================================================\n def populate_layers(self, layers=None):\n if layers is None:\n #get all from the project\n layers = [layer for layer in QgsProject.instance().mapLayers().values()]\n \n #apply filters\n if not self.layerType is None:\n layers = self._apply_filter(layers)\n \n \n assert isinstance(layers, list), 'bad type on layeres: %s'%type(layers)\n model = self.model()\n \n model.clear()\n \n model.add_checkable_data([QStandardItem(l.name()) for l in layers])\n\n \n def _apply_filter(self, layers):\n return [rl for rl in layers if rl.type()==self.layerType]\n \n def select_visible(self):\n #print('selecint only visible layers')\n lays_l = self.iface.mapCanvas().layers()\n self.model().set_checked_byVal([l.name() for l in lays_l])\n \n def select_canvas(self):\n \n lays_l = self.iface.layerTreeView().selectedLayers()\n #log.info('setting selection to %i layers from layerTreeView'%len(lays_l))\n self.model().set_checked_byVal([l.name() for l in lays_l])\n \n\n def clear_checks(self):\n self.model().set_checked_all()\n \n \n def check_all(self):\n self.model().set_checked_all(state=Qt.Checked)\n \n def check_byName(self, layName_l):\n self.model().set_checked_byVal(layName_l)\n \n #===========================================================================\n # retriving selection\n #===========================================================================\n def get_selected_layers(self):\n qproj = QgsProject.instance()\n\n items = self.model().get_checked() #names of layers checked by user\n nms_l = [item.text() for item in items]\n \n assert len(nms_l)>0, 'no selection!'\n \n \n #retrieve layers from canvas\n lays_d = {nm:qproj.mapLayersByName(nm) for nm in nms_l} \n \n \n \n \n #check we only got one hit\n d = dict()\n for k,hits_all in lays_d.items():\n \n \"\"\"when a raster and vector layer have the same name\"\"\"\n hits = self._apply_filter(hits_all) #remove any not matching the type\n \n \n assert not len(hits)>1, 'matched multiple layers for \\'%s\\'... 
layers need unique names'%k\n assert not len(hits)==0, 'failed to match any layers with \\'%s\\''%k\n \n lay = hits[0]\n assert isinstance(lay, QgsMapLayer), 'bad type on %s: %s'%(k, type(lay))\n \n d[k] = lay\n \n #drop to singular elements\n \n return d\n \n \n #===========================================================================\n # bind them\n #===========================================================================\n for fName in ['populate_layers', '_apply_filter', 'select_visible', 'select_canvas', \n 'get_selected_layers', 'clear_checks','check_all', 'check_byName']:\n setattr(widget, fName, types.MethodType(eval(fName), widget)) \n \ndef bind_MapLayerComboBox(widget, #add some bindings to layer combo boxes\n iface=None, layerType=None): \n \n widget.iface=iface\n #default selection\n if not layerType is None:\n widget.setFilters(layerType)\n widget.setAllowEmptyLayer(True)\n widget.setCurrentIndex(-1) #set selection to none\n \n #===========================================================================\n # define new methods\n #===========================================================================\n def attempt_selection(self, layName):\n layer = get_layerbyName(layName)\n \n if not layer is None:\n self.setLayer(layer)\n \n #===========================================================================\n # bind functions\n #===========================================================================\n for fName in ['attempt_selection']:\n setattr(widget, fName, types.MethodType(eval(fName), widget)) \n \n \ndef bind_link_boxes(widget, #wrapper for widget containing comboboxes linking layers (1:1)\n types_d, #column type parameterse {comboBox.name string: comboBox layer type}\n childWidgetType=QComboBox, #lowest container with layer selection\n iface=None):\n \n widget.iface=iface\n #===========================================================================\n # #collect all the widgets and set the filters\n #===========================================================================\n d = dict()\n for gBox in widget.findChildren(QGroupBox):\n d[gBox.objectName()] = dict() #start page\n allChildren = gBox.findChildren(childWidgetType)\n \n #get all those matching the the name strings\n for name_str, layType in types_d.items():\n \n #find children matching the name\n childMatch = [c for c in allChildren if name_str in c.objectName()]\n \n assert len(childMatch)==1, '%s.%s got multiple children matching %s'%(\n widget.objectName(), gBox.objectName(), name_str)\n \n #collect\n d[gBox.objectName()][name_str] = childMatch[0]\n \n #apply the filter\n childMatch[0].setFilters(layType)\n childMatch[0].setAllowEmptyLayer(True)\n childMatch[0].setCurrentIndex(-1)\n #wrap gBox\n #wrap widget\n widget.children_links_d = d #{groupBoxName: {name:ComboBox}}\n #set all the filters\n \n def get_linked_layers(self, #get all the layers selected in the combo boxes\n keyByFirst=False, #whether to re-key the results by the first layer's name\n ):\n \n rLib = dict()\n for gName, gd in self.children_links_d.items():\n #get from each widget\n rLib[gName] = {nstr:w.currentLayer() for nstr,w in gd.items() if not w.currentLayer() is None}\n \n #clear empties\n rLib = {k:v for k,v in rLib.items() if len(v)>0}\n \n #=======================================================================\n # fancy re-key by the first layers name (then drop that layer)\n #=======================================================================\n if keyByFirst:\n d = dict()\n\n for k, sub_d in rLib.items():\n \n first 
= True\n for nstr, layer in sub_d.items():\n if first:\n newKey =layer.name()\n first =False\n else:\n d[newKey] = layer\n break #just taking the first\n rLib = d #reset the result\n \n return rLib \n \n def clear_all(self): #clear all the combo boxes\n for child in widget.findChildren(childWidgetType):\n child.setCurrentIndex(-1)\n \n def fill_down(self, #take the first entry in the combo box column matching the name, and propagate\n name_str,\n name_str2= None, #optional paired name_str to stop filling when blank\n ):\n \n first = True\n for gName, gd in self.children_links_d.items():\n assert name_str in gd\n \n #get the selection to propagate\n if first:\n layer1 = gd[name_str].currentLayer()\n first = False\n else:\n if not name_str2 is None:\n if gd[name_str2].currentLayer() is None:\n break #stop the filling here\n gd[name_str].setLayer(layer1)\n \n def set_selections(self, #populate a column with layers\n name_str,\n layers, #list of layers to populate combo boxes with\n ):\n \n for indx, (gName, gd) in enumerate(self.children_links_d.items()):\n assert name_str in gd\n if len(layers)<indx+1: break #stop here\n \n \n gd[name_str].setLayer(layers[indx])\n\n \n\n #===========================================================================\n # bind functions\n #===========================================================================\n for fName in ['get_linked_layers', 'clear_all', 'fill_down', 'set_selections']:\n setattr(widget, fName, types.MethodType(eval(fName), widget)) \n \ndef bind_fieldSelector( #setup a groupbox collection for field selection\n groupBox, # groupbox containing the collection\n layerWidget, #widget w/ layer\n logger,\n default_selection = ['xid'], #default field to select\n ):\n \n groupBox.logger=logger\n \n #collect children widgets\n \n \n \n \n \n #define funcs\n def launch_selector(self):\n pass\n \n def set_selection(self, select_fields):\n pass\n \n def get_selection(self):\n pass\n \n def clear(self):\n pass\n \n\n \n \n #===========================================================================\n # bind functions\n #===========================================================================\n for fName in ['launch_selector', 'get_selection', 'clear']:\n setattr(groupBox, fName, types.MethodType(eval(fName), groupBox)) \n \n \n #connect to layer\n layerWidget.layerChanged.connect(groupBox.clear())\n \n #set the default\n groupBox.set_selection(default_selection)\n \n\ndef bind_TableWidget( #add some custom bindings to a TableWidget\n widget, #instanced widget\n log,\n \n ):\n \n \n def get_indexer(self, #flexible index retrieval\n indexer,\n axis=0):\n \n if isinstance(indexer, str):\n #get headers\n headers_d = self.get_headers(axis=axis)\n return headers_d[indexer]\n elif isinstance(indexer, int):\n return indexer\n else:\n raise Error('bad type')\n \n \n def get_headers( #retrieve the header labeels\n self,\n axis=0,\n ):\n \n \n if axis ==0: #rows\n return {self.verticalHeaderItem(i).text():i for i in range(0,self.rowCount(),1)}\n elif axis==1: #columns\n return {self.horizontalHeaderItem(i).text():i for i in range(0,self.columnCount(),1)}\n else:\n raise Error('dome')\n \n def get_value(self,#retrieve a value by lable\n colName,\n rowName,\n ):\n \n #get the indexers\n i = self.get_headers(axis=0)[rowName]\n j = self.get_headers(axis=1)[colName]\n \n return self.item(i,j).text()\n \n def get_values( #retrieve values by label or index\n self,\n indexer, #label (str) or index (int)\n axis=0, #axis to retrieve on\n ):\n \n #get the items\n 
items_d = self.get_items(indexer, axis=axis)\n \n #read\n raw_d = {k:item.text() for k, item in items_d.items()}\n \n #=======================================================================\n # handle nulls\n #=======================================================================\n d1 = dict()\n for k,v in raw_d.items():\n if (v =='') or (v is None):\n d1[k]=np.nan\n else:\n d1[k]=v\n \n return d1\n \n def get_items( #retrieve values by label or index\n self,\n indexer, #label (str) or index (int)\n axis=0, #axis to retrieve on\n ):\n \n \n #=======================================================================\n # retrieve indexer\n #=======================================================================\n index = self.get_indexer(indexer, axis=axis)\n \n #=======================================================================\n # get rows\n #=======================================================================\n \n if axis == 0:\n raw_d= {self.horizontalHeaderItem(i).text():self.item(index, i) for i in range(0, self.columnCount())}\n elif axis ==1:\n raw_d= {self.verticalHeaderItem(i).text():self.item(i, index) for i in range(0, self.rowCount())}\n else:\n raise Error('dome')\n \n\n return raw_d\n \n \n def get_df(#retreive the full table df\n self,\n ):\n \n \n #populate dict\n d = dict()\n for j in range(0, self.columnCount()):\n d[j] = dict()\n for i in range(0, self.rowCount()):\n d[j][i] = self.item(i,j).text()\n \n #build the dataframe\n df = pd.DataFrame.from_dict(d)\n df.columns = self.get_headers(axis=1)\n df.index = self.get_headers(axis=0)\n \n #handle types\n bx = df==''\n if bx.any().any():\n df = df.where(~bx, other=np.nan)\n \n \"\"\"\n view(df)\n \"\"\"\n \n \n return df.infer_objects()\n \n def save_df(self,\n caption = 'Specify new file name', #title of box\n path = None,\n filters = \"Data Files (*.csv)\",\n ):\n \n #path default\n if path is None:\n path = os.getcwd()\n if not os.path.exists(path):\n path = os.getcwd()\n #=======================================================================\n # get the filepath from the user\n #=======================================================================\n out_fp = QFileDialog.getSaveFileName(self, caption, path, filters)[0]\n if out_fp == '':\n log.warning('user failed to make a selection. 
skipping')\n return \n \n assert out_fp.endswith('csv')\n \n \n #=======================================================================\n # collect the results\n #=======================================================================\n df = self.get_df()\n \n \n #=======================================================================\n # write\n #=======================================================================\n df.to_csv(out_fp, index=None)\n log.push('saved %s to %s'%(str(df.shape), out_fp))\n return out_fp\n \n \n \n \n def set_values(#set values from a dict on a single row/column\n self,\n indexer,\n vals_d, #values on indexer to set {indx2:newVal}\n axis=0\n ):\n \n #get index\n index = self.get_indexer(indexer, axis=axis)\n \n #=======================================================================\n # set value\n #=======================================================================\n for j, val in vals_d.items():\n assert isinstance(j, int)\n if axis==0:\n self.setItem(index, j, QTableWidgetItem(val))\n else:\n self.setItem(j,index,QTableWidgetItem(val))\n \n \n \n def populate(self, #populate with a dataframe\n df):\n \n self.clear() #clear everything\n \n #setup dimensions\n self.setColumnCount(len(df.columns))\n self.setRowCount(len(df.index))\n \n #set lables\n self.setVerticalHeaderLabels(df.index.astype(str).values.tolist())\n self.setHorizontalHeaderLabels(df.columns.astype(str).values.tolist())\n \n #set values\n for j, (colName, col) in enumerate(df.items()):\n for i, val in enumerate(col):\n self.setItem(i,j, QTableWidgetItem(str(val)))\n \n \n \n def call_all_items( #call a method on all items in the table\n self,\n methodName,\n *args,**kwargs):\n \n #loop through each item\n d = dict()\n for j in range(0, self.columnCount()):\n d[j] = dict()\n for i in range(0, self.rowCount()):\n \n f = getattr(self.item(i,j), methodName)\n d[j][i] = f(*args, **kwargs)\n \n return d\n \n def call_row_items(self,\n methodName,\n indexer,*args,\n axis=1,\n **kwargs):\n \n #=======================================================================\n # retrieve \n #=======================================================================\n item_d = self.get_items(indexer, axis=axis)\n \n #=======================================================================\n # call\n #=======================================================================\n res_d = dict()\n for k, item in item_d.items():\n f = getattr(item, methodName)\n res_d[k] = f(*args, **kwargs)\n \n return res_d\n \n \n \n def call_all_headers(#call a method on all header items\n self,\n methodName,\n \n *args, axis=1, **kwargs):\n \n \n #=======================================================================\n # retrieve \n #=======================================================================\n if axis ==0: #rows\n head_d= {self.verticalHeaderItem(i).text():self.verticalHeaderItem(i) for i in range(0,self.rowCount(),1)}\n elif axis==1: #columns\n head_d= {self.horizontalHeaderItem(i).text():self.horizontalHeaderItem(i) for i in range(0,self.columnCount(),1)}\n else:\n raise Error('dome')\n \n #=======================================================================\n # apply\n #=======================================================================\n res_d = dict()\n for headName, headerObject in head_d.items():\n f = getattr(headerObject, methodName)\n res_d[headName] = f(*args, **kwargs)\n \n \n \n \n \n \n #===========================================================================\n # #bind\n 
#===========================================================================\n #loop through each new method, and bind as a lambda\n for fName, func in {\n 'get_indexer':lambda self,i, axis=0: get_indexer(self,i, axis=axis),\n 'get_headers':lambda self, axis=0: get_headers(self, axis=axis),\n 'get_value':lambda self, i,j:get_value(self,i,j),\n 'get_values':lambda self, indexName, axis=0: get_values(self, indexName, axis=axis),\n 'get_items':lambda self, indexName, axis=0: get_items(self, indexName, axis=axis),\n 'get_df':lambda self:get_df(self),\n 'save_df':lambda self, **kwargs:save_df(self, **kwargs),\n 'set_values':lambda self, i,d,axis=0:set_values(self,i,d,axis=axis),\n 'call_all_items':lambda self, n, *args, **kwargs: call_all_items(self, n, *args, **kwargs),\n 'call_row_items':lambda self,n,i, *args, axis=1, **kwargs: call_row_items(self, n, i, *args,axis=axis, **kwargs),\n 'call_all_headers':lambda self, n, *args, axis=1,**kwargs: call_all_headers(self, n, *args,axis=axis, **kwargs),\n 'populate':lambda self, df:populate(self, df),\n \n }.items():\n \n setattr(widget, fName, types.MethodType(func, widget))\n \n \n \n#==============================================================================\n# HELPER FUNCTIONS-----------\n#==============================================================================\ndef get_layerbyName(layName, #flexible search for layers by name\n qproj = None,\n ):\n \"\"\"\n couldnt find native support for partial name matching\n \"\"\"\n if qproj is None: qproj = QgsProject.instance()\n \n names = [layer.name() for layer in qproj.mapLayers().values()]\n \n #find the matching name\n match = None\n for name in names:\n if layName.lower() in name.lower().strip():\n match = name\n break\n \n #get this layer\n if not match is None:\n layer = qproj.mapLayersByName(match)[0]\n \n else:\n layer=None\n \n return layer\n \n \ndef qtbl_get_df( #extract data to a frame from a qtable\n table, \n ):\n \"\"\"\n for indx in range(0, table.rowCount()+1):\n print(indx)\n print(table.horizontalHeaderItem(indx).text())\n \"\"\"\n\n #get lables \n coln_l = qtlb_get_axis_l(table, axis=1)\n rown_l = qtlb_get_axis_l(table, axis=0)\n \n\n\n tmp_df = pd.DataFrame( \n columns=coln_l, # Fill columnets\n index=rown_l # Fill rows\n ) \n\n for i in range(len(rown_l)):\n for j in range(len(coln_l)):\n qval = table.item(i, j)\n \n if not qval is None:\n tmp_df.iloc[i, j] = qval.text()\n \n #assert len(tmp_df.columns)>1\n\n return tmp_df\n\n\ndef qtlb_get_axis_l(table, axis=0): #get axis lables from a qtable\n \n if axis == 1: #column names\n q_l = [table.horizontalHeaderItem(cnt) for cnt in range(0, table.columnCount())]\n elif axis == 0: #row names\n q_l = [table.verticalHeaderItem(cnt) for cnt in range(0, table.rowCount())]\n \n \"\"\"\n \n \"\"\"\n l = []\n #get data\n\n for qval in q_l:\n\n if qval is None:\n l.append('UnNamed')\n else:\n l.append(qval.text())\n \n return l\n\n\nif __name__ ==\"__main__\": \n \n \n \n print('?\"??')\n "
] |
[
[
"pandas.DataFrame",
"pandas.DataFrame.from_dict"
]
] |
Thunderbrook/Paddle
|
[
"4870c9bc99c6bd3b814485d7d4f525fe68ccd9a5"
] |
[
"python/paddle/fluid/dygraph/nn.py"
] |
[
"# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nfrom six.moves import reduce\nfrom .. import core\nfrom ..layers import utils\nfrom ..layers import nn\nfrom .. import dygraph_utils\nfrom . import layers\nfrom ..framework import Variable, in_dygraph_mode, OpProtoHolder, Parameter, _dygraph_tracer, _varbase_creator\nfrom ..param_attr import ParamAttr\nfrom ..initializer import Normal, Constant, NumpyArrayInitializer\nfrom .. import unique_name\nfrom .layer_object_helper import LayerObjectHelper\nimport numpy as np\nimport numbers\nimport logging\n\n__all__ = [\n 'Conv2D', 'Conv3D', 'Pool2D', 'Linear', 'BatchNorm', 'Embedding', 'GRUUnit',\n 'LayerNorm', 'NCE', 'PRelu', 'BilinearTensorProduct', 'Conv2DTranspose',\n 'Conv3DTranspose', 'GroupNorm', 'SpectralNorm', 'TreeConv'\n]\n\n\nclass Conv2D(layers.Layer):\n \"\"\"\n This interface is used to construct a callable object of the ``Conv2D`` class.\n For more details, refer to code examples.\n The convolution2D layer calculates the output based on the input, filter\n and strides, paddings, dilations, groups parameters. Input and\n Output are in NCHW format, where N is batch size, C is the number of\n the feature map, H is the height of the feature map, and W is the width of the feature map.\n Filter's shape is [MCHW] , where M is the number of output feature map,\n C is the number of input feature map, H is the height of the filter,\n and W is the width of the filter. If the groups is greater than 1,\n C will equal the number of input feature map divided by the groups.\n Please refer to UFLDL's `convolution\n <http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_\n for more detials.\n If bias attribution and activation type are provided, bias is added to the\n output of the convolution, and the corresponding activation function is\n applied to the final result.\n\n For each input :math:`X`, the equation is:\n\n .. math::\n\n Out = \\\\sigma (W \\\\ast X + b)\n\n Where:\n\n * :math:`X`: Input value, a ``Tensor`` with NCHW format.\n * :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n Example:\n\n - Input:\n\n Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`\n\n Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`\n\n - Output:\n\n Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`\n\n Where\n\n .. math::\n\n H_{out}&= \\\\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\\\\\\n W_{out}&= \\\\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1\n\n Parameters:\n num_channels(int): The number of channels in the input image.\n num_filters(int): The number of filter. 
It is as same as the output\n feature map.\n filter_size (int or tuple): The filter size. If filter_size is a tuple,\n it must contain two integers, (filter_size_H, filter_size_W).\n Otherwise, the filter will be a square.\n stride (int or tuple, optional): The stride size. If stride is a tuple, it must\n contain two integers, (stride_H, stride_W). Otherwise, the\n stride_H = stride_W = stride. Default: 1.\n padding (int or tuple, optional): The padding size. If padding is a tuple, it must\n contain two integers, (padding_H, padding_W). Otherwise, the\n padding_H = padding_W = padding. Default: 0.\n dilation (int or tuple, optional): The dilation size. If dilation is a tuple, it must\n contain two integers, (dilation_H, dilation_W). Otherwise, the\n dilation_H = dilation_W = dilation. Default: 1.\n groups (int, optional): The groups number of the Conv2d Layer. According to grouped\n convolution in Alex Krizhevsky's Deep CNN paper: when group=2,\n the first half of the filters is only connected to the first half\n of the input channels, while the second half of the filters is only\n connected to the second half of the input channels. Default: 1.\n param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)\n of conv2d. If it is set to None or one attribute of ParamAttr, conv2d\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with :math:`Normal(0.0, std)`,\n and the :math:`std` is :math:`(\\\\frac{2.0 }{filter\\_elem\\_num})^{0.5}`. Default: None.\n bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv2d.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv2d\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. Default: None.\n use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn\n library is installed. Default: True.\n act (str, optional): Activation type, if it is set to None, activation is not appended.\n Default: None.\n dtype (str, optional): Data type, it can be \"float32\" or \"float64\". Default: \"float32\".\n\n Attribute:\n **weight** (Parameter): the learnable weights of filter of this layer.\n\n **bias** (Parameter or None): the learnable bias of this layer.\n\n Returns:\n None\n \n Raises:\n ValueError: if ``use_cudnn`` is not a bool value.\n\n Examples:\n .. 
code-block:: python\n\n from paddle.fluid.dygraph.base import to_variable\n import paddle.fluid as fluid\n from paddle.fluid.dygraph import Conv2D\n import numpy as np\n\n data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')\n with fluid.dygraph.guard():\n conv2d = Conv2D(3, 2, 3)\n data = to_variable(data)\n conv = conv2d(data)\n\n \"\"\"\n\n def __init__(self,\n num_channels,\n num_filters,\n filter_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=None,\n param_attr=None,\n bias_attr=None,\n use_cudnn=True,\n act=None,\n dtype='float32'):\n assert param_attr is not False, \"param_attr should not be False here.\"\n super(Conv2D, self).__init__()\n self._num_channels = num_channels\n self._groups = groups\n self._stride = utils.convert_to_list(stride, 2, 'stride')\n self._padding = utils.convert_to_list(padding, 2, 'padding')\n self._dilation = utils.convert_to_list(dilation, 2, 'dilation')\n self._act = act\n if not isinstance(use_cudnn, bool):\n raise ValueError(\"use_cudnn should be True or False\")\n self._use_cudnn = use_cudnn\n self._filter_size = filter_size\n self._num_filters = num_filters\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._dtype = dtype\n\n if (self._num_channels == self._groups and\n num_filters % self._num_channels == 0 and not self._use_cudnn):\n self._l_type = 'depthwise_conv2d'\n else:\n self._l_type = 'conv2d'\n\n self._num_channels = num_channels\n if self._groups is None:\n num_filter_channels = self._num_channels\n else:\n if self._num_channels % self._groups != 0:\n raise ValueError(\"num_channels must be divisible by groups.\")\n num_filter_channels = self._num_channels // self._groups\n filter_size = utils.convert_to_list(self._filter_size, 2, 'filter_size')\n filter_shape = [self._num_filters, num_filter_channels] + filter_size\n\n def _get_default_param_initializer():\n filter_elem_num = filter_size[0] * filter_size[\n 1] * self._num_channels\n std = (2.0 / filter_elem_num)**0.5\n return Normal(0.0, std, 0)\n\n self.weight = self.create_parameter(\n attr=self._param_attr,\n shape=filter_shape,\n dtype=self._dtype,\n default_initializer=_get_default_param_initializer())\n\n self.bias = self.create_parameter(\n attr=self._bias_attr,\n shape=[self._num_filters],\n dtype=self._dtype,\n is_bias=True)\n\n def forward(self, input):\n inputs = {\n 'Input': [input],\n 'Filter': [self.weight],\n }\n attrs = {\n 'strides': self._stride,\n 'paddings': self._padding,\n 'dilations': self._dilation,\n 'groups': self._groups if self._groups else 1,\n 'use_cudnn': self._use_cudnn,\n 'use_mkldnn': False,\n }\n\n if in_dygraph_mode() and self._l_type == 'conv2d':\n outs = core.ops.conv2d(inputs, attrs)\n pre_bias = outs['Output'][0]\n\n pre_act = dygraph_utils._append_bias_in_dygraph(pre_bias, self.bias,\n 1)\n\n return dygraph_utils._append_activation_in_dygraph(pre_act,\n self._act)\n\n pre_bias = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n\n self._helper.append_op(\n type=self._l_type,\n inputs={\n 'Input': input,\n 'Filter': self.weight,\n },\n outputs={\"Output\": pre_bias},\n attrs=attrs)\n\n if self.bias is not None:\n pre_act = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type='elementwise_add',\n inputs={'X': [pre_bias],\n 'Y': [self.bias]},\n outputs={'Out': [pre_act]},\n attrs={'axis': 1})\n else:\n pre_act = pre_bias\n\n # Currently, we don't support inplace in dygraph mode\n return self._helper.append_activation(pre_act, 
act=self._act)\n\n\nclass Conv3D(layers.Layer):\n \"\"\"\n **Convlution3D Layer**\n\n The convolution3D layer calculates the output based on the input, filter\n and strides, paddings, dilations, groups parameters. Input(Input) and\n Output(Output) are multidimensional tensors with a shape of \n :math:`[N, C, D, H, W]` . Where N is batch size, C is the number of\n channels, D is the depth of the feature, H is the height of the feature,\n and W is the width of the feature. Convlution3D is similar with Convlution2D\n but adds one dimension(depth). If bias attribution and activation type are\n provided, bias is added to the output of the convolution, and the\n corresponding activation function is applied to the final result.\n\n For each input :math:`X`, the equation is:\n\n .. math::\n\n Out = \\sigma (W \\\\ast X + b)\n\n In the above equation:\n\n * :math:`X`: Input value, a tensor with NCDHW or NDHWC format.\n * :math:`W`: Filter value, a tensor with MCDHW format.\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n Example:\n\n - Input:\n\n Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`\n\n Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`\n\n - Output:\n Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`\n\n Where\n\n .. math::\n\n D_{out}&= \\\\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\\\\\\n H_{out}&= \\\\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\\\\\\n W_{out}&= \\\\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1\n\n Parameters:\n num_channels(int): The number of channels in the input image.\n num_filters(int): The number of filter. It is as same as the output image channel.\n filter_size (int|tuple, optional): The filter size. If filter_size is a tuple,\n it must contain three integers, (filter_size_D, filter_size_H, filter_size_W).\n Otherwise, the filter will be a square, filter_size_depth = filter_size_height\n = filter_size_width = filter_size.\n stride (int|tuple, optional): The stride size. If stride is a tuple, it must\n contain three integers, (stride_D, stride_H, stride_W). Otherwise, the\n stride_D = stride_H = stride_W = stride. The default value is 1.\n padding (int|tuple, optional): The padding size. If padding is a tuple, it must\n contain three integers, (padding_D, padding_H, padding_W). Otherwise, the\n padding_D = padding_H = padding_W = padding. The default value is 0.\n dilation (int|tuple, optional): The dilation size. If dilation is a tuple, it must\n contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the\n dilation_D = dilation_H = dilation_W = dilation. The default value is 1.\n groups (int, optional): The groups number of the Conv3d Layer. According to grouped\n convolution in Alex Krizhevsky's Deep CNN paper: when group=2,\n the first half of the filters is only connected to the first half\n of the input channels, while the second half of the filters is only\n connected to the second half of the input channels. The default value is 1.\n param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights\n of conv3d. If it is set to None or one attribute of ParamAttr, conv3d\n will create ParamAttr as param_attr. 
If it is set to None, the parameter\n is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is\n :math:`(\\\\frac{2.0 }{filter\\_elem\\_num})^{0.5}`. The default value is None.\n bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv3d\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. The default value is None.\n use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn\n library is installed. The default value is True.\n act (str, optional): Activation type, if it is set to None, activation is not appended.\n The default value is None.\n dtype (str, optional): Data type, it can be \"float32\" or \"float64\". Default: \"float32\".\n\n Attribute:\n **weight** (Parameter): the learnable weights of filters of this layer.\n\n **bias** (Parameter): the learnable bias of this layer.\n\n Returns:\n None.\n\n Raises:\n ValueError: If the shapes of input, filter_size, stride, padding and\n groups mismatch.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n with fluid.dygraph.guard():\n data = numpy.random.random((5, 3, 12, 32, 32)).astype('float32')\n conv3d = fluid.dygraph.nn.Conv3D(\n num_channels=3, num_filters=2, filter_size=3, act=\"relu\")\n ret = conv3d(fluid.dygraph.base.to_variable(data))\n\n \"\"\"\n\n def __init__(self,\n num_channels,\n num_filters,\n filter_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=None,\n param_attr=None,\n bias_attr=None,\n use_cudnn=True,\n act=None,\n dtype='float32'):\n assert param_attr is not False, \"param_attr should not be False here.\"\n super(Conv3D, self).__init__()\n self._num_channels = num_channels\n self._groups = groups\n self._stride = utils.convert_to_list(stride, 3, 'stride')\n self._padding = utils.convert_to_list(padding, 3, 'padding')\n self._dilation = utils.convert_to_list(dilation, 3, 'dilation')\n self._act = act\n self._use_cudnn = use_cudnn\n self._filter_size = filter_size\n self._num_filters = num_filters\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._dtype = dtype\n\n if self._groups is None:\n num_filter_channels = self._num_channels\n else:\n if self._num_channels % self._groups != 0:\n raise ValueError(\"num_channels must be divisible by groups.\")\n num_filter_channels = self._num_channels // self._groups\n\n filter_size = utils.convert_to_list(self._filter_size, 3, 'filter_size')\n filter_shape = [self._num_filters, num_filter_channels] + filter_size\n\n def _get_default_param_initializer():\n filter_elem_num = filter_size[0] * filter_size[1] * filter_size[\n 2] * self._num_channels\n std = (2.0 / filter_elem_num)**0.5\n return Normal(0.0, std, 0)\n\n self.weight = self.create_parameter(\n attr=self._param_attr,\n shape=filter_shape,\n dtype=self._dtype,\n default_initializer=_get_default_param_initializer())\n\n self.bias = self.create_parameter(\n attr=self._bias_attr,\n shape=[self._num_filters],\n dtype=self._dtype,\n is_bias=True)\n\n def forward(self, input):\n pre_bias = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n\n self._helper.append_op(\n type='conv3d',\n inputs={\n 'Input': input,\n 'Filter': self.weight,\n },\n outputs={\"Output\": pre_bias},\n attrs={\n 'strides': self._stride,\n 'paddings': self._padding,\n 'dilations': self._dilation,\n 'groups': 
self._groups if self._groups else 1,\n 'use_cudnn': self._use_cudnn,\n 'use_mkldnn': False\n })\n\n if self.bias is not None:\n pre_act = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type='elementwise_add',\n inputs={'X': [pre_bias],\n 'Y': [self.bias]},\n outputs={'Out': [pre_act]},\n attrs={'axis': 1})\n else:\n pre_act = pre_bias\n\n return self._helper.append_activation(pre_act, act=self._act)\n\n\nclass Conv3DTranspose(layers.Layer):\n \"\"\"\n **Convlution3D transpose layer**\n\n The convolution3D transpose layer calculates the output based on the input,\n filter, and dilations, strides, paddings. Input(Input) and output(Output)\n are in NCDHW format. Where N is batch size, C is the number of channels,\n D is the depth of the feature, H is the height of the feature, and W\n is the width of the feature. Parameters(dilations, strides, paddings) are\n two elements. These two elements represent height and width, respectively.\n The details of convolution transpose layer, please refer to the following\n explanation and references `therein <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_.\n If bias attribution and activation type are provided, bias is added to\n the output of the convolution, and the corresponding activation function\n is applied to the final result.\n\n For each input :math:`X`, the equation is:\n\n .. math::\n\n Out = \\sigma (W \\\\ast X + b)\n\n In the above equation:\n\n * :math:`X`: Input value, a tensor with NCDHW format.\n * :math:`W`: Filter value, a tensor with MCDHW format.\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n Example:\n\n - Input:\n\n Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`\n\n Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)`\n\n - Output:\n\n Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`\n\n Where\n\n .. math::\n\n D^\\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\\\\\\\\n H^\\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\\\\\\n W^\\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 \\\\\\\\\n D_{out} &\\in [ D^\\prime_{out}, D^\\prime_{out} + strides[0] ] \\\\\\\\\n H_{out} &\\in [ H^\\prime_{out}, H^\\prime_{out} + strides[1] ] \\\\\\\\\n\n **Note**:\n\n The conv3d_transpose can be seen as the backward of the conv3d. For conv3d, \n when stride > 1, conv3d maps multiple input shape to the same output shape, \n so for conv3d_transpose, when stride > 1, input shape maps multiple output shape.\n If output_size is None, :math:`H_{out} = H^\\prime_{out}, :math:`H_{out} = \\\n H^\\prime_{out}, W_{out} = W^\\prime_{out}`; else, the :math:`D_{out}` of the output \n size must between :math:`D^\\prime_{out}` and :math:`D^\\prime_{out} + strides[0]`, \n the :math:`H_{out}` of the output size must between :math:`H^\\prime_{out}` \n and :math:`H^\\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must \n between :math:`W^\\prime_{out}` and :math:`W^\\prime_{out} + strides[2]`, \n conv3d_transpose can compute the kernel size automatically.\n\n\n Parameters:\n num_channels(int): The number of channels in the input image.\n num_filters(int): The number of the filter. 
It is as same as the output\n image channel.\n filter_size(int|tuple): The filter size. If filter_size is a tuple,\n it must contain three integers, (filter_size_D, filter_size_H, filter_size_W).\n Otherwise, the filter will be a square.\n padding(int|tuple, optional): The padding size. The padding argument effectively\n adds `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a string,\n either 'VALID' or 'SAME' supported, which is the padding algorithm. If `padding`\n is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or\n `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,\n and when `data_format` is `'NCDHW'`, `padding` can be in the form\n `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.\n when `data_format` is `'NDHWC'`, `padding` can be in the form\n `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.\n The default value is 0.\n stride(int|tuple, optional): The stride size. It means the stride in transposed convolution. \n If stride is a tuple, it must contain three integers, (stride_depth, stride_height, \n stride_width). Otherwise, stride_depth = stride_height = stride_width = stride. \n The default value is 1.\n dilation(int|tuple, optional): The dilation size. If dilation is a tuple, it must\n contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the\n dilation_D = dilation_H = dilation_W = dilation. The default value is 1.\n groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by\n grouped convolution in Alex Krizhevsky's Deep CNN paper, in which\n when group=2, the first half of the filters is only connected to the\n first half of the input channels, while the second half of the\n filters is only connected to the second half of the input channels.\n The default value is 1.\n param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights\n of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. The default value is None.\n bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv3d_transpose\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. The default value is None.\n use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn\n library is installed. The default value is True.\n act (str, optional): Activation type, if it is set to None, activation is not appended.\n The default value is None.\n name(str, optional): The default value is None. Normally there is no need for user \n to set this property. For more information, please refer to :ref:`api_guide_Name`.\n\n Attribute:\n **weight** (Parameter): the learnable weights of filters of this layer.\n\n **bias** (Parameter): the learnable bias of this layer.\n\n Returns:\n None.\n\n Raises:\n ValueError: If the shapes of input, filter_size, stride, padding and\n groups mismatch.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n with fluid.dygraph.guard():\n data = numpy.random.random((5, 3, 12, 32, 32)).astype('float32')\n conv3dTranspose = fluid.dygraph.nn.Conv3DTranspose(\n num_channels=3,\n num_filters=12,\n filter_size=12,\n use_cudnn=False)\n ret = conv3dTranspose(fluid.dygraph.base.to_variable(data))\n\n \"\"\"\n\n def __init__(self,\n num_channels,\n num_filters,\n filter_size,\n padding=0,\n stride=1,\n dilation=1,\n groups=None,\n param_attr=None,\n bias_attr=None,\n use_cudnn=True,\n act=None,\n dtype='float32'):\n super(Conv3DTranspose, self).__init__()\n if not isinstance(use_cudnn, bool):\n raise ValueError(\"use_cudnn should be True or False\")\n assert param_attr is not False, \"param_attr should not be False in conv3d_transpose.\"\n self._padding = utils.convert_to_list(padding, 3, 'padding')\n self._stride = utils.convert_to_list(stride, 3, 'stride')\n self._dilation = utils.convert_to_list(dilation, 3, 'dilation')\n self._param_attr = param_attr\n self._num_channels = num_channels\n self._filter_size = filter_size\n self._groups = 1 if groups is None else groups\n self._num_filters = num_filters\n self._use_cudnn = use_cudnn\n self._bias_attr = bias_attr\n self._act = act\n self._dtype = dtype\n\n self._filter_size = utils.convert_to_list(\n self._filter_size, 3, 'conv3d_transpose.filter_size')\n\n filter_shape = [self._num_channels, self._num_filters // self._groups\n ] + self._filter_size\n self.weight = self.create_parameter(\n dtype=self._dtype, shape=filter_shape, attr=self._param_attr)\n if self._bias_attr:\n self.bias = self.create_parameter(\n attr=self._bias_attr,\n shape=[self._num_filters],\n dtype=self._dtype,\n is_bias=True)\n\n def forward(self, input):\n pre_bias = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type=\"conv3d_transpose\",\n inputs={'Input': [input],\n 'Filter': [self.weight]},\n outputs={'Output': pre_bias},\n attrs={\n 'strides': self._stride,\n 'paddings': self._padding,\n 'dilations': self._dilation,\n 'groups': self._groups if self._groups else 1,\n 'use_cudnn': self._use_cudnn\n })\n\n if self._bias_attr:\n pre_act = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type='elementwise_add',\n inputs={'X': [pre_bias],\n 'Y': [self.bias]},\n outputs={'Out': [pre_act]},\n attrs={'axis': 1})\n else:\n pre_act = pre_bias\n\n # Currently, we don't support inplace in imperative mode\n return self._helper.append_activation(pre_act, act=self._act)\n\n\nclass Pool2D(layers.Layer):\n \"\"\"\n This interface is used to construct a callable object of the ``Pool2D`` class.\n For more details, refer to code examples.\n The pooling2d operation calculates the output based on the input, pool_type and pool_size, pool_stride,\n pool_padding parameters.Input and output are in NCHW format, where N is batch size, C is the number of feature map,\n H is the height of the feature map, and W is the width of the feature map.\n Parameters(ksize, strides, paddings) are two elements. These two elements represent height and width, respectively.\n The input(X) size and output(Out) size may be different.\n\n Example:\n\n - Input:\n\n Input shape: :math:`(N, C, H_{in}, W_{in})`\n\n - Output:\n\n Output shape: :math:`(N, C, H_{out}, W_{out})`\n\n If ``ceil_mode`` = False:\n\n .. 
math::\n\n H_{out} = \\\\frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\\\\\\\\n W_{out} = \\\\frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1\n\n If ``ceil_mode`` = True:\n\n .. math::\n\n H_{out} = \\\\frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1 \\\\\\\\\n W_{out} = \\\\frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1\n\n If ``exclusive`` = False:\n\n .. math::\n\n hstart &= i * strides[0] - paddings[0] \\\\\\\\\n hend &= hstart + ksize[0] \\\\\\\\\n wstart &= j * strides[1] - paddings[1] \\\\\\\\\n wend &= wstart + ksize[1] \\\\\\\\\n Output(i ,j) &= \\\\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]}\n\n If ``exclusive`` = True:\n\n .. math::\n\n hstart &= max(0, i * strides[0] - paddings[0])\\\\\\\\\n hend &= min(H, hstart + ksize[0]) \\\\\\\\\n wstart &= max(0, j * strides[1] - paddings[1]) \\\\\\\\\n wend & = min(W, wstart + ksize[1]) \\\\\\\\\n Output(i ,j) & = \\\\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}\n\n Parameters:\n pool_size (int or list or tuple, optional): The pool kernel size. If pool kernel size is a tuple or list,\n it must contain two integers, (pool_size_Height, pool_size_Width).\n Otherwise, the pool kernel size will be a square of an int. Default: -1.\n pool_type(str, optional) : The pooling type, can be \"max\" for max-pooling and \"avg\" for average-pooling. \n Default: max.\n pool_stride (int or list or tuple, optional): The pool stride size. If pool stride size is a tuple or list,\n it must contain two integers, (pool_stride_Height, pool_stride_Width). Otherwise,\n the pool stride size will be a square of an int. Default: 1.\n pool_padding (int or list or tuple, optional): The padding size for pooling operation. \n If ``pool_padding`` is a tuple,\n it must contain two integers, (pool_padding_on_Height, pool_padding_on_Width).\n Otherwise, the padding size for pooling operation will be a square of an int. Default: 0.\n global_pooling (bool, optional): Whether to use the global pooling. If global_pooling = true,\n kernel size and paddings will be ignored. Default: False.\n use_cudnn (bool, optional): Only used in cudnn kernel, need install cudnn. Default: True.\n ceil_mode (bool, optional): Whether to use the ceil function to calculate output height and width.\n False is the default. If it is set to False, the floor function will be used. Default: False.\n exclusive (bool, optional): Whether to exclude padding points in average pooling mode. Default: True.\n\n Returns:\n None\n\n Raises:\n ValueError: If 'pool_type' is not \"max\" nor \"avg\"\n ValueError: If 'global_pooling' is False and 'pool_size' is -1\n ValueError: If 'use_cudnn' is not a bool value.\n\n Examples:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n from paddle.fluid.dygraph.base import to_variable\n import numpy as np\n\n with fluid.dygraph.guard():\n data = numpy.random.random((3, 32, 32, 5)).astype('float32')\n pool2d = fluid.dygraph.Pool2D(pool_size=2,\n pool_type='max',\n pool_stride=1,\n global_pooling=False)\n pool2d_res = pool2d(to_variable(data))\n\n \"\"\"\n\n def __init__(self,\n pool_size=-1,\n pool_type=\"max\",\n pool_stride=1,\n pool_padding=0,\n global_pooling=False,\n use_cudnn=True,\n ceil_mode=False,\n exclusive=True):\n if pool_type not in [\"max\", \"avg\"]:\n raise ValueError(\n \"Unknown pool_type: '%s'. 
It can only be 'max' or 'avg'.\",\n str(pool_type))\n\n if global_pooling is False and pool_size == -1:\n raise ValueError(\n \"When the global_pooling is False, pool_size must be passed \"\n \"and be a valid value. Received pool_size: \" + str(pool_size))\n\n if not isinstance(use_cudnn, bool):\n raise ValueError(\"use_cudnn should be True or False\")\n\n super(Pool2D, self).__init__()\n\n self._pool_type = pool_type\n self._pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')\n self._pool_padding = utils.convert_to_list(pool_padding, 2,\n 'pool_padding')\n self._pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride')\n self._global_pooling = global_pooling\n self._use_cudnn = use_cudnn\n self._ceil_mode = ceil_mode\n self._exclusive = exclusive\n self._l_type = 'pool2d'\n\n def forward(self, input):\n attrs = {\n \"pooling_type\": self._pool_type,\n \"ksize\": self._pool_size,\n \"global_pooling\": self._global_pooling,\n \"strides\": self._pool_stride,\n \"paddings\": self._pool_padding,\n \"use_cudnn\": self._use_cudnn,\n \"ceil_mode\": self._ceil_mode,\n \"use_mkldnn\": False,\n \"exclusive\": self._exclusive,\n }\n inputs = {\"X\": [input]}\n\n if in_dygraph_mode():\n outs = core.ops.pool2d(inputs, attrs)\n return outs['Out'][0]\n\n pool_out = self._helper.create_variable_for_type_inference(self._dtype)\n\n self._helper.append_op(\n type=self._l_type,\n inputs={\"X\": input},\n outputs={\"Out\": pool_out},\n attrs=attrs)\n return pool_out\n\n\nclass Linear(layers.Layer):\n \"\"\"\n Fully-connected linear transformation layer:\n\n .. math::\n\n Out = Act({XW + b})\n\n where :math:`X` is the input Tensor, :math:`W` and :math:`b` are weight and bias respectively.\n\n Linear layer takes only one ``Tensor`` input.\n The Linear layer multiplies input tensor with weight matrix and\n produces an output Tensor of shape [N, *, `output_dim`],\n where N is batch size and `*` means any number of additional dimensions.\n If ``bias_attr`` is not None, a bias variable will be created and added to the output.\n Finally, if ``act`` is not None, it will be applied to the output as well.\n\n Parameters:\n input_dim(int): The number of input units in this layer.\n output_dim(int): The number of output units in this layer.\n param_attr(ParamAttr or list of ParamAttr, optional): The parameter attribute for learnable\n weights(Parameter) of this layer. Default: None.\n bias_attr(ParamAttr or list of ParamAttr, optional): The attribute for the bias\n of this layer. If it is set to False, no bias will be added to the output units.\n If it is set to None, the bias is initialized zero. Default: None.\n act(str, optional): Activation to be applied to the output of this layer. Default: None.\n dtype(str, optional): Dtype used for weight, it can be \"float32\" or \"float64\". Default: \"float32\".\n\n Attributes:\n **weight** (Parameter): the learnable weights of this layer.\n\n **bias** (Parameter or None): the learnable bias of this layer.\n\n Returns:\n None\n\n Examples:\n .. 
code-block:: python\n\n from paddle.fluid.dygraph.base import to_variable\n import paddle.fluid as fluid\n from paddle.fluid.dygraph import Linear\n import numpy as np\n\n data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')\n with fluid.dygraph.guard():\n linear = Linear(32, 64)\n data = to_variable(data)\n res = linear(data) # [30, 10, 64]\n \"\"\"\n\n def __init__(self,\n input_dim,\n output_dim,\n param_attr=None,\n bias_attr=None,\n act=None,\n dtype=\"float32\"):\n super(Linear, self).__init__()\n self._act = act\n self._dtype = dtype\n self.weight = self.create_parameter(\n shape=[input_dim, output_dim],\n attr=param_attr,\n dtype=dtype,\n is_bias=False)\n self.bias = self.create_parameter(\n shape=[output_dim], attr=bias_attr, dtype=dtype, is_bias=True)\n\n def forward(self, input):\n attrs = {\n \"transpose_X\": False,\n \"transpose_Y\": False,\n \"alpha\": 1,\n }\n inputs = {\"X\": [input], \"Y\": [self.weight]}\n\n if in_dygraph_mode():\n outs = core.ops.matmul(inputs, attrs)\n pre_bias = outs['Out'][0]\n\n pre_act = dygraph_utils._append_bias_in_dygraph(\n pre_bias, self.bias, axis=len(input.shape) - 1)\n\n return dygraph_utils._append_activation_in_dygraph(pre_act,\n self._act)\n\n tmp = self._helper.create_variable_for_type_inference(self._dtype)\n self._helper.append_op(\n type=\"matmul\", inputs=inputs, outputs={\"Out\": tmp}, attrs=attrs)\n if self.bias:\n pre_activation = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type='elementwise_add',\n inputs={'X': [tmp],\n 'Y': [self.bias]},\n outputs={'Out': [pre_activation]},\n attrs={'axis': len(input.shape) - 1})\n else:\n pre_activation = tmp\n return self._helper.append_activation(pre_activation, act=self._act)\n\n\nclass BatchNorm(layers.Layer):\n \"\"\"\n This interface is used to construct a callable object of the ``BatchNorm`` class.\n For more details, refer to code examples.\n It implements the function of the Batch Normalization Layer and can be used \n as a normalizer function for conv2d and fully connected operations.\n The data is normalized by the mean and variance of the channel based on the current batch data.\n Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing\n Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_\n for more details.\n\n When use_global_stats = False, the :math:`\\\\mu_{\\\\beta}` \n and :math:`\\\\sigma_{\\\\beta}^{2}` are the statistics of one mini-batch.\n Calculated as follows:\n\n .. math::\n\n \\\\mu_{\\\\beta} &\\\\gets \\\\frac{1}{m} \\\\sum_{i=1}^{m} x_i \\\\qquad &//\\\\\n \\ mini-batch\\ mean \\\\\\\\\n \\\\sigma_{\\\\beta}^{2} &\\\\gets \\\\frac{1}{m} \\\\sum_{i=1}^{m}(x_i - \\\\\n \\\\mu_{\\\\beta})^2 \\\\qquad &//\\ mini-batch\\ variance \\\\\\\\\n\n - :math:`x` : mini-batch data\n - :math:`m` : the size of the mini-batch data\n\n When use_global_stats = True, the :math:`\\\\mu_{\\\\beta}`\n and :math:`\\\\sigma_{\\\\beta}^{2}` are not the statistics of one mini-batch.\n They are global or running statistics (moving_mean and moving_variance). It usually got from the\n pre-trained model. Calculated as follows:\n\n .. math::\n moving\\_mean = moving\\_mean * momentum + \\mu_{\\beta} * (1. - momentum) \\quad &// global mean \\\\\n moving\\_variance = moving\\_variance * momentum + \\sigma_{\\beta}^{2} * (1. - momentum) \\quad &// global variance \\\\\n\n The normalization function formula is as follows:\n \n .. 
math::\n\n \\\\hat{x_i} &\\\\gets \\\\frac{x_i - \\\\mu_\\\\beta} {\\\\sqrt{\\\\\n \\\\sigma_{\\\\beta}^{2} + \\\\epsilon}} \\\\qquad &//\\ normalize \\\\\\\\\n y_i &\\\\gets \\\\gamma \\\\hat{x_i} + \\\\beta \\\\qquad &//\\ scale\\ and\\ shift\n\n - :math:`\\\\epsilon` : add a smaller value to the variance to prevent division by zero\n - :math:`\\\\gamma` : trainable proportional parameter\n - :math:`\\\\beta` : trainable deviation parameter\n\n Parameters:\n num_channels(int): Indicate the number of channels of the input ``Tensor``.\n act(str, optional): Activation to be applied to the output of batch normalizaiton. Default: None.\n is_test (bool, optional): A flag indicating whether it is in test phrase or not. Default: False.\n momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.\n epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.\n param_attr(ParamAttr, optional): The parameter attribute for Parameter `scale`\n of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. Default: None.\n bias_attr(ParamAttr, optional): The parameter attribute for the bias of batch_norm.\n If it is set to None or one attribute of ParamAttr, batch_norm\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. Default: None.\n dtype(str, optional): Indicate the data type of the input ``Tensor``,\n which can be float32 or float64. Default: float32.\n data_layout(str, optional): Specify the input data format, the data format can be \"NCHW\" or \"NHWC\". Default: NCHW.\n in_place(bool, optional): Make the input and output of batch norm reuse memory. Default: False.\n moving_mean_name(str, optional): The name of moving_mean which store the global Mean. Default: None.\n moving_variance_name(str, optional): The name of the moving_variance which store the global Variance. Default: None.\n do_model_average_for_mean_and_var(bool, optional): Whether parameter mean and variance should do model\n average when model average is enabled. Default: True.\n use_global_stats(bool, optional): Whether to use global mean and\n variance. In inference or test mode, set use_global_stats to true\n or is_test to true, and the behavior is equivalent.\n In train mode, when setting use_global_stats True, the global mean\n and variance are also used during train period. Default: False.\n trainable_statistics(bool, optional): Whether to calculate mean and var in eval mode. In eval mode, when\n setting trainable_statistics True, mean and variance will be calculated by current batch statistics.\n Default: False.\n\n Returns:\n None\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n from paddle.fluid.dygraph.base import to_variable\n import numpy as np\n\n x = np.random.random(size=(3, 10, 3, 7)).astype('float32')\n with fluid.dygraph.guard():\n x = to_variable(x)\n batch_norm = fluid.BatchNorm(10)\n hidden1 = batch_norm(x)\n \"\"\"\n\n def __init__(self,\n num_channels,\n act=None,\n is_test=False,\n momentum=0.9,\n epsilon=1e-05,\n param_attr=None,\n bias_attr=None,\n dtype='float32',\n data_layout='NCHW',\n in_place=False,\n moving_mean_name=None,\n moving_variance_name=None,\n do_model_average_for_mean_and_var=True,\n use_global_stats=False,\n trainable_statistics=False):\n super(BatchNorm, self).__init__()\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._act = act\n\n assert bias_attr is not False, \"bias_attr should not be False in batch_norm.\"\n\n if dtype == \"float16\":\n self._dtype = \"float32\"\n else:\n self._dtype = dtype\n\n param_shape = [num_channels]\n\n # create parameter\n self.weight = self.create_parameter(\n attr=self._param_attr,\n shape=param_shape,\n dtype=self._dtype,\n default_initializer=Constant(1.0))\n self.weight.stop_gradient = use_global_stats and self._param_attr.learning_rate == 0.\n\n self.bias = self.create_parameter(\n attr=self._bias_attr,\n shape=param_shape,\n dtype=self._dtype,\n is_bias=True)\n self.bias.stop_gradient = use_global_stats and self._param_attr.learning_rate == 0.\n\n self._mean = self.create_parameter(\n attr=ParamAttr(\n name=moving_mean_name,\n initializer=Constant(0.0),\n trainable=False,\n do_model_average=do_model_average_for_mean_and_var),\n shape=param_shape,\n dtype=self._dtype)\n self._mean.stop_gradient = True\n\n self._variance = self.create_parameter(\n attr=ParamAttr(\n name=moving_variance_name,\n initializer=Constant(1.0),\n trainable=False,\n do_model_average=do_model_average_for_mean_and_var),\n shape=param_shape,\n dtype=self._dtype)\n self._variance.stop_gradient = True\n\n self._in_place = in_place\n self._data_layout = data_layout\n self._momentum = momentum\n self._epsilon = epsilon\n self._is_test = is_test\n self._fuse_with_relu = False\n self._use_global_stats = use_global_stats\n self._trainable_statistics = trainable_statistics\n\n def forward(self, input):\n # create output\n # mean and mean_out share the same memory\n mean_out = self._mean\n # variance and variance out share the same memory\n\n variance_out = self._variance\n attrs = {\n \"momentum\": self._momentum,\n \"epsilon\": self._epsilon,\n \"is_test\": self._is_test,\n \"data_layout\": self._data_layout,\n \"use_mkldnn\": False,\n \"fuse_with_relu\": self._fuse_with_relu,\n \"use_global_stats\": self._use_global_stats,\n \"trainable_statistics\": self._trainable_statistics\n }\n\n inputs = {\n \"X\": [input],\n \"Scale\": [self.weight],\n \"Bias\": [self.bias],\n \"Mean\": [self._mean],\n \"Variance\": [self._variance]\n }\n\n if in_dygraph_mode():\n attrs['is_test'] = not _dygraph_tracer()._train_mode\n saved_mean = _varbase_creator(dtype=self._dtype)\n saved_variance = _varbase_creator(dtype=self._dtype)\n batch_norm_out = _varbase_creator(dtype=self._dtype)\n batch_norm_out.stop_gradient = False\n # inplace is not supported currently\n else:\n saved_mean = self._helper.create_variable_for_type_inference(\n dtype=self._dtype, stop_gradient=True)\n saved_variance = self._helper.create_variable_for_type_inference(\n dtype=self._dtype, stop_gradient=True)\n batch_norm_out = input if self._in_place else 
self._helper.create_variable_for_type_inference(\n self._dtype)\n\n outputs = {\n \"Y\": [batch_norm_out],\n \"MeanOut\": [mean_out],\n \"VarianceOut\": [variance_out],\n \"SavedMean\": [saved_mean],\n \"SavedVariance\": [saved_variance]\n }\n\n if in_dygraph_mode():\n outs = core.ops.batch_norm(inputs, attrs, outputs)\n return dygraph_utils._append_activation_in_dygraph(\n batch_norm_out, act=self._act)\n\n self._helper.append_op(\n type=\"batch_norm\", inputs=inputs, outputs=outputs, attrs=attrs)\n\n # Currently, we don't support inplace in dygraph mode\n return self._helper.append_activation(batch_norm_out, self._act)\n\n\nclass Embedding(layers.Layer):\n \"\"\"\n **Embedding Layer**\n\n This interface is used to construct a callable object of the ``Embedding`` class.\n For specific usage, refer to code examples. It implements the function of the Embedding Layer.\n This layer is used to lookup embeddings vector of ids provided by :attr:`input` .\n It automatically constructs a 2D embedding matrix based on the\n input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` .\n\n The shape of output Tensor is generated by appending an emb_size dimension to the\n last dimension of the input Tensor shape.\n\n **Note:** The id in :attr:`input` must satisfy :math:`0 =< id < size[0]` ,\n otherwise the program will throw an exception and exit.\n\n .. code-block:: text\n\n Case 1:\n\n input is a Tensor. padding_idx = -1\n input.data = [[1, 3], [2, 4], [4, 127]\n input.shape = [3, 2]\n Given size = [128, 16]\n output is a Tensor:\n out.shape = [3, 2, 16]\n out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],\n [0.345421456, 0.524563927, ..., 0.144534654]],\n\n [[0.345249859, 0.124939536, ..., 0.194353745],\n [0.945345345, 0.435394634, ..., 0.435345365]],\n \n [[0.945345345, 0.435394634, ..., 0.435345365],\n [0.0, 0.0, ..., 0.0 ]]] # padding data\n The input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127\n It will pad all-zero data when ids is 127.\n\n Parameters:\n size(tuple|list): The shape of the look up table parameter. It should have two elements which indicate the size\n of the dictionary of embeddings and the size of each embedding vector respectively.\n is_sparse(bool): The flag indicating whether to use sparse update. This parameter only\n affects the performance of the backwards gradient update. It is recommended to set \n True because sparse update is faster. But some optimizer does not support sparse update,\n such as :ref:`api_fluid_optimizer_AdadeltaOptimizer` , :ref:`api_fluid_optimizer_AdamaxOptimizer` , \n :ref:`api_fluid_optimizer_DecayedAdagradOptimizer` , :ref:`api_fluid_optimizer_FtrlOptimizer` ,\n :ref:`api_fluid_optimizer_LambOptimizer` and :ref:`api_fluid_optimizer_LarsMomentumOptimizer` .\n In these case, is_sparse must be False. Default: False.\n is_distributed(bool): Whether to store the embedding matrix in a distributed manner. Only used\n in multi-machine distributed CPU training. Default: False.\n padding_idx(int|long|None): padding_idx needs to be in the interval [-vocab_size, vocab_size). \n If :math:`padding\\_idx < 0`, the :math:`padding\\_idx` will automatically be converted\n to :math:`vocab\\_size + padding\\_idx` . It will output all-zero padding data whenever lookup\n encounters :math:`padding\\_idx` in id. And the padding data will not be updated while training.\n If set None, it makes no effect to output. Default: None.\n param_attr(ParamAttr): To specify the weight parameter property. 
Default: None, which means the\n default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . In addition,\n user-defined or pre-trained word vectors can be loaded with the :attr:`param_attr` parameter. \n The local word vector needs to be transformed into numpy format, and the shape of local word\n vector shoud be consistent with :attr:`size` . Then :ref:`api_fluid_initializer_NumpyArrayInitializer`\n is used to load custom or pre-trained word vectors. See code example 2 for details.\n dtype(np.dtype|core.VarDesc.VarType|str): It refers to the data type of output Tensor.\n It must be \"float32\" or \"float64\". Default: \"float32\".\n\n Attribute:\n **weight** (Parameter): the learnable weights of this layer.\n\n Returns:\n Variable: Embedding Tensor or LoDTensor mapped by input. The data type is the same as :attr:`dtype` .\n\n Examples:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n import paddle.fluid.dygraph.base as base\n import numpy as np\n\n # example 1\n inp_word = np.array([[2, 3, 5], [4, 2, 1]]).astype('int64')\n inp_word.shape # [2, 3]\n dict_size = 20\n with fluid.dygraph.guard():\n emb = fluid.dygraph.Embedding(\n size=[dict_size, 32],\n param_attr='emb.w',\n is_sparse=False)\n static_rlt3 = emb(base.to_variable(inp_word))\n static_rlt3.shape # [2, 3, 32]\n\n # example 2: load custom or pre-trained word vectors\n weight_data = np.random.random(size=(128, 100)) # word vectors with numpy format\n w_param_attrs = fluid.ParamAttr(\n name=\"emb_weight\",\n learning_rate=0.5,\n initializer=fluid.initializer.NumpyArrayInitializer(weight_data),\n trainable=True)\n with fluid.dygraph.guard():\n emb = fluid.dygraph.Embedding(\n size=[128, 100],\n param_attr= w_param_attrs,\n is_sparse=False)\n static_rlt3 = emb(base.to_variable(inp_word)) \n \"\"\"\n\n def __init__(self,\n size,\n is_sparse=False,\n is_distributed=False,\n padding_idx=None,\n param_attr=None,\n dtype='float32'):\n super(Embedding, self).__init__()\n self._size = size\n self._is_sparse = is_sparse\n self._is_distributed = is_distributed\n self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (\n size[0] + padding_idx)\n\n self._param_attr = param_attr\n self._dtype = dtype\n self._remote_prefetch = self._is_sparse and (not self._is_distributed)\n if self._remote_prefetch:\n assert self._is_sparse is True and self._is_distributed is False\n\n self.weight = self.create_parameter(\n attr=self._param_attr,\n shape=self._size,\n dtype=self._dtype,\n is_bias=False)\n\n def forward(self, input):\n attrs = {\n 'is_sparse': self._is_sparse,\n 'is_distributed': self._is_distributed,\n 'remote_prefetch': self._remote_prefetch,\n 'padding_idx': self._padding_idx\n }\n\n if in_dygraph_mode():\n inputs = {'Ids': [input], 'W': [self.weight]}\n outs = core.ops.lookup_table_v2(inputs, attrs)\n return outs['Out'][0]\n\n out = self._helper.create_variable_for_type_inference(self._dtype)\n self._helper.append_op(\n type='lookup_table_v2',\n inputs={'Ids': input,\n 'W': self.weight},\n outputs={'Out': out},\n attrs=attrs)\n\n return out\n\n\nclass LayerNorm(layers.Layer):\n \"\"\"\n This interface is used to construct a callable object of the ``LayerNorm`` class.\n For more details, refer to code examples.\n It implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.\n Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_\n\n The formula is as follows:\n\n .. 
math::\n\n \\mu & = \\frac{1}{H}\\sum_{i=1}^{H} x_i\n\n \\sigma & = \\sqrt{\\frac{1}{H}\\sum_{i=1}^{H}{(x_i - \\mu)^2} + \\epsilon}\n\n y & = f(\\frac{g}{\\sigma}(x - \\mu) + b)\n\n - :math:`x`: the vector representation of the summed inputs to the neurons in that layer.\n - :math:`H`: the number of hidden units in a layer\n - :math:`\\epsilon`: the small value added to the variance to prevent division by zero.\n - :math:`g`: the trainable scale parameter.\n - :math:`b`: the trainable bias parameter.\n\n Parameters:\n normalized_shape(int or list or tuple): Input shape from an expected input of\n size :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`.\n If it is a single integer, this module will normalize over the last dimension\n which is expected to be of that specific size.\n scale(bool, optional): Whether to learn the adaptive gain :math:`g` after\n normalization. Default: True.\n shift(bool, optional): Whether to learn the adaptive bias :math:`b` after\n normalization. Default: True.\n epsilon(float, optional): The small value added to the variance to prevent\n division by zero. Default: 1e-05.\n param_attr(ParamAttr, optional): The parameter attribute for the learnable\n gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is\n omitted. If :attr:`scale` is True and :attr:`param_attr` is None,\n a default :code:`ParamAttr` would be added as scale. The\n :attr:`param_attr` is initialized as 1 if it is added. Default: None.\n bias_attr(ParamAttr, optional): The parameter attribute for the learnable\n bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is\n omitted. If :attr:`shift` is True and :attr:`bias_attr` is None,\n a default :code:`ParamAttr` would be added as bias. The\n :attr:`bias_attr` is initialized as 0 if it is added. Default: None.\n act(str, optional): Activation to be applied to the output of layer normalization.\n Default: None.\n dtype (str, optional): Data type, it can be \"float32\" or \"float64\". Default: \"float32\".\n\n Returns:\n None\n\n Examples:\n\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n from paddle.fluid.dygraph.base import to_variable\n import numpy\n\n x = numpy.random.random((3, 32, 32)).astype('float32')\n with fluid.dygraph.guard():\n x = to_variable(x)\n layerNorm = fluid.LayerNorm([32, 32])\n ret = layerNorm(x)\n\n \"\"\"\n\n def __init__(self,\n normalized_shape,\n scale=True,\n shift=True,\n epsilon=1e-05,\n param_attr=None,\n bias_attr=None,\n act=None,\n dtype='float32'):\n super(LayerNorm, self).__init__()\n if isinstance(normalized_shape, numbers.Integral):\n normalized_shape = [normalized_shape]\n\n self._normalized_shape = list(normalized_shape)\n self._scale = scale\n self._shift = shift\n self._epsilon = epsilon\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._act = act\n self._dtype = dtype\n param_shape = [np.prod(self._normalized_shape)]\n if self._scale:\n self.weight = self.create_parameter(\n attr=self._param_attr,\n shape=param_shape,\n dtype=self._dtype,\n default_initializer=Constant(1.0))\n else:\n if self._param_attr:\n logging.warn(\"param_attr is only available when scale is True\")\n\n if self._shift:\n assert self._bias_attr is not False\n self.bias = self.create_parameter(\n attr=self._bias_attr,\n shape=param_shape,\n dtype=self._dtype,\n is_bias=True)\n else:\n if self._bias_attr:\n logging.warn(\"bias_attr is only available when shift is True\")\n\n def forward(self, input):\n input_shape = list(input.shape)\n input_ndim = len(input_shape)\n normalized_ndim = len(self._normalized_shape)\n self._begin_norm_axis = input_ndim - normalized_ndim\n if input_ndim < normalized_ndim or input_shape[\n self._begin_norm_axis:] != self._normalized_shape:\n str_normalized_shape = str(self._normalized_shape)\n raise ValueError(\n 'Given normalized_shape is ' + str_normalized_shape +\n ', expected input with shape [*, ' + str_normalized_shape[\n 1:] + ', but got input shape ' + str(input_shape))\n inputs = dict()\n inputs['X'] = [input]\n if self._scale:\n inputs['Scale'] = [self.weight]\n if self._shift:\n inputs['Bias'] = [self.bias]\n\n attrs = {\n \"epsilon\": self._epsilon,\n \"begin_norm_axis\": self._begin_norm_axis\n }\n\n if in_dygraph_mode():\n outs = core.ops.layer_norm(inputs, attrs)\n pre_act = outs['Y'][0]\n return dygraph_utils._append_activation_in_dygraph(\n pre_act, act=self._act)\n\n # create output\n mean_out = self._helper.create_variable_for_type_inference(\n dtype=self._dtype, stop_gradient=True)\n variance_out = self._helper.create_variable_for_type_inference(\n dtype=self._dtype, stop_gradient=True)\n layer_norm_out = self._helper.create_variable_for_type_inference(\n self._dtype)\n\n self._helper.append_op(\n type=\"layer_norm\",\n inputs=inputs,\n outputs={\n \"Y\": layer_norm_out,\n \"Mean\": mean_out,\n \"Variance\": variance_out,\n },\n attrs={\n \"epsilon\": self._epsilon,\n \"begin_norm_axis\": self._begin_norm_axis\n })\n\n return self._helper.append_activation(layer_norm_out, act=self._act)\n\n\nclass GRUUnit(layers.Layer):\n \"\"\"\n **GRU unit layer**\n \n It creates a callable object from GRUUnit class.\n If origin_mode is True, then the equation of a gru step is from paper\n `Learning Phrase Representations using RNN Encoder-Decoder for Statistical \n Machine Translation <https://arxiv.org/pdf/1406.1078.pdf>`_\n\n .. 
math::\n u_t & = actGate(xu_{t} + W_u h_{t-1} + b_u)\n\n r_t & = actGate(xr_{t} + W_r h_{t-1} + b_r)\n\n m_t & = actNode(xm_t + W_c dot(r_t, h_{t-1}) + b_m)\n\n h_t & = dot(u_t, h_{t-1}) + dot((1-u_t), m_t)\n\n If origin_mode is False, then the equation of a gru step is from paper\n `Empirical Evaluation of Gated Recurrent Neural Networks on Sequence\n Modeling <https://arxiv.org/pdf/1412.3555.pdf>`_\n\n .. math::\n u_t & = actGate(xu_{t} + W_u h_{t-1} + b_u)\n\n r_t & = actGate(xr_{t} + W_r h_{t-1} + b_r)\n\n m_t & = actNode(xm_t + W_c dot(r_t, h_{t-1}) + b_m)\n\n h_t & = dot((1-u_t), h_{t-1}) + dot(u_t, m_t)\n\n\n The inputs of gru unit includes :math:`z_t`, :math:`h_{t-1}`. In terms\n of the equation above, the :math:`z_t` is split into 3 parts -\n :math:`xu_t`, :math:`xr_t` and :math:`xm_t`. This means that in order to\n implement a full GRU unit operator for an input, a fully\n connected layer has to be applied, such that :math:`z_t = W_{fc}x_t`.\n\n The terms :math:`u_t` and :math:`r_t` represent the update and reset gates\n of the GRU cell. Unlike LSTM, GRU has one lesser gate. However, there is\n an intermediate candidate hidden output, which is denoted by :math:`m_t`.\n This layer has three outputs :math:`h_t`, :math:`dot(r_t, h_{t-1})`\n and concatenation of :math:`u_t`, :math:`r_t` and :math:`m_t`.\n\n Parameters:\n size (int): The input dimension value.\n param_attr(ParamAttr, optional): The parameter attribute for the learnable\n hidden-hidden weight matrix. \n \n **Note**:\n \n 1. The shape of the weight matrix is :math:`[T, 3*D]`, where D is the hidden size.\n 2. All elements in the weight matrix can be divided into two parts. The first \n part are weights of the update gate and reset gate with shape :math:`[D, 2*D]`, \n and the second part are weights for candidate hidden state with shape :math:`[D, D]`.\n\n\n If it is set to None or one attribute of ParamAttr, gru_unit will\n create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. The default \n value is None.\n bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias\n of GRU.Note that the bias with :math:`[1, 3*D]` concatenates\n the bias in the update gate, reset gate and candidate calculations.\n If it is set to False, no bias will be applied to the update gate,\n reset gate and candidate calculations. If it is set to None or one\n attribute of ParamAttr, gru_unit will create ParamAttr as\n bias_attr. If the Initializer of the bias_attr is not set, the bias\n is initialized zero. The default value is None.\n activation (str): The activation type for cell (actNode).\n The default value is 'tanh'.\n gate_activation (str): The activation type for gates (actGate).\n The default value is 'sigmoid'.\n dtype(str): The dtype of the layers. The data type can be set as\n 'float32', 'float64'. The default value is 'float32'.\n\n Attribute:\n **weight** (Parameter): the learnable weights of this layer.\n\n **bias** (Parameter): the learnable bias of this layer.\n\n Returns:\n tuple: The hidden value, reset-hidden value and gate values. The hidden value\n is a 2-D tensor with shape :math:`[T, D]` . The reset-hidden value is a\n 2-D tensor with shape :math:`[T, D]` . The gate value is a 2-D tensor with \n shape :math:`[T, 3*D]`.\n\n Examples:\n\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import paddle.fluid.dygraph.base as base\n import numpy\n\n lod = [[2, 4, 3]]\n D = 5\n T = sum(lod[0])\n\n input = numpy.random.rand(T, 3 * D).astype('float32')\n hidden_input = numpy.random.rand(T, D).astype('float32')\n with fluid.dygraph.guard():\n x = numpy.random.random((3, 32, 32)).astype('float32')\n gru = fluid.dygraph.GRUUnit(size=D * 3)\n dy_ret = gru(\n base.to_variable(input), base.to_variable(hidden_input))\n\n \"\"\"\n\n def __init__(self,\n size,\n param_attr=None,\n bias_attr=None,\n activation='tanh',\n gate_activation='sigmoid',\n origin_mode=False,\n dtype='float32'):\n super(GRUUnit, self).__init__()\n self._bias_attr = bias_attr\n activation_dict = dict(\n identity=0,\n sigmoid=1,\n tanh=2,\n relu=3, )\n self.activation = activation_dict[activation]\n self.gate_activation = activation_dict[gate_activation]\n\n self._dtype = dtype\n size = size // 3\n # create weight\n self.weight = self.create_parameter(\n attr=param_attr, shape=[size, 3 * size], dtype=dtype)\n\n # create bias\n bias_size = [1, 3 * size]\n self._bias_size = bias_size\n self.bias = self.create_parameter(\n attr=bias_attr, shape=bias_size, dtype=dtype, is_bias=True)\n\n def forward(self, input, hidden):\n inputs = {\n 'Input': [input],\n 'HiddenPrev': [hidden],\n 'Weight': [self.weight]\n }\n if self.bias:\n inputs['Bias'] = [self.bias]\n attrs = {\n 'activation': self.activation,\n 'gate_activation': self.gate_activation,\n }\n\n if in_dygraph_mode():\n outs = core.ops.gru_unit(inputs, attrs)\n return outs['Hidden'][0], outs['ResetHiddenPrev'][0], outs['Gate'][\n 0]\n\n gate = self._helper.create_variable_for_type_inference(self._dtype)\n reset_hidden_pre = self._helper.create_variable_for_type_inference(\n self._dtype)\n updated_hidden = self._helper.create_variable_for_type_inference(\n self._dtype)\n self._helper.append_op(\n type='gru_unit',\n inputs=inputs,\n outputs={\n 'Gate': gate,\n 'ResetHiddenPrev': reset_hidden_pre,\n 'Hidden': updated_hidden,\n },\n attrs={\n 'activation': self.activation,\n 'gate_activation': self.gate_activation,\n })\n\n return updated_hidden, reset_hidden_pre, gate\n\n\nclass NCE(layers.Layer):\n \"\"\"\n This interface is used to construct a callable object of the ``NCE`` class.\n For more details, refer to code examples.\n It implements the function of the ``NCE`` loss function.\n By default this function uses a uniform distribution for sampling, and it\n compute and return the noise-contrastive estimation training loss. See\n `Noise-contrastive estimation: A new estimation principle for unnormalized statistical models <http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf>`_ .\n\n Parameters:\n num_total_classes (int): Total number of classes in all samples.\n dim (int): Dimension of input (possibly embedding dim).\n param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)\n of nce. If it is set to None or one attribute of ParamAttr, nce\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. Default: None.\n bias_attr (ParamAttr or bool, optional): The attribute for the bias of nce.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, nce\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. 
Default: None.\n num_neg_samples (int, optional): The number of negative classes. The default value is 10.\n sampler (str, optional): The sampler used to sample class from negative classes.\n It can be 'uniform', 'log_uniform' or 'custom_dist'.\n default: 'uniform'.\n custom_dist (float[], optional): A float[] with size=num_total_classes.\n It is used when sampler is set to 'custom_dist'.\n custom_dist[i] is the probability of i-th class to be sampled.\n Default: None.\n seed (int, optional): The seed used in sampler. Default: 0.\n is_sparse(bool, optional): The flag indicating whether to use sparse update. If is_sparse is True, the weight@GRAD and bias@GRAD will be changed to SelectedRows. Default: False.\n dtype (str, optional): Data type, it can be \"float32\" or \"float64\". Default: \"float32\".\n\n Attribute:\n **weight** (Parameter): the learnable weights of this layer.\n\n **bias** (Parameter or None): the learnable bias of this layer.\n \n Returns:\n None\n\n Examples:\n .. code-block:: python\n\n import numpy as np\n import paddle.fluid as fluid\n\n window_size = 5\n dict_size = 20\n label_word = int(window_size // 2) + 1\n inp_word = np.array([[1], [2], [3], [4], [5]]).astype('int64')\n nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32')\n\n with fluid.dygraph.guard():\n words = []\n for i in range(window_size):\n words.append(fluid.dygraph.base.to_variable(inp_word[i]))\n\n emb = fluid.Embedding(\n size=[dict_size, 32],\n param_attr='emb.w',\n is_sparse=False)\n\n embs3 = []\n for i in range(window_size):\n if i == label_word:\n continue\n\n emb_rlt = emb(words[i])\n embs3.append(emb_rlt)\n\n embs3 = fluid.layers.concat(input=embs3, axis=1)\n nce = fluid.NCE(\n num_total_classes=dict_size,\n dim=embs3.shape[1],\n num_neg_samples=2,\n sampler=\"custom_dist\",\n custom_dist=nid_freq_arr.tolist(),\n seed=1,\n param_attr='nce.w',\n bias_attr='nce.b')\n\n wl = fluid.layers.unsqueeze(words[label_word], axes=[0])\n nce_loss3 = nce(embs3, wl)\n\n \"\"\"\n\n def __init__(self,\n num_total_classes,\n dim,\n sample_weight=None,\n param_attr=None,\n bias_attr=None,\n num_neg_samples=None,\n sampler=\"uniform\",\n custom_dist=None,\n seed=0,\n is_sparse=False,\n dtype='float32'):\n super(NCE, self).__init__()\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._num_total_classes = num_total_classes\n self._dtype = dtype\n self._inputs = dict()\n self._inputs['SampleWeight'] = sample_weight if sample_weight is not None else []\n if sampler == \"uniform\":\n sampler = 0\n elif sampler == \"log_uniform\":\n sampler = 1\n elif sampler == \"custom_dist\":\n assert custom_dist is not None\n # assert isinstance(custom_dist, Variable)\n\n custom_dist_len = len(custom_dist)\n alias_probs_ = [0] * custom_dist_len\n alias_ = [0] * custom_dist_len\n bigs = []\n littles = []\n for i in range(custom_dist_len):\n normal_prob = custom_dist[i] * custom_dist_len\n if normal_prob - 1.0 > 0:\n bigs.append((i, normal_prob))\n elif 1.0 - normal_prob > 0:\n littles.append((i, normal_prob))\n else:\n alias_probs_[i] = normal_prob\n alias_[i] = -1\n\n while len(bigs) and len(littles):\n big = bigs.pop(0)\n little = littles.pop(0)\n\n big_idx = big[0]\n big_prob = big[1]\n\n alias_probs_[little[0]] = little[1]\n alias_[little[0]] = big_idx\n big_left = big[1] + little[1] - 1\n if big_left - 1.0 > 0:\n bigs.append((big_idx, big_left))\n elif 1.0 - big_left > 0:\n littles.append((big_idx, big_left))\n else:\n alias_probs_[big_idx] = big_left\n alias_[big_idx] = -1\n\n if 
len(bigs):\n big = bigs.pop(0)\n alias_probs_[big[0]] = 1.0\n alias_[big[0]] = -1\n if len(littles):\n little = littles.pop(0)\n alias_probs_[little[0]] = 1.0\n alias_[little[0]] = -1\n\n def _init_by_numpy_array(numpy_array):\n ret = self.create_parameter(\n attr=ParamAttr(),\n shape=numpy_array.shape,\n dtype=numpy_array.dtype,\n default_initializer=NumpyArrayInitializer(numpy_array))\n ret.stop_gradient = True\n return ret\n\n self._inputs['CustomDistProbs'] = _init_by_numpy_array(\n np.array(custom_dist).astype('float32'))\n self._inputs['CustomDistAlias'] = _init_by_numpy_array(\n np.array(alias_).astype('int32'))\n self._inputs['CustomDistAliasProbs'] = _init_by_numpy_array(\n np.array(alias_probs_).astype('float32'))\n sampler = 2\n else:\n raise Exception(\"Unsupported sampler type.\")\n\n if num_neg_samples is None:\n num_neg_samples = 10\n else:\n num_neg_samples = int(num_neg_samples)\n self._num_neg_samples = num_neg_samples\n remote_prefetch = is_sparse\n print(\n \"With sparse mode, if your models has only small parameter prefetch may cause speed down\"\n )\n self._attrs = {\n 'num_total_classes': int(num_total_classes),\n 'num_neg_samples': num_neg_samples,\n 'seed': seed,\n 'sampler': sampler,\n 'is_sparse': is_sparse,\n 'remote_prefetch': remote_prefetch\n }\n\n self.weight = self.create_parameter(\n attr=self._param_attr,\n shape=[self._num_total_classes, dim],\n is_bias=False,\n dtype=self._dtype)\n if self._bias_attr:\n self.bias = self.create_parameter(\n attr=self._bias_attr,\n shape=[self._num_total_classes, 1],\n is_bias=True,\n dtype=self._dtype)\n self._inputs['Bias'] = self.bias\n self._inputs['Weight'] = self.weight\n\n def forward(self, input, label, sample_weight=None):\n assert isinstance(input, Variable)\n assert isinstance(label, Variable)\n\n self._inputs['Input'] = input\n self._inputs['Label'] = label\n self._inputs['SampleWeight'] = sample_weight if sample_weight is not None else []\n\n cost = self._helper.create_variable_for_type_inference(\n dtype=input.dtype)\n sample_logits = self._helper.create_variable_for_type_inference(\n dtype=input.dtype)\n sample_labels = self._helper.create_variable_for_type_inference(\n dtype=label.dtype)\n\n self._helper.append_op(\n type='nce',\n inputs=self._inputs,\n outputs={\n 'Cost': cost,\n 'SampleLogits': sample_logits,\n 'SampleLabels': sample_labels\n },\n attrs=self._attrs)\n return cost / (self._num_neg_samples + 1)\n\n\nclass PRelu(layers.Layer):\n \"\"\"\n This interface is used to construct a callable object of the ``PRelu`` class.\n For more details, refer to code examples.\n It implements three activation methods of the ``PRelu`` activation function.\n\n Equation:\n\n .. math::\n y = \\max(0, x) + \\\\alpha * \\min(0, x)\n\n Parameters:\n mode (str): The mode for weight sharing. It supports all, channel\n and element. all: all elements share same weight\n channel:elements in a channel share same weight\n element:each element has a weight\n channel (int, optional): The number of channels.\n This argument is required when mode is \"channel\".\n Default: None.\n input_shape (list or tuple, optional): The shape of input.\n This argument is required when mode is \"element\".\n Default: None.\n param_attr(ParamAttr, optional): The parameter attribute for the learnable\n weight (alpha). Default: None.\n dtype (str, optional): Data type, it can be \"float32\" or \"float64\". Default: \"float32\".\n\n Attribute:\n **weight** (Parameter): the learnable weights of this layer.\n \n Returns:\n None\n\n Examples:\n\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n from paddle.fluid.dygraph.base import to_variable\n import numpy as np\n\n inp_np = np.ones([5, 200, 100, 100]).astype('float32')\n with fluid.dygraph.guard():\n inp_np = to_variable(inp_np)\n prelu0 = fluid.PRelu(\n mode='all',\n param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))\n dy_rlt0 = prelu0(inp_np)\n prelu1 = fluid.PRelu(\n mode='channel',\n channel=200,\n param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))\n dy_rlt1 = prelu1(inp_np)\n prelu2 = fluid.PRelu(\n mode='element',\n input_shape=inp_np.shape,\n param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)))\n dy_rlt2 = prelu2(inp_np)\n\n \"\"\"\n\n def __init__(self,\n mode,\n channel=None,\n input_shape=None,\n param_attr=None,\n dtype='float32'):\n # need specify name_scope since snake-cased 'PRelu' is 'p_relu'\n super(PRelu, self).__init__(name_scope='prelu')\n self._mode = mode\n self._param_attr = param_attr\n self._dtype = dtype\n if mode == 'all':\n self._alpha_shape = [1]\n elif mode == 'channel':\n assert isinstance(\n channel,\n int), \"channel argument is required when mode is 'channel'.\"\n self._alpha_shape = [1, channel, 1, 1]\n elif mode == 'element':\n assert isinstance(input_shape, (\n list, tuple\n )), \"input_shape argument is required when mode is 'element'.\"\n self._alpha_shape = [1] + list(input_shape)[1:]\n else:\n raise ValueError('mode should be one of all, channel, element.')\n self.weight = self.create_parameter(\n attr=self._param_attr,\n shape=self._alpha_shape,\n dtype='float32',\n is_bias=False,\n default_initializer=Constant(1.0))\n\n def forward(self, input):\n out = self._helper.create_variable_for_type_inference(self._dtype)\n self._helper.append_op(\n type=\"prelu\",\n inputs={\"X\": input,\n 'Alpha': self.weight},\n attrs={\"mode\": self._mode},\n outputs={\"Out\": out})\n return out\n\n\nclass BilinearTensorProduct(layers.Layer):\n \"\"\"\n **Add Bilinear Tensor Product Layer**\n\n This layer performs bilinear tensor product on two inputs.\n For example:\n\n .. math::\n out_{i} = x * W_{i} * {y^\\mathrm{T}}, i=0,1,...,size-1\n\n In this formula:\n - :math:`x`: the first input contains M elements, shape is [batch_size, M].\n - :math:`y`: the second input contains N elements, shape is [batch_size, N].\n - :math:`W_{i}`: the i-th learned weight, shape is [M, N]\n - :math:`out_{i}`: the i-th element of out, shape is [batch_size, size].\n - :math:`y^\\mathrm{T}`: the transpose of :math:`y`.\n\n Parameters:\n input1_dim (int): The dimension of each first input.\n input2_dim (int): The dimension of each second input.\n output_dim (int): The dimension of output of this layer.\n name (str, optional): The default value is None. Normally there is no need for user\n to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.\n act (str, optional): Activation to be applied to the output of this layer. The default value is None.\n param_attr (ParamAttr, optional): The parameter attribute for the learnable w, parameters/weights of \n this layer. The default value is None.\n bias_attr (ParamAttr, optional): The parameter attribute for the bias\n of this layer. If it is set to False, no bias will be added to the output units.\n If it is set to None, the bias is initialized zero. The default value is None.\n dtype (str, optional): Data type, it can be \"float32\" or \"float64\". 
Default: \"float32\".\n\n Attribute:\n **weight** (Parameter): the learnable weights of this layer.\n\n **bias** (Parameter): the learnable bias of this layer.\n\n Returns:\n Variable: A 2-D Tensor of shape [batch_size, size].\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n with fluid.dygraph.guard():\n layer1 = numpy.random.random((5, 5)).astype('float32')\n layer2 = numpy.random.random((5, 4)).astype('float32')\n bilinearTensorProduct = fluid.dygraph.nn.BilinearTensorProduct(\n input1_dim=5, input2_dim=4, output_dim=1000)\n ret = bilinearTensorProduct(fluid.dygraph.base.to_variable(layer1),\n fluid.dygraph.base.to_variable(layer2))\n \"\"\"\n\n def __init__(self,\n input1_dim,\n input2_dim,\n output_dim,\n name=None,\n act=None,\n param_attr=None,\n bias_attr=None,\n dtype='float32'):\n super(BilinearTensorProduct, self).__init__()\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._act = act\n self._name = name\n self._input1_dim = input1_dim\n self._input2_dim = input2_dim\n self._output_dim = output_dim\n self._inputs = dict()\n self._dtype = dtype\n\n param_shape = [self._output_dim, self._input1_dim, self._input2_dim]\n self.weight = self.create_parameter(\n attr=self._param_attr,\n shape=param_shape,\n dtype=self._dtype,\n is_bias=False)\n bias_size = [1, self._output_dim]\n self.bias = self.create_parameter(\n attr=self._bias_attr,\n shape=bias_size,\n dtype=self._dtype,\n is_bias=True)\n\n def forward(self, x, y):\n self._inputs = {\"X\": x, \"Y\": y, \"Weight\": self.weight}\n if self.bias:\n self._inputs[\"Bias\"] = self.bias\n if self._name is not None:\n out = self._helper.create_variable(\n name=\".\".join([self.full_name(), self._name]),\n dtype=self._dtype,\n persistable=False)\n else:\n out = self._helper.create_variable(\n dtype=self._dtype, persistable=False)\n self._helper.append_op(\n type=\"bilinear_tensor_product\",\n inputs=self._inputs,\n outputs={\"Out\": out})\n\n # add activation\n return self._helper.append_activation(out, act=self._act)\n\n\nclass Conv2DTranspose(layers.Layer):\n \"\"\"\n This interface is used to construct a callable object of the ``Conv2DTranspose`` class.\n For more details, refer to code examples.\n The convolution2D transpose layer calculates the output based on the input,\n filter, and dilations, strides, paddings. Input and output\n are in NCHW format. Where N is batch size, C is the number of feature map,\n H is the height of the feature map, and W is the width of the feature map.\n Filter's shape is [MCHW] , where M is the number of input feature map,\n C is the number of output feature map, H is the height of the filter,\n and W is the width of the filter. If the groups is greater than 1,\n C will equal the number of input feature map divided by the groups.\n If bias attribution and activation type are provided, bias is added to\n the output of the convolution, and the corresponding activation function\n is applied to the final result.\n The details of convolution transpose layer, please refer to the following explanation and references\n `conv2dtranspose <http://www.matthewzeiler.com/wp-content/uploads/2017/07/cvpr2010.pdf>`_ .\n\n For each input :math:`X`, the equation is:\n\n .. 
math::\n\n Out = \\sigma (W \\\\ast X + b)\n\n Where:\n\n * :math:`X`: Input value, a ``Tensor`` with NCHW format.\n * :math:`W`: Filter value, a ``Tensor`` with shape [MCHW] .\n * :math:`\\\\ast`: Convolution operation.\n * :math:`b`: Bias value, a 2-D ``Tensor`` with shape [M, 1].\n * :math:`\\\\sigma`: Activation function.\n * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.\n\n Example:\n\n - Input:\n\n Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`\n\n Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)`\n\n - Output:\n\n Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`\n\n Where\n\n .. math::\n\n H^\\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (H_f - 1) + 1 \\\\\\\\\n W^\\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1 \\\\\\\\\n H_{out} &\\in [ H^\\prime_{out}, H^\\prime_{out} + strides[0] ) \\\\\\\\\n W_{out} &\\in [ W^\\prime_{out}, W^\\prime_{out} + strides[1] )\n\n Parameters:\n num_channels(int): The number of channels in the input image.\n num_filters(int): The number of the filter. It is as same as the output\n feature map.\n filter_size(int or tuple): The filter size. If filter_size is a tuple,\n it must contain two integers, (filter_size_H, filter_size_W).\n Otherwise, the filter will be a square.\n output_size(int or tuple, optional): The output image size. If output size is a\n tuple, it must contain two integers, (image_H, image_W). None if use\n filter_size, padding, and stride to calculate output_size.\n if output_size and filter_size are specified at the same time, They\n should follow the formula above. Default: None.\n padding(int or tuple, optional): The padding size. If padding is a tuple, it must\n contain two integers, (padding_H, padding_W). Otherwise, the\n padding_H = padding_W = padding. Default: 0.\n stride(int or tuple, optional): The stride size. If stride is a tuple, it must\n contain two integers, (stride_H, stride_W). Otherwise, the\n stride_H = stride_W = stride. Default: 1.\n dilation(int or tuple, optional): The dilation size. If dilation is a tuple, it must\n contain two integers, (dilation_H, dilation_W). Otherwise, the\n dilation_H = dilation_W = dilation. Default: 1.\n groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by\n grouped convolution in Alex Krizhevsky's Deep CNN paper, in which\n when group=2, the first half of the filters is only connected to the\n first half of the input channels, while the second half of the\n filters is only connected to the second half of the input channels.\n Default: 1.\n param_attr (ParamAttr, optional): The parameter attribute for learnable weights(Parameter)\n of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. Default: None.\n bias_attr (ParamAttr or bool, optional): The attribute for the bias of conv2d_transpose.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, conv2d_transpose\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. Default: None.\n use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn\n library is installed. 
Default: True.\n act (str, optional): Activation type, if it is set to None, activation is not appended.\n Default: None.\n dtype (str, optional): Data type, it can be \"float32\" or \"float64\". Default: \"float32\".\n\n Attribute:\n **weight** (Parameter): the learnable weights of filters of this layer.\n\n **bias** (Parameter or None): the learnable bias of this layer.\n\n Returns:\n None\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n with fluid.dygraph.guard():\n data = np.random.random((3, 32, 32, 5)).astype('float32')\n conv2DTranspose = fluid.dygraph.nn.Conv2DTranspose(\n num_channels=32, num_filters=2, filter_size=3)\n ret = conv2DTranspose(fluid.dygraph.base.to_variable(data))\n\n \"\"\"\n\n def __init__(self,\n num_channels,\n num_filters,\n filter_size,\n output_size=None,\n padding=0,\n stride=1,\n dilation=1,\n groups=None,\n param_attr=None,\n bias_attr=None,\n use_cudnn=True,\n act=None,\n dtype='float32'):\n super(Conv2DTranspose, self).__init__()\n assert param_attr is not False, \"param_attr should not be False in conv2d_transpose.\"\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._act = act\n self._groups = groups\n self._num_channels = num_channels\n self._num_filters = num_filters\n self._use_cudnn = use_cudnn\n self._padding = padding\n self._stride = stride\n self._dilation = dilation\n self._filter_size = filter_size\n self._output_size = output_size\n self._dtype = dtype\n\n if (self._num_channels == self._groups and\n self._num_filters == self._num_channels and\n not self._use_cudnn):\n self._op_type = 'depthwise_conv2d_transpose'\n else:\n self._op_type = 'conv2d_transpose'\n\n self._padding = utils.convert_to_list(self._padding, 2, 'padding')\n self._stride = utils.convert_to_list(self._stride, 2, 'stride')\n self._dilation = utils.convert_to_list(self._dilation, 2, 'dilation')\n\n self._filter_size = utils.convert_to_list(\n self._filter_size, 2, 'conv2d_transpose.filter_size')\n\n if self._output_size is None:\n self._output_size = []\n elif isinstance(self._output_size, list) or isinstance(\n self._output_size, int):\n self._output_size = utils.convert_to_list(self._output_size, 2,\n 'output_size')\n else:\n raise ValueError(\"output_size should be list or int\")\n self._padding = utils.convert_to_list(self._padding, 2, 'padding')\n self._groups = 1 if self._groups is None else self._groups\n filter_shape = [self._num_channels, self._num_filters // self._groups\n ] + self._filter_size\n\n self.weight = self.create_parameter(\n dtype=self._dtype, shape=filter_shape, attr=self._param_attr)\n\n self.bias = self.create_parameter(\n attr=self._bias_attr,\n shape=[self._num_filters],\n dtype=self._dtype,\n is_bias=True)\n\n def forward(self, input):\n inputs = {'Input': [input], 'Filter': [self.weight]}\n attrs = {\n 'output_size': self._output_size,\n 'strides': self._stride,\n 'paddings': self._padding,\n 'dilations': self._dilation,\n 'groups': self._groups,\n 'use_cudnn': self._use_cudnn\n }\n\n if in_dygraph_mode():\n op = getattr(core.ops, self._op_type)\n outs = op(inputs, attrs)\n pre_bias = outs['Output'][0]\n pre_act = dygraph_utils._append_bias_in_dygraph(pre_bias, self.bias,\n 1)\n return dygraph_utils._append_activation_in_dygraph(\n pre_act, act=self._act)\n\n pre_bias = self._helper.create_variable_for_type_inference(\n dtype=input.dtype)\n self._helper.append_op(\n type=self._op_type,\n inputs=inputs,\n outputs={'Output': pre_bias},\n attrs=attrs)\n\n if self.bias is not None:\n 
pre_act = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type='elementwise_add',\n inputs={'X': [pre_bias],\n 'Y': [self.bias]},\n outputs={'Out': [pre_act]},\n attrs={'axis': 1})\n else:\n pre_act = pre_bias\n\n out = self._helper.append_activation(pre_act, act=self._act)\n return out\n\n\nclass SequenceConv(layers.Layer):\n \"\"\"\n This function creates the op for sequence_conv, using the inputs and\n other convolutional configurations for the filters and stride as given\n in the input parameters to the function.\n\n Parameters:\n name_scope(str): The name of this class.\n num_filters (int): number of filters.\n filter_size (int): the filter size (H and W). Default: 3.\n filter_stride (int): stride of the filter. Default: 1.\n padding (bool|None): if True, add paddings. Default: None\n bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of sequence_conv.\n If it is set to False, no bias will be added to the output units.\n If it is set to None or one attribute of ParamAttr, sequence_conv\n will create ParamAttr as bias_attr. If the Initializer of the bias_attr\n is not set, the bias is initialized zero. Default: None.\n param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights\n of sequence_conv. If it is set to None or one attribute of ParamAttr, sequence_conv\n will create ParamAttr as param_attr. If the Initializer of the param_attr\n is not set, the parameter is initialized with Xavier. Default: None.\n act (str): Activation type, if it is set to None, activation is not appended.\n Default: None.\n\n Attributes:\n weight (Parameter): the learnable weights of filters of this layer.\n bias (Parameter|None): the learnable bias of this layer.\n\n Returns:\n Variable: output of sequence_conv\n \"\"\"\n\n def __init__(self,\n name_scope,\n num_filters,\n filter_size=3,\n filter_stride=1,\n padding=None,\n bias_attr=None,\n param_attr=None,\n act=None):\n assert not in_dygraph_mode(\n ), \"SequenceConv is not supported by dynamic graph mode yet!\"\n super(SequenceConv, self).__init__(name_scope)\n self._num_filters = num_filters\n self._filter_size = filter_size\n self._filter_stride = filter_stride\n self._padding = padding\n self._bias_attr = bias_attr\n self._param_attr = param_attr\n self._act = act\n\n def _build_once(self, input):\n self._dtype = self._helper.input_dtype(input)\n filter_shape = [self._filter_size * input.shape[1], self._num_filters]\n self.weight = self.create_parameter(\n attr=self._param_attr, shape=filter_shape, dtype=self._dtype)\n\n self.bias = self.create_parameter(\n attr=self._bias_attr,\n shape=[self._num_filters],\n dtype=self._dtype,\n is_bias=True)\n\n def forward(self, input):\n pre_bias = self._helper.create_variable_for_type_inference(self._dtype)\n self._helper.append_op(\n type='sequence_conv',\n inputs={\n 'X': [input],\n 'Filter': [self.weight],\n },\n outputs={\"Out\": pre_bias},\n attrs={\n 'contextStride': self._filter_stride,\n 'contextStart': -int(self._filter_size // 2),\n 'contextLength': self._filter_size\n })\n\n if self.bias is not None:\n pre_act = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type='elementwise_add',\n inputs={'X': [pre_bias],\n 'Y': [self.bias]},\n outputs={'Out': [pre_act]},\n attrs={'axis': 1})\n else:\n pre_act = pre_bias\n\n return self._helper.append_activation(pre_act, act=self._act)\n\n\nclass RowConv(layers.Layer):\n \"\"\"\n ***Row-convolution operator***\n\n The 
row convolution is called lookahead convolution. This operator was introduced in the following paper for DeepSpeech2:\n http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf\n\n The main motivation is that a bidirectional RNN, useful in DeepSpeech like speech models, learns representation for a sequence by performing a\n forward and a backward pass through the entire sequence. However, unlike\n unidirectional RNNs, bidirectional RNNs are challenging to deploy in an online\n and low-latency setting. The lookahead convolution incorporates information\n from future subsequences in a computationally efficient manner to improve\n unidirectional recurrent neural networks. The row convolution operator is\n different from the 1D sequence convolution, and is computed as follows:\n\n Given an input sequence X of length t and input dimension D, and a filter (W) of size context * D.\n\n More details about row_conv please refer to the design document https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645 .\n\n Parameters:\n name_scope(str): The name of this class.\n future_context_size (int): Future context size. Please note, the shape\n of convolution kernel is [future_context_size + 1, D].\n param_attr (ParamAttr): Attributes of parameters, including\n name, initializer etc. Default: None.\n act (str): Non-linear activation to be applied to output variable. Default: None.\n\n Attributes:\n weight (Parameter): the learnable weights of this layer.\n\n Returns:\n the output(Out) is a LodTensor, which supports variable time-length input sequences.\n The underlying tensor in this LodTensor is a matrix with shape T x N, i.e., the same shape as X.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n with fluid.dygraph.guard():\n x = numpy.random.random((16)).astype('float32')\n rowConv = fluid.dygraph.nn.RowConv(\n 'RowConv', future_context_size=2)\n ret = rowConv(fluid.dygraph.base.to_variable(x))\n\n \"\"\"\n\n def __init__(self,\n name_scope,\n future_context_size,\n param_attr=None,\n act=None):\n assert not in_dygraph_mode(\n ), \"RowConv is not supported by dynamic graph mode yet!\"\n super(RowConv, self).__init__(name_scope)\n self._act = act\n self._param_attr = param_attr\n self._future_context_size = future_context_size\n\n def _build_once(self, input):\n self._dtype = self._helper.input_dtype(input)\n filter_shape = [self._future_context_size + 1, input.shape[1]]\n self.weight = self.create_parameter(\n attr=self._param_attr,\n shape=filter_shape,\n dtype=self._dtype,\n is_bias=False)\n\n def forward(self, input):\n out = self._helper.create_variable_for_type_inference(self._dtype)\n self._helper.append_op(\n type='row_conv',\n inputs={'X': [input],\n 'Filter': [self.weight]},\n outputs={'Out': [out]})\n return self._helper.append_activation(out, act=self._act)\n\n\nclass GroupNorm(layers.Layer):\n \"\"\"\n This interface is used to construct a callable object of the ``GroupNorm`` class.\n For more details, refer to code examples.\n It implements the function of the Group Normalization Layer.\n Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .\n\n Parameters:\n channels(int): The number of channels of input.\n groups(int): The number of groups that divided from channels.\n epsilon(float, optional): The small value added to the variance to prevent\n division by zero. Default: 1e-05.\n param_attr(ParamAttr, optional): The parameter attribute for the learnable\n scale :math:`g`. 
If it is set to False, no scale will be added to the output units.\n If it is set to None, the scale is initialized to one. Default: None.\n bias_attr(ParamAttr, optional): The parameter attribute for the learnable\n bias :math:`b`. If it is set to False, no bias will be added to the output units.\n If it is set to None, the bias is initialized to zero. Default: None.\n act(str, optional): Activation to be applied to the output of group normalization. Default: None.\n data_layout(str, optional): Specify the input data format. Only NCHW is supported. Default: NCHW.\n\n Returns:\n None\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n with fluid.dygraph.guard():\n x = np.random.random((8, 32, 32)).astype('float32')\n groupNorm = fluid.dygraph.nn.GroupNorm(channels=32, groups=4)\n ret = groupNorm(fluid.dygraph.base.to_variable(x))\n\n \"\"\"\n\n def __init__(self,\n channels,\n groups,\n epsilon=1e-05,\n param_attr=None,\n bias_attr=None,\n act=None,\n data_layout='NCHW',\n dtype='float32'):\n super(GroupNorm, self).__init__()\n self._param_attr = param_attr\n self._bias_attr = bias_attr\n self._epsilon = epsilon\n self._channels = channels\n self._groups = groups\n self._act = act\n self._dtype = dtype\n if data_layout != 'NCHW':\n raise ValueError(\"unsupported data layout:\" + data_layout)\n\n param_shape = [self._channels]\n\n self.weight = self.create_parameter(\n attr=self._param_attr or False,\n shape=param_shape,\n dtype=self._dtype,\n default_initializer=Constant(1.0))\n\n self.bias = self.create_parameter(\n attr=self._bias_attr or False,\n shape=param_shape,\n dtype=self._dtype,\n is_bias=True)\n\n def forward(self, input):\n inputs = {'X': input}\n if self.bias:\n inputs['Bias'] = self.bias\n if self.weight:\n inputs['Scale'] = self.weight\n\n # create output\n mean_out = self._helper.create_variable_for_type_inference(\n dtype=self._dtype, stop_gradient=True)\n variance_out = self._helper.create_variable_for_type_inference(\n dtype=self._dtype, stop_gradient=True)\n group_norm_out = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n\n self._helper.append_op(\n type=\"group_norm\",\n inputs=inputs,\n outputs={\n \"Y\": group_norm_out,\n \"Mean\": mean_out,\n \"Variance\": variance_out,\n },\n attrs={\"epsilon\": self._epsilon,\n \"groups\": self._groups})\n\n return self._helper.append_activation(group_norm_out, self._act)\n\n\nclass SpectralNorm(layers.Layer):\n \"\"\"\n This interface is used to construct a callable object of the ``SpectralNorm`` class.\n For more details, refer to code examples. It implements the function of the Spectral Normalization Layer.\n This layer calculates the spectral normalization value of weight parameters of\n fc, conv1d, conv2d, conv3d layers which should be 2-D, 3-D, 4-D, 5-D\n Parameters. Calculations are shown as follows.\n\n Step 1:\n Generate vector U in shape of [H], and V in shape of [W].\n Here H is the :attr:`dim`-th dimension of the input weights,\n and W is the product of the remaining dimensions.\n\n Step 2:\n :attr:`power_iters` should be a positive integer; do the following\n calculations with U and V for :attr:`power_iters` rounds.\n\n .. math::\n\n \\mathbf{v} := \\\\frac{\\mathbf{W}^{T} \\mathbf{u}}{\\|\\mathbf{W}^{T} \\mathbf{u}\\|_2}\n\n \\mathbf{u} := \\\\frac{\\mathbf{W} \\mathbf{v}}{\\|\\mathbf{W} \\mathbf{v}\\|_2}\n\n Step 3:\n Calculate :math:`\\sigma(\\mathbf{W})` and normalize weight values.\n\n .. 
math::\n\n \\sigma(\\mathbf{W}) = \\mathbf{u}^{T} \\mathbf{W} \\mathbf{v}\n\n \\mathbf{W} = \\\\frac{\\mathbf{W}}{\\sigma(\\mathbf{W})}\n\n\n Refer to `Spectral Normalization <https://arxiv.org/abs/1802.05957>`_ .\n\n Parameters:\n weight_shape(list or tuple): The shape of weight parameter.\n dim(int, optional): The index of dimension which should be permuted to the first before reshaping Input(Weight) to matrix, it should be set as 0 if Input(Weight) is the weight of fc layer, and should be set as 1 if Input(Weight) is the weight of conv layer. Default: 0.\n power_iters(int, optional): The number of power iterations to calculate spectral norm. Default: 1.\n eps(float, optional): The epsilon for numerical stability in calculating norms. Default: 1e-12.\n name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .\n dtype (str, optional): Data type, it can be \"float32\" or \"float64\". Default: \"float32\".\n\n Returns:\n None\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n with fluid.dygraph.guard():\n weight = np.random.random((2, 8, 32, 32)).astype('float32')\n spectralNorm = fluid.dygraph.nn.SpectralNorm(weight.shape, dim=1, power_iters=2)\n ret = spectralNorm(fluid.dygraph.base.to_variable(weight))\n\n \"\"\"\n\n def __init__(self,\n weight_shape,\n dim=0,\n power_iters=1,\n eps=1e-12,\n dtype='float32'):\n super(SpectralNorm, self).__init__()\n self._power_iters = power_iters\n self._eps = eps\n self._dim = dim\n self._dtype = dtype\n\n self._weight_shape = list(weight_shape)\n h = self._weight_shape[self._dim]\n w = np.prod(self._weight_shape) // h\n\n self.weight_u = self.create_parameter(\n attr=ParamAttr(),\n shape=[h],\n dtype=self._dtype,\n default_initializer=Normal(0., 1.))\n self.weight_u.stop_gradient = True\n\n self.weight_v = self.create_parameter(\n attr=ParamAttr(),\n shape=[w],\n dtype=self._dtype,\n default_initializer=Normal(0., 1.))\n self.weight_v.stop_gradient = True\n\n def forward(self, weight):\n inputs = {'Weight': weight, 'U': self.weight_u, 'V': self.weight_v}\n out = self._helper.create_variable_for_type_inference(self._dtype)\n self._helper.append_op(\n type=\"spectral_norm\",\n inputs=inputs,\n outputs={\"Out\": out, },\n attrs={\n \"dim\": self._dim,\n \"power_iters\": self._power_iters,\n \"eps\": self._eps,\n })\n\n return out\n\n\nclass TreeConv(layers.Layer):\n \"\"\"\n This interface is used to construct a callable object of the ``TreeConv`` class.\n For more details, refer to code examples.\n Tree-Based Convolution is a kind of convolution based on tree structure.\n Tree-Based Convolution is a part of Tree-Based Convolution Neural Network(TBCNN),\n which is used to classify tree structures, such as Abstract Syntax Tree.\n Tree-Based Convolution proposed a kind of data structure called continuous binary tree,\n which regards multiway tree as binary tree.\n The paper of Tree-Based Convolution Operator is here: `tree-based convolution <https://arxiv.org/abs/1409.5718v1/>`_ .\n \n Parameters:\n feature_size(int): last dimension of nodes_vector.\n output_size(int): output feature width.\n num_filters(int, optional): number of filters, Default: 1.\n max_depth(int, optional): max depth of filters, Default: 2.\n act(str, optional): activation function, Default: tanh.\n param_attr(ParamAttr, optional): the parameter attribute for the filters, Default: None.\n bias_attr(ParamAttr, optional): the 
parameter attribute for the bias of this layer, Default: None.\n name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .\n dtype (str, optional): Data type, it can be \"float32\" or \"float64\". Default: \"float32\".\n\n Attribute:\n **weight** (Parameter): the learnable weights of filters of this layer.\n\n **bias** (Parameter or None): the learnable bias of this layer.\n\n Returns:\n None\n\n Examples:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n with fluid.dygraph.guard():\n nodes_vector = numpy.random.random((1, 10, 5)).astype('float32')\n edge_set = numpy.random.random((1, 9, 2)).astype('int32')\n treeConv = fluid.dygraph.nn.TreeConv(\n feature_size=5, output_size=6, num_filters=1, max_depth=2)\n ret = treeConv(fluid.dygraph.base.to_variable(nodes_vector), fluid.dygraph.base.to_variable(edge_set))\n \"\"\"\n\n def __init__(self,\n feature_size,\n output_size,\n num_filters=1,\n max_depth=2,\n act='tanh',\n param_attr=None,\n bias_attr=None,\n name=None,\n dtype='float32'):\n super(TreeConv, self).__init__()\n self._name = name\n self._feature_size = feature_size\n self._output_size = output_size\n self._act = act\n self._max_depth = max_depth\n self._num_filters = num_filters\n self._bias_attr = bias_attr\n self._param_attr = param_attr\n self._dtype = dtype\n w_shape = [self._feature_size, 3, self._output_size, self._num_filters]\n if self._bias_attr:\n self.bias = self.create_parameter(\n attr=self._bias_attr,\n shape=[self._num_filters],\n dtype=self._dtype,\n is_bias=True)\n self.weight = self.create_parameter(\n attr=self._param_attr,\n shape=w_shape,\n dtype=self._dtype,\n is_bias=False)\n\n def forward(self, nodes_vector, edge_set):\n if self._name:\n out = self.create_variable(\n name=self._name, dtype=self._dtype, persistable=False)\n else:\n out = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type='tree_conv',\n inputs={\n 'NodesVector': nodes_vector,\n 'EdgeSet': edge_set,\n 'Filter': self.weight\n },\n outputs={'Out': out, },\n attrs={'max_depth': self._max_depth})\n if self._bias_attr:\n pre_activation = self._helper.create_variable_for_type_inference(\n dtype=self._dtype)\n self._helper.append_op(\n type='elementwise_add',\n inputs={'X': [out],\n 'Y': [self.bias]},\n outputs={'Out': [pre_activation]},\n attrs={'axis': 1})\n else:\n pre_activation = out\n return self._helper.append_activation(pre_activation, act=self._act)\n"
] |
[
[
"numpy.array",
"numpy.prod"
]
] |
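Editor's note: the SpectralNorm docstring in the file above describes its power iteration only in math. The following is a minimal NumPy sketch of that scheme under the stated reshape convention; `spectral_normalize` and its defaults are illustrative assumptions, not part of Paddle.

import numpy as np

def spectral_normalize(w, dim=0, power_iters=1, eps=1e-12):
    # Move `dim` to the front and flatten to a 2-D matrix [h, w'],
    # mirroring how the layer reshapes Input(Weight).
    w_mat = np.moveaxis(w, dim, 0).reshape(w.shape[dim], -1)
    u = np.random.normal(size=w_mat.shape[0])
    v = np.random.normal(size=w_mat.shape[1])
    for _ in range(power_iters):
        v = w_mat.T @ u
        v /= np.linalg.norm(v) + eps
        u = w_mat @ v
        u /= np.linalg.norm(u) + eps
    sigma = u @ w_mat @ v  # estimate of the largest singular value
    return w / sigma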
BradhamLab/transPipe
|
[
"7a4fb14492f14114e3a90daca7237a41131365ad"
] |
[
"scripts/python/utils.py"
] |
[
"import itertools\nimport os\nimport re\nimport subprocess as sbp\n\nimport numpy as np\nimport yaml\n\n\ndef link_ids_to_input(data_dir, sample_regex, replicate_regex=''):\n \"\"\"\n Link sample ids to data files.\n\n Args:\n data_dir (string): parent directory containing all sample-specific\n directories/files.\n sample_regex (string): regex pattern to extract sample ids from\n directory/file names names. Pattern should match all characters\n proceding {sample.id}, such that extracting up to the match will\n result in sample id extraction (e.g. with a file name formatted\n \"/foo/bar/{sample_id}{sample_regex}.input\", the pattern\n {sample_regex} should be provided.)\n replicate_regex (string, optional): regex pattern to match samples with\n their associated replicate status. Use this to only return samples\n from a specific replicate run. Default behavior has no replicate\n filtering with a default value of ''.\n Returns:\n (dict, string): dictionary linking sample id to data files\n \n \"\"\"\n sample_dict = {}\n sample_pattern = re.compile(sample_regex)\n replicate_pattern = re.compile(replicate_regex)\n for sample_data in os.listdir(data_dir):\n sample_match = re.search(sample_pattern, sample_data)\n replicate_match = re.search(replicate_pattern, sample_data)\n if sample_match is not None and replicate_match is not None:\n sample_id = sample_data[0:sample_match.span()[0]]\n data_loc = os.path.join(data_dir, sample_data)\n sample_dict[sample_id] = data_loc\n return sample_dict\n\n\ndef configure_run(config_dict):\n \"\"\"Parse a run-specific configuration file.\"\"\"\n with open(config_dict['config'], 'r') as f:\n config = yaml.load(f)\n return config\n\n\n# STAR helper functions\n# =====================\n\n# function to get genomeChrBinNBits parameter for STAR alignment.\ndef estimate_STAR_ChrBinNbits(genome_file, read_length):\n \"\"\"\n Estimate the `ChrBinNBits` parameter for genome indexing in STAR\n\n Estimate the `ChrBinNBits` parameter for genome indexing in STAR. Value\n must be estimated due to memory constraints caused by the large number\n of scaffolds present in some genomes (i.e. the LV genome). If estimation\n is unnecessary, flag `star_est_ChrBinNbits: False` in configuration file.\n\n Args:\n genome_file (string): path to fasta file containing genome reference\n sequences.\n read_length (int): length of reads from RNAseq experiment.\n\n Return:\n (int) new value for scaling RAM consumption\n\n References:\n https://github.com/alexdobin/STAR/blob/master/doc/STARmanual.pdf (p. 
7)\n https://github.com/alexdobin/STAR/issues/103\n \"\"\"\n len_call = 'grep -v \">\" {} | wc | awk '.format(genome_file)\\\n + \"'{print $3-$1}'\"\n n_ref_call = 'grep \"^>\" {} | wc -l'.format(genome_file)\n\n return_values = [None, None]\n for i, call in enumerate([len_call, n_ref_call]):\n p = sbp.Popen(call, stdin=sbp.PIPE, stdout=sbp.PIPE, stderr=sbp.PIPE,\n shell=True)\n output, err = p.communicate()\n if p.returncode == 0:\n return_values[i] = int(output.strip())\n else:\n raise OSError(err)\n estimate = max([int(np.log2(return_values[0] / return_values[1])),\n int(np.log2(read_length))])\n return min(18, estimate)\n\n\ndef get_star_genome_params(config_dict):\n \"\"\"\n Extract parameters for genome indexing in STAR.\n\n Args:\n config_dict (dictionary): configuration dictionary created by snakemake\n via configfile: {file.name}\n Returns:\n (string): string of arguments to pass STAR.\n \"\"\"\n\n star_genome_params = config_dict['params']['star_genome']\n if config_dict['flags']['star_est_ChrBinsNbits'] == True:\n nbits = estimate_STAR_ChrBinNbits(config_dict['files']['genome_fasta'],\n config_dict['dataset']['read_length'])\n star_genome_params += ' --genomeChrBinNbits {}'.format(nbits)\n return star_genome_params\n\n\ndef get_star_genome_files(star_dir):\n \"\"\"\n Get file paths for all expected output files from genome indexing in STAR.\n\n Args:\n star_dir: directory where all output files will be written.\n Returns:\n (list, string): list of output file paths.\n \"\"\"\n\n files = ['chrLength.txt', 'exonInfo.tab', 'SAindex',\n 'chrNameLength.txt', 'geneInfo.tab', 'sjdbInfo.txt',\n 'chrName.txt', 'Genome', 'sjdbList.fromGTF.out.tab',\n 'chrStart.txt', 'genomeParameters.txt', 'sjdbList.out.tab',\n 'exonGeTrInfo.tab', 'SA', 'transcriptInfo.tab']\n out = [os.path.join(*each) for each in itertools.product([star_dir], files)]\n return out\n"
] |
[
[
"numpy.log2"
]
] |
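Editor's note: a quick worked instance of the `estimate_STAR_ChrBinNbits` arithmetic in the file above; the genome figures are hypothetical, chosen only to make the logs easy to check by hand.

import numpy as np

# A 1 Gb genome split across 100,000 scaffolds, with 100 bp reads:
genome_length, n_references, read_length = 1_000_000_000, 100_000, 100
estimate = max(int(np.log2(genome_length / n_references)),  # log2(1e4) ~ 13.3 -> 13
               int(np.log2(read_length)))                   # log2(100) ~ 6.6 -> 6
print(min(18, estimate))  # 13, i.e. below STAR's default of 18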
tinlun/cs350-convex-hull
|
[
"93e63a7383085cea5a068ad4d5d510469cd0d321"
] |
[
"utils.py"
] |
[
"import numpy as np\n\ndef left_most_point(points):\n '''\n Takes in the array of points, and returns the point\n furthest on the left side.\n '''\n leftMost = np.array([])\n for point in points:\n if leftMost.size == 0 or point[0] < leftMost[0]:\n leftMost = point\n return leftMost\n\ndef lr_most_point(points):\n '''\n Takes in the array of points, and returns the points\n furthest on the left and right sides as a tuple.\n '''\n leftMost = np.array([])\n rightMost = np.array([])\n for point in points:\n if leftMost.size == 0 or point[0] < leftMost[0]:\n leftMost = point\n if rightMost.size == 0 or point[0] > rightMost[0]:\n rightMost = point\n return (leftMost, rightMost)\n\ndef dist(A, B, C):\n '''\n Returns the shortest distance between the point C,\n and the line segment connected by A and B.\n '''\n P = A - C\n Q = A - B\n magQ = np.linalg.norm(Q)\n\n proj = (np.dot(P, Q) / (magQ*magQ)) * Q\n return np.linalg.norm(P - proj)\n\n\ndef sort_by_x(points):\n '''\n Sort the array by x coordinate, or by y coordinate in case of tie.\n Returns a array of data points. (Does not modify the array)\n '''\n indices = np.lexsort((points[:,1], points[:,0]))\n return points[indices]\n \n\ndef cross(a, b, o):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n"
] |
[
[
"numpy.dot",
"numpy.array",
"numpy.lexsort",
"numpy.linalg.norm"
]
] |
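Editor's note: a small sanity check for the `dist` helper in the file above, with points chosen so the projection step is easy to verify by hand.

import numpy as np

# C sits one unit above the line through A and B, so this prints 1.0:
# P = A - C = (0, -1), Q = A - B = (-2, 0), dot(P, Q) = 0, hence proj is
# the zero vector and the result is ||P|| = 1.0.
A, B, C = np.array([0.0, 0.0]), np.array([2.0, 0.0]), np.array([0.0, 1.0])
P, Q = A - C, A - B
proj = (np.dot(P, Q) / np.dot(Q, Q)) * Q
print(np.linalg.norm(P - proj))  # 1.0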
hhf1357924680/RL-FIN
|
[
"be8dc15e1b6890551eae5637c179d23521956f58"
] |
[
"RL_2020_ENSEMBLE.py"
] |
[
"import warnings # python运行代码的时候,经常会碰到代码可以正常运行但是会提出警告,不想看到这些不重要的警告,所以使用控制警告输出\n\nwarnings.filterwarnings(\"ignore\") # 使用警告过滤器来控制忽略发出的警告\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib # python中类似于MATLAB的绘图工具,是一个2D绘图库\nimport matplotlib.pyplot as plt\nimport datetime # datetime模块提供了各种类,用于操作日期和时间\n\n\n# %matplotlib inline 表示内嵌绘图,有了这个命令就可以省略掉plt.show()命令了\nfrom finrl.config import config # 引入finrl包的配置\nfrom finrl.marketdata.yahoodownloader import YahooDownloader\nfrom finrl.preprocessing.preprocessors import FeatureEngineer\nfrom finrl.preprocessing.data import data_split\nfrom finrl.env.env_stocktrading import StockTradingEnv\nfrom finrl.model.models import DRLAgent, DRLEnsembleAgent\n\nfrom finrl.trade.backtest import (\n backtest_stats,\n get_daily_return,\n get_baseline,\n backtest_plot,\n)\nfrom pprint import pprint # 用于打印 Python 数据结构. 使输出数据格式整齐, 便于阅读\n\nimport sys # 该语句告诉Python,我们想要使用sys,此模块包含了与Python解释器和它的环境有关的函数\n\nsys.path.append(\"../FinRL-Library\")\n# 在Python执行import sys语句的时候,python会根据sys.path的路径来寻找sys.py模块。\n# 添加自己的模块路径, Sys.path.append(“mine module path”)\n\nimport itertools # itertools模块中的函数可以用来对数据进行循环操作\n\n\"\"\"\nos.path.exists(path),如果path是一个存在的路径,返回True,否则返回 False\nos.path.exists(path)的应用:判断路径是否存在,不存在则创建\n举例子\nlog_dir = \"logs/\"\nif not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\"\"\"\n\nimport os\n\nif not os.path.exists(\"./\" + config.DATA_SAVE_DIR): # \"./\"代表当前目录\n os.makedirs(\"./\" + config.DATA_SAVE_DIR)\nif not os.path.exists(\"./\" + config.TRAINED_MODEL_DIR):\n os.makedirs(\"./\" + config.TRAINED_MODEL_DIR)\nif not os.path.exists(\"./\" + config.TENSORBOARD_LOG_DIR):\n os.makedirs(\"./\" + config.TENSORBOARD_LOG_DIR)\nif not os.path.exists(\"./\" + config.RESULTS_DIR):\n os.makedirs(\"./\" + config.RESULTS_DIR)\n\n# 下载数据\n# Attributes\n# ----------\n# start_date : str\n# start date of the data (modified from config.py)\n# end_date : str\n# end date of the data (modified from config.py)\n# ticker_list : list\n# a list of stock tickers (modified from config.py)\n\n# Methods\n# -------\n# fetch_data()\n# Fetches data from yahoo API\n# from config.py start_date is a string\n\nconfig.START_DATE\nconfig.END_DATE\nprint(config.DOW_30_TICKER)\n\n# 缓存数据,如果日期或者股票列表发生变化,需要删除该缓存文件重新下载\nSAVE_PATH = \"./datasets/20210616-12h19.csv\"\nif os.path.exists(SAVE_PATH):\n df = pd.read_csv(SAVE_PATH)\nelse:\n df = YahooDownloader(\n config.START_DATE, #'2000-01-01',\n config.END_DATE, # 2021-01-01,预计将改日期改为'2021-06-20'(今日日期)\n ticker_list=config.DOW_30_TICKER,\n ).fetch_data() # DOW_30_TICKER)道琼斯30只股票\n df.to_csv(SAVE_PATH)\n\ndf.head() # 最开始5条\ndf.tail() # tail仅展示了最后五条数据\ndf.shape\ndf.sort_values([\"date\", \"tic\"]).head() # ticker表示股票代码,e.g.AAPL是苹果的股票\n\"\"\"\npandas中的sort_values()函数可以根据指定行、列的数据进行排序\n#DataFrame.sort_values(by=‘##’-按照指定列、行排序,ascending=True-默认升序排列, inplace=False-默认不替换原来数据集, na_position=‘last’-默认缺失值位置为last最后一个)\n#按照df数据集的['date','tic']两列排序,其余参数取默认值\n\"\"\"\n# 数据预处理\n\"\"\"\nfeatures:4+2+1+1(turb)\n1.1. Add technical indicators: MACD RSI cci adx\n1.2. user_defined_feature: stock prices, current holding shares\n1.3. current balance(当前账户所有额)\n2. turbulence index. 
\n FinRL employs the financial turbulence index that measures extreme asset price fluctuation.\n\nAttributes\n ----------\n use_technical_indicator : boolean 注意:boolean(布尔值)取值为true or false,默认值是false\n we technical indicator or not\n tech_indicator_list : list\n a list of technical indicator names (modified from config.py)\n use_turbulence : boolean \n use turbulence index or not\n user_defined_feature:boolean\n user user defined features or not\n注意:本文创新点是引入了一个turbulence提高模型的抗风险能力。文章通过定义turbulence的指数来反应股市状况,在崩盘的时候,则强制抛售所有资产以抵御股市崩盘的金融风险。\n但是该特征比较trick,eg,崩盘事件概率太小,一般模型使用该指标合理么?且turbulence的阈值是超参数\n事实上,当崩盘发生的时候,根本没时间反应\n\n\nMethods\n -------\n preprocess_data()\n main method to do the feature engineering\n\"\"\"\n\ntech_indicators = [\"macd\", \"rsi_30\", \"cci_30\", \"dx_30\"]\n\nfe = FeatureEngineer(\n use_technical_indicator=True,\n tech_indicator_list=tech_indicators,\n use_turbulence=True,\n user_defined_feature=False,\n)\n##使用finrl.preprocessing.preprocessors中的FeatureEngineer来对股价数据进行预处理\n\n# 缓存数据,如果日期或者股票列表发生变化,需要删除该缓存文件重新下载\nSAVE_PATH = \"./datasets/20210616-12h19.preprocess.csv\"\nif os.path.exists(SAVE_PATH):\n processed = pd.read_csv(SAVE_PATH)\nelse:\n processed = fe.preprocess_data(df)\n processed.to_csv(SAVE_PATH)\n\nlist_ticker = processed[\"tic\"].unique().tolist() # 按照processed的\"tic\"列去重\nlist_date = list(\n pd.date_range(processed[\"date\"].min(), processed[\"date\"].max()).astype(str)\n) # 成一个固定频率的时间索引\ncombination = list(itertools.product(list_date, list_ticker))\n\"\"\"\n1.pandas.date_range(start=None, end=None, periods=None, freq='D', tz=None, normalize=False, name=None, closed=None, **kwargs)\n由于import pandas as pd,所以也可以写成pd.date_range(start=None, end=None)\n该函数主要用于生成一个固定频率的时间索引,使用时必须指定start、end、periods中的两个参数值,否则报错。\n2.df.astype('str') #改变整个df变成str数据类型\n3.itertools.product(*iterables[, repeat]) # 对应有序的重复抽样过程\n itertools.product(a,b),将a,b元组中的每个分量依次乘开。\n\"\"\"\n\nprocessed_full = pd.DataFrame(combination, columns=[\"date\", \"tic\"]).merge(\n processed, on=[\"date\", \"tic\"], how=\"left\"\n)\n\"\"\"1. pd.DataFrame( 某数据集 ,index ,columns ),给某数据集加上行名index和列名columns\n 此处只有pd.DataFrame( 某数据集 ,columns ),第一列加列名date,第二列加列名tic.\n 2. 
merge(df1,df2,on='key',how)\n 按照[\"date\",\"tic\"]为关键字链接,以左边的dataframe为主导,左侧dataframe取全部数据,右侧dataframe配合左边\n\"\"\"\n\nprocessed_full = processed_full[processed_full[\"date\"].isin(processed[\"date\"])]\n# isin函数,清洗数据,删选过滤掉processed_full中一些行,processed_full新加一列['date']若和processed_full中的['date']不相符合,则被剔除\nprocessed_full = processed_full.sort_values([\"date\", \"tic\"])\n\nprocessed_full = processed_full.fillna(0)\n# 对于processed_full数据集中的缺失值使用 0 来填充.\nprocessed_full.sample(5) # sample()是random模块中的一个函数,即随机取五个样本展示\n\n\n# 设计强化学习实验环境\n# trading environments, based on OpenAI Gym framework, simulate live stock markets with real market data\n# according to the principle of time-driven simulation.\n# action space:{-1,0,1}-{selling,holding,buying};\n# {k,...,-1,0,1,...,k}-{number of shares to sell,number of shares to hold,number of shares to buy}\n\n\n# The continuous action space needs to be normalized to [-1, 1],\n# since the policy is defined on a Gaussian distribution,\n# which needs to be normalized and symmetric.\n\nstock_dimension = len(processed_full.tic.unique())\nstate_space = 1 + 2 * stock_dimension + len(tech_indicators) * stock_dimension\nprint(f\"Stock Dimension: {stock_dimension}, State Space: {state_space}\")\n\n\"\"\"\n1.按照processed_full的\"tic\"列去重并计算个数\n2.计算状态空间的维数\n\"\"\"\nenv_kwargs = {\n \"hmax\": 100,\n \"initial_amount\": 1000000, # Since in Indonesia the minimum number of shares per trx is 100, then we scaled the initial amount by dividing it with 100\n \"buy_cost_pct\": 0.001, # IPOT has 0.1% buy cost\n \"sell_cost_pct\": 0.001, # IPOT has 0.1% sell cost\n \"state_space\": state_space,\n \"stock_dim\": stock_dimension,\n \"tech_indicator_list\": tech_indicators,\n \"action_space\": stock_dimension,\n \"reward_scaling\": 1e-4,\n \"print_verbosity\": 5,\n}\n\n# 使用DRL算法(validating 3 agents:A2C、PPO、DDPG)\nrebalance_window = 25 # rebalance_window is the number of days to retrain the model\nvalidation_window = (\n 25 # validation_window is the number of days to do validation and trading\n)\n# e.g. 
if validation_window=63, then both validation and trading period will be 63 days\ntrain_start = \"2017-01-01\"\ntrain_end = \"2020-07-01\"\nval_test_start = \"2020-07-01\"\nval_test_end = \"2021-01-01\"\n\nensemble_agent = DRLEnsembleAgent(\n df=processed_full,\n train_period=(train_start, train_end),\n val_test_period=(val_test_start, val_test_end),\n rebalance_window=rebalance_window,\n validation_window=validation_window,\n **env_kwargs,\n)\n\nA2C_model_kwargs = {\"n_steps\": 5, \"ent_coef\": 0.01, \"learning_rate\": 0.0005}\n\nPPO_model_kwargs = {\n \"ent_coef\": 0.01,\n \"n_steps\": 2048,\n \"learning_rate\": 0.00025,\n \"batch_size\": 128,\n}\n\nDDPG_model_kwargs = {\n \"action_noise\": \"ornstein_uhlenbeck\",\n \"buffer_size\": 50_000,\n \"learning_rate\": 0.000005,\n \"batch_size\": 128,\n}\n\ntimesteps_dict = {\"a2c\": 30_000, \"ppo\": 100_000, \"ddpg\": 10_000}\n# 疑问,为什么这里两个变量一样,赋值不一样呢?\ntimesteps_dict = {\"a2c\": 1_000, \"ppo\": 1_000, \"ddpg\": 1_000}\n\n\ndf_summary,model_ppo,model_a2c,model_ddpg = ensemble_agent.run_ensemble_strategy(\n A2C_model_kwargs, PPO_model_kwargs, DDPG_model_kwargs, timesteps_dict\n)\n\nmodels = [model_ppo,model_a2c,model_ddpg]\n\n# r(s_t,a_t,s_(t+1) = (b_(t+1)+p_(t+1)*(h_(t+1)))-((b_t)+p_t*h_t)-ct\n# 1.未使用model的累计收益\n# 2.使用A2C得到的累计收益:A2C_model_kwargs\n# 3.使用ppo得到的累计收益:PPO_model_kwargs\n# 4.使用DDPG得到的累计收益:DDPG_model_kwargs\n# 5.使用 集成策略 得到的累计收益:df_summary\n\n\ndef stat_result():\n # Backtest of Ensemble Strategy\n unique_trade_date = processed_full[\n (processed_full.date > val_test_start) & (processed_full.date <= val_test_end)\n ].date.unique() # 使用划分好的验证集作为trade数据\n\n df_trade_date = pd.DataFrame(\n {\"datadate\": unique_trade_date}\n ) # 建立一个新的表,列名是datadate:内容是trade_date\n\n # 结果数据缓存在./result中,拼接所有的结果数据进行画图\n # ensemble, A2C, PPO, DDPG, (不使用策略)\n df_ensemble = pd.DataFrame() \n for i in range(\n rebalance_window + validation_window,\n len(unique_trade_date) + 1,\n rebalance_window,\n ):\n temp = pd.read_csv(\n \"results/account_value_trade_{}_{}.csv\".format(\"ensemble\", i)\n )\n df_ensemble = df_ensemble.append(temp, ignore_index=True) \n\n sharpe = (\n (252 ** 0.5)\n * dfs[0].account_value.pct_change(1).mean()\n / dfs[0].account_value.pct_change(1).std()\n )\n print(\"Ensemble Sharpe Ratio: \", sharpe)\n dfs[0] = dfs[0].join(df_trade_date[validation_window:].reset_index(drop=True))\n\n #用3种模型去交易验证集中的数据\n ensemble_agent.DRL_validation(model=model_ddpg,test_data=validation,test_env=val_env_ddpg,test_obs=val_obs_ddpg)\n\n\n return dfs\n\n\ndfs = stat_result() \n# 将所有数据画到同一个图中\nfor i in [1,2,3]:\n x = dfs[i][\"date\"]\n y = dfs[i][\"account_value\"]\n #backtest_plot(dfs[i], '2020-07-02', '2020-11-20')\n plt.plot(x, y)\n\nplt.savefig('account_value_plot.png')\n"
] |
[
[
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.savefig",
"pandas.DataFrame"
]
] |
ykosuke0508/interdependence-model
|
[
"4bb10a8b0c89c8e79d054820b08e0955aee4bb8b"
] |
[
"proposed_method/interdependence_model.py"
] |
[
"# -*- coding:utf-8 -*-\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport sklearn.svm as svm\nimport pandas as pd\nimport copy\nimport time\nimport random\nfrom operator import itemgetter\nimport sklearn.ensemble as ensemble\nimport scipy.optimize as optimize\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sympy import *\nfrom collections import Counter\n\nIDD_PRED_METHODS = {\"GIBBS\", \"FPI\", \"ESwP\", \"ESwPfLR\"}\n\nclass inner_dummy_binary_classifier(object):\n def __init__(self):\n self.coef_ = None\n self.intercept_ = None\n self.output_label = None\n\n def fit(X,Y):\n self.output_label = int(Y[0])\n\nclass problem_transformation_methods_template(object):\n def __init__(self):\n self._clf = None\n self._n_classes = None\n self._name = None\n self._training_time = None\n self._prediction_time = None\n raise ValueError\n\n @property\n def train_time(self):\n return self._training_time\n\n @property\n def prediction_time(self):\n return self._prediction_time\n\n @property\n def name(self):\n return self._name\n\n @property\n def n_classes(self):\n return self._n_classes\n\n def print_time(self):\n print(\"Method Name : {}\".format(self._name))\n if self._training_time is None:\n try:\n raise ValueError\n except:\n print(\"The model has not trained yet!\")\n else:\n print(\"Training Time : {0:04.4f} [sec]\".format(self._training_time))\n\n if self._prediction_time is None:\n try:\n raise ValueError\n except:\n print(\"The model has not predicted yet!\")\n else:\n print(\"Prediction Time : {0:04.4f} [sec]\".format(self._prediction_time))\n\nclass InterdependenceModel(problem_transformation_methods_template):\n def __init__(self,clf,prediction_method = \"GIBBS\"):\n\n if ('fit' not in dir(clf)) or ('predict' not in dir(clf)):\n raise ValueError(\"There is not the inner classifier.\")\n\n if prediction_method not in IDD_PRED_METHODS:\n raise ValueError(\"There is not {} as an prediction method for IDD model.\".format(prediction_method))\n\n self._clf = clf\n self._n_classes = None\n self._name = \"Interdependence Model\"\n self._prediction_method = prediction_method\n self._training_time = None\n self._prediction_time = None\n self._wX = None\n self._wS = None\n self._max_n_labels = None\n # (self._constant) is dict type and recode the case that labels are all zero or all one.\n self._constant = {}\n\n def _label_check(self, train_S):\n for (i,row) in enumerate(train_S.T):\n if len(set(row)) < 2:\n self._constant[i] = int(row[0])\n\n def fit(self, train_X, train_S):\n self._training_time = time.time()\n self._label_check(train_S)\n keys = list(self._constant.keys())\n if len(keys) != 0:\n train_S = np.delete(train_S, keys, 1)\n self._n_classes = train_S.shape[1]\n feature_size = train_X.shape[1]\n self._wX = np.zeros([feature_size + 1, self._n_classes])\n self._wS = np.zeros([self._n_classes, self._n_classes])\n\n for i in range(self._n_classes):\n train_Y = train_S[:,i]\n if i == 0:\n new_X = np.c_[train_X,train_S[:,i+1:]]\n elif i == self._n_classes - 1:\n new_X = np.c_[train_X,train_S[:,:-1]]\n else:\n new_X = np.c_[train_X,train_S[:,:i]]\n new_X = np.c_[new_X, train_S[:,i+1:]]\n\n clf = copy.deepcopy(self._clf)\n clf.fit(new_X, train_Y)\n self._wX[1:,i] = clf.coef_[:,:-self._n_classes+1]\n self._wX[0,i] = clf.intercept_\n self._wS[:,i] = np.insert(clf.coef_[:,-self._n_classes+1:], i, 0)\n self._max_n_labels = np.max(train_S.sum(axis = 1))\n self._training_time = time.time() - 
self._training_time\n inner_info = self._clf\n return self._name, inner_info\n\n def predict(self, test_X, prior_knowledge=None, tau = 0.5, N = 10000, conv = 0.00001):\n self._prediction_time = time.time()\n if prior_knowledge is not None:\n org_pr_know = prior_knowledge.copy()\n else:\n org_pr_know = None\n keys = list(self._constant.keys())\n if prior_knowledge is not None and len(keys) != 0:\n prior_knowledge = np.delete(prior_knowledge, keys, 1)\n pred_ = None\n\n def make_bin_vector(n):\n m = self._n_classes\n return np.array(list(map(int,list(format(n, '0{}b'.format(m))))))\n\n def logistic_function(x):\n return 1 / (1 + np.exp(-x))\n\n test_X = np.insert(test_X,0,1,axis = 1)\n bias = test_X.dot(self._wX)\n self.y = np.ones_like(bias)\n\n # GIBBS\n if self._prediction_method == \"GIBBS\":\n if prior_knowledge is None:\n result = np.zeros([test_X.shape[0], self._n_classes])\n r = np.random.uniform(0, 1, N * self._n_classes)\n for i in range(N):\n for j in range(self._n_classes):\n p = logistic_function(bias + self.y.dot(self._wS))\n self.y[:,j] = np.where(p[:,j] > r[i * self._n_classes + j], 1, 0)\n if i > N * 0.01 - 1:\n result += self.y\n if tau < 0:\n pred_ = result / (N * 0.99)\n else:\n pred_ = np.where((result / (N * 0.99)) > tau, 1, 0)\n else: # with prior knowledge\n result = np.zeros([test_X.shape[0], self._n_classes])\n r = np.random.uniform(0, 1, N * self._n_classes)\n for i in range(N):\n for j in range(self._n_classes):\n p = logistic_function(bias + self.y.dot(self._wS))\n self.y[:,j] = np.where(p[:,j] > r[i * self._n_classes + j], 1, 0)\n self.y[:,j] = np.where(prior_knowledge[:,j] == prior_knowledge[:,j],\n prior_knowledge[:,j],\n self.y[:,j])\n if i > N * 0.01 - 1:\n result += self.y\n if tau < 0:\n pred_ = result / (N * 0.99)\n else:\n pred_ = np.where((result / (N * 0.99)) > tau, 1, 0)\n # FPI\n elif self._prediction_method == \"FPI\":\n if prior_knowledge is None:\n for i in range(N):\n old_y = copy.deepcopy(self.y)\n self.y = logistic_function(bias + self.y.dot(self._wS))\n if np.linalg.norm(old_y - self.y) < conv:\n break\n if tau < 0:\n pred_ = self.y\n else:\n pred_ = np.where(self.y > tau, 1, 0)\n else: # with prior knowledge\n for i in range(N):\n self.y = np.where(prior_knowledge == prior_knowledge, prior_knowledge, self.y)\n old_y = copy.deepcopy(self.y)\n self.y = logistic_function(bias + self.y.dot(self._wS))\n if np.linalg.norm(old_y - self.y) < conv:\n break\n if tau < 0:\n self.y = np.where(prior_knowledge == prior_knowledge, prior_knowledge, self.y)\n pred_ = self.y\n else:\n self.y = np.where(prior_knowledge == prior_knowledge, prior_knowledge, self.y)\n pred_ = np.where(self.y > tau, 1, 0)\n # ES with Pruning\n elif self._prediction_method == \"ESwP\":\n inf = float(\"inf\")\n\n def cross_entropy(b, labels):\n logf = logistic_function(b + labels.dot(self._wS))\n one_minus_logf = 1 - logf\n L_now = np.where(labels == 0, -np.log(one_minus_logf), -np.log(logf)).sum()\n return L_now\n\n def future_best(b, now):\n labels = np.copy(now)\n buf = np.where(labels == -1, 0, labels)\n b = b + buf.dot(self._wS)\n w_filter = np.tile(labels, (self.n_classes, 1))\n w = np.where(w_filter != -1, 0, self._wS.T)\n wp = np.where(w > 0, w, 0).sum(axis = 1)\n wm = np.where(w < 0, w, 0).sum(axis = 1)\n fmax = b + wp\n fmin = b + wm\n min1 = -np.log(logistic_function(fmax))\n min0 = -np.log(logistic_function(1 - fmin))\n # label = 0, label = 1, label = unknown\n L = np.where(labels == 0, min0, 0).sum() \\\n + np.where(labels == 1, min1, 0).sum() \\\n + np.where(labels == 
-1, np.minimum(min0, min1), 0).sum()\n return L\n\n def search_best(b, now, i, best_labels, best_L):\n if i == self.n_classes:\n now_L = cross_entropy(b, now)\n if best_L > now_L:\n best_labels = np.copy(now)\n best_L = now_L\n return best_labels, best_L\n\n updated_now = np.copy(now)\n if now[i] != -1:\n # Pruning\n if best_L < future_best(b, updated_now):\n return best_labels, best_L\n else:\n return search_best(b, updated_now, i+1, best_labels, best_L)\n\n if now[i] == -1:\n updated_now[i] = 0\n # Pruning\n if best_L < future_best(b, updated_now):\n pass\n else:\n best_labels, best_L = search_best(b, updated_now, i+1, best_labels, best_L)\n\n updated_now[i] = 1\n # Pruning\n if best_L < future_best(b, updated_now):\n return best_labels, best_L\n else:\n return search_best(b, updated_now, i+1, best_labels, best_L)\n\n if prior_knowledge is None:\n temp = np.ones_like(bias) * -1\n else: # with prior knowledge\n temp = np.where(prior_knowledge == prior_knowledge, prior_knowledge, -1)\n for (j,b) in enumerate(bias):\n now = temp[j]\n init = np.zeros([self.n_classes])\n init_L = cross_entropy(b, init)\n pred, _ = search_best(b, now, 0, init, init_L)\n self.y[j] = pred\n pred_ = self.y.astype(np.int32)\n # ES+ (for Logistic Regression)\n elif self._prediction_method == \"ESwPfLR\":\n inf = float(\"inf\")\n\n def first_pruning(now, b):\n for (j,(a, w)) in enumerate(zip(b, self._wS.T)):\n a = a + np.where(now == 1, w, 0).sum()\n w = np.where(now == 0, 0, w)\n w = np.where(now == 1, 0, w)\n wp = np.where(w > 0, w, 0)\n wm = np.where(w < 0, w, 0)\n maxg = a + wp.sum()\n ming = a + wm.sum()\n maxf = 1 / (1 + np.exp(-maxg))\n minf = 1 / (1 + np.exp(-ming))\n if -np.log(maxf) > -np.log(1 - maxf):\n now[j] = 0\n continue\n if -np.log(1 - minf) > -np.log(minf):\n now[j] = 1\n continue\n return now\n\n def cross_entropy(b, labels):\n logf = logistic_function(b + labels.dot(self._wS))\n one_minus_logf = 1 - logf\n L_now = np.where(labels == 0, -np.log(one_minus_logf), -np.log(logf)).sum()\n return L_now\n\n def future_best(b, now):\n labels = np.copy(now)\n buf = np.where(labels == -1, 0, labels)\n b = b + buf.dot(self._wS)\n w_filter = np.tile(labels, (self.n_classes, 1))\n w = np.where(w_filter != -1, 0, self._wS.T)\n wp = np.where(w > 0, w, 0).sum(axis = 1)\n wm = np.where(w < 0, w, 0).sum(axis = 1)\n fmax = b + wp\n fmin = b + wm\n min1 = -np.log(logistic_function(fmax))\n min0 = -np.log(logistic_function(1 - fmin))\n # label = 0, label = 1, label = unknown\n L = np.where(labels == 0, min0, 0).sum() \\\n + np.where(labels == 1, min1, 0).sum() \\\n + np.where(labels == -1, np.minimum(min0, min1), 0).sum()\n return L\n\n def search_best(b, now, i, best_labels, best_L):\n if i == self.n_classes:\n now_L = cross_entropy(b, now)\n if best_L > now_L:\n best_labels = np.copy(now)\n best_L = now_L\n return best_labels, best_L\n\n updated_now = np.copy(now)\n if now[i] != -1:\n # Pruning\n if best_L < future_best(b, updated_now):\n return best_labels, best_L\n else:\n return search_best(b, updated_now, i+1, best_labels, best_L)\n\n if now[i] == -1:\n updated_now[i] = 0\n # Pruning\n if best_L < future_best(b, updated_now):\n pass\n else:\n best_labels, best_L = search_best(b, updated_now, i+1, best_labels, best_L)\n\n updated_now[i] = 1\n # Pruning\n if best_L < future_best(b, updated_now):\n return best_labels, best_L\n else:\n return search_best(b, updated_now, i+1, best_labels, best_L)\n\n if prior_knowledge is None:\n temp = np.ones_like(bias) * -1\n else: # with prior knowledge\n temp = 
np.where(prior_knowledge == prior_knowledge, prior_knowledge, -1)\n for (j,b) in enumerate(bias):\n now = temp[j]\n while(1):\n last = np.copy(now)\n now = first_pruning(now, b)\n if np.all(now != -1) or np.all(last == now):\n break\n init = np.zeros([self.n_classes])\n init_L = cross_entropy(b, init)\n pred, _ = search_best(b, now, 0, init, init_L)\n self.y[j] = pred\n pred_ = self.y.astype(np.int32)\n\n if len(self._constant) != 0:\n n_instances = test_X.shape[0]\n sorted(self._constant.items(), key=lambda x: x[0])\n for k, v in self._constant.items():\n add_ = np.array([v for _ in range(n_instances)])\n pred_ = np.insert(pred_,k,add_,axis = 1)\n if org_pr_know is not None:\n pred_ = np.where(org_pr_know == org_pr_know, org_pr_know, pred_)\n self._prediction_time = time.time() - self._prediction_time\n return pred_\n\ndef macro_accuracy_score(y_true, y_pred):\n if y_true.shape != y_pred.shape:\n raise ValueError\n land = (np.array(y_true * y_pred)).sum(axis = 1)\n lor = np.where((y_true + y_pred) >= 1, 1, 0).sum(axis = 1)\n score = land / lor\n return score.mean()\n"
] |
[
[
"numpy.log",
"numpy.ones_like",
"numpy.minimum",
"numpy.tile",
"numpy.linalg.norm",
"numpy.all",
"numpy.delete",
"numpy.copy",
"numpy.insert",
"numpy.exp",
"numpy.random.uniform",
"numpy.array",
"numpy.where",
"numpy.zeros"
]
] |
anthonysimeonov/uois
|
[
"5c3bd855f3e24ca4816179de3ca36c3246f29e84"
] |
[
"src/uois/cluster.py"
] |
[
"import numpy as np\nimport torch\n\nfrom abc import ABC, abstractmethod\n\n# my libraries\nfrom .util import utilities as util_\n\n\n### Mean-Shift Clustering (PyTorch) ###\n\ndef euclidean_distances(x, y):\n \"\"\" Computes pairwise distances\n \n @param x: a [n x d] torch.FloatTensor of datapoints\n @param y: a [m x d] torch.FloatTensor of datapoints\n \n @return: a [n x m] torch.FloatTensor of pairwise distances \n \"\"\"\n return torch.norm(x.unsqueeze(1) - y.unsqueeze(0), dim=2)\n\ndef gaussian_kernel(x, y, sigma):\n \"\"\" Computes pairwise Gaussian kernel (without normalizing constant)\n (note this is kernel as defined in non-parametric statistics, not a kernel as in RKHS)\n \n @param x: a [n x d] torch.FloatTensor of datapoints\n @param y: a [m x d] torch.FloatTensor of datapoints\n @param sigma: Gaussian kernel bandwith. \n Either a scalar, or a [1 x m] torch.FloatTensor of datapoints\n \n @return: a [n x m] torch.FloatTensor of pairwise kernel computations, \n without normalizing constant\n \"\"\"\n return torch.exp( - .5 / (sigma**2) * euclidean_distances(x, y)**2 )\n\n\n\n\nclass MeanShift(ABC):\n \"\"\" Base abstract class for Mean Shift algorithms w/ diff kernels\n \"\"\"\n\n def __init__(self, num_seeds=100, max_iters=10, epsilon=1e-2, \n h=1., batch_size=None):\n self.num_seeds = num_seeds\n self.max_iters = max_iters\n self.epsilon = epsilon # connect components parameter\n self.h = h # kernel bandwidth parameter\n if batch_size is None:\n batch_size = 1000\n self.batch_size = batch_size\n\n # This should be a function that computes distances w/ func signature: (x,y)\n self.distance = None \n\n # This should be a function that computes a kernel w/ func signature: (x,y, h)\n self.kernel = None \n\n\n def connected_components(self, Z):\n \"\"\" Compute simple connected components algorithm.\n\n @param Z: a [n x d] torch.FloatTensor of datapoints\n\n @return: a [n] torch.LongTensor of cluster labels\n \"\"\"\n\n n, d = Z.shape\n K = 0\n\n # SAMPLING/GROUPING\n cluster_labels = torch.ones((n,), dtype=torch.long, device=Z.device) * -1\n for i in range(n):\n if cluster_labels[i] == -1:\n\n # Find all points close to it and label it the same\n distances = self.distance(Z, Z[i:i+1]) # Shape: [n x 1]\n component_seeds = distances[:,0] <= self.epsilon\n\n # If at least one component already has a label, then use the mode of the label\n if torch.unique(cluster_labels[component_seeds]).shape[0] > 1:\n temp = cluster_labels[component_seeds]\n temp = temp[temp != -1]\n label = torch.mode(temp)[0]\n else:\n label = torch.tensor(K)\n K += 1 # Increment number of clusters\n cluster_labels[component_seeds] = label.to(Z.device)\n\n return cluster_labels\n # return torch.from_numpy(cluster_labels)\n\n def seed_hill_climbing(self, X, Z):\n \"\"\" Run mean shift hill climbing algorithm on the seeds.\n The seeds climb the distribution given by the KDE of X\n\n @param X: a [n x d] torch.FloatTensor of d-dim unit vectors\n @param Z: a [m x d] torch.FloatTensor of seeds to run mean shift from\n \"\"\"\n\n n, d = X.shape\n m = Z.shape[0]\n\n for _iter in range(self.max_iters):\n\n # Create a new object for Z\n new_Z = Z.clone()\n\n # Compute the update in batches\n for i in range(0, m, self.batch_size):\n W = self.kernel(Z[i:i+self.batch_size], X, self.h) # Shape: [batch_size x n]\n Q = W / W.sum(dim=1, keepdim=True) # Shape: [batch_size x n]\n new_Z[i:i+self.batch_size] = torch.mm(Q, X)\n\n Z = new_Z\n\n return Z\n\n def select_smart_seeds(self, X):\n \"\"\" Randomly select seeds that far away\n\n 
@param X: a [n x d] torch.FloatTensor of d-dim unit vectors\n\n @return: a [num_seeds x d] matrix of seeds\n \"\"\"\n n, d = X.shape\n\n selected_indices = -1 * torch.ones(self.num_seeds, dtype=torch.long)\n\n # Initialize seeds matrix\n seeds = torch.empty((self.num_seeds, d), device=X.device)\n num_chosen_seeds = 0\n\n # Keep track of distances\n distances = torch.empty((n, self.num_seeds), device=X.device)\n\n # Select first seed\n selected_seed_index = np.random.randint(0,n)\n selected_indices[0] = selected_seed_index\n selected_seed = X[selected_seed_index, :]\n seeds[0, :] = selected_seed\n\n distances[:, 0] = self.distance(X, selected_seed.unsqueeze(0))[:,0]\n num_chosen_seeds += 1\n\n # Select rest of seeds\n for i in range(num_chosen_seeds, min(self.num_seeds,n)):\n \n # Find the point that has the furthest distance from the nearest seed\n distance_to_nearest_seed = torch.min(distances[:, :i], dim=1)[0] # Shape: [n]\n # selected_seed_index = torch.argmax(distance_to_nearest_seed)\n selected_seed_index = torch.multinomial(distance_to_nearest_seed, 1)\n selected_indices[i] = selected_seed_index\n selected_seed = torch.index_select(X, 0, selected_seed_index)[0,:]\n seeds[i, :] = selected_seed\n\n # Calculate distance to this selected seed\n distances[:, i] = self.distance(X, selected_seed.unsqueeze(0))[:,0]\n\n return seeds\n\n def mean_shift_with_seeds(self, X, Z):\n \"\"\" Run mean-shift\n\n @param X: a [n x d] torch.FloatTensor of d-dim unit vectors\n @param Z: a [m x d] torch.FloatTensor of seeds to run mean shift from\n \"\"\"\n\n Z = self.seed_hill_climbing(X, Z)\n\n # Connected components\n cluster_labels = self.connected_components(Z)\n\n return cluster_labels, Z\n\n @abstractmethod\n def mean_shift_smart_init(self):\n pass\n\nclass GaussianMeanShift(MeanShift):\n\n def __init__(self, num_seeds=100, max_iters=10, epsilon=0.05, \n sigma=1.0, subsample_factor=1, batch_size=None):\n super().__init__(num_seeds=num_seeds, \n max_iters=max_iters, \n epsilon=epsilon, \n h=sigma, \n batch_size=batch_size)\n self.subsample_factor = subsample_factor # Must be int\n self.distance = euclidean_distances\n self.kernel = gaussian_kernel\n\n def mean_shift_smart_init(self, X, sigmas=None):\n \"\"\" Run mean shift with carefully selected seeds\n\n @param X: a [n x d] torch.FloatTensor of d-dim unit vectors\n @param sigmas: a [n] torch.FLoatTensor of values for per-datapoint sigmas\n If None, use pre-specified value of sigma for all datapoints\n\n @return: a [n] array of cluster labels\n \"\"\"\n subsampled_X = X[::self.subsample_factor, ...] 
# Shape: [n//subsample_factor x d]\n if sigmas is not None:\n subsampled_sigmas = sigmas[::self.subsample_factor] # Shape: [n//subsample_factor]\n self.h = subsampled_sigmas.unsqueeze(0) # Shape: [1 x n//subsample_factor]\n\n # Get the seeds and subsampled points\n seeds = self.select_smart_seeds(subsampled_X)\n\n # Run mean shift\n seed_cluster_labels, updated_seeds = self.mean_shift_with_seeds(subsampled_X, seeds)\n\n # Get distances to updated seeds\n distances = self.distance(X, updated_seeds)\n\n # Get clusters by assigning point to closest seed\n closest_seed_indices = torch.argmin(distances, dim=1) # Shape: [n]\n cluster_labels = seed_cluster_labels[closest_seed_indices]\n\n # Save cluster centers and labels\n uniq_labels = torch.unique(seed_cluster_labels)\n uniq_cluster_centers = torch.zeros((uniq_labels.shape[0], updated_seeds.shape[1]), dtype=torch.float, device=updated_seeds.device)\n for i, label in enumerate(uniq_labels):\n uniq_cluster_centers[i, :] = updated_seeds[seed_cluster_labels == i, :].mean(dim=0)\n self.uniq_cluster_centers = uniq_cluster_centers\n self.uniq_labels = uniq_labels\n\n return cluster_labels.to(X.device) # Put it back on the device\n\n"
] |
[
[
"torch.mm",
"torch.ones",
"torch.empty",
"torch.mode",
"torch.zeros",
"torch.min",
"torch.argmin",
"torch.multinomial",
"torch.tensor",
"torch.unique",
"torch.index_select",
"numpy.random.randint"
]
] |
eric-bonfadini/rasa_nlu
|
[
"eba7abcac4bda325b25fcab13a54f9676a04f562"
] |
[
"rasa/core/policies/ted_policy.py"
] |
[
"from __future__ import annotations\nimport logging\n\nfrom rasa.engine.recipes.default_recipe import DefaultV1Recipe\nfrom pathlib import Path\nfrom collections import defaultdict\nimport contextlib\n\nimport numpy as np\nimport tensorflow as tf\nfrom typing import Any, List, Optional, Text, Dict, Tuple, Union, Type\n\nfrom rasa.engine.graph import ExecutionContext\nfrom rasa.engine.storage.resource import Resource\nfrom rasa.engine.storage.storage import ModelStorage\nfrom rasa.exceptions import ModelNotFound\nfrom rasa.nlu.constants import TOKENS_NAMES\nfrom rasa.nlu.extractors.extractor import EntityTagSpec, EntityExtractorMixin\nimport rasa.core.actions.action\nfrom rasa.core.featurizers.precomputation import MessageContainerForCoreFeaturization\nfrom rasa.core.featurizers.tracker_featurizers import TrackerFeaturizer\nfrom rasa.core.featurizers.tracker_featurizers import MaxHistoryTrackerFeaturizer\nfrom rasa.shared.exceptions import RasaException\nfrom rasa.shared.nlu.constants import (\n ACTION_TEXT,\n ACTION_NAME,\n INTENT,\n TEXT,\n ENTITIES,\n FEATURE_TYPE_SENTENCE,\n ENTITY_ATTRIBUTE_TYPE,\n ENTITY_TAGS,\n EXTRACTOR,\n SPLIT_ENTITIES_BY_COMMA,\n SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,\n)\nfrom rasa.core.policies.policy import PolicyPrediction, Policy, SupportedData\nfrom rasa.core.constants import (\n DIALOGUE,\n POLICY_MAX_HISTORY,\n DEFAULT_MAX_HISTORY,\n DEFAULT_POLICY_PRIORITY,\n POLICY_PRIORITY,\n)\nfrom rasa.shared.constants import DIAGNOSTIC_DATA\nfrom rasa.shared.core.constants import ACTIVE_LOOP, SLOTS, ACTION_LISTEN_NAME\nfrom rasa.shared.core.trackers import DialogueStateTracker\nfrom rasa.shared.core.generator import TrackerWithCachedStates\nfrom rasa.shared.core.events import EntitiesAdded, Event\nfrom rasa.shared.core.domain import Domain\nfrom rasa.shared.nlu.training_data.message import Message\nfrom rasa.shared.nlu.training_data.features import Features\nimport rasa.shared.utils.io\nimport rasa.utils.io\nfrom rasa.utils import train_utils\nfrom rasa.utils.tensorflow.models import RasaModel, TransformerRasaModel\nfrom rasa.utils.tensorflow import rasa_layers\nfrom rasa.utils.tensorflow.model_data import (\n RasaModelData,\n FeatureSignature,\n FeatureArray,\n Data,\n)\nfrom rasa.utils.tensorflow.model_data_utils import convert_to_data_format\nfrom rasa.utils.tensorflow.constants import (\n LABEL,\n IDS,\n TRANSFORMER_SIZE,\n NUM_TRANSFORMER_LAYERS,\n NUM_HEADS,\n BATCH_SIZES,\n BATCH_STRATEGY,\n EPOCHS,\n RANDOM_SEED,\n LEARNING_RATE,\n RANKING_LENGTH,\n RENORMALIZE_CONFIDENCES,\n LOSS_TYPE,\n SIMILARITY_TYPE,\n NUM_NEG,\n EVAL_NUM_EXAMPLES,\n EVAL_NUM_EPOCHS,\n NEGATIVE_MARGIN_SCALE,\n REGULARIZATION_CONSTANT,\n SCALE_LOSS,\n USE_MAX_NEG_SIM,\n MAX_NEG_SIM,\n MAX_POS_SIM,\n EMBEDDING_DIMENSION,\n DROP_RATE_DIALOGUE,\n DROP_RATE_LABEL,\n DROP_RATE,\n DROP_RATE_ATTENTION,\n CONNECTION_DENSITY,\n KEY_RELATIVE_ATTENTION,\n VALUE_RELATIVE_ATTENTION,\n MAX_RELATIVE_POSITION,\n CROSS_ENTROPY,\n AUTO,\n BALANCED,\n TENSORBOARD_LOG_DIR,\n TENSORBOARD_LOG_LEVEL,\n CHECKPOINT_MODEL,\n ENCODING_DIMENSION,\n UNIDIRECTIONAL_ENCODER,\n SEQUENCE,\n SENTENCE,\n SEQUENCE_LENGTH,\n DENSE_DIMENSION,\n CONCAT_DIMENSION,\n SPARSE_INPUT_DROPOUT,\n DENSE_INPUT_DROPOUT,\n MASKED_LM,\n MASK,\n HIDDEN_LAYERS_SIZES,\n FEATURIZERS,\n ENTITY_RECOGNITION,\n CONSTRAIN_SIMILARITIES,\n MODEL_CONFIDENCE,\n SOFTMAX,\n BILOU_FLAG,\n EPOCH_OVERRIDE,\n USE_GPU,\n)\n\n\nlogger = logging.getLogger(__name__)\n\nE2E_CONFIDENCE_THRESHOLD = \"e2e_confidence_threshold\"\nLABEL_KEY = LABEL\nLABEL_SUB_KEY = 
IDS\nLENGTH = \"length\"\nINDICES = \"indices\"\nSENTENCE_FEATURES_TO_ENCODE = [INTENT, TEXT, ACTION_NAME, ACTION_TEXT]\nSEQUENCE_FEATURES_TO_ENCODE = [TEXT, ACTION_TEXT, f\"{LABEL}_{ACTION_TEXT}\"]\nLABEL_FEATURES_TO_ENCODE = [\n f\"{LABEL}_{ACTION_NAME}\",\n f\"{LABEL}_{ACTION_TEXT}\",\n f\"{LABEL}_{INTENT}\",\n]\nSTATE_LEVEL_FEATURES = [ENTITIES, SLOTS, ACTIVE_LOOP]\nPREDICTION_FEATURES = STATE_LEVEL_FEATURES + SENTENCE_FEATURES_TO_ENCODE + [DIALOGUE]\n\n\[email protected](\n DefaultV1Recipe.ComponentType.POLICY_WITH_END_TO_END_SUPPORT, is_trainable=True\n)\nclass TEDPolicy(Policy):\n \"\"\"Transformer Embedding Dialogue (TED) Policy.\n\n The model architecture is described in\n detail in https://arxiv.org/abs/1910.00486.\n In summary, the architecture comprises of the\n following steps:\n - concatenate user input (user intent and entities), previous system actions,\n slots and active forms for each time step into an input vector to\n pre-transformer embedding layer;\n - feed it to transformer;\n - apply a dense layer to the output of the transformer to get embeddings of a\n dialogue for each time step;\n - apply a dense layer to create embeddings for system actions for each time\n step;\n - calculate the similarity between the dialogue embedding and embedded system\n actions. This step is based on the StarSpace\n (https://arxiv.org/abs/1709.03856) idea.\n \"\"\"\n\n @staticmethod\n def get_default_config() -> Dict[Text, Any]:\n \"\"\"Returns the default config (see parent class for full docstring).\"\"\"\n # please make sure to update the docs when changing a default parameter\n return {\n # ## Architecture of the used neural network\n # Hidden layer sizes for layers before the embedding layers for user message\n # and labels.\n # The number of hidden layers is equal to the length of the corresponding\n # list.\n HIDDEN_LAYERS_SIZES: {\n TEXT: [],\n ACTION_TEXT: [],\n f\"{LABEL}_{ACTION_TEXT}\": [],\n },\n # Dense dimension to use for sparse features.\n DENSE_DIMENSION: {\n TEXT: 128,\n ACTION_TEXT: 128,\n f\"{LABEL}_{ACTION_TEXT}\": 128,\n INTENT: 20,\n ACTION_NAME: 20,\n f\"{LABEL}_{ACTION_NAME}\": 20,\n ENTITIES: 20,\n SLOTS: 20,\n ACTIVE_LOOP: 20,\n },\n # Default dimension to use for concatenating sequence and sentence features.\n CONCAT_DIMENSION: {\n TEXT: 128,\n ACTION_TEXT: 128,\n f\"{LABEL}_{ACTION_TEXT}\": 128,\n },\n # Dimension size of embedding vectors before the dialogue transformer\n # encoder.\n ENCODING_DIMENSION: 50,\n # Number of units in transformer encoders\n TRANSFORMER_SIZE: {\n TEXT: 128,\n ACTION_TEXT: 128,\n f\"{LABEL}_{ACTION_TEXT}\": 128,\n DIALOGUE: 128,\n },\n # Number of layers in transformer encoders\n NUM_TRANSFORMER_LAYERS: {\n TEXT: 1,\n ACTION_TEXT: 1,\n f\"{LABEL}_{ACTION_TEXT}\": 1,\n DIALOGUE: 1,\n },\n # Number of attention heads in transformer\n NUM_HEADS: 4,\n # If 'True' use key relative embeddings in attention\n KEY_RELATIVE_ATTENTION: False,\n # If 'True' use value relative embeddings in attention\n VALUE_RELATIVE_ATTENTION: False,\n # Max position for relative embeddings. 
Only in effect if key- or value\n # relative\n # attention are turned on\n MAX_RELATIVE_POSITION: 5,\n # Use a unidirectional or bidirectional encoder\n # for `text`, `action_text`, and `label_action_text`.\n UNIDIRECTIONAL_ENCODER: False,\n # ## Training parameters\n # Initial and final batch sizes:\n # Batch size will be linearly increased for each epoch.\n BATCH_SIZES: [64, 256],\n # Strategy used whenc creating batches.\n # Can be either 'sequence' or 'balanced'.\n BATCH_STRATEGY: BALANCED,\n # Number of epochs to train\n EPOCHS: 1,\n # Set random seed to any 'int' to get reproducible results\n RANDOM_SEED: None,\n # Initial learning rate for the optimizer\n LEARNING_RATE: 0.001,\n # ## Parameters for embeddings\n # Dimension size of embedding vectors\n EMBEDDING_DIMENSION: 20,\n # The number of incorrect labels. The algorithm will minimize\n # their similarity to the user input during training.\n NUM_NEG: 20,\n # Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'.\n SIMILARITY_TYPE: AUTO,\n # The type of the loss function, either 'cross_entropy' or 'margin'.\n LOSS_TYPE: CROSS_ENTROPY,\n # Number of top actions for which confidences should be predicted.\n # The number of Set to `0` if confidences for all actions should be\n # predicted. The confidences for all other actions will be set to 0.\n RANKING_LENGTH: 0,\n # Determines wether the confidences of the chosen top actions should be\n # renormalized so that they sum up to 1. By default, we do not renormalize\n # and return the confidences for the top actions as is.\n # Note that renormalization only makes sense if confidences are generated\n # via `softmax`.\n RENORMALIZE_CONFIDENCES: False,\n # Indicates how similar the algorithm should try to make embedding vectors\n # for correct labels.\n # Should be 0.0 < ... < 1.0 for 'cosine' similarity type.\n MAX_POS_SIM: 0.8,\n # Maximum negative similarity for incorrect labels.\n # Should be -1.0 < ... < 1.0 for 'cosine' similarity type.\n MAX_NEG_SIM: -0.2,\n # If 'True' the algorithm only minimizes maximum similarity over\n # incorrect intent labels, used only if 'loss_type' is set to 'margin'.\n USE_MAX_NEG_SIM: True,\n # If 'True' scale loss inverse proportionally to the confidence\n # of the correct prediction\n SCALE_LOSS: True,\n # ## Regularization parameters\n # The scale of regularization\n REGULARIZATION_CONSTANT: 0.001,\n # The scale of how important is to minimize the maximum similarity\n # between embeddings of different labels,\n # used only if 'loss_type' is set to 'margin'.\n NEGATIVE_MARGIN_SCALE: 0.8,\n # Dropout rate for embedding layers of dialogue features.\n DROP_RATE_DIALOGUE: 0.1,\n # Dropout rate for embedding layers of utterance level features.\n DROP_RATE: 0.0,\n # Dropout rate for embedding layers of label, e.g. action, features.\n DROP_RATE_LABEL: 0.0,\n # Dropout rate for attention.\n DROP_RATE_ATTENTION: 0.0,\n # Fraction of trainable weights in internal layers.\n CONNECTION_DENSITY: 0.2,\n # If 'True' apply dropout to sparse input tensors\n SPARSE_INPUT_DROPOUT: True,\n # If 'True' apply dropout to dense input tensors\n DENSE_INPUT_DROPOUT: True,\n # If 'True' random tokens of the input message will be masked. 
Since there\n # is no related loss term used inside TED, the masking effectively becomes\n # just input dropout applied to the text of user utterances.\n MASKED_LM: False,\n # ## Evaluation parameters\n # How often calculate validation accuracy.\n # Small values may hurt performance.\n EVAL_NUM_EPOCHS: 20,\n # How many examples to use for hold out validation set\n # Large values may hurt performance, e.g. model accuracy.\n # Set to 0 for no validation.\n EVAL_NUM_EXAMPLES: 0,\n # If you want to use tensorboard to visualize training and validation\n # metrics, set this option to a valid output directory.\n TENSORBOARD_LOG_DIR: None,\n # Define when training metrics for tensorboard should be logged.\n # Either after every epoch or for every training step.\n # Valid values: 'epoch' and 'batch'\n TENSORBOARD_LOG_LEVEL: \"epoch\",\n # Perform model checkpointing\n CHECKPOINT_MODEL: False,\n # Only pick e2e prediction if the policy is confident enough\n E2E_CONFIDENCE_THRESHOLD: 0.5,\n # Specify what features to use as sequence and sentence features.\n # By default all features in the pipeline are used.\n FEATURIZERS: [],\n # If set to true, entities are predicted in user utterances.\n ENTITY_RECOGNITION: True,\n # if 'True' applies sigmoid on all similarity terms and adds\n # it to the loss function to ensure that similarity values are\n # approximately bounded. Used inside cross-entropy loss only.\n CONSTRAIN_SIMILARITIES: False,\n # Model confidence to be returned during inference. Currently, the only\n # possible value is `softmax`.\n MODEL_CONFIDENCE: SOFTMAX,\n # 'BILOU_flag' determines whether to use BILOU tagging or not.\n # If set to 'True' labelling is more rigorous, however more\n # examples per entity are required.\n # Rule of thumb: you should have more than 100 examples per entity.\n BILOU_FLAG: True,\n # Split entities by comma, this makes sense e.g. 
for a list of\n # ingredients in a recipe, but it doesn't make sense for the parts of\n # an address\n SPLIT_ENTITIES_BY_COMMA: SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,\n # Max history of the policy, unbounded by default\n POLICY_MAX_HISTORY: DEFAULT_MAX_HISTORY,\n # Determines the importance of policies, higher values take precedence\n POLICY_PRIORITY: DEFAULT_POLICY_PRIORITY,\n USE_GPU: True,\n }\n\n def __init__(\n self,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n model: Optional[RasaModel] = None,\n featurizer: Optional[TrackerFeaturizer] = None,\n fake_features: Optional[Dict[Text, List[Features]]] = None,\n entity_tag_specs: Optional[List[EntityTagSpec]] = None,\n ) -> None:\n \"\"\"Declares instance variables with default values.\"\"\"\n super().__init__(\n config, model_storage, resource, execution_context, featurizer=featurizer\n )\n self.split_entities_config = rasa.utils.train_utils.init_split_entities(\n config[SPLIT_ENTITIES_BY_COMMA], SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE\n )\n self._load_params(config)\n\n self.model = model\n\n self._entity_tag_specs = entity_tag_specs\n\n self.fake_features = fake_features or defaultdict(list)\n # TED is only e2e if only text is present in fake features, which represent\n # all possible input features for current version of this trained ted\n self.only_e2e = TEXT in self.fake_features and INTENT not in self.fake_features\n\n self._label_data: Optional[RasaModelData] = None\n self.data_example: Optional[Dict[Text, Dict[Text, List[FeatureArray]]]] = None\n\n self.tmp_checkpoint_dir = None\n if self.config[CHECKPOINT_MODEL]:\n self.tmp_checkpoint_dir = Path(rasa.utils.io.create_temporary_directory())\n\n @staticmethod\n def model_class() -> Type[TED]:\n \"\"\"Gets the class of the model architecture to be used by the policy.\n\n Returns:\n Required class.\n \"\"\"\n return TED\n\n @classmethod\n def _metadata_filename(cls) -> Optional[Text]:\n return \"ted_policy\"\n\n def _load_params(self, config: Dict[Text, Any]) -> None:\n new_config = rasa.utils.train_utils.check_core_deprecated_options(config)\n self.config = new_config\n self._auto_update_configuration()\n\n def _auto_update_configuration(self) -> None:\n \"\"\"Takes care of deprecations and compatibility of parameters.\"\"\"\n self.config = rasa.utils.train_utils.update_confidence_type(self.config)\n rasa.utils.train_utils.validate_configuration_settings(self.config)\n self.config = rasa.utils.train_utils.update_similarity_type(self.config)\n self.config = rasa.utils.train_utils.update_evaluation_parameters(self.config)\n\n def _create_label_data(\n self,\n domain: Domain,\n precomputations: Optional[MessageContainerForCoreFeaturization],\n ) -> Tuple[RasaModelData, List[Dict[Text, List[Features]]]]:\n # encode all label_ids with policies' featurizer\n state_featurizer = self.featurizer.state_featurizer\n encoded_all_labels = (\n state_featurizer.encode_all_labels(domain, precomputations)\n if state_featurizer is not None\n else []\n )\n\n attribute_data, _ = convert_to_data_format(\n encoded_all_labels, featurizers=self.config[FEATURIZERS]\n )\n\n label_data = self._assemble_label_data(attribute_data, domain)\n\n return label_data, encoded_all_labels\n\n def _assemble_label_data(\n self, attribute_data: Data, domain: Domain\n ) -> RasaModelData:\n \"\"\"Constructs data regarding labels to be fed to the model.\n\n The resultant model data can possibly contain one or both of the\n keys - [`label_action_name`, 
`label_action_text`] but will definitely\n contain the `label` key.\n `label_action_*` will contain the sequence, sentence and mask features\n for corresponding labels and `label` will contain the numerical label ids.\n\n Args:\n attribute_data: Feature data for all labels.\n domain: Domain of the assistant.\n\n Returns:\n Features of labels ready to be fed to the model.\n \"\"\"\n label_data = RasaModelData()\n label_data.add_data(attribute_data, key_prefix=f\"{LABEL_KEY}_\")\n label_data.add_lengths(\n f\"{LABEL}_{ACTION_TEXT}\",\n SEQUENCE_LENGTH,\n f\"{LABEL}_{ACTION_TEXT}\",\n SEQUENCE,\n )\n label_ids = np.arange(domain.num_actions)\n label_data.add_features(\n LABEL_KEY,\n LABEL_SUB_KEY,\n [FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],\n )\n return label_data\n\n @staticmethod\n def _should_extract_entities(\n entity_tags: List[List[Dict[Text, List[Features]]]]\n ) -> bool:\n for turns_tags in entity_tags:\n for turn_tags in turns_tags:\n # if turn_tags are empty or all entity tag indices are `0`\n # it means that all the inputs only contain NO_ENTITY_TAG\n if turn_tags and np.any(turn_tags[ENTITY_TAGS][0].features):\n return True\n return False\n\n def _create_data_for_entities(\n self, entity_tags: Optional[List[List[Dict[Text, List[Features]]]]]\n ) -> Optional[Data]:\n if not self.config[ENTITY_RECOGNITION]:\n return None\n\n # check that there are real entity tags\n if entity_tags and self._should_extract_entities(entity_tags):\n entity_tags_data, _ = convert_to_data_format(entity_tags)\n return entity_tags_data\n\n # there are no \"real\" entity tags\n logger.debug(\n f\"Entity recognition cannot be performed, \"\n f\"set '{ENTITY_RECOGNITION}' config parameter to 'False'.\"\n )\n self.config[ENTITY_RECOGNITION] = False\n\n return None\n\n def _create_model_data(\n self,\n tracker_state_features: List[List[Dict[Text, List[Features]]]],\n label_ids: Optional[np.ndarray] = None,\n entity_tags: Optional[List[List[Dict[Text, List[Features]]]]] = None,\n encoded_all_labels: Optional[List[Dict[Text, List[Features]]]] = None,\n ) -> RasaModelData:\n \"\"\"Combine all model related data into RasaModelData.\n\n Args:\n tracker_state_features: a dictionary of attributes\n (INTENT, TEXT, ACTION_NAME, ACTION_TEXT, ENTITIES, SLOTS, ACTIVE_LOOP)\n to a list of features for all dialogue turns in all training trackers\n label_ids: the label ids (e.g. 
action ids) for every dialogue turn in all\n training trackers\n entity_tags: a dictionary of entity type (ENTITY_TAGS) to a list of features\n containing entity tag ids for text user inputs otherwise empty dict\n for all dialogue turns in all training trackers\n encoded_all_labels: a list of dictionaries containing attribute features\n for label ids\n\n Returns:\n RasaModelData\n \"\"\"\n model_data = RasaModelData(label_key=LABEL_KEY, label_sub_key=LABEL_SUB_KEY)\n\n if label_ids is not None and encoded_all_labels is not None:\n label_ids = np.array(\n [np.expand_dims(seq_label_ids, -1) for seq_label_ids in label_ids]\n )\n model_data.add_features(\n LABEL_KEY,\n LABEL_SUB_KEY,\n [FeatureArray(label_ids, number_of_dimensions=3)],\n )\n\n attribute_data, self.fake_features = convert_to_data_format(\n tracker_state_features, featurizers=self.config[FEATURIZERS]\n )\n\n entity_tags_data = self._create_data_for_entities(entity_tags)\n if entity_tags_data is not None:\n model_data.add_data(entity_tags_data)\n else:\n # method is called during prediction\n attribute_data, _ = convert_to_data_format(\n tracker_state_features,\n self.fake_features,\n featurizers=self.config[FEATURIZERS],\n )\n\n model_data.add_data(attribute_data)\n model_data.add_lengths(TEXT, SEQUENCE_LENGTH, TEXT, SEQUENCE)\n model_data.add_lengths(ACTION_TEXT, SEQUENCE_LENGTH, ACTION_TEXT, SEQUENCE)\n\n # add the dialogue lengths\n attribute_present = next(iter(list(attribute_data.keys())))\n dialogue_lengths = np.array(\n [\n np.size(np.squeeze(f, -1))\n for f in model_data.data[attribute_present][MASK][0]\n ]\n )\n model_data.data[DIALOGUE][LENGTH] = [\n FeatureArray(dialogue_lengths, number_of_dimensions=1)\n ]\n\n # make sure all keys are in the same order during training and prediction\n model_data.sort()\n\n return model_data\n\n @staticmethod\n def _get_trackers_for_training(\n trackers: List[TrackerWithCachedStates],\n ) -> List[TrackerWithCachedStates]:\n \"\"\"Filters out the list of trackers which should not be used for training.\n\n Args:\n trackers: All trackers available for training.\n\n Returns:\n Trackers which should be used for training.\n \"\"\"\n # By default, we train on all available trackers.\n return trackers\n\n def _prepare_for_training(\n self,\n trackers: List[TrackerWithCachedStates],\n domain: Domain,\n precomputations: MessageContainerForCoreFeaturization,\n **kwargs: Any,\n ) -> Tuple[RasaModelData, np.ndarray]:\n \"\"\"Prepares data to be fed into the model.\n\n Args:\n trackers: List of training trackers to be featurized.\n domain: Domain of the assistant.\n precomputations: Contains precomputed features and attributes.\n **kwargs: Any other arguments.\n\n Returns:\n Featurized data to be fed to the model and corresponding label ids.\n \"\"\"\n training_trackers = self._get_trackers_for_training(trackers)\n # dealing with training data\n tracker_state_features, label_ids, entity_tags = self._featurize_for_training(\n training_trackers,\n domain,\n precomputations=precomputations,\n bilou_tagging=self.config[BILOU_FLAG],\n **kwargs,\n )\n\n if not tracker_state_features:\n return RasaModelData(), label_ids\n\n self._label_data, encoded_all_labels = self._create_label_data(\n domain, precomputations=precomputations\n )\n\n # extract actual training data to feed to model\n model_data = self._create_model_data(\n tracker_state_features, label_ids, entity_tags, encoded_all_labels\n )\n\n if self.config[ENTITY_RECOGNITION]:\n self._entity_tag_specs = (\n 
self.featurizer.state_featurizer.entity_tag_specs\n if self.featurizer.state_featurizer is not None\n else []\n )\n\n # keep one example for persisting and loading\n self.data_example = model_data.first_data_example()\n\n return model_data, label_ids\n\n def run_training(\n self, model_data: RasaModelData, label_ids: Optional[np.ndarray] = None\n ) -> None:\n \"\"\"Feeds the featurized training data to the model.\n\n Args:\n model_data: Featurized training data.\n label_ids: Label ids corresponding to the data points in `model_data`.\n These may or may not be used by the function depending\n on how the policy is trained.\n \"\"\"\n if not self.finetune_mode:\n # This means the model wasn't loaded from a\n # previously trained model and hence needs\n # to be instantiated.\n self.model = self.model_class()(\n model_data.get_signature(),\n self.config,\n isinstance(self.featurizer, MaxHistoryTrackerFeaturizer),\n self._label_data,\n self._entity_tag_specs,\n )\n self.model.compile(\n optimizer=tf.keras.optimizers.Adam(self.config[LEARNING_RATE])\n )\n (\n data_generator,\n validation_data_generator,\n ) = rasa.utils.train_utils.create_data_generators(\n model_data,\n self.config[BATCH_SIZES],\n self.config[EPOCHS],\n self.config[BATCH_STRATEGY],\n self.config[EVAL_NUM_EXAMPLES],\n self.config[RANDOM_SEED],\n )\n callbacks = rasa.utils.train_utils.create_common_callbacks(\n self.config[EPOCHS],\n self.config[TENSORBOARD_LOG_DIR],\n self.config[TENSORBOARD_LOG_LEVEL],\n self.tmp_checkpoint_dir,\n )\n\n if self.model is None:\n raise ModelNotFound(\"No model was detected prior to training.\")\n\n self.model.fit(\n data_generator,\n epochs=self.config[EPOCHS],\n validation_data=validation_data_generator,\n validation_freq=self.config[EVAL_NUM_EPOCHS],\n callbacks=callbacks,\n verbose=False,\n shuffle=False, # we use custom shuffle inside data generator\n )\n\n def train(\n self,\n training_trackers: List[TrackerWithCachedStates],\n domain: Domain,\n precomputations: Optional[MessageContainerForCoreFeaturization] = None,\n **kwargs: Any,\n ) -> Resource:\n \"\"\"Trains the policy (see parent class for full docstring).\"\"\"\n if not training_trackers:\n rasa.shared.utils.io.raise_warning(\n f\"Skipping training of `{self.__class__.__name__}` \"\n f\"as no data was provided. You can exclude this \"\n f\"policy in the configuration \"\n f\"file to avoid this warning.\",\n category=UserWarning,\n )\n return self._resource\n\n training_trackers = SupportedData.trackers_for_supported_data(\n self.supported_data(), training_trackers\n )\n\n model_data, label_ids = self._prepare_for_training(\n training_trackers, domain, precomputations\n )\n\n if model_data.is_empty():\n rasa.shared.utils.io.raise_warning(\n f\"Skipping training of `{self.__class__.__name__}` \"\n f\"as no data was provided. 
You can exclude this \"\n f\"policy in the configuration \"\n f\"file to avoid this warning.\",\n category=UserWarning,\n )\n return self._resource\n\n with (\n contextlib.nullcontext() if self.config[\"use_gpu\"] else tf.device(\"/cpu:0\")\n ):\n self.run_training(model_data, label_ids)\n\n self.persist()\n\n return self._resource\n\n def _featurize_tracker(\n self,\n tracker: DialogueStateTracker,\n domain: Domain,\n precomputations: Optional[MessageContainerForCoreFeaturization],\n rule_only_data: Optional[Dict[Text, Any]],\n ) -> List[List[Dict[Text, List[Features]]]]:\n # construct two examples in the batch to be fed to the model -\n # one by featurizing last user text\n # and second - an optional one (see conditions below),\n # the first example in the constructed batch either does not contain user input\n # or uses intent or text based on whether TED is e2e only.\n tracker_state_features = self._featurize_for_prediction(\n tracker,\n domain,\n precomputations=precomputations,\n use_text_for_last_user_input=self.only_e2e,\n rule_only_data=rule_only_data,\n )\n # the second - text, but only after user utterance and if not only e2e\n if (\n tracker.latest_action_name == ACTION_LISTEN_NAME\n and TEXT in self.fake_features\n and not self.only_e2e\n ):\n tracker_state_features += self._featurize_for_prediction(\n tracker,\n domain,\n precomputations=precomputations,\n use_text_for_last_user_input=True,\n rule_only_data=rule_only_data,\n )\n return tracker_state_features\n\n def _pick_confidence(\n self, confidences: np.ndarray, similarities: np.ndarray, domain: Domain\n ) -> Tuple[np.ndarray, bool]:\n # the confidences and similarities have shape (batch-size x number of actions)\n # batch-size can only be 1 or 2;\n # in the case batch-size==2, the first example contain user intent as features,\n # the second - user text as features\n if confidences.shape[0] > 2:\n raise ValueError(\n \"We cannot pick prediction from batches of size more than 2.\"\n )\n # we use heuristic to pick correct prediction\n if confidences.shape[0] == 2:\n # we use similarities to pick appropriate input,\n # since it seems to be more accurate measure,\n # policy is trained to maximize the similarity not the confidence\n non_e2e_action_name = domain.action_names_or_texts[\n np.argmax(confidences[0])\n ]\n logger.debug(f\"User intent lead to '{non_e2e_action_name}'.\")\n e2e_action_name = domain.action_names_or_texts[np.argmax(confidences[1])]\n logger.debug(f\"User text lead to '{e2e_action_name}'.\")\n if (\n np.max(confidences[1]) > self.config[E2E_CONFIDENCE_THRESHOLD]\n # TODO maybe compare confidences is better\n and np.max(similarities[1]) > np.max(similarities[0])\n ):\n logger.debug(f\"TED predicted '{e2e_action_name}' based on user text.\")\n return confidences[1], True\n\n logger.debug(f\"TED predicted '{non_e2e_action_name}' based on user intent.\")\n return confidences[0], False\n\n # by default the first example in a batch is the one to use for prediction\n predicted_action_name = domain.action_names_or_texts[np.argmax(confidences[0])]\n basis_for_prediction = \"text\" if self.only_e2e else \"intent\"\n logger.debug(\n f\"TED predicted '{predicted_action_name}' \"\n f\"based on user {basis_for_prediction}.\"\n )\n return confidences[0], self.only_e2e\n\n def predict_action_probabilities(\n self,\n tracker: DialogueStateTracker,\n domain: Domain,\n rule_only_data: Optional[Dict[Text, Any]] = None,\n precomputations: Optional[MessageContainerForCoreFeaturization] = None,\n **kwargs: Any,\n ) -> 
PolicyPrediction:\n \"\"\"Predicts the next action (see parent class for full docstring).\"\"\"\n if self.model is None:\n return self._prediction(self._default_predictions(domain))\n\n # create model data from tracker\n tracker_state_features = self._featurize_tracker(\n tracker, domain, precomputations, rule_only_data=rule_only_data\n )\n model_data = self._create_model_data(tracker_state_features)\n outputs: Dict[Text, np.ndarray] = self.model.run_inference(model_data)\n\n # take the last prediction in the sequence\n similarities = outputs[\"similarities\"][:, -1, :]\n confidences = outputs[\"scores\"][:, -1, :]\n # take correct prediction from batch\n confidence, is_e2e_prediction = self._pick_confidence(\n confidences, similarities, domain\n )\n\n # rank and mask the confidence (if we need to)\n ranking_length = self.config[RANKING_LENGTH]\n if 0 < ranking_length < len(confidence):\n renormalize = (\n self.config[RENORMALIZE_CONFIDENCES]\n and self.config[MODEL_CONFIDENCE] == SOFTMAX\n )\n _, confidence = train_utils.rank_and_mask(\n confidence, ranking_length=ranking_length, renormalize=renormalize\n )\n\n optional_events = self._create_optional_event_for_entities(\n outputs, is_e2e_prediction, precomputations, tracker\n )\n\n return self._prediction(\n confidence.tolist(),\n is_end_to_end_prediction=is_e2e_prediction,\n optional_events=optional_events,\n diagnostic_data=outputs.get(DIAGNOSTIC_DATA),\n )\n\n def _create_optional_event_for_entities(\n self,\n prediction_output: Dict[Text, tf.Tensor],\n is_e2e_prediction: bool,\n precomputations: Optional[MessageContainerForCoreFeaturization],\n tracker: DialogueStateTracker,\n ) -> Optional[List[Event]]:\n if tracker.latest_action_name != ACTION_LISTEN_NAME or not is_e2e_prediction:\n # entities belong only to the last user message\n # and only if user text was used for prediction,\n # a user message always comes after action listen\n return None\n\n if not self.config[ENTITY_RECOGNITION]:\n # entity recognition is not turned on, no entities can be predicted\n return None\n\n # The batch dimension of entity prediction is not the same as batch size,\n # rather it is the number of last (if max history featurizer else all)\n # text inputs in the batch\n # therefore, in order to pick entities from the latest user message\n # we need to pick entities from the last batch dimension of entity prediction\n predicted_tags, confidence_values = rasa.utils.train_utils.entity_label_to_tags(\n prediction_output,\n self._entity_tag_specs,\n self.config[BILOU_FLAG],\n prediction_index=-1,\n )\n\n if ENTITY_ATTRIBUTE_TYPE not in predicted_tags:\n # no entities detected\n return None\n\n # entities belong to the last message of the tracker\n # convert the predicted tags to actual entities\n text = tracker.latest_message.text if tracker.latest_message is not None else \"\"\n if precomputations is not None:\n parsed_message = precomputations.lookup_message(user_text=text)\n else:\n parsed_message = Message(data={TEXT: text})\n tokens = parsed_message.get(TOKENS_NAMES[TEXT])\n entities = EntityExtractorMixin.convert_predictions_into_entities(\n text,\n tokens,\n predicted_tags,\n self.split_entities_config,\n confidences=confidence_values,\n )\n\n # add the extractor name\n for entity in entities:\n entity[EXTRACTOR] = \"TEDPolicy\"\n\n return [EntitiesAdded(entities)]\n\n def persist(self) -> None:\n \"\"\"Persists the policy to a storage.\"\"\"\n if self.model is None:\n logger.debug(\n \"Method `persist(...)` was called without a trained model present. 
\"\n \"Nothing to persist then!\"\n )\n return\n\n with self._model_storage.write_to(self._resource) as model_path:\n model_filename = self._metadata_filename()\n tf_model_file = model_path / f\"{model_filename}.tf_model\"\n\n rasa.shared.utils.io.create_directory_for_file(tf_model_file)\n\n self.featurizer.persist(model_path)\n\n if self.config[CHECKPOINT_MODEL] and self.tmp_checkpoint_dir:\n self.model.load_weights(self.tmp_checkpoint_dir / \"checkpoint.tf_model\")\n # Save an empty file to flag that this model has been\n # produced using checkpointing\n checkpoint_marker = model_path / f\"{model_filename}.from_checkpoint.pkl\"\n checkpoint_marker.touch()\n\n self.model.save(str(tf_model_file))\n\n self.persist_model_utilities(model_path)\n\n def persist_model_utilities(self, model_path: Path) -> None:\n \"\"\"Persists model's utility attributes like model weights, etc.\n\n Args:\n model_path: Path where model is to be persisted\n \"\"\"\n model_filename = self._metadata_filename()\n rasa.utils.io.json_pickle(\n model_path / f\"{model_filename}.priority.pkl\", self.priority\n )\n rasa.utils.io.pickle_dump(\n model_path / f\"{model_filename}.meta.pkl\", self.config\n )\n rasa.utils.io.pickle_dump(\n model_path / f\"{model_filename}.data_example.pkl\", self.data_example\n )\n rasa.utils.io.pickle_dump(\n model_path / f\"{model_filename}.fake_features.pkl\", self.fake_features\n )\n rasa.utils.io.pickle_dump(\n model_path / f\"{model_filename}.label_data.pkl\",\n dict(self._label_data.data) if self._label_data is not None else {},\n )\n entity_tag_specs = (\n [tag_spec._asdict() for tag_spec in self._entity_tag_specs]\n if self._entity_tag_specs\n else []\n )\n rasa.shared.utils.io.dump_obj_as_json_to_file(\n model_path / f\"{model_filename}.entity_tag_specs.json\", entity_tag_specs\n )\n\n @classmethod\n def _load_model_utilities(cls, model_path: Path) -> Dict[Text, Any]:\n \"\"\"Loads model's utility attributes.\n\n Args:\n model_path: Path where model is to be persisted.\n \"\"\"\n tf_model_file = model_path / f\"{cls._metadata_filename()}.tf_model\"\n loaded_data = rasa.utils.io.pickle_load(\n model_path / f\"{cls._metadata_filename()}.data_example.pkl\"\n )\n label_data = rasa.utils.io.pickle_load(\n model_path / f\"{cls._metadata_filename()}.label_data.pkl\"\n )\n fake_features = rasa.utils.io.pickle_load(\n model_path / f\"{cls._metadata_filename()}.fake_features.pkl\"\n )\n label_data = RasaModelData(data=label_data)\n priority = rasa.utils.io.json_unpickle(\n model_path / f\"{cls._metadata_filename()}.priority.pkl\"\n )\n entity_tag_specs = rasa.shared.utils.io.read_json_file(\n model_path / f\"{cls._metadata_filename()}.entity_tag_specs.json\"\n )\n entity_tag_specs = [\n EntityTagSpec(\n tag_name=tag_spec[\"tag_name\"],\n ids_to_tags={\n int(key): value for key, value in tag_spec[\"ids_to_tags\"].items()\n },\n tags_to_ids={\n key: int(value) for key, value in tag_spec[\"tags_to_ids\"].items()\n },\n num_tags=tag_spec[\"num_tags\"],\n )\n for tag_spec in entity_tag_specs\n ]\n model_config = rasa.utils.io.pickle_load(\n model_path / f\"{cls._metadata_filename()}.meta.pkl\"\n )\n\n return {\n \"tf_model_file\": tf_model_file,\n \"loaded_data\": loaded_data,\n \"fake_features\": fake_features,\n \"label_data\": label_data,\n \"priority\": priority,\n \"entity_tag_specs\": entity_tag_specs,\n \"model_config\": model_config,\n }\n\n @classmethod\n def load(\n cls,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n 
**kwargs: Any,\n ) -> TEDPolicy:\n \"\"\"Loads a policy from the storage (see parent class for full docstring).\"\"\"\n try:\n with model_storage.read_from(resource) as model_path:\n return cls._load(\n model_path, config, model_storage, resource, execution_context\n )\n except ValueError:\n logger.debug(\n f\"Failed to load {cls.__class__.__name__} from model storage. Resource \"\n f\"'{resource.name}' doesn't exist.\"\n )\n return cls(config, model_storage, resource, execution_context)\n\n @classmethod\n def _load(\n cls,\n model_path: Path,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n ) -> TEDPolicy:\n featurizer = TrackerFeaturizer.load(model_path)\n\n if not (model_path / f\"{cls._metadata_filename()}.data_example.pkl\").is_file():\n return cls(\n config,\n model_storage,\n resource,\n execution_context,\n featurizer=featurizer,\n )\n\n model_utilities = cls._load_model_utilities(model_path)\n\n config = cls._update_loaded_params(config)\n if execution_context.is_finetuning and EPOCH_OVERRIDE in config:\n config[EPOCHS] = config.get(EPOCH_OVERRIDE)\n\n (\n model_data_example,\n predict_data_example,\n ) = cls._construct_model_initialization_data(model_utilities[\"loaded_data\"])\n\n model = None\n\n with (contextlib.nullcontext() if config[\"use_gpu\"] else tf.device(\"/cpu:0\")):\n model = cls._load_tf_model(\n model_utilities,\n model_data_example,\n predict_data_example,\n featurizer,\n execution_context.is_finetuning,\n )\n\n return cls._load_policy_with_model(\n config,\n model_storage,\n resource,\n execution_context,\n featurizer=featurizer,\n model_utilities=model_utilities,\n model=model,\n )\n\n @classmethod\n def _load_policy_with_model(\n cls,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n featurizer: TrackerFeaturizer,\n model: TED,\n model_utilities: Dict[Text, Any],\n ) -> TEDPolicy:\n return cls(\n config,\n model_storage,\n resource,\n execution_context,\n model=model,\n featurizer=featurizer,\n fake_features=model_utilities[\"fake_features\"],\n entity_tag_specs=model_utilities[\"entity_tag_specs\"],\n )\n\n @classmethod\n def _load_tf_model(\n cls,\n model_utilities: Dict[Text, Any],\n model_data_example: RasaModelData,\n predict_data_example: RasaModelData,\n featurizer: TrackerFeaturizer,\n should_finetune: bool,\n ) -> TED:\n model = cls.model_class().load(\n str(model_utilities[\"tf_model_file\"]),\n model_data_example,\n predict_data_example,\n data_signature=model_data_example.get_signature(),\n config=model_utilities[\"model_config\"],\n max_history_featurizer_is_used=isinstance(\n featurizer, MaxHistoryTrackerFeaturizer\n ),\n label_data=model_utilities[\"label_data\"],\n entity_tag_specs=model_utilities[\"entity_tag_specs\"],\n finetune_mode=should_finetune,\n )\n return model\n\n @classmethod\n def _construct_model_initialization_data(\n cls, loaded_data: Dict[Text, Dict[Text, List[FeatureArray]]]\n ) -> Tuple[RasaModelData, RasaModelData]:\n model_data_example = RasaModelData(\n label_key=LABEL_KEY, label_sub_key=LABEL_SUB_KEY, data=loaded_data\n )\n predict_data_example = RasaModelData(\n label_key=LABEL_KEY,\n label_sub_key=LABEL_SUB_KEY,\n data={\n feature_name: features\n for feature_name, features in model_data_example.items()\n if feature_name\n # we need to remove label features for prediction if they are present\n in PREDICTION_FEATURES\n },\n )\n return model_data_example, predict_data_example\n\n 
@classmethod\n def _update_loaded_params(cls, meta: Dict[Text, Any]) -> Dict[Text, Any]:\n meta = rasa.utils.train_utils.update_confidence_type(meta)\n meta = rasa.utils.train_utils.update_similarity_type(meta)\n\n return meta\n\n\nclass TED(TransformerRasaModel):\n \"\"\"TED model architecture from https://arxiv.org/abs/1910.00486.\"\"\"\n\n def __init__(\n self,\n data_signature: Dict[Text, Dict[Text, List[FeatureSignature]]],\n config: Dict[Text, Any],\n max_history_featurizer_is_used: bool,\n label_data: RasaModelData,\n entity_tag_specs: Optional[List[EntityTagSpec]],\n ) -> None:\n \"\"\"Initializes the TED model.\n\n Args:\n data_signature: the data signature of the input data\n config: the model configuration\n max_history_featurizer_is_used: if 'True'\n only the last dialogue turn will be used\n label_data: the label data\n entity_tag_specs: the entity tag specifications\n \"\"\"\n super().__init__(\"TED\", config, data_signature, label_data)\n\n self.max_history_featurizer_is_used = max_history_featurizer_is_used\n\n self.predict_data_signature = {\n feature_name: features\n for feature_name, features in data_signature.items()\n if feature_name in PREDICTION_FEATURES\n }\n\n self._entity_tag_specs = entity_tag_specs\n\n # metrics\n self.action_loss = tf.keras.metrics.Mean(name=\"loss\")\n self.action_acc = tf.keras.metrics.Mean(name=\"acc\")\n self.entity_loss = tf.keras.metrics.Mean(name=\"e_loss\")\n self.entity_f1 = tf.keras.metrics.Mean(name=\"e_f1\")\n self.metrics_to_log += [\"loss\", \"acc\"]\n if self.config[ENTITY_RECOGNITION]:\n self.metrics_to_log += [\"e_loss\", \"e_f1\"]\n\n # needed for efficient prediction\n self.all_labels_embed: Optional[tf.Tensor] = None\n\n self._prepare_layers()\n\n def _check_data(self) -> None:\n if not any(key in [INTENT, TEXT] for key in self.data_signature.keys()):\n raise RasaException(\n f\"No user features specified. \"\n f\"Cannot train '{self.__class__.__name__}' model.\"\n )\n\n if not any(\n key in [ACTION_NAME, ACTION_TEXT] for key in self.data_signature.keys()\n ):\n raise ValueError(\n f\"No action features specified. \"\n f\"Cannot train '{self.__class__.__name__}' model.\"\n )\n if LABEL not in self.data_signature:\n raise ValueError(\n f\"No label features specified. 
\"\n f\"Cannot train '{self.__class__.__name__}' model.\"\n )\n\n # ---CREATING LAYERS HELPERS---\n\n def _prepare_layers(self) -> None:\n for name in self.data_signature.keys():\n self._prepare_input_layers(\n name, self.data_signature[name], is_label_attribute=False\n )\n self._prepare_encoding_layers(name)\n\n for name in self.label_signature.keys():\n self._prepare_input_layers(\n name, self.label_signature[name], is_label_attribute=True\n )\n self._prepare_encoding_layers(name)\n\n self._tf_layers[\n f\"transformer.{DIALOGUE}\"\n ] = rasa_layers.prepare_transformer_layer(\n attribute_name=DIALOGUE,\n config=self.config,\n num_layers=self.config[NUM_TRANSFORMER_LAYERS][DIALOGUE],\n units=self.config[TRANSFORMER_SIZE][DIALOGUE],\n drop_rate=self.config[DROP_RATE_DIALOGUE],\n # use bidirectional transformer, because\n # we will invert dialogue sequence so that the last turn is located\n # at the first position and would always have\n # exactly the same positional encoding\n unidirectional=not self.max_history_featurizer_is_used,\n )\n\n self._prepare_label_classification_layers(DIALOGUE)\n\n if self.config[ENTITY_RECOGNITION]:\n self._prepare_entity_recognition_layers()\n\n def _prepare_input_layers(\n self,\n attribute_name: Text,\n attribute_signature: Dict[Text, List[FeatureSignature]],\n is_label_attribute: bool = False,\n ) -> None:\n \"\"\"Prepares feature processing layers for sentence/sequence-level features.\n\n Distinguishes between label features and other features, not applying input\n dropout to the label ones.\n \"\"\"\n # Disable input dropout in the config to be used if this is a label attribute.\n if is_label_attribute:\n config_to_use = self.config.copy()\n config_to_use.update(\n {SPARSE_INPUT_DROPOUT: False, DENSE_INPUT_DROPOUT: False}\n )\n else:\n config_to_use = self.config\n # Attributes with sequence-level features also have sentence-level features,\n # all these need to be combined and further processed.\n if attribute_name in SEQUENCE_FEATURES_TO_ENCODE:\n self._tf_layers[\n f\"sequence_layer.{attribute_name}\"\n ] = rasa_layers.RasaSequenceLayer(\n attribute_name, attribute_signature, config_to_use\n )\n # Attributes without sequence-level features require some actual feature\n # processing only if they have sentence-level features. 
Attributes with no\n # sequence- and sentence-level features (dialogue, entity_tags, label) are\n # skipped here.\n elif SENTENCE in attribute_signature:\n self._tf_layers[\n f\"sparse_dense_concat_layer.{attribute_name}\"\n ] = rasa_layers.ConcatenateSparseDenseFeatures(\n attribute=attribute_name,\n feature_type=SENTENCE,\n feature_type_signature=attribute_signature[SENTENCE],\n config=config_to_use,\n )\n\n def _prepare_encoding_layers(self, name: Text) -> None:\n \"\"\"Create Ffnn encoding layer used just before combining all dialogue features.\n\n Args:\n name: attribute name\n \"\"\"\n # create encoding layers only for the features which should be encoded;\n if name not in SENTENCE_FEATURES_TO_ENCODE + LABEL_FEATURES_TO_ENCODE:\n return\n # check that there are SENTENCE features for the attribute name in data\n if (\n name in SENTENCE_FEATURES_TO_ENCODE\n and FEATURE_TYPE_SENTENCE not in self.data_signature[name]\n ):\n return\n # same for label_data\n if (\n name in LABEL_FEATURES_TO_ENCODE\n and FEATURE_TYPE_SENTENCE not in self.label_signature[name]\n ):\n return\n\n self._prepare_ffnn_layer(\n f\"{name}\",\n [self.config[ENCODING_DIMENSION]],\n self.config[DROP_RATE_DIALOGUE],\n prefix=\"encoding_layer\",\n )\n\n # ---GRAPH BUILDING HELPERS---\n\n @staticmethod\n def _compute_dialogue_indices(\n tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]]\n ) -> None:\n dialogue_lengths = tf.cast(tf_batch_data[DIALOGUE][LENGTH][0], dtype=tf.int32)\n # wrap in a list, because that's the structure of tf_batch_data\n tf_batch_data[DIALOGUE][INDICES] = [\n (\n tf.map_fn(\n tf.range,\n dialogue_lengths,\n fn_output_signature=tf.RaggedTensorSpec(\n shape=[None], dtype=tf.int32\n ),\n )\n ).values\n ]\n\n def _create_all_labels_embed(self) -> Tuple[tf.Tensor, tf.Tensor]:\n all_label_ids = self.tf_label_data[LABEL_KEY][LABEL_SUB_KEY][0]\n # labels cannot have all features \"fake\"\n all_labels_encoded = {}\n for key in self.tf_label_data.keys():\n if key != LABEL_KEY:\n attribute_features, _, _ = self._encode_real_features_per_attribute(\n self.tf_label_data, key\n )\n all_labels_encoded[key] = attribute_features\n\n x = self._collect_label_attribute_encodings(all_labels_encoded)\n\n # additional sequence axis is artifact of our RasaModelData creation\n # TODO check whether this should be solved in data creation\n x = tf.squeeze(x, axis=1)\n\n all_labels_embed = self._tf_layers[f\"embed.{LABEL}\"](x)\n\n return all_label_ids, all_labels_embed\n\n @staticmethod\n def _collect_label_attribute_encodings(\n all_labels_encoded: Dict[Text, tf.Tensor]\n ) -> tf.Tensor:\n # Initialize with at least one attribute first\n # so that the subsequent TF ops are simplified.\n all_attributes_present = list(all_labels_encoded.keys())\n x = all_labels_encoded.pop(all_attributes_present[0])\n\n # Add remaining attributes\n for attribute in all_labels_encoded:\n x += all_labels_encoded.get(attribute)\n return x\n\n def _embed_dialogue(\n self,\n dialogue_in: tf.Tensor,\n tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],\n ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, Optional[tf.Tensor]]:\n \"\"\"Creates dialogue level embedding and mask.\n\n Args:\n dialogue_in: The encoded dialogue.\n tf_batch_data: Batch in model data format.\n\n Returns:\n The dialogue embedding, the mask, and (for diagnostic purposes)\n also the attention weights.\n \"\"\"\n dialogue_lengths = tf.cast(tf_batch_data[DIALOGUE][LENGTH][0], tf.int32)\n mask = rasa_layers.compute_mask(dialogue_lengths)\n\n if 
self.max_history_featurizer_is_used:\n # invert dialogue sequence so that the last turn would always have\n # exactly the same positional encoding\n dialogue_in = tf.reverse_sequence(dialogue_in, dialogue_lengths, seq_axis=1)\n\n dialogue_transformed, attention_weights = self._tf_layers[\n f\"transformer.{DIALOGUE}\"\n ](dialogue_in, 1 - mask, self._training)\n dialogue_transformed = tf.nn.gelu(dialogue_transformed)\n\n if self.max_history_featurizer_is_used:\n # pick the last vector if the max history featurizer is used; since we\n # inverted the dialogue sequence, the last vector is actually the first one\n dialogue_transformed = dialogue_transformed[:, :1, :]\n mask = tf.expand_dims(self._last_token(mask, dialogue_lengths), 1)\n elif not self._training:\n # during prediction we don't care about previous dialogue turns,\n # so to save computation time, use only the last one\n dialogue_transformed = tf.expand_dims(\n self._last_token(dialogue_transformed, dialogue_lengths), 1\n )\n mask = tf.expand_dims(self._last_token(mask, dialogue_lengths), 1)\n\n dialogue_embed = self._tf_layers[f\"embed.{DIALOGUE}\"](dialogue_transformed)\n\n return dialogue_embed, mask, dialogue_transformed, attention_weights\n\n def _encode_features_per_attribute(\n self, tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]], attribute: Text\n ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:\n # The input is a representation of a 4d tensor of\n # shape (batch-size x dialogue-len x sequence-len x units) in 3d of shape\n # (sum of dialogue history length for all tensors in the batch x\n # max sequence length x number of features).\n\n # However, some dialogue turns contain non-existent state features,\n # e.g. `intent` and `text` features are mutually exclusive,\n # as well as `action_name` and `action_text` are mutually exclusive,\n # or some dialogue turns don't contain any `slots`.\n # In order to create full 4d tensors, we created \"fake\" zero features for\n # these non-existent state features and filtered them out during batch\n # generation.\n # Therefore the first dimensions for different attributes are different.\n # It could happen that some batches don't contain \"real\" features at all,\n # e.g. 
large number of stories don't contain any `slots`.\n # Therefore actual input tensors will be empty.\n # Since we need actual numbers to create dialogue turn features, we create\n # zero tensors in `_encode_fake_features_per_attribute` for these attributes.\n return tf.cond(\n tf.shape(tf_batch_data[attribute][SENTENCE][0])[0] > 0,\n lambda: self._encode_real_features_per_attribute(tf_batch_data, attribute),\n lambda: self._encode_fake_features_per_attribute(tf_batch_data, attribute),\n )\n\n def _encode_fake_features_per_attribute(\n self, tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]], attribute: Text\n ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:\n \"\"\"Returns dummy outputs for fake features of a given attribute.\n\n Needs to match the outputs of `_encode_real_features_per_attribute` in shape\n but these outputs will be filled with zeros.\n\n Args:\n tf_batch_data: Maps each attribute to its features and masks.\n attribute: The attribute whose fake features will be \"processed\", e.g.\n `ACTION_NAME`, `INTENT`.\n\n Returns:\n attribute_features: A tensor of shape `(batch_size, dialogue_length, units)`\n filled with zeros.\n text_output: Only for `TEXT` attribute (otherwise an empty tensor): A tensor\n of shape `(combined batch_size & dialogue_length, max seq length,\n units)` filled with zeros.\n text_sequence_lengths: Only for `TEXT` attribute, otherwise an empty tensor:\n Of shape `(combined batch_size & dialogue_length, 1)`, filled with zeros.\n \"\"\"\n # we need to create real zero tensors with appropriate batch and dialogue dim\n # because they are passed to dialogue transformer\n attribute_mask = tf_batch_data[attribute][MASK][0]\n\n # determine all dimensions so that fake features of the correct shape can be\n # created\n batch_dim = tf.shape(attribute_mask)[0]\n dialogue_dim = tf.shape(attribute_mask)[1]\n if attribute in set(SENTENCE_FEATURES_TO_ENCODE + LABEL_FEATURES_TO_ENCODE):\n units = self.config[ENCODING_DIMENSION]\n else:\n # state-level attributes don't use an encoding layer, hence their size is\n # just the output size of the corresponding sparse+dense feature combining\n # layer\n units = self._tf_layers[\n f\"sparse_dense_concat_layer.{attribute}\"\n ].output_units\n\n attribute_features = tf.zeros(\n (batch_dim, dialogue_dim, units), dtype=tf.float32\n )\n\n # Only for user text, the transformer output and sequence lengths also have to\n # be created (here using fake features) to enable entity recognition training\n # and prediction.\n if attribute == TEXT:\n # we just need to get the correct last dimension size from the prepared\n # transformer\n text_units = self._tf_layers[f\"sequence_layer.{attribute}\"].output_units\n text_output = tf.zeros((0, 0, text_units), dtype=tf.float32)\n text_sequence_lengths = tf.zeros((0,), dtype=tf.int32)\n else:\n # simulate None with empty tensor of zeros\n text_output = tf.zeros((0,))\n text_sequence_lengths = tf.zeros((0,))\n\n return attribute_features, text_output, text_sequence_lengths\n\n @staticmethod\n def _create_last_dialogue_turns_mask(\n tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]], attribute: Text\n ) -> tf.Tensor:\n # Since max_history_featurizer_is_used is True,\n # we need to find the locations of last dialogue turns in\n # (combined batch dimension and dialogue length,) dimension,\n # so that we can use `_sequence_lengths` as a boolean mask to pick\n # which ones are \"real\" textual input in these last dialogue turns.\n\n # In order to do that we can use the given `dialogue_lengths`.\n # For 
example:\n # If we have `dialogue_lengths = [2, 1, 3]`, then\n # `dialogue_indices = [0, 1, 0, 0, 1, 2]`; here we can spot that `0`\n # always indicates the first dialogue turn,\n # which means that the previous dialogue turn is the last dialogue turn.\n # Combining this with the fact that the last element in\n # `dialogue_indices` is always the last dialogue turn, we can add\n # a `0` to the end, getting\n # `_dialogue_indices = [0, 1, 0, 0, 1, 2, 0]`.\n # Then removing the first element\n # `_last_dialogue_turn_inverse_indicator = [1, 0, 0, 1, 2, 0]`\n # we see that `0` points to the last dialogue turn.\n # We convert all positive numbers to `True` and take\n # the inverse mask to get\n # `last_dialogue_mask = [0, 1, 1, 0, 0, 1]`,\n # which precisely corresponds to the fact that the first dialogue is of\n # length 2, the second 1 and the third 3.\n last_dialogue_turn_mask = tf.math.logical_not(\n tf.cast(\n tf.concat(\n [\n tf_batch_data[DIALOGUE][INDICES][0],\n tf.zeros((1,), dtype=tf.int32),\n ],\n axis=0,\n )[1:],\n dtype=tf.bool,\n )\n )\n # get only the indices of real inputs\n return tf.boolean_mask(\n last_dialogue_turn_mask,\n tf.reshape(tf_batch_data[attribute][SEQUENCE_LENGTH][0], (-1,)),\n )\n\n def _encode_real_features_per_attribute(\n self, tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]], attribute: Text\n ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:\n \"\"\"Encodes features for a given attribute.\n\n Args:\n tf_batch_data: Maps each attribute to its features and masks.\n attribute: the attribute we will encode features for\n (e.g., ACTION_NAME, INTENT)\n\n Returns:\n attribute_features: A tensor of shape `(batch_size, dialogue_length, units)`\n with all features for `attribute` processed and combined. If sequence-\n level features are present, the sequence dimension is eliminated using\n a transformer.\n text_output: Only for `TEXT` attribute (otherwise an empty tensor): A tensor\n of shape `(combined batch_size & dialogue_length, max seq length,\n units)` containing token-level embeddings further used for entity\n extraction from user text. Similar to `attribute_features` but returned\n for all tokens, not just for the last one.\n text_sequence_lengths: Only for `TEXT` attribute, otherwise an empty tensor:\n Shape `(combined batch_size & dialogue_length, 1)`, containing the\n sequence length for user text examples in `text_output`. The sequence\n length is effectively the number of tokens + 1 (to account also for\n sentence-level features). Needed for entity extraction from user text.\n \"\"\"\n # simulate None with empty tensor of zeros\n text_output = tf.zeros((0,))\n text_sequence_lengths = tf.zeros((0,))\n\n if attribute in SEQUENCE_FEATURES_TO_ENCODE:\n # get lengths of real token sequences as a 3D tensor\n sequence_feature_lengths = self._get_sequence_feature_lengths(\n tf_batch_data, attribute\n )\n\n # sequence_feature_lengths contains `0` for \"fake\" features, while\n # tf_batch_data[attribute] contains only \"real\" features. Hence, we need to\n # get rid of the lengths that are 0. 
This step produces a 1D tensor.\n sequence_feature_lengths = tf.boolean_mask(\n sequence_feature_lengths, sequence_feature_lengths\n )\n\n attribute_features, _, _, _, _, _ = self._tf_layers[\n f\"sequence_layer.{attribute}\"\n ](\n (\n tf_batch_data[attribute][SEQUENCE],\n tf_batch_data[attribute][SENTENCE],\n sequence_feature_lengths,\n ),\n training=self._training,\n )\n\n combined_sentence_sequence_feature_lengths = sequence_feature_lengths + 1\n\n # Only for user text, the transformer output and sequence lengths also have\n # to be returned to enable entity recognition training and prediction.\n if attribute == TEXT:\n text_output = attribute_features\n text_sequence_lengths = combined_sentence_sequence_feature_lengths\n\n if self.max_history_featurizer_is_used:\n # get the location of all last dialogue inputs\n last_dialogue_turns_mask = self._create_last_dialogue_turns_mask(\n tf_batch_data, attribute\n )\n # pick outputs that correspond to the last dialogue turns\n text_output = tf.boolean_mask(text_output, last_dialogue_turns_mask)\n text_sequence_lengths = tf.boolean_mask(\n text_sequence_lengths, last_dialogue_turns_mask\n )\n\n # resulting attribute features will have shape\n # combined batch dimension and dialogue length x 1 x units\n attribute_features = tf.expand_dims(\n self._last_token(\n attribute_features, combined_sentence_sequence_feature_lengths\n ),\n axis=1,\n )\n\n # for attributes without sequence-level features, all we need is to combine the\n # sparse and dense sentence-level features into one\n else:\n # resulting attribute features will have shape\n # combined batch dimension and dialogue length x 1 x units\n attribute_features = self._tf_layers[\n f\"sparse_dense_concat_layer.{attribute}\"\n ]((tf_batch_data[attribute][SENTENCE],), training=self._training)\n\n if attribute in SENTENCE_FEATURES_TO_ENCODE + LABEL_FEATURES_TO_ENCODE:\n attribute_features = self._tf_layers[f\"encoding_layer.{attribute}\"](\n attribute_features, self._training\n )\n\n # attribute features have shape\n # (combined batch dimension and dialogue length x 1 x units)\n # convert them back to their original shape of\n # batch size x dialogue length x units\n attribute_features = self._convert_to_original_shape(\n attribute_features, tf_batch_data, attribute\n )\n\n return attribute_features, text_output, text_sequence_lengths\n\n @staticmethod\n def _convert_to_original_shape(\n attribute_features: tf.Tensor,\n tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],\n attribute: Text,\n ) -> tf.Tensor:\n \"\"\"Transform attribute features back to original shape.\n\n Given shape: (combined batch and dialogue dimension x 1 x units)\n Original shape: (batch x dialogue length x units)\n\n Args:\n attribute_features: the \"real\" features to convert\n tf_batch_data: dictionary mapping every attribute to its features and masks\n attribute: the attribute we will encode features for\n (e.g., ACTION_NAME, INTENT)\n\n Returns:\n The converted attribute features\n \"\"\"\n # in order to convert the attribute features with shape\n # (combined batch-size and dialogue length x 1 x units)\n # to a shape of (batch-size x dialogue length x units)\n # we use tf.scatter_nd. 
Therefore, we need the target shape and the indices\n # mapping the values of attribute features to the position in the resulting\n # tensor.\n\n # attribute_mask has shape batch x dialogue_len x 1\n attribute_mask = tf_batch_data[attribute][MASK][0]\n\n if attribute in SENTENCE_FEATURES_TO_ENCODE + STATE_LEVEL_FEATURES:\n dialogue_lengths = tf.cast(\n tf_batch_data[DIALOGUE][LENGTH][0], dtype=tf.int32\n )\n dialogue_indices = tf_batch_data[DIALOGUE][INDICES][0]\n else:\n # for labels, dialogue length is a fake dim and equal to 1\n dialogue_lengths = tf.ones((tf.shape(attribute_mask)[0],), dtype=tf.int32)\n dialogue_indices = tf.zeros((tf.shape(attribute_mask)[0],), dtype=tf.int32)\n\n batch_dim = tf.shape(attribute_mask)[0]\n dialogue_dim = tf.shape(attribute_mask)[1]\n units = attribute_features.shape[-1]\n\n # attribute_mask has shape (batch x dialogue_len x 1), remove last dimension\n attribute_mask = tf.cast(tf.squeeze(attribute_mask, axis=-1), dtype=tf.int32)\n # sum of attribute mask contains number of dialogue turns with \"real\" features\n non_fake_dialogue_lengths = tf.reduce_sum(attribute_mask, axis=-1)\n # create the batch indices\n batch_indices = tf.repeat(tf.range(batch_dim), non_fake_dialogue_lengths)\n\n # attribute_mask has shape (batch x dialogue_len x 1), while\n # dialogue_indices has shape (combined_dialogue_len,)\n # in order to find positions of real input we need to flatten\n # attribute mask to (combined_dialogue_len,)\n dialogue_indices_mask = tf.boolean_mask(\n attribute_mask, tf.sequence_mask(dialogue_lengths, dtype=tf.int32)\n )\n # pick only those indices that contain \"real\" input\n dialogue_indices = tf.boolean_mask(dialogue_indices, dialogue_indices_mask)\n\n indices = tf.stack([batch_indices, dialogue_indices], axis=1)\n\n shape = tf.convert_to_tensor([batch_dim, dialogue_dim, units])\n attribute_features = tf.squeeze(attribute_features, axis=1)\n\n return tf.scatter_nd(indices, attribute_features, shape)\n\n def _process_batch_data(\n self, tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]]\n ) -> Tuple[tf.Tensor, Optional[tf.Tensor], Optional[tf.Tensor]]:\n \"\"\"Encodes batch data.\n\n Combines intent and text and action name and action text if both are present.\n\n Args:\n tf_batch_data: dictionary mapping every attribute to its features and masks\n\n Returns:\n Tensor: encoding of all features in the batch, combined;\n \"\"\"\n # encode each attribute present in tf_batch_data\n text_output = None\n text_sequence_lengths = None\n batch_encoded = {}\n for attribute in tf_batch_data.keys():\n if attribute in SENTENCE_FEATURES_TO_ENCODE + STATE_LEVEL_FEATURES:\n (\n attribute_features,\n _text_output,\n _text_sequence_lengths,\n ) = self._encode_features_per_attribute(tf_batch_data, attribute)\n\n batch_encoded[attribute] = attribute_features\n if attribute == TEXT:\n text_output = _text_output\n text_sequence_lengths = _text_sequence_lengths\n\n # if both action text and action name are present, combine them; otherwise,\n # return the one which is present\n\n if (\n batch_encoded.get(ACTION_TEXT) is not None\n and batch_encoded.get(ACTION_NAME) is not None\n ):\n batch_action = batch_encoded.pop(ACTION_TEXT) + batch_encoded.pop(\n ACTION_NAME\n )\n elif batch_encoded.get(ACTION_TEXT) is not None:\n batch_action = batch_encoded.pop(ACTION_TEXT)\n else:\n batch_action = batch_encoded.pop(ACTION_NAME)\n # same for user input\n if (\n batch_encoded.get(INTENT) is not None\n and batch_encoded.get(TEXT) is not None\n ):\n batch_user = 
batch_encoded.pop(INTENT) + batch_encoded.pop(TEXT)\n elif batch_encoded.get(TEXT) is not None:\n batch_user = batch_encoded.pop(TEXT)\n else:\n batch_user = batch_encoded.pop(INTENT)\n\n batch_features = [batch_user, batch_action]\n # once we have user input and previous action,\n # add all other attributes (SLOTS, ACTIVE_LOOP, etc.) to batch_features;\n for key in batch_encoded.keys():\n batch_features.append(batch_encoded.get(key))\n\n batch_features = tf.concat(batch_features, axis=-1)\n\n return batch_features, text_output, text_sequence_lengths\n\n def _reshape_for_entities(\n self,\n tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],\n dialogue_transformer_output: tf.Tensor,\n text_output: tf.Tensor,\n text_sequence_lengths: tf.Tensor,\n ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:\n # The first dim of the output of the text sequence transformer is the same\n # as number of \"real\" features for `text` at the last dialogue turns\n # (let's call it `N`),\n # which corresponds to the first dim of the tag ids tensor.\n # To calculate the loss for entities we need the output of the text\n # sequence transformer (shape: N x sequence length x units),\n # the output of the dialogue transformer\n # (shape: batch size x dialogue length x units) and the tag ids for the\n # entities (shape: N x sequence length - 1 x units)\n # In order to process the tensors, they need to have the same shape.\n # Convert the output of the dialogue transformer to shape\n # (N x 1 x units).\n\n # Note: The CRF layer cannot handle 4D tensors. E.g. we cannot use the shape\n # batch size x dialogue length x sequence length x units\n\n # convert the output of the dialogue transformer\n # to shape (real entity dim x 1 x units)\n attribute_mask = tf_batch_data[TEXT][MASK][0]\n dialogue_lengths = tf.cast(tf_batch_data[DIALOGUE][LENGTH][0], tf.int32)\n\n if self.max_history_featurizer_is_used:\n # pick outputs that correspond to the last dialogue turns\n attribute_mask = tf.expand_dims(\n self._last_token(attribute_mask, dialogue_lengths), axis=1\n )\n dialogue_transformer_output = tf.boolean_mask(\n dialogue_transformer_output, tf.squeeze(attribute_mask, axis=-1)\n )\n\n # boolean mask removed axis=1, add it back\n dialogue_transformer_output = tf.expand_dims(\n dialogue_transformer_output, axis=1\n )\n\n # broadcast the dialogue transformer output sequence-length-times to get the\n # same shape as the text sequence transformer output\n dialogue_transformer_output = tf.tile(\n dialogue_transformer_output, (1, tf.shape(text_output)[1], 1)\n )\n\n # concat the output of the dialogue transformer to the output of the text\n # sequence transformer (adding context)\n # resulting shape (N x sequence length x 2 units)\n # N = number of \"real\" features for `text` at the last dialogue turns\n text_transformed = tf.concat(\n [text_output, dialogue_transformer_output], axis=-1\n )\n text_mask = rasa_layers.compute_mask(text_sequence_lengths)\n\n # add zeros to match the shape of text_transformed, because\n # max sequence length might differ, since it is calculated dynamically\n # based on a subset of sequence lengths\n sequence_diff = tf.shape(text_transformed)[1] - tf.shape(text_mask)[1]\n text_mask = tf.pad(text_mask, [[0, 0], [0, sequence_diff], [0, 0]])\n\n # remove additional dims and sentence features\n text_sequence_lengths = tf.reshape(text_sequence_lengths, (-1,)) - 1\n\n return text_transformed, text_mask, text_sequence_lengths\n\n # ---TRAINING---\n\n def _batch_loss_entities(\n self,\n tf_batch_data: Dict[Text, 
Dict[Text, List[tf.Tensor]]],\n dialogue_transformer_output: tf.Tensor,\n text_output: tf.Tensor,\n text_sequence_lengths: tf.Tensor,\n ) -> tf.Tensor:\n # It could happen that some batches don't contain \"real\" features for `text`,\n # e.g. large number of stories are intent only.\n # Therefore actual `text_output` will be empty.\n # We cannot create a loss with empty tensors.\n # Since we need actual numbers to create a full loss, we output\n # zero in this case.\n return tf.cond(\n tf.shape(text_output)[0] > 0,\n lambda: self._real_batch_loss_entities(\n tf_batch_data,\n dialogue_transformer_output,\n text_output,\n text_sequence_lengths,\n ),\n lambda: tf.constant(0.0),\n )\n\n def _real_batch_loss_entities(\n self,\n tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],\n dialogue_transformer_output: tf.Tensor,\n text_output: tf.Tensor,\n text_sequence_lengths: tf.Tensor,\n ) -> tf.Tensor:\n\n text_transformed, text_mask, text_sequence_lengths = self._reshape_for_entities(\n tf_batch_data,\n dialogue_transformer_output,\n text_output,\n text_sequence_lengths,\n )\n\n tag_ids = tf_batch_data[ENTITY_TAGS][IDS][0]\n # add a zero (no entity) for the sentence features to match the shape of inputs\n sequence_diff = tf.shape(text_transformed)[1] - tf.shape(tag_ids)[1]\n tag_ids = tf.pad(tag_ids, [[0, 0], [0, sequence_diff], [0, 0]])\n\n loss, f1, _ = self._calculate_entity_loss(\n text_transformed,\n tag_ids,\n text_mask,\n text_sequence_lengths,\n ENTITY_ATTRIBUTE_TYPE,\n )\n\n self.entity_loss.update_state(loss)\n self.entity_f1.update_state(f1)\n\n return loss\n\n @staticmethod\n def _get_labels_embed(\n label_ids: tf.Tensor, all_labels_embed: tf.Tensor\n ) -> tf.Tensor:\n # instead of processing labels again, gather embeddings from\n # all_labels_embed using label ids\n\n indices = tf.cast(label_ids[:, :, 0], tf.int32)\n labels_embed = tf.gather(all_labels_embed, indices)\n\n return labels_embed\n\n def batch_loss(\n self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]\n ) -> tf.Tensor:\n \"\"\"Calculates the loss for the given batch.\n\n Args:\n batch_in: The batch.\n\n Returns:\n The loss of the given batch.\n \"\"\"\n tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature)\n self._compute_dialogue_indices(tf_batch_data)\n\n all_label_ids, all_labels_embed = self._create_all_labels_embed()\n\n label_ids = tf_batch_data[LABEL_KEY][LABEL_SUB_KEY][0]\n labels_embed = self._get_labels_embed(label_ids, all_labels_embed)\n\n dialogue_in, text_output, text_sequence_lengths = self._process_batch_data(\n tf_batch_data\n )\n (\n dialogue_embed,\n dialogue_mask,\n dialogue_transformer_output,\n _,\n ) = self._embed_dialogue(dialogue_in, tf_batch_data)\n dialogue_mask = tf.squeeze(dialogue_mask, axis=-1)\n\n losses = []\n\n loss, acc = self._tf_layers[f\"loss.{LABEL}\"](\n dialogue_embed,\n labels_embed,\n label_ids,\n all_labels_embed,\n all_label_ids,\n dialogue_mask,\n )\n losses.append(loss)\n\n if (\n self.config[ENTITY_RECOGNITION]\n and text_output is not None\n and text_sequence_lengths is not None\n ):\n losses.append(\n self._batch_loss_entities(\n tf_batch_data,\n dialogue_transformer_output,\n text_output,\n text_sequence_lengths,\n )\n )\n\n self.action_loss.update_state(loss)\n self.action_acc.update_state(acc)\n\n return tf.math.add_n(losses)\n\n # ---PREDICTION---\n def prepare_for_predict(self) -> None:\n \"\"\"Prepares the model for prediction.\"\"\"\n _, self.all_labels_embed = self._create_all_labels_embed()\n\n def batch_predict(\n self, 
batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]\n ) -> Dict[Text, Union[tf.Tensor, Dict[Text, tf.Tensor]]]:\n \"\"\"Predicts the output of the given batch.\n\n Args:\n batch_in: The batch.\n\n Returns:\n The output to predict.\n \"\"\"\n if self.all_labels_embed is None:\n raise ValueError(\n \"The model was not prepared for prediction. \"\n \"Call `prepare_for_predict` first.\"\n )\n\n tf_batch_data = self.batch_to_model_data_format(\n batch_in, self.predict_data_signature\n )\n self._compute_dialogue_indices(tf_batch_data)\n\n dialogue_in, text_output, text_sequence_lengths = self._process_batch_data(\n tf_batch_data\n )\n (\n dialogue_embed,\n dialogue_mask,\n dialogue_transformer_output,\n attention_weights,\n ) = self._embed_dialogue(dialogue_in, tf_batch_data)\n dialogue_mask = tf.squeeze(dialogue_mask, axis=-1)\n\n sim_all, scores = self._tf_layers[\n f\"loss.{LABEL}\"\n ].get_similarities_and_confidences_from_embeddings(\n dialogue_embed[:, :, tf.newaxis, :],\n self.all_labels_embed[tf.newaxis, tf.newaxis, :, :],\n dialogue_mask,\n )\n\n predictions = {\n \"scores\": scores,\n \"similarities\": sim_all,\n DIAGNOSTIC_DATA: {\"attention_weights\": attention_weights},\n }\n\n if (\n self.config[ENTITY_RECOGNITION]\n and text_output is not None\n and text_sequence_lengths is not None\n ):\n pred_ids, confidences = self._batch_predict_entities(\n tf_batch_data,\n dialogue_transformer_output,\n text_output,\n text_sequence_lengths,\n )\n name = ENTITY_ATTRIBUTE_TYPE\n predictions[f\"e_{name}_ids\"] = pred_ids\n predictions[f\"e_{name}_scores\"] = confidences\n\n return predictions\n\n def _batch_predict_entities(\n self,\n tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],\n dialogue_transformer_output: tf.Tensor,\n text_output: tf.Tensor,\n text_sequence_lengths: tf.Tensor,\n ) -> Tuple[tf.Tensor, tf.Tensor]:\n # It could happen that the current prediction turn doesn't contain\n # \"real\" features for `text`.\n # Therefore the actual `text_output` will be empty.\n # We cannot predict entities with empty tensors.\n # Since we need to output some tensors of the same shape, we output\n # zero tensors.\n return tf.cond(\n tf.shape(text_output)[0] > 0,\n lambda: self._real_batch_predict_entities(\n tf_batch_data,\n dialogue_transformer_output,\n text_output,\n text_sequence_lengths,\n ),\n lambda: (\n # the output is of shape (batch_size, max_seq_len)\n tf.zeros(tf.shape(text_output)[:2], dtype=tf.int32),\n tf.zeros(tf.shape(text_output)[:2], dtype=tf.float32),\n ),\n )\n\n def _real_batch_predict_entities(\n self,\n tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],\n dialogue_transformer_output: tf.Tensor,\n text_output: tf.Tensor,\n text_sequence_lengths: tf.Tensor,\n ) -> Tuple[tf.Tensor, tf.Tensor]:\n\n text_transformed, _, text_sequence_lengths = self._reshape_for_entities(\n tf_batch_data,\n dialogue_transformer_output,\n text_output,\n text_sequence_lengths,\n )\n\n name = ENTITY_ATTRIBUTE_TYPE\n\n _logits = self._tf_layers[f\"embed.{name}.logits\"](text_transformed)\n\n return self._tf_layers[f\"crf.{name}\"](_logits, text_sequence_lengths)\n"
] |
[
[
"tensorflow.convert_to_tensor",
"tensorflow.device",
"numpy.expand_dims",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.stack",
"tensorflow.cast",
"numpy.squeeze",
"numpy.max",
"numpy.any",
"tensorflow.pad",
"tensorflow.nn.gelu",
"tensorflow.boolean_mask",
"numpy.arange",
"tensorflow.squeeze",
"tensorflow.gather",
"numpy.argmax",
"tensorflow.keras.metrics.Mean",
"tensorflow.shape",
"tensorflow.scatter_nd",
"tensorflow.math.add_n",
"tensorflow.reverse_sequence",
"tensorflow.RaggedTensorSpec",
"tensorflow.sequence_mask",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.keras.optimizers.Adam"
]
] |
kmnhan/nexpy
|
[
"b5fb72b3c1ae2580e5d5504fbc5bd2f2cd13d98b"
] |
[
"src/nexpy/readers/readtxt.py"
] |
[
"# -----------------------------------------------------------------------------\n# Copyright (c) 2013-2021, NeXpy Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING, distributed with this software.\n# -----------------------------------------------------------------------------\n\n\"\"\"\nModule to read in a text file and convert it to NeXus.\n\nThis is provided as an example of writing an import dialog. Each new\nimporter needs to layout the GUI buttons necessary for defining the\nimported file and its attributes and a single module, get_data, which\nreturns an NXroot or NXentry object. This will be added to the NeXpy\ntree.\n\nTwo GUI elements are provided for convenience:\n\n ImportDialog.filebox: Contains a \"Choose File\" button and a text\n box. Both can be used to set the path to the\n imported file. This can be retrieved as a\n string using self.get_filename().\n ImportDialog.buttonbox: Contains a \"Cancel\" and \"OK\" button to close\n the dialog. This should be placed at the\n bottom of all import dialogs.\n\"\"\"\nimport os\n\nimport numpy as np\nfrom nexpy.gui.importdialog import NXImportDialog\nfrom nexpy.gui.utils import report_error\nfrom nexpy.gui.widgets import (NXCheckBox, NXComboBox, NXLabel, NXLineEdit,\n NXPushButton)\nfrom nexusformat.nexus import NXdata, NXentry, NXfield, NXgroup\nfrom qtpy import QtWidgets\n\nfiletype = \"Text File\"\n\n\nclass ImportDialog(NXImportDialog):\n \"\"\"Dialog to import a text file\"\"\"\n\n data_types = ['char', 'float32', 'float64', 'int8', 'uint8', 'int16',\n 'uint16', 'int32', 'uint32', 'int64', 'uint64']\n\n def __init__(self, parent=None):\n\n super().__init__(parent=parent)\n\n self.textbox = QtWidgets.QTextEdit()\n self.textbox.setMinimumWidth(400)\n self.textbox.setMinimumHeight(200)\n self.textbox.setReadOnly(True)\n\n self.skipbox = NXLineEdit(0, width=20, align='center')\n self. 
headbox = NXCheckBox()\n self.delimiters = {'Whitespace': None, 'Tab': '\\t', 'Space': ' ',\n 'Comma': ',', 'Colon': ':', 'Semicolon': ';'}\n self.delcombo = NXComboBox(items=self.delimiters)\n\n self.groupbox = NXLineEdit('data')\n standard_groups = sorted(list(set([g for g in\n self.mainwindow.nxclasses['NXentry'][2]])))\n other_groups = sorted([g for g in self.mainwindow.nxclasses\n if g not in standard_groups])\n self.groupcombo = NXComboBox(self.select_class, standard_groups)\n self.groupcombo.insertSeparator(self.groupcombo.count())\n self.groupcombo.add(*other_groups)\n self.groupcombo.select('NXdata')\n self.fieldcombo = NXComboBox(self.select_field)\n self.fieldbox = NXLineEdit(slot=self.update_field)\n self.typecombo = NXComboBox(self.update_field, self.data_types,\n default='float64')\n self.signalcombo = NXComboBox(self.update_field,\n ['field', 'signal', 'axis', 'errors',\n 'exclude'], default='field')\n self.field_layout = self.make_layout(\n NXLabel('Output Fields', bold=True),\n self.make_layout(self.fieldcombo, self.fieldbox,\n self.typecombo, self.signalcombo),\n spacing=5, vertical=True)\n self.customizebutton = NXPushButton('Customize Fields',\n self.customize_data)\n\n self.set_layout(self.filebox(slot=self.read_file), self.textbox,\n self.make_layout('Header Row', self.headbox,\n 'stretch',\n 'Skipped Rows', self.skipbox,\n 'stretch',\n 'Delimiters', self.delcombo),\n NXLabel('Output Group', bold=True),\n self.make_layout('Class', self.groupcombo,\n 'Name', self.groupbox, align='left'),\n self.make_layout(self.customizebutton,\n self.close_buttons(save=True),\n align='justified'),\n spacing=5)\n self.set_title(\"Import \"+str(filetype))\n self.data = None\n\n def read_file(self):\n if self.get_filename() == '':\n self.choose_file()\n if os.path.exists(self.get_filename()):\n self.import_file = self.get_filename()\n with open(self.import_file, 'r') as f:\n text = f.read()\n self.textbox.setText(text.replace('\\t', ' \\t\\u25B3'))\n self.textbox.repaint()\n self.text = []\n for line in text.splitlines():\n if line.split():\n self.text.append(line)\n if [s for s in self.text if '\\t' in s]:\n self.delcombo.select('Tab')\n\n def select_class(self):\n self.groupbox.setText(self.groupcombo.selected[2:])\n if self.groupcombo.selected not in ['NXdata', 'NXmonitor', 'NXlog']:\n for item in ['signal', 'axis', 'errors']:\n self.signalcombo.remove(item)\n else:\n self.signalcombo.add('signal', 'axis', 'errors')\n\n def select_field(self):\n col = self.fieldcombo.selected\n self.fieldbox.setText(self.data[col]['name'])\n self.typecombo.select(self.data[col]['dtype'])\n self.signalcombo.select(self.data[col]['signal'])\n\n def update_field(self):\n col = self.fieldcombo.selected\n self.data[col]['name'] = self.fieldbox.text()\n self.data[col]['dtype'] = self.typecombo.selected\n self.data[col]['signal'] = self.signalcombo.selected\n for c in [c for c in self.fieldcombo if c != col]:\n if (self.data[c]['signal'] in ['signal', 'axis', 'errors'] and\n self.data[c]['signal'] == self.data[col]['signal']):\n self.data[c]['signal'] = 'field'\n\n @property\n def header(self):\n if self.headbox.isChecked():\n return True\n else:\n return None\n\n def read_data(self):\n delimiter = self.delimiters[self.delcombo.selected]\n skip_header = int(self.skipbox.text())\n if self.header:\n self.headers = self.text[skip_header].split(delimiter)\n else:\n self.headers = None\n try:\n input = np.genfromtxt(self.text, delimiter=delimiter,\n names=self.header, skip_header=skip_header,\n dtype=None, 
autostrip=True, encoding='utf8')\n except ValueError as error:\n report_error(\"Importing Text File\", error)\n self.data = None\n return\n self.data = {}\n for i, _ in enumerate(input[0]):\n if input.dtype.names is not None:\n name = input.dtype.names[i]\n dtype = input.dtype[i].name\n else:\n name = 'Col'+str(i+1)\n dtype = input.dtype.name\n if dtype not in self.data_types:\n dtype = 'char'\n data = [c[i] for c in input]\n signal = 'field'\n if self.groupcombo.selected in ['NXdata', 'NXmonitor', 'NXlog']:\n if i <= 2 and dtype != 'char':\n signal = ['axis', 'signal', 'errors'][i]\n self.data['Col'+str(i+1)] = {'name': name, 'dtype': dtype,\n 'signal': signal, 'data': data}\n\n def customize_data(self):\n self.read_data()\n if self.data is not None:\n self.fieldcombo.add(*list(self.data))\n self.fieldcombo.select('Col1')\n self.fieldbox.setText(self.data['Col1']['name'])\n self.typecombo.select(self.data['Col1']['dtype'])\n self.signalcombo.select(self.data['Col1']['signal'])\n self.insert_layout(5, self.field_layout)\n\n def get_data(self):\n group = NXgroup(name=self.groupbox.text())\n group.nxclass = self.groupcombo.selected\n for i, col in enumerate([c for c in self.data\n if self.data[c]['signal'] != 'exclude']):\n name = self.data[col]['name']\n group[name] = NXfield(self.data[col]['data'],\n dtype=self.data[col]['dtype'])\n if self.header and name != self.headers[i]:\n group[name].long_name = self.headers[i]\n if isinstance(group, NXdata):\n if self.data[col]['signal'] == 'signal':\n group.nxsignal = group[name]\n elif self.data[col]['signal'] == 'axis':\n group.nxaxes = [group[name]]\n elif self.data[col]['signal'] == 'errors':\n group.nxerrors = group[name]\n return NXentry(group)\n\n def accept(self):\n \"\"\"\n Completes the data import.\n \"\"\"\n if self.data is None:\n self.read_data()\n if self.data is None:\n self.raise_()\n self.activateWindow()\n return\n self.accepted = True\n self.mainwindow.import_data()\n super().accept()\n"
] |
[
[
"numpy.genfromtxt"
]
] |
gmbrandt/banzai
|
[
"af6ddb529e8c35eaa87abf67372160f2dd99050b"
] |
[
"banzai/tests/utils.py"
] |
[
"from __future__ import absolute_import, division, print_function, unicode_literals\nimport pytest\nfrom banzai.utils import image_utils\nimport numpy as np\nfrom datetime import datetime\n\n\nclass FakeImage(object):\n def __init__(self, nx=101, ny=103, image_multiplier=1.0,\n ccdsum='2 2', epoch='20160101',):\n self.nx = nx\n self.ny = ny\n self.telescope_id = -1\n self.site = 'elp'\n self.instrument = 'kb76'\n self.ccdsum = ccdsum\n self.epoch = epoch\n self.data = image_multiplier * np.ones((ny, nx), dtype=np.float32)\n self.filename = 'test.fits'\n self.filter = 'U'\n self.dateobs = datetime(2016, 1, 1)\n self.header = {}\n self.caltype = ''\n self.bpm = np.zeros((ny, nx), dtype=np.uint8)\n self.request_number = '0000331403'\n\n def get_calibration_filename(self):\n return '/tmp/{0}_{1}_{2}_bin{3}.fits'.format(self.caltype, self.instrument,\n self.epoch,\n self.ccdsum.replace(' ', 'x'))\n\n def subtract(self, x):\n self.data -= x\n\n def add_history(self, msg):\n pass\n\n\nclass FakeContext(object):\n def __init__(self):\n self.processed_path = '/tmp'\n\n\ndef throws_inhomogeneous_set_exception(stagetype, context, keyword, value):\n stage = stagetype(context)\n\n with pytest.raises(image_utils.InhomogeneousSetException) as exception_info:\n kwargs = {keyword: value}\n images = [FakeImage(**kwargs)]\n images += [FakeImage() for x in range(6)]\n stage.do_stage(images)\n assert 'Images have different {0}s'.format(keyword) == str(exception_info.value)\n\n\ndef gaussian2d(image_shape, x0, y0, brightness, fwhm):\n x = np.arange(image_shape[1])\n y = np.arange(image_shape[0])\n x2d, y2d = np.meshgrid(x, y)\n\n sig = fwhm / 2.35482\n\n normfactor = brightness / 2.0 / np.pi * sig ** -2.0\n exponent = -0.5 * sig ** -2.0\n exponent *= (x2d - x0) ** 2.0 + (y2d - y0) ** 2.0\n\n return normfactor * np.exp(exponent)\n"
] |
[
[
"numpy.meshgrid",
"numpy.arange",
"numpy.ones",
"numpy.exp",
"numpy.zeros"
]
] |
mkuiper/deepmind-research
|
[
"1642ae3499c8d1135ec6fe620a68911091dd25ef"
] |
[
"density_functional_approximation_dm21/density_functional_approximation_dm21/neural_numint.py"
] |
[
"# Copyright 2021 DeepMind Technologies Limited.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An interface to DM21 family of exchange-correlation functionals for PySCF.\"\"\"\n\nimport enum\nimport os\nfrom typing import Generator, Optional, Sequence, Tuple, Union\n\nimport attr\nimport numpy as np\nfrom pyscf import dft\nfrom pyscf import gto\nfrom pyscf.dft import numint\nimport tensorflow.compat.v1 as tf\nimport tensorflow_hub as hub\n\nfrom density_functional_approximation_dm21 import compute_hfx_density\n\ntf.disable_v2_behavior()\n\n# TODO(b/196260242): avoid depending upon private function\n_dot_ao_ao = numint._dot_ao_ao # pylint: disable=protected-access\n\n\[email protected]\nclass Functional(enum.Enum):\n \"\"\"Enum for exchange-correlation functionals in the DM21 family.\n\n Attributes:\n DM21: trained on molecules dataset, and fractional charge, and fractional\n spin constraints.\n DM21m: trained on molecules dataset.\n DM21mc: trained on molecules dataset, and fractional charge constraints.\n DM21mu: trained on molecules dataset, and electron gas constraints.\n \"\"\"\n # Break pylint's preferred naming pattern to match the functional names used\n # in the paper.\n # pylint: disable=invalid-name\n DM21 = enum.auto()\n DM21m = enum.auto()\n DM21mc = enum.auto()\n DM21mu = enum.auto()\n # pylint: enable=invalid-name\n\n\n# We use attr.s instead of here instead of dataclasses.dataclass as\n# dataclasses.asdict returns a deepcopy of the attributes. This is wasteful in\n# memory if they are large and breaks (as in the case of tf.Tensors) if they are\n# not serializable. 
attr.asdict does not perform this copy and so works with\n# both np.ndarrays and tf.Tensors.\[email protected](auto_attribs=True)\nclass FunctionalInputs:\n r\"\"\"\"Inputs required for DM21 functionals.\n\n Depending upon the context, this is either a set of numpy arrays (feature\n construction) or TF tensors (constructing placeholders/running functionals).\n\n Attributes:\n rho_a: Density information for the alpha electrons.\n PySCF for meta-GGAs supplies a single array for the total density\n (restricted calculations) and a pair of arrays, one for each spin channel\n (unrestricted calculations).\n Each array/tensor is of shape (6, N) and contains the density and density\n derivatives, where:\n rho(0, :) - density at each grid point\n rho(1, :) - norm of the derivative of the density at each grid point\n along x\n rho(2, :) - norm of the derivative of the density at each grid point\n along y\n rho(3, :) - norm of the derivative of the density at each grid point\n along z\n rho(4, :) - \\nabla^2 \\rho [not used]\n rho(5, :) - tau (1/2 (\\nabla \\rho)^2) at each grid point.\n See pyscf.dft.numint.eval_rho for more details.\n We require separate inputs for both alpha- and beta-spin densities, even\n in restricted calculations (where rho_a = rho_b = rho/2, where rho is the\n total density).\n rho_b: as for rho_a for the beta electrons.\n hfx_a: local Hartree-Fock energy density at each grid point for the alpha-\n spin density for each value of omega. Shape [N, len(omega_values)].\n See compute_hfx_density for more details.\n hfx_b: as for hfx_a for the beta-spin density.\n grid_coords: grid coordinates at which to evaluate the density. Shape\n (N, 3), where N is the number of grid points. Note that this is currently\n unused by the functional, but is still a required input.\n grid_weights: weight of each grid point. Shape (N).\n \"\"\"\n rho_a: Union[tf.Tensor, np.ndarray]\n rho_b: Union[tf.Tensor, np.ndarray]\n hfx_a: Union[tf.Tensor, np.ndarray]\n hfx_b: Union[tf.Tensor, np.ndarray]\n grid_coords: Union[tf.Tensor, np.ndarray]\n grid_weights: Union[tf.Tensor, np.ndarray]\n\n\[email protected](auto_attribs=True)\nclass _GridState:\n \"\"\"Internal state required for the numerical grid.\n\n Attributes:\n coords: coordinates of the grid. Shape (N, 3), where N is the number of grid\n points.\n weight: weight associated with each grid point. Shape (N).\n mask: mask indicating whether a shell is zero at a grid point. Shape\n (N, nbas) where nbas is the number of shells in the basis set. See\n pyscf.dft.gen_grids.make_mask.\n ao: atomic orbitals evaluated on the grid. 
Shape (N, nao), where nao is the\n number of atomic orbitals, or shape (:, N, nao), where the 0-th element\n contains the ao values, the next three elements contain the first\n derivatives, and so on.\n \"\"\"\n coords: np.ndarray\n weight: np.ndarray\n mask: np.ndarray\n ao: np.ndarray\n\n\[email protected](auto_attribs=True)\nclass _SystemState:\n \"\"\"Internal state required for system of interest.\n\n Attributes:\n mol: PySCF molecule\n dms: density matrix or matrices (unrestricted calculations only).\n Restricted calculations: shape (nao, nao), where nao is the number of\n atomic orbitals.\n Unrestricted calculations: shape (2, nao, nao) or a sequence (length 2) of\n arrays of shape (nao, nao), and dms[0] and dms[1] are the density matrices\n of the alpha and beta electrons respectively.\n \"\"\"\n mol: gto.Mole\n dms: Union[np.ndarray, Sequence[np.ndarray]]\n\n\ndef _get_number_of_density_matrices(dms):\n \"\"\"Returns the number of density matrices in dms.\"\"\"\n # See pyscf.numint.NumInt._gen_rho_evaluator\n if isinstance(dms, np.ndarray) and dms.ndim == 2:\n return 1\n return len(dms)\n\n\nclass NeuralNumInt(numint.NumInt):\n \"\"\"A wrapper around pyscf.dft.numint.NumInt for the DM21 functionals.\n\n In order to supply the local Hartree-Fock features required for the DM21\n functionals, we lightly wrap the NumInt class. The actual evaluation of the\n exchange-correlation functional is performed in NeuralNumInt.eval_xc.\n\n Usage:\n mf = dft.RKS(...) # dft.ROKS and dft.UKS are also supported.\n # Specify the functional by monkey-patching mf._numint rather than using\n # mf._xc or mf._define_xc_.\n mf._numint = NeuralNumInt(Functional.DM21)\n mf.kernel()\n \"\"\"\n\n def __init__(self,\n functional: Functional,\n *,\n checkpoint_path: Optional[str] = None):\n \"\"\"Constructs a NeuralNumInt object.\n\n Args:\n functional: member of Functional enum giving the name of the\n functional.\n checkpoint_path: Optional path to specify the directory containing the\n checkpoints of the DM21 family of functionals. If not specified, attempt\n to find the checkpoints using a path relative to the source code.\n \"\"\"\n\n self._functional_name = functional.name\n if checkpoint_path:\n self._model_path = os.path.join(checkpoint_path, self._functional_name)\n else:\n self._model_path = os.path.join(\n os.path.dirname(__file__), 'checkpoints', self._functional_name)\n\n # All DM21 functionals use local Hartree-Fock features with a non-range\n # separated 1/r kernel and a range-separated kernel with \\omega = 0.4.\n # Note an omega of 0.0 is interpreted by PySCF and libcint to indicate no\n # range-separation.\n self._omega_values = [0.0, 0.4]\n self._graph = tf.Graph()\n with self._graph.as_default():\n self._build_graph()\n self._session = tf.Session()\n self._session.run(tf.global_variables_initializer())\n\n self._grid_state = None\n self._system_state = None\n self._vmat_hf = None\n super().__init__()\n\n def _build_graph(self, batch_dim: Optional[int] = None):\n \"\"\"Builds the TensorFlow graph for evaluating the functional.\n\n Args:\n batch_dim: the batch dimension of the grid to use in the model. Default:\n None (determine at runtime). 
This should only be set if building a model\n in order to export and ahead-of-time compile it into a standalone\n library.\n \"\"\"\n\n self._functional = hub.Module(spec=self._model_path)\n\n grid_coords = tf.placeholder(\n tf.float32, shape=[batch_dim, 3], name='grid_coords')\n grid_weights = tf.placeholder(\n tf.float32, shape=[batch_dim], name='grid_weights')\n\n # Density information.\n rho_a = tf.placeholder(tf.float32, shape=[6, batch_dim], name='rho_a')\n rho_b = tf.placeholder(tf.float32, shape=[6, batch_dim], name='rho_b')\n\n # Split into corresponding terms.\n rho_only_a, grad_a_x, grad_a_y, grad_a_z, _, tau_a = tf.unstack(\n rho_a, axis=0)\n rho_only_b, grad_b_x, grad_b_y, grad_b_z, _, tau_b = tf.unstack(\n rho_b, axis=0)\n\n # Evaluate |\\del \\rho|^2 for each spin density and for the total density.\n norm_grad_a = (grad_a_x**2 + grad_a_y**2 + grad_a_z**2)\n norm_grad_b = (grad_b_x**2 + grad_b_y**2 + grad_b_z**2)\n grad_x = grad_a_x + grad_b_x\n grad_y = grad_a_y + grad_b_y\n grad_z = grad_a_z + grad_b_z\n norm_grad = (grad_x**2 + grad_y**2 + grad_z**2)\n\n # The local Hartree-Fock energy densities at each grid point for the alpha-\n # and beta-spin densities for each value of omega.\n # Note an omega of 0 indicates no screening of the Coulomb potential.\n hfxa = tf.placeholder(\n tf.float32, shape=[batch_dim, len(self._omega_values)], name='hfxa')\n hfxb = tf.placeholder(\n tf.float32, shape=[batch_dim, len(self._omega_values)], name='hfxb')\n\n # Make all features 2D arrays on input for ease of handling inside the\n # functional.\n features = {\n 'grid_coords': grid_coords,\n 'grid_weights': tf.expand_dims(grid_weights, 1),\n 'rho_a': tf.expand_dims(rho_only_a, 1),\n 'rho_b': tf.expand_dims(rho_only_b, 1),\n 'tau_a': tf.expand_dims(tau_a, 1),\n 'tau_b': tf.expand_dims(tau_b, 1),\n 'norm_grad_rho_a': tf.expand_dims(norm_grad_a, 1),\n 'norm_grad_rho_b': tf.expand_dims(norm_grad_b, 1),\n 'norm_grad_rho': tf.expand_dims(norm_grad, 1),\n 'hfxa': hfxa,\n 'hfxb': hfxb,\n }\n tensor_dict = {f'tensor_dict${k}': v for k, v in features.items()}\n\n predictions = self._functional(tensor_dict, as_dict=True)\n local_xc = predictions['grid_contribution']\n weighted_local_xc = local_xc * grid_weights\n unweighted_xc = tf.reduce_sum(local_xc, axis=0)\n xc = tf.reduce_sum(weighted_local_xc, axis=0)\n\n # The potential is the local exchange correlation divided by the\n # total density. Add a small constant to deal with zero density.\n self._vxc = local_xc / (rho_only_a + rho_only_b + 1E-12)\n\n # The derivatives of the exchange-correlation (XC) energy with respect to\n # input features. PySCF weights the (standard) derivatives by the grid\n # weights, so we need to compute this with respect to the unweighted sum\n # over grid points.\n self._vrho = tf.gradients(\n unweighted_xc, [features['rho_a'], features['rho_b']],\n name='GRAD_RHO',\n unconnected_gradients=tf.UnconnectedGradients.ZERO)\n self._vsigma = tf.gradients(\n unweighted_xc, [\n features['norm_grad_rho_a'], features['norm_grad_rho_b'],\n features['norm_grad_rho']\n ],\n name='GRAD_SIGMA',\n unconnected_gradients=tf.UnconnectedGradients.ZERO)\n self._vtau = tf.gradients(\n unweighted_xc, [features['tau_a'], features['tau_b']],\n name='GRAD_TAU',\n unconnected_gradients=tf.UnconnectedGradients.ZERO)\n # Standard meta-GGAs do not have a dependency on local HF, so we need to\n # compute the contribution to the Fock matrix ourselves. 
Just use the\n # weighted XC energy to avoid having to weight this later.\n self._vhf = tf.gradients(\n xc, [features['hfxa'], features['hfxb']],\n name='GRAD_HFX',\n unconnected_gradients=tf.UnconnectedGradients.ZERO)\n\n self._placeholders = FunctionalInputs(\n rho_a=rho_a,\n rho_b=rho_b,\n hfx_a=hfxa,\n hfx_b=hfxb,\n grid_coords=grid_coords,\n grid_weights=grid_weights)\n\n outputs = {\n 'vxc': self._vxc,\n 'vrho': tf.stack(self._vrho),\n 'vsigma': tf.stack(self._vsigma),\n 'vtau': tf.stack(self._vtau),\n 'vhf': tf.stack(self._vhf),\n }\n # Create the signature for TF-Hub, including both the energy and functional\n # derivatives.\n # This is a no-op if _build_graph is called outside of\n # hub.create_module_spec.\n hub.add_signature(\n inputs=attr.asdict(self._placeholders), outputs=outputs)\n\n def export_functional_and_derivatives(\n self,\n export_path: str,\n batch_dim: Optional[int] = None,\n ):\n \"\"\"Exports the TensorFlow graph containing the functional and derivatives.\n\n The hub modules supplied contain the TensorFlow operations for the\n evaluation of the exchange-correlation energy. Evaluation of the functional\n derivatives, required for a self-consistent calculation, are added in\n _build_graph. The module created by export_functional_and_derivatives\n contains the evaluation of the functional and the functional derivatives.\n This is much simpler to use from languages other than Python, e.g. using the\n C or C++ TensorFlow API, or using tfcompile to create a standalone C++\n library.\n\n Args:\n export_path: path to write the Hub model to. The exported model can be\n loaded using either TF-Hub or SavedModel APIs.\n batch_dim: the batch dimension of the grid to use in the model. Default:\n None (determine at runtime). This should only be set if the exported\n model is to be ahead-of-time compiled into a standalone library.\n \"\"\"\n with tf.Graph().as_default():\n spec = hub.create_module_spec(\n self._build_graph, tags_and_args=[(set(), {'batch_dim': batch_dim})])\n functional_and_derivatives = hub.Module(spec=spec)\n with tf.Session() as session:\n session.run(tf.global_variables_initializer())\n functional_and_derivatives.export(export_path, session)\n\n # DM21* functionals include the hybrid term directly, so set the\n # range-separated and hybrid parameters expected by PySCF to 0 so PySCF\n # doesn't also add these contributions in separately.\n def rsh_coeff(self, *args):\n \"\"\"Returns the range separated parameters, omega, alpha, beta.\"\"\"\n return [0.0, 0.0, 0.0]\n\n def hybrid_coeff(self, *args, **kwargs):\n \"\"\"Returns the fraction of Hartree-Fock exchange to include.\"\"\"\n return 0.0\n\n def _xc_type(self, *args, **kwargs):\n return 'MGGA'\n\n def nr_rks(self,\n mol: gto.Mole,\n grids: dft.Grids,\n xc_code: str,\n dms: Union[np.ndarray, Sequence[np.ndarray]],\n relativity: int = 0,\n hermi: int = 0,\n max_memory: float = 20000,\n verbose=None) -> Tuple[float, float, np.ndarray]:\n \"\"\"Calculates RKS XC functional and potential matrix on a given grid.\n\n Args:\n mol: PySCF molecule.\n grids: grid on which to evaluate the functional.\n xc_code: XC code. Unused. NeuralNumInt hard codes the XC functional\n based upon the functional argument given to the constructor.\n dms: the density matrix or sequence of density matrices. Multiple density\n matrices are not currently supported. Shape (nao, nao), where nao is the\n number of atomic orbitals.\n relativity: Unused. 
(pyscf.numint.NumInt.nr_rks does not currently use\n this argument.)\n hermi: 1 if the density matrix is Hermitian, 0 if the density matrix is\n non-Hermitian.\n max_memory: the maximum cache to use, in MB.\n verbose: verbosity level. Unused. (PySCF currently does not handle the\n verbosity level passed in here.)\n\n Returns:\n nelec, excsum, vmat, where\n nelec is the number of electrons obtained by numerical integration of\n the density matrix.\n excsum is the functional's XC energy.\n vmat is the functional's XC potential matrix, shape (nao, nao).\n\n Raises:\n NotImplementedError: if multiple density matrices are supplied.\n \"\"\"\n # Wrap nr_rks so we can store internal variables required to evaluate the\n # contribution to the XC potential from local Hartree-Fock features.\n # See pyscf.dft.numint.nr_rks for more details.\n ndms = _get_number_of_density_matrices(dms)\n if ndms > 1:\n raise NotImplementedError(\n 'NeuralNumInt does not support multiple density matrices. '\n 'Only ground state DFT calculations are currently implemented.')\n nao = mol.nao_nr()\n self._vmat_hf = np.zeros((nao, nao))\n self._system_state = _SystemState(mol=mol, dms=dms)\n nelec, excsum, vmat = super().nr_rks(\n mol=mol,\n grids=grids,\n xc_code=xc_code,\n dms=dms,\n relativity=relativity,\n hermi=hermi,\n max_memory=max_memory,\n verbose=verbose)\n vmat += self._vmat_hf + self._vmat_hf.T\n\n # Clear internal state to prevent accidental re-use.\n self._system_state = None\n self._grid_state = None\n return nelec, excsum, vmat\n\n def nr_uks(self,\n mol: gto.Mole,\n grids: dft.Grids,\n xc_code: str,\n dms: Union[Sequence[np.ndarray], Sequence[Sequence[np.ndarray]]],\n relativity: int = 0,\n hermi: int = 0,\n max_memory: float = 20000,\n verbose=None) -> Tuple[np.ndarray, float, np.ndarray]:\n \"\"\"Calculates UKS XC functional and potential matrix on a given grid.\n\n Args:\n mol: PySCF molecule.\n grids: grid on which to evaluate the functional.\n xc_code: XC code. Unused. NeuralNumInt hard codes the XC functional\n based upon the functional argument given to the constructor.\n dms: the density matrix or sequence of density matrices for each spin\n channel. Multiple density matrices for each spin channel are not\n currently supported. Each density matrix is shape (nao, nao), where nao\n is the number of atomic orbitals.\n relativity: Unused. (pyscf.dft.numint.NumInt.nr_rks does not currently use\n this argument.)\n hermi: 1 if the density matrix is Hermitian, 0 if the density matrix is\n non-Hermitian.\n max_memory: the maximum cache to use, in MB.\n verbose: verbosity level. Unused. 
(PySCF currently does not handle the\n verbosity level passed in here.)\n\n Returns:\n nelec, excsum, vmat, where\n nelec is the number of alpha, beta electrons obtained by numerical\n integration of the density matrix as an array of size 2.\n excsum is the functional's XC energy.\n vmat is the functional's XC potential matrix, shape (2, nao, nao), where\n vmat[0] and vmat[1] are the potential matrices for the alpha and beta\n spin channels respectively.\n\n Raises:\n NotImplementedError: if multiple density matrices for each spin channel\n are supplied.\n \"\"\"\n # Wrap nr_uks so we can store internal variables required to evaluate the\n # contribution to the XC potential from local Hartree-Fock features.\n # See pyscf.dft.numint.nr_uks for more details.\n if isinstance(dms, np.ndarray) and dms.ndim == 2: # RHF DM\n ndms = _get_number_of_density_matrices(dms)\n else:\n ndms = _get_number_of_density_matrices(dms[0])\n if ndms > 1:\n raise NotImplementedError(\n 'NeuralNumInt does not support multiple density matrices. '\n 'Only ground state DFT calculations are currently implemented.')\n\n nao = mol.nao_nr()\n self._vmat_hf = np.zeros((2, nao, nao))\n self._system_state = _SystemState(mol=mol, dms=dms)\n nelec, excsum, vmat = super().nr_uks(\n mol=mol,\n grids=grids,\n xc_code=xc_code,\n dms=dms,\n relativity=relativity,\n hermi=hermi,\n max_memory=max_memory,\n verbose=verbose)\n vmat[0] += self._vmat_hf[0] + self._vmat_hf[0].T\n vmat[1] += self._vmat_hf[1] + self._vmat_hf[1].T\n\n # Clear internal state to prevent accidental re-use.\n self._system_state = None\n self._grid_state = None\n self._vmat_hf = None\n return nelec, excsum, vmat\n\n def block_loop(\n self,\n mol: gto.Mole,\n grids: dft.Grids,\n nao: Optional[int] = None,\n deriv: int = 0,\n max_memory: float = 2000,\n non0tab: Optional[np.ndarray] = None,\n blksize: Optional[int] = None,\n buf: Optional[np.ndarray] = None\n ) -> Generator[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray], None,\n None]:\n \"\"\"Loops over the grid by blocks. See pyscf.dft.numint.NumInt.block_loop.\n\n Args:\n mol: PySCF molecule.\n grids: grid on which to evaluate the functional.\n nao: number of basis functions. If None, obtained from mol.\n deriv: unused. The first functional derivatives are always computed.\n max_memory: the maximum cache to use for the information on the grid, in\n MB. Determines the size of each block if blksize is None.\n non0tab: mask determining if a shell in the basis set is zero at a grid\n point. Shape (N, nbas), where N is the number of grid points and nbas\n the number of shells in the basis set. Obtained from grids if not\n supplied.\n blksize: size of each block. Calculated from max_memory if None.\n buf: buffer to use for storing ao. If None, a new array for ao is created\n for each block.\n\n Yields:\n ao, mask, weight, coords: information on a block of the grid containing N'\n points, where\n ao: atomic orbitals evaluated on the grid. Shape (N', nao), where nao is\n the number of atomic orbitals.\n mask: mask indicating whether a shell in the basis set is zero at a grid\n point. Shape (N', nbas).\n weight: weight associated with each grid point. Shape (N').\n coords: coordinates of the grid. 
Shape (N', 3).\n \"\"\"\n # Wrap block_loop so we can store internal variables required to evaluate\n # the contribution to the XC potential from local Hartree-Fock features.\n for ao, mask, weight, coords in super().block_loop(\n mol=mol,\n grids=grids,\n nao=nao,\n deriv=deriv,\n max_memory=max_memory,\n non0tab=non0tab,\n blksize=blksize,\n buf=buf):\n # Cache the current block so we can access it in eval_xc.\n self._grid_state = _GridState(\n ao=ao, mask=mask, weight=weight, coords=coords)\n yield ao, mask, weight, coords\n\n def construct_functional_inputs(\n self,\n mol: gto.Mole,\n dms: Union[np.ndarray, Sequence[np.ndarray]],\n spin: int,\n coords: np.ndarray,\n weights: np.ndarray,\n rho: Union[np.ndarray, Tuple[np.ndarray, np.ndarray]],\n ao: Optional[np.ndarray] = None,\n ) -> Tuple[FunctionalInputs, Tuple[np.ndarray, np.ndarray]]:\n \"\"\"Constructs the input features required for the functional.\n\n Args:\n mol: PySCF molecule.\n dms: density matrix of shape (nao, nao) (restricted calculations) or of\n shape (2, nao, nao) (unrestricted calculations) or tuple of density\n matrices for each spin channel, each of shape (nao, nao) (unrestricted\n calculations).\n spin: 0 for a spin-unpolarized (restricted Kohn-Sham) calculation, and\n spin-polarized (unrestricted) otherwise.\n coords: coordinates of the grid. Shape (N, 3), where N is the number of\n grid points.\n weights: weight associated with each grid point. Shape (N).\n rho: density and density derivatives at each grid point. Single array\n containing the total density for restricted calculations, tuple of\n arrays for each spin channel for unrestricted calculations. Each array\n has shape (6, N). See pyscf.dft.numint.eval_rho and comments in\n FunctionalInputs for more details.\n ao: The atomic orbitals evaluated on the grid, shape (N, nao). Computed if\n not supplied.\n\n Returns:\n inputs, fxx, where\n inputs: FunctionalInputs object containing the inputs (as np.ndarrays)\n for the functional.\n fxx: intermediates, shape (N, nao) for the alpha- and beta-spin\n channels, required for computing the first derivative of the local\n Hartree-Fock density with respect to the density matrices. 
See\n compute_hfx_density for more details.\n \"\"\"\n if spin == 0:\n # RKS\n rhoa = rho / 2\n rhob = rho / 2\n else:\n # UKS\n rhoa, rhob = rho\n\n # Local HF features.\n exxa, exxb = [], []\n fxxa, fxxb = [], []\n for omega in sorted(self._omega_values):\n hfx_results = compute_hfx_density.get_hf_density(\n mol,\n dms,\n coords=coords,\n omega=omega,\n deriv=1,\n ao=ao)\n exxa.append(hfx_results.exx[0])\n exxb.append(hfx_results.exx[1])\n fxxa.append(hfx_results.fxx[0])\n fxxb.append(hfx_results.fxx[1])\n exxa = np.stack(exxa, axis=-1)\n fxxa = np.stack(fxxa, axis=-1)\n if spin == 0:\n exx = (exxa, exxa)\n fxx = (fxxa, fxxa)\n else:\n exxb = np.stack(exxb, axis=-1)\n fxxb = np.stack(fxxb, axis=-1)\n exx = (exxa, exxb)\n fxx = (fxxa, fxxb)\n\n return FunctionalInputs(\n rho_a=rhoa,\n rho_b=rhob,\n hfx_a=exx[0],\n hfx_b=exx[1],\n grid_coords=coords,\n grid_weights=weights), fxx\n\n def eval_xc(\n self,\n xc_code: str,\n rho: Union[np.ndarray, Tuple[np.ndarray, np.ndarray]],\n spin: int = 0,\n relativity: int = 0,\n deriv: int = 1,\n verbose=None\n ) -> Tuple[np.ndarray, Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray],\n None, None]:\n \"\"\"Evaluates the XC energy and functional derivatives.\n\n See pyscf.dft.libxc.eval_xc for more details on the interface.\n\n Note: this also sets self._vmat_extra, which contains the contribution to\n the potential matrix from the local Hartree-Fock terms in the functional.\n\n Args:\n xc_code: unused.\n rho: density and density derivatives at each grid point. Single array\n containing the total density for restricted calculations, tuple of\n arrays for each spin channel for unrestricted calculations. Each array\n has shape (6, N), where N is the number of grid points. See\n pyscf.dft.numint.eval_rho and comments in FunctionalInputs for more\n details.\n spin: 0 for a spin-unpolarized (restricted Kohn-Sham) calculation, and\n spin-polarized (unrestricted) otherwise.\n relativity: unused.\n deriv: unused. The first functional derivatives are always computed.\n verbose: unused.\n\n Returns:\n exc, vxc, fxc, kxc, where:\n exc is the exchange-correlation potential matrix evaluated at each grid\n point, shape (N).\n vxc is (vrho, vgamma, vlapl, vtau), the first-order functional\n derivatives evaluated at each grid point, each shape (N).\n fxc is set to None. (The second-order functional derivatives are not\n computed.)\n kxc is set to None. 
(The third-order functional derivatives are not\n computed.)\n \"\"\"\n del xc_code, verbose, relativity, deriv # unused\n\n # Retrieve cached state.\n ao = self._grid_state.ao\n if ao.ndim == 3:\n # Just need the AO values, not the gradients.\n ao = ao[0]\n if self._grid_state.weight is None:\n weights = np.array([1.])\n else:\n weights = self._grid_state.weight\n mask = self._grid_state.mask\n\n inputs, (fxxa, fxxb) = self.construct_functional_inputs(\n mol=self._system_state.mol,\n dms=self._system_state.dms,\n spin=spin,\n rho=rho,\n weights=weights,\n coords=self._grid_state.coords,\n ao=ao)\n\n with self._graph.as_default():\n feed_dict = dict(\n zip(\n attr.asdict(self._placeholders).values(),\n attr.asdict(inputs).values(),\n ))\n tensor_list = [\n self._vxc,\n self._vrho,\n self._vsigma,\n self._vtau,\n self._vhf,\n ]\n exc, vrho, vsigma, vtau, vhf = (\n self._session.run(tensor_list, feed_dict=feed_dict))\n\n mol = self._system_state.mol\n shls_slice = (0, mol.nbas)\n ao_loc_nr = mol.ao_loc_nr()\n # Note: tf.gradients returns a list of gradients.\n # vrho, vsigma, vtau are derivatives of objects that had\n # tf.expand_dims(..., 1) applied. The [:, 0] indexing undoes this by\n # selecting the 0-th (and only) element from the second dimension.\n if spin == 0:\n vxc_0 = (vrho[0][:, 0] + vrho[1][:, 0]) / 2.\n # pyscf expects derivatives with respect to:\n # grad_rho . grad_rho.\n # The functional uses the first and last as inputs, but then has\n # grad_(rho_a + rho_b) . grad_(rho_a + rho_b)\n # as input. The following computes the correct total derivatives.\n vxc_1 = (vsigma[0][:, 0] / 4. + vsigma[1][:, 0] / 4. + vsigma[2][:, 0])\n vxc_3 = (vtau[0][:, 0] + vtau[1][:, 0]) / 2.\n vxc_2 = np.zeros_like(vxc_3)\n vhfs = (vhf[0] + vhf[1]) / 2.\n # Local Hartree-Fock terms\n for i in range(len(self._omega_values)):\n # Factor of 1/2 is to account for adding vmat_hf + vmat_hf.T to vmat,\n # which we do to match existing PySCF style. Unlike other terms, vmat_hf\n # is already symmetric though.\n aow = np.einsum('pi,p->pi', fxxa[:, :, i], -0.5 * vhfs[:, i])\n self._vmat_hf += _dot_ao_ao(mol, ao, aow, mask, shls_slice,\n ao_loc_nr)\n else:\n vxc_0 = np.stack([vrho[0][:, 0], vrho[1][:, 0]], axis=1)\n # pyscf expects derivatives with respect to:\n # grad_rho_a . grad_rho_a\n # grad_rho_a . grad_rho_b\n # grad_rho_b . grad_rho_b\n # The functional uses the first and last as inputs, but then has\n # grad_(rho_a + rho_b) . grad_(rho_a + rho_b)\n # as input. The following computes the correct total derivatives.\n vxc_1 = np.stack([\n vsigma[0][:, 0] + vsigma[2][:, 0], 2. * vsigma[2][:, 0],\n vsigma[1][:, 0] + vsigma[2][:, 0]\n ],\n axis=1)\n vxc_3 = np.stack([vtau[0][:, 0], vtau[1][:, 0]], axis=1)\n vxc_2 = np.zeros_like(vxc_3)\n vhfs = np.stack([vhf[0], vhf[1]], axis=2)\n for i in range(len(self._omega_values)):\n # Factors of 1/2 are due to the same reason as in the spin=0 case.\n aow = np.einsum('pi,p->pi', fxxa[:, :, i], -0.5 * vhfs[:, i, 0])\n self._vmat_hf[0] += _dot_ao_ao(mol, ao, aow, mask, shls_slice,\n ao_loc_nr)\n aow = np.einsum('pi,p->pi', fxxb[:, :, i], -0.5 * vhfs[:, i, 1])\n self._vmat_hf[1] += _dot_ao_ao(mol, ao, aow, mask, shls_slice,\n ao_loc_nr)\n\n fxc = None # Second derivative not implemented\n kxc = None # Third derivative not implemented\n return exc, (vxc_0, vxc_1, vxc_2, vxc_3), fxc, kxc\n"
] |
[
[
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.disable_v2_behavior",
"numpy.einsum",
"tensorflow.compat.v1.reduce_sum",
"numpy.stack",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.gradients",
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.Graph",
"numpy.zeros_like",
"tensorflow.compat.v1.unstack",
"numpy.array",
"numpy.zeros"
]
] |
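Usage note: the NeuralNumInt docstring above spells out how the functional is wired into PySCF. A minimal, hedged sketch of that flow (assumes PySCF is installed, the DM21 checkpoints sit at the default relative path, and this module's NeuralNumInt and Functional are importable; the molecule and basis are arbitrary placeholders):

from pyscf import gto, dft

mol = gto.M(atom='Ne 0 0 0', basis='cc-pVDZ')
mf = dft.RKS(mol)
# Monkey-patch the integrator rather than setting mf.xc, per the docstring.
mf._numint = NeuralNumInt(Functional.DM21)
energy = mf.kernel()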
EelaNagarajQC/fletcher
|
[
"c5b675524abb88d3c3dbbce5876d3c305248fca5"
] |
[
"fletcher/algorithms/string.py"
] |
[
"from functools import singledispatch\nfrom typing import Any, List, Tuple\n\nimport numpy as np\nimport pyarrow as pa\n\nfrom fletcher._algorithms import _buffer_to_view, _merge_valid_bitmaps\nfrom fletcher._compat import njit\nfrom fletcher.algorithms.utils.chunking import (\n _calculate_chunk_offsets,\n _combined_in_chunk_offsets,\n apply_per_chunk,\n)\n\n\ndef _extract_string_buffers(arr: pa.Array) -> Tuple[np.ndarray, np.ndarray]:\n start = arr.offset\n end = arr.offset + len(arr)\n\n offsets = np.asanyarray(arr.buffers()[1]).view(np.int32)[start : end + 1]\n data = np.asanyarray(arr.buffers()[2]).view(np.uint8)\n\n return offsets, data\n\n\n@njit\ndef _merge_string_data(\n length: int,\n valid_bits: np.ndarray,\n offsets_a: np.ndarray,\n data_a: np.ndarray,\n offsets_b: np.ndarray,\n data_b: np.ndarray,\n result_offsets: np.ndarray,\n result_data: np.ndarray,\n) -> None:\n for i in range(length):\n byte_offset = i // 8\n bit_offset = i % 8\n mask = np.uint8(1 << bit_offset)\n valid = valid_bits[byte_offset] & mask\n\n if not valid:\n result_offsets[i + 1] = result_offsets[i]\n else:\n len_a = offsets_a[i + 1] - offsets_a[i]\n len_b = offsets_b[i + 1] - offsets_b[i]\n result_offsets[i + 1] = result_offsets[i] + len_a + len_b\n for j in range(len_a):\n result_data[result_offsets[i] + j] = data_a[offsets_a[i] + j]\n for j in range(len_b):\n result_data[result_offsets[i] + len_a + j] = data_b[offsets_b[i] + j]\n\n\n@singledispatch\ndef _text_cat_chunked(a: Any, b: pa.ChunkedArray) -> pa.ChunkedArray:\n raise NotImplementedError(\n \"_text_cat_chunked is only implemented for pa.Array and pa.ChunkedArray\"\n )\n\n\n@_text_cat_chunked.register(pa.ChunkedArray)\ndef _text_cat_chunked_1(a: pa.ChunkedArray, b: pa.ChunkedArray) -> pa.ChunkedArray:\n in_a_offsets, in_b_offsets = _combined_in_chunk_offsets(a, b)\n\n new_chunks: List[pa.Array] = []\n for a_offset, b_offset in zip(in_a_offsets, in_b_offsets):\n a_slice = a.chunk(a_offset[0])[a_offset[1] : a_offset[1] + a_offset[2]]\n b_slice = b.chunk(b_offset[0])[b_offset[1] : b_offset[1] + b_offset[2]]\n new_chunks.append(_text_cat(a_slice, b_slice))\n return pa.chunked_array(new_chunks)\n\n\n@_text_cat_chunked.register(pa.Array)\ndef _text_cat_chunked_2(a: pa.Array, b: pa.ChunkedArray) -> pa.ChunkedArray:\n new_chunks = []\n offsets = _calculate_chunk_offsets(b)\n for chunk, offset in zip(b.iterchunks(), offsets):\n new_chunks.append(_text_cat(a[offset : offset + len(chunk)], chunk))\n return pa.chunked_array(new_chunks)\n\n\ndef _text_cat_chunked_mixed(a: pa.ChunkedArray, b: pa.Array) -> pa.ChunkedArray:\n new_chunks = []\n offsets = _calculate_chunk_offsets(a)\n for chunk, offset in zip(a.iterchunks(), offsets):\n new_chunks.append(_text_cat(chunk, b[offset : offset + len(chunk)]))\n return pa.chunked_array(new_chunks)\n\n\ndef _text_cat(a: pa.Array, b: pa.Array) -> pa.Array:\n if len(a) != len(b):\n raise ValueError(\"Lengths of arrays don't match\")\n\n offsets_a, data_a = _extract_string_buffers(a)\n offsets_b, data_b = _extract_string_buffers(b)\n if len(a) > 0:\n valid = _merge_valid_bitmaps(a, b)\n result_offsets = np.empty(len(a) + 1, dtype=np.int32)\n result_offsets[0] = 0\n total_size = (offsets_a[-1] - offsets_a[0]) + (offsets_b[-1] - offsets_b[0])\n result_data = np.empty(total_size, dtype=np.uint8)\n _merge_string_data(\n len(a),\n valid,\n offsets_a,\n data_a,\n offsets_b,\n data_b,\n result_offsets,\n result_data,\n )\n buffers = [pa.py_buffer(x) for x in [valid, result_offsets, result_data]]\n return 
pa.Array.from_buffers(pa.string(), len(a), buffers)\n return a\n\n\n@njit\ndef _text_contains_case_sensitive_nonnull(\n length: int, offsets: np.ndarray, data: np.ndarray, pat, output: np.ndarray\n) -> None:\n for row_idx in range(length):\n str_len = offsets[row_idx + 1] - offsets[row_idx]\n\n contains = False\n for str_idx in range(max(0, str_len - len(pat) + 1)):\n pat_found = True\n for pat_idx in range(len(pat)):\n if data[offsets[row_idx] + str_idx + pat_idx] != pat[pat_idx]:\n pat_found = False\n break\n if pat_found:\n contains = True\n break\n\n # TODO: Set word-wise for better performance\n byte_offset_result = row_idx // 8\n bit_offset_result = row_idx % 8\n mask_result = np.uint8(1 << bit_offset_result)\n current = output[byte_offset_result]\n if contains: # must be logical, not bit-wise as different bits may be flagged\n output[byte_offset_result] = current | mask_result\n else:\n output[byte_offset_result] = current & ~mask_result\n\n\n@njit\ndef _text_contains_case_sensitive_nulls(\n length: int,\n valid_bits: np.ndarray,\n valid_offset: int,\n offsets: np.ndarray,\n data: np.ndarray,\n pat: bytes,\n output: np.ndarray,\n) -> None:\n for row_idx in range(length):\n # Check whether the current entry is null.\n byte_offset = (row_idx + valid_offset) // 8\n bit_offset = (row_idx + valid_offset) % 8\n mask = np.uint8(1 << bit_offset)\n valid = valid_bits[byte_offset] & mask\n\n # We don't need to set the result for nulls, the calling code is\n # already dealing with them by zero'ing the output.\n if not valid:\n continue\n\n str_len = offsets[row_idx + 1] - offsets[row_idx]\n\n contains = False\n # Try to find the pattern at each starting position\n for str_idx in range(max(0, str_len - len(pat) + 1)):\n pat_found = True\n # Compare at the current position byte-by-byte\n for pat_idx in range(len(pat)):\n if data[offsets[row_idx] + str_idx + pat_idx] != pat[pat_idx]:\n pat_found = False\n break\n if pat_found:\n contains = True\n break\n\n # Write out the result into the bit-mask\n byte_offset_result = row_idx // 8\n bit_offset_result = row_idx % 8\n mask_result = np.uint8(1 << bit_offset_result)\n current = output[byte_offset_result]\n if contains: # must be logical, not bit-wise as different bits may be flagged\n output[byte_offset_result] = current | mask_result\n else:\n output[byte_offset_result] = current & ~mask_result\n\n\n@njit\ndef _shift_unaligned_bitmap(\n valid_bits: np.ndarray, valid_offset: int, length: int, output: np.ndarray\n) -> None:\n for i in range(length):\n byte_offset = (i + valid_offset) // 8\n bit_offset = (i + valid_offset) % 8\n mask = np.uint8(1 << bit_offset)\n valid = valid_bits[byte_offset] & mask\n\n byte_offset_result = i // 8\n bit_offset_result = i % 8\n mask_result = np.uint8(1 << bit_offset_result)\n current = output[byte_offset_result]\n if valid:\n output[byte_offset_result] = current | mask_result\n\n\ndef shift_unaligned_bitmap(\n valid_buffer: pa.Buffer, offset: int, length: int\n) -> pa.Buffer:\n \"\"\"Shift an unaligned bitmap to be offsetted at 0.\"\"\"\n output_size = length // 8\n if length % 8 > 0:\n output_size += 1\n output = np.zeros(output_size, dtype=np.uint8)\n\n _shift_unaligned_bitmap(valid_buffer, offset, length, output)\n\n return pa.py_buffer(output)\n\n\n@apply_per_chunk\ndef _text_contains_case_sensitive(data: pa.Array, pat: str) -> pa.Array:\n \"\"\"\n Check for each element in the data whether it contains the pattern ``pat``.\n\n This implementation does basic byte-by-byte comparison and is independent\n of any 
locales or encodings.\n \"\"\"\n # Convert to UTF-8 bytes\n pat_bytes: bytes = pat.encode()\n\n # Initialise boolean (bit-packaed) output array.\n output_size = len(data) // 8\n if len(data) % 8 > 0:\n output_size += 1\n output = np.empty(output_size, dtype=np.uint8)\n if len(data) % 8 > 0:\n # Zero trailing bits\n output[-1] = 0\n\n offsets, data_buffer = _extract_string_buffers(data)\n\n if data.null_count == 0:\n valid_buffer = None\n _text_contains_case_sensitive_nonnull(\n len(data), offsets, data_buffer, pat_bytes, output\n )\n else:\n valid = _buffer_to_view(data.buffers()[0])\n _text_contains_case_sensitive_nulls(\n len(data), valid, data.offset, offsets, data_buffer, pat_bytes, output\n )\n valid_buffer = data.buffers()[0].slice(data.offset // 8)\n if data.offset % 8 != 0:\n valid_buffer = shift_unaligned_bitmap(\n valid_buffer, data.offset % 8, len(data)\n )\n\n return pa.Array.from_buffers(\n pa.bool_(), len(data), [valid_buffer, pa.py_buffer(output)], data.null_count\n )\n\n\n@njit\ndef _startswith(sa, needle, na, offset, out):\n for i in range(sa.size):\n if sa.isnull(i):\n out[offset + i] = na\n continue\n\n if sa.byte_length(i) < needle.length:\n out[offset + i] = 0\n continue\n\n for j in range(needle.length):\n if sa.get_byte(i, j) != needle.get_byte(j):\n out[offset + i] = 0\n break\n\n else:\n out[offset + i] = 1\n\n\n@njit\ndef _endswith(sa, needle, na, offset, out):\n for i in range(sa.size):\n if sa.isnull(i):\n out[offset + i] = na\n continue\n\n string_length = sa.byte_length(i)\n needle_length = needle.length\n if string_length < needle.length:\n out[offset + i] = 0\n continue\n\n out[offset + i] = 1\n for j in range(needle_length):\n if sa.get_byte(i, string_length - needle_length + j) != needle.get_byte(j):\n out[offset + i] = 0\n break\n"
] |
[
[
"numpy.uint8",
"numpy.zeros",
"numpy.empty"
]
] |
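For orientation, a small sketch of how the string kernels above are driven from pyarrow data (illustrative only; assumes the helpers are importable from fletcher.algorithms.string, and the commented results follow from the null-propagation logic in _merge_string_data):

import pyarrow as pa

a = pa.array(["foo", None, "bar"], type=pa.string())
b = pa.array(["!", "?", "!"], type=pa.string())
print(_text_cat(a, b))  # expected: ["foo!", null, "bar!"]
# apply_per_chunk runs the kernel chunk-by-chunk over a ChunkedArray;
# matching is plain byte-wise comparison with no locale or case handling.
print(_text_contains_case_sensitive(pa.chunked_array([a]), "ba"))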
TheoSaify/Yolo-Detector
|
[
"f1ac387370982de323a4fc09109c57736b8ce8d6"
] |
[
"MorphologicalTrans.py"
] |
[
"import cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\n\r\n\r\n###############################Erosion#########################\r\n# it erodes away the boundaries of foreground object\r\n\r\n\r\nimg = cv2.imread('C://Users//alsaifyt//Desktop//PythonOpenCvScripts//j.png',0)\r\nkernel = np.ones((5,5),np.uint8)\r\nerosion = cv2.erode(img,kernel,iterations = 1)\r\n\r\nplt.subplot(121),plt.imshow(img),plt.title('Original')\r\nplt.xticks([]), plt.yticks([])\r\nplt.subplot(122),plt.imshow(erosion),plt.title('Erosed')\r\nplt.xticks([]), plt.yticks([])\r\nplt.show()\r\n\r\n\r\n###############################Dilation#########################\r\n#it increases the white region in the image or size of foreground object increases.\r\n # It is useful in removing noise \r\n\r\ndilation = cv2.dilate(img,kernel,iterations = 1)\r\nplt.subplot(121),plt.imshow(img),plt.title('Original')\r\nplt.xticks([]), plt.yticks([])\r\nplt.subplot(122),plt.imshow(dilation),plt.title('Dilated')\r\nplt.xticks([]), plt.yticks([])\r\nplt.show()\r\n\r\n\r\n\r\n###############################Opening#########################\r\n#erosion followed by dilation\r\n\r\n\r\nopening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\r\nplt.subplot(121),plt.imshow(img),plt.title('Original')\r\nplt.xticks([]), plt.yticks([])\r\nplt.subplot(122),plt.imshow(opening),plt.title('Opening')\r\nplt.xticks([]), plt.yticks([])\r\nplt.show()\r\n\r\n\r\n###############################Closing#########################\r\n#Dilation followed by Erosion\r\n#Useful in closing small holes inside the foreground objects, or small black points on the object.\r\n\r\nclosing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)\r\nplt.subplot(121),plt.imshow(img),plt.title('Original')\r\nplt.xticks([]), plt.yticks([])\r\nplt.subplot(122),plt.imshow(closing),plt.title('Closing')\r\nplt.xticks([]), plt.yticks([])\r\nplt.show()\r\n\r\n\r\n###############################Morphological Gradient#########################\r\n# It is the difference between dilation and erosion of an image.\r\n\r\n\r\ngradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)\r\n\r\nplt.subplot(121),plt.imshow(img),plt.title('Original')\r\nplt.xticks([]), plt.yticks([])\r\nplt.subplot(122),plt.imshow(gradient),plt.title('Morphological Gradient')\r\nplt.xticks([]), plt.yticks([])\r\nplt.show()\r\n\r\n\r\n###############################Top Hat#########################\r\n# It is the difference between input image and Opening of the image\r\n\r\ntophat = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)\r\n\r\n\r\n###############################black Hat#########################\r\n#he difference between the closing of the input image and input image.\r\n\r\nblackhat = cv2.morphologyEx(img, cv2.MORPH_BLACKHAT, kernel)\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.title",
"numpy.ones",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show"
]
] |
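A quick numerical check of the identity stated in the comments above (the morphological gradient equals dilation minus erosion), on a synthetic image so it runs without the hard-coded j.png path:

import cv2
import numpy as np

img = np.zeros((9, 9), np.uint8)
img[3:6, 3:6] = 255  # small white square on a black background
kernel = np.ones((3, 3), np.uint8)
gradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
assert np.array_equal(gradient, cv2.dilate(img, kernel) - cv2.erode(img, kernel))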
vonum/style-transfer
|
[
"0cd9bcb3fa680298de727141824dfc201b7dc666"
] |
[
"losses.py"
] |
[
"import tensorflow as tf\n\ndef mean_squared_error(a, b):\n return tf.reduce_mean(tf.square(a - b))\n\ndef sum_squared_error(a, b):\n return tf.reduce_sum(tf.square(a - b))\n\ndef gram_matrix(tensor):\n shape = tensor.get_shape()\n\n # Get the number of feature channels for the input tensor,\n # which is assumed to be from a convolutional layer with 4-dim.\n num_channels = int(shape[3])\n\n matrix = tf.reshape(tensor, shape=[-1, num_channels])\n\n return tf.matmul(tf.transpose(matrix), matrix)\n\n# helps suppress noise in mixed image we are generating\ndef tv_loss(image):\n return tf.image.total_variation(image)\n"
] |
[
[
"tensorflow.reshape",
"tensorflow.transpose",
"tensorflow.square",
"tensorflow.image.total_variation"
]
] |
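A minimal sketch of how gram_matrix and mean_squared_error combine into a style loss (an assumption about how the repo uses them elsewhere; the random tensors stand in for conv-layer activations of shape [batch, height, width, channels] and the snippet runs eagerly under TF 2):

import tensorflow as tf

style_features = tf.random.uniform([1, 32, 32, 64])
mixed_features = tf.random.uniform([1, 32, 32, 64])
# Gram matrices capture channel-to-channel correlations, i.e. style.
style_loss = mean_squared_error(gram_matrix(style_features),
                                gram_matrix(mixed_features))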
leolapidus/Concrete_Design_Tool
|
[
"7553f50d3db05e3a6c290e35acdc5d8bd1c51130"
] |
[
"FE_code/single_load.py"
] |
[
"\"\"\"This module only contains the single load element.\n\n\"\"\"\n\nimport numpy as np\n\nfrom FE_code.element import Element\n\nclass SingleLoad(Element):\n \"\"\"Creates a Single Load on the Nodes of the Element\n \n Attributes\n ----------\n node : object `Node`\n object of the class Node\n \n load_type : keyword arguments\n load values on the node\n \n Raises\n ------\n RuntimeError\n setting wrong loads\n \n Examples\n --------\n Create a single load on one Node\n >>> SingleLoadBeam(node, fx=4, fy=5, mz=8)\n \"\"\"\n\n def __init__(self, id, node, **load_types):\n \"\"\"Creates a single load\n \n Parameters\n ----------\n node : object\n object of the class Node\n load_type : keyword arguments\n load values on the node\n \n \"\"\"\n self.id = id\n self._node = node\n self._fx = load_types.get('fx')\n self._fy = load_types.get('fy')\n self._mz = load_types.get('mz')\n \n\n @property\n def dofs(self):\n node_id = self._node.id\n\n return [(node_id, 'u'), (node_id, 'v'), (node_id, 'phi')]\n\n @property \n def node_id(self):\n return self._node.id\n\n def get_load_vector(self):\n \"\"\"Calculate the local vector of nodal loads\n \n Returns\n -------\n load_vector : array_like\n vector of nodal loads\n \"\"\"\n fx = self._fx\n fy = self._fy\n mz = self._mz\n load_vector = np.array([fx, fy, mz])\n return load_vector\n\n def _get_dof_tuple_from_node_id(self, node_id):\n return [(node_id, 'u'), (node_id, 'v'), (node_id, 'phi')]\n"
] |
[
[
"numpy.array"
]
] |
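Hypothetical usage matching the docstring example (the stub node is an assumption; the real FE_code Node only needs to expose an id attribute for the methods shown, and Element is assumed to impose no extra constructor requirements):

class _StubNode:
    def __init__(self, id):
        self.id = id

load = SingleLoad(id=1, node=_StubNode(7), fx=4.0, fy=5.0, mz=8.0)
print(load.dofs)               # [(7, 'u'), (7, 'v'), (7, 'phi')]
print(load.get_load_vector())  # [4. 5. 8.]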
masunder/3DSRCNN-pytorch-1
|
[
"74355561e94b9cbe947af7a87830846928d49d9f"
] |
[
"generate_subblocks.py"
] |
[
"\n# -*- coding: utf-8 -*-\n\nimport torch\nfrom torch.autograd import Variable\nimport numpy as np\nimport argparse\nimport utils\nimport os\ntorch.nn.Module.dump_patches = True\n##该程序相当于是拼接,只有全部生成了子图片(3d_vdsr_xxx),才能成功生成完整图像\nparser = argparse.ArgumentParser(description=\"PyTorch VDSR\")\nparser.add_argument(\"--countd\", type=int, default=0, help=\"d_size\")\nparser.add_argument(\"--counth\", type=int, default=0, help=\"h_size\")\nparser.add_argument(\"--countw\", type=int, default=0, help=\"w_size\")\nparser.add_argument(\"--dep\", type=int, default=40, help=\"d\")\nparser.add_argument(\"--hei\", type=int, default=2580, help=\"h\")\nparser.add_argument(\"--wid\", type=int, default=2580, help=\"w\")\nparser.add_argument('--interpath',type=str,default='images/LR/', help='path to interpolation images' )\nparser.add_argument('--crop',type=int,default=0,help='pre-processing')\nparser.add_argument('--block_size',type=int,default=50, help='reconstrucion size per time')\nparser.add_argument('--model',type=str,default=\"model/0809-1245L__model/model_epoch_2.pkl\", help='path to trained model')\nparser.add_argument('--cuda',type=int,default=0)\nparser.add_argument('--format', type=str, default='bmp', help=\"specified images format\")\nopt=parser.parse_args()\n\n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"3\"\nCUDA_ENABLE = opt.cuda\nPRE_CROP=opt.crop\nimg_format = opt.format\n#if CUDA_ENABLE and not torch.cuda.is_available():\n# raise Exception(\"No GPU found, please run without --cuda\")\n\nmodel_path=opt.model#current filepath \nmodel = torch.load(model_path)['model']\n#params=model.state_dict()\n#print (params)\n\nif CUDA_ENABLE:\n model = model.cuda()\n #print ('Using GPU acceleration!')\nelse :\n model=model.cpu()\n print ('Using CPU to compute')\n \nimage_path=opt.interpath\n# print(image_path)\ndataset_interp=utils.read_imagecollection(image_path, image_format=img_format)\nif len(dataset_interp) == 0:\n raise IOError(\"Read Images Failed!!!\")\nif PRE_CROP:\n dataset_interp=utils.pre_crop(dataset_interp)\n#print('===>Load input low resolution image from : ',image_path)\ndataset_interp = dataset_interp/255.0#normlize the gray rank to 0-1\ndataset_interp = dataset_interp[:opt.wid,:opt.hei,:opt.dep]\n\nnum,h,w=dataset_interp.shape\nbatch_generate_size=20\nreconstruction_output=np.zeros((num,h,w))\ncount_d,count_h,count_w=opt.countd,opt.counth,opt.countw\npixel_start_d=count_d*batch_generate_size\npixel_end_d=(count_d+1)*batch_generate_size\npixel_start_h=count_h*batch_generate_size\npixel_end_h=(count_h+1)*batch_generate_size\npixel_start_w=count_w*batch_generate_size\npixel_end_w=(count_w+1)*batch_generate_size\ntestdata=dataset_interp[pixel_start_d:pixel_end_d,pixel_start_h:pixel_end_h,pixel_start_w:pixel_end_w]\n#print ('input data from interplation:',testdata.shape)\ntestdata=testdata.reshape(1,1,batch_generate_size,batch_generate_size,batch_generate_size)\n# testdata=torch.cuda.FloatTensor(testdata)\ntestdata=torch.Tensor(testdata)\nif CUDA_ENABLE:\n testdata_variable=Variable(testdata).cuda()\n testdata_output=model(testdata_variable)\n output=testdata_output.data.cpu().numpy().squeeze()\nelse :\n testdata_variable=Variable(testdata)\n testdata_output=model(testdata_variable)\n output=testdata_output.data.numpy().squeeze()\noutput=output*255#restore to the gray rank0-255\nreconstruction_output[pixel_start_d:pixel_end_d,pixel_start_h:pixel_end_h,pixel_start_w:pixel_end_w]=output#\n\ndataset_interp=dataset_interp*255\n#print ('PSNR of 
interp:',PSNR(dataset_interp,dataset_ori[:400,:400,:400]))\n#print ('PSNR of reconstructor:',PSNR(reconstruction_output,dataset_ori[:400,:400,:400]))\nutils.generate_2Dimage(output,save_mode='result/3D_VDSR_'+str(count_d)+str(count_h)+str(count_w)+'/')\n"
] |
[
[
"torch.autograd.Variable",
"numpy.zeros",
"torch.Tensor",
"torch.load"
]
] |
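Since the script reconstructs one 20^3 sub-block per invocation, a driver loop is needed before stitching. A hedged sketch (the ranges follow from the defaults dep=40, hei=wid=2580 and batch_generate_size=20, i.e. 2 x 129 x 129 blocks; adjust for other volume sizes):

import itertools
import subprocess

for d, h, w in itertools.product(range(40 // 20), range(2580 // 20), range(2580 // 20)):
    subprocess.run(
        ["python", "generate_subblocks.py",
         "--countd", str(d), "--counth", str(h), "--countw", str(w)],
        check=True,
    )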
carolmanderson/NeMo
|
[
"ec6591e76ab5d33e050e0dded25bed43d5d6c3e4"
] |
[
"nemo/collections/nlp/models/duplex_text_normalization/duplex_tn.py"
] |
[
"# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom math import ceil\nfrom time import perf_counter\nfrom typing import List\n\nimport numpy as np\nimport torch.nn as nn\nfrom tqdm import tqdm\n\nfrom nemo.collections.nlp.data.text_normalization import TextNormalizationTestDataset, constants\nfrom nemo.collections.nlp.data.text_normalization.utils import basic_tokenize, post_process_punct\nfrom nemo.collections.nlp.models.duplex_text_normalization.utils import get_formatted_string\nfrom nemo.utils import logging\nfrom nemo.utils.decorators.experimental import experimental\n\n__all__ = ['DuplexTextNormalizationModel']\n\n\n@experimental\nclass DuplexTextNormalizationModel(nn.Module):\n \"\"\"\n DuplexTextNormalizationModel is a wrapper class that can be used to\n encapsulate a trained tagger and a trained decoder. The class is intended\n to be used for inference only (e.g., for evaluation).\n \"\"\"\n\n def __init__(self, tagger, decoder, lang):\n super(DuplexTextNormalizationModel, self).__init__()\n\n self.tagger = tagger\n self.decoder = decoder\n self.lang = lang\n\n def evaluate(\n self, dataset: TextNormalizationTestDataset, batch_size: int, errors_log_fp: str, verbose: bool = True\n ):\n \"\"\" Function for evaluating the performance of the model on a dataset\n\n Args:\n dataset: The dataset to be used for evaluation.\n batch_size: Batch size to use during inference. 
You can set it to be 1\n (no batching) if you want to measure the running time of the model\n per individual example (assuming requests are coming to the model one-by-one).\n errors_log_fp: Path to the file for logging the errors\n verbose: if true prints and logs various evaluation results\n\n Returns:\n results: A Dict containing the evaluation results (e.g., accuracy, running time)\n \"\"\"\n results = {}\n error_f = open(errors_log_fp, 'w+')\n\n # Apply the model on the dataset\n (\n all_run_times,\n all_dirs,\n all_inputs,\n all_targets,\n all_classes,\n all_nb_spans,\n all_span_starts,\n all_span_ends,\n all_output_spans,\n ) = ([], [], [], [], [], [], [], [], [])\n all_tag_preds, all_final_preds = [], []\n nb_iters = int(ceil(len(dataset) / batch_size))\n for i in tqdm(range(nb_iters)):\n start_idx = i * batch_size\n end_idx = (i + 1) * batch_size\n batch_insts = dataset[start_idx:end_idx]\n (\n batch_dirs,\n batch_inputs,\n batch_targets,\n batch_classes,\n batch_nb_spans,\n batch_span_starts,\n batch_span_ends,\n ) = zip(*batch_insts)\n # Inference and Running Time Measurement\n batch_start_time = perf_counter()\n batch_tag_preds, batch_output_spans, batch_final_preds = self._infer(batch_inputs, batch_dirs)\n batch_run_time = (perf_counter() - batch_start_time) * 1000 # milliseconds\n all_run_times.append(batch_run_time)\n # Update all_dirs, all_inputs, all_tag_preds, all_final_preds and all_targets\n all_dirs.extend(batch_dirs)\n all_inputs.extend(batch_inputs)\n all_tag_preds.extend(batch_tag_preds)\n all_final_preds.extend(batch_final_preds)\n all_targets.extend(batch_targets)\n all_classes.extend(batch_classes)\n all_nb_spans.extend(batch_nb_spans)\n all_span_starts.extend(batch_span_starts)\n all_span_ends.extend(batch_span_ends)\n all_output_spans.extend(batch_output_spans)\n\n # Metrics\n tn_error_ctx, itn_error_ctx = 0, 0\n for direction in constants.INST_DIRECTIONS:\n (\n cur_dirs,\n cur_inputs,\n cur_tag_preds,\n cur_final_preds,\n cur_targets,\n cur_classes,\n cur_nb_spans,\n cur_span_starts,\n cur_span_ends,\n cur_output_spans,\n ) = ([], [], [], [], [], [], [], [], [], [])\n for dir, _input, tag_pred, final_pred, target, cls, nb_spans, span_starts, span_ends, output_spans in zip(\n all_dirs,\n all_inputs,\n all_tag_preds,\n all_final_preds,\n all_targets,\n all_classes,\n all_nb_spans,\n all_span_starts,\n all_span_ends,\n all_output_spans,\n ):\n if dir == direction:\n cur_dirs.append(dir)\n cur_inputs.append(_input)\n cur_tag_preds.append(tag_pred)\n cur_final_preds.append(final_pred)\n cur_targets.append(target)\n cur_classes.append(cls)\n cur_nb_spans.append(nb_spans)\n cur_span_starts.append(span_starts)\n cur_span_ends.append(span_ends)\n cur_output_spans.append(output_spans)\n nb_instances = len(cur_final_preds)\n cur_targets_sent = [\" \".join(x) for x in cur_targets]\n\n sent_accuracy = TextNormalizationTestDataset.compute_sent_accuracy(\n cur_final_preds, cur_targets_sent, cur_dirs, self.lang\n )\n\n class_accuracy = TextNormalizationTestDataset.compute_class_accuracy(\n [basic_tokenize(x, lang=self.lang) for x in cur_inputs],\n cur_targets,\n cur_tag_preds,\n cur_dirs,\n cur_output_spans,\n cur_classes,\n cur_nb_spans,\n cur_span_starts,\n cur_span_ends,\n self.lang,\n )\n if verbose:\n logging.info(f'\\n============ Direction {direction} ============')\n logging.info(f'Sentence Accuracy: {sent_accuracy}')\n logging.info(f'nb_instances: {nb_instances}')\n if not isinstance(class_accuracy, str):\n log_class_accuracies = \"\"\n for key, value in 
class_accuracy.items():\n log_class_accuracies += f\"\\n\\t{key}:\\t{value[0]}\\t{value[1]}/{value[2]}\"\n else:\n log_class_accuracies = class_accuracy\n logging.info(f'class accuracies: {log_class_accuracies}')\n # Update results\n results[direction] = {\n 'sent_accuracy': sent_accuracy,\n 'nb_instances': nb_instances,\n \"class_accuracy\": log_class_accuracies,\n }\n # Write errors to log file\n for _input, tag_pred, final_pred, target, classes in zip(\n cur_inputs, cur_tag_preds, cur_final_preds, cur_targets_sent, cur_classes\n ):\n if not TextNormalizationTestDataset.is_same(final_pred, target, direction, self.lang):\n if direction == constants.INST_BACKWARD:\n error_f.write('Backward Problem (ITN)\\n')\n itn_error_ctx += 1\n elif direction == constants.INST_FORWARD:\n error_f.write('Forward Problem (TN)\\n')\n tn_error_ctx += 1\n formatted_input_str = get_formatted_string(basic_tokenize(_input, lang=self.lang))\n formatted_tag_pred_str = get_formatted_string(tag_pred)\n class_str = \" \".join(classes)\n error_f.write(f'Original Input : {_input}\\n')\n error_f.write(f'Input : {formatted_input_str}\\n')\n error_f.write(f'Predicted Tags : {formatted_tag_pred_str}\\n')\n error_f.write(f'Ground Classes : {class_str}\\n')\n error_f.write(f'Predicted Str : {final_pred}\\n')\n error_f.write(f'Ground-Truth : {target}\\n')\n error_f.write('\\n')\n results['itn_error_ctx'] = itn_error_ctx\n results['tn_error_ctx'] = tn_error_ctx\n\n # Running Time\n avg_running_time = np.average(all_run_times) / batch_size # in ms\n if verbose:\n logging.info(f'Average running time (normalized by batch size): {avg_running_time} ms')\n results['running_time'] = avg_running_time\n\n # Close log file\n error_f.close()\n logging.info(f'Errors are saved at {errors_log_fp}.')\n return results\n\n # Functions for inference\n def _infer(self, sents: List[str], inst_directions: List[str], do_basic_tokenization=True):\n \"\"\"\n Main function for Inference\n\n If the 'joint' mode is used, \"sents\" will include both spoken and written forms on each input sentence,\n and \"inst_directions\" will include both constants.INST_BACKWARD and constants.INST_FORWARD\n\n Args:\n sents: A list of input texts.\n inst_directions: A list of str where each str indicates the direction of the corresponding instance \\\n (i.e., constants.INST_BACKWARD for ITN or constants.INST_FORWARD for TN).\n do_basic_tokenization: whether to do a pre-processing to separate punctuation marks,\n recommended to set to True\n\n Returns:\n tag_preds: A list of lists where the inner list contains the tag predictions from the tagger for each word in the input text.\n output_spans: A list of lists where each list contains the decoded semiotic spans from the decoder for an input text.\n final_outputs: A list of str where each str is the final output text for an input text.\n \"\"\"\n original_sents = [s for s in sents]\n # Separate into words\n if do_basic_tokenization:\n sents = [self.decoder.processor.tokenize(x).split() for x in sents]\n\n # Tagging\n # span_ends included, returns index wrt to words in input without auxiliary words\n tag_preds, nb_spans, span_starts, span_ends = self.tagger._infer(\n sents, inst_directions, do_basic_tokenization=do_basic_tokenization\n )\n output_spans = self.decoder._infer(sents, nb_spans, span_starts, span_ends, inst_directions)\n\n if not do_basic_tokenization:\n sents = [x.split() for x in sents]\n\n # Prepare final outputs\n final_outputs = []\n for ix, (sent, tags) in enumerate(zip(sents, tag_preds)):\n try:\n 
cur_words, jx, span_idx = [], 0, 0\n cur_spans = output_spans[ix]\n while jx < len(sent):\n tag, word = tags[jx], sent[jx]\n if constants.SAME_TAG in tag:\n cur_words.append(word)\n jx += 1\n else:\n jx += 1\n cur_words.append(cur_spans[span_idx])\n span_idx += 1\n while jx < len(sent) and tags[jx] == constants.I_PREFIX + constants.TRANSFORM_TAG:\n jx += 1\n cur_output_str = self.decoder.processor.detokenize(cur_words)\n cur_output_str = post_process_punct(input=original_sents[ix], nn_output=cur_output_str)\n final_outputs.append(cur_output_str)\n except IndexError:\n logging.warning(f\"Input sent is too long and will be skipped - {original_sents[ix]}\")\n final_outputs.append(original_sents[ix])\n return tag_preds, output_spans, final_outputs\n"
] |
[
[
"numpy.average"
]
] |
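A hedged inference sketch for the wrapper above (tagger and decoder are assumed to be trained NeMo tagger/decoder models exposing the _infer interfaces this class calls; the input sentence is arbitrary):

model = DuplexTextNormalizationModel(tagger, decoder, lang='en')
tag_preds, output_spans, outputs = model._infer(
    sents=['The meeting is at 10:30'],
    inst_directions=[constants.INST_FORWARD],  # TN: written form -> spoken form
)
print(outputs[0])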
maumueller/ann-benchmarks-edml19
|
[
"e598caa74ddbcae6c269f249c1e16c97c4642f7d"
] |
[
"ann_benchmarks/datasets.py"
] |
[
"import h5py\nimport numpy\nimport os\nimport random\nimport sys\ntry:\n from urllib import urlretrieve\nexcept ImportError:\n from urllib.request import urlretrieve # Python 3\n\n\ndef download(src, dst):\n if not os.path.exists(dst):\n # TODO: should be atomic\n print('downloading %s -> %s...' % (src, dst))\n urlretrieve(src, dst)\n\n\ndef get_dataset_fn(dataset):\n if not os.path.exists('data'):\n os.mkdir('data')\n return os.path.join('data', '%s.hdf5' % dataset)\n\n\ndef get_dataset(which):\n hdf5_fn = get_dataset_fn(which)\n try:\n url = 'http://ann-benchmarks.com/%s.hdf5' % which\n download(url, hdf5_fn)\n except:\n print(\"Cannot download %s\" % url)\n if which in DATASETS:\n print(\"Creating dataset locally\")\n DATASETS[which](hdf5_fn)\n hdf5_f = h5py.File(hdf5_fn)\n return hdf5_f\n\n\n# Everything below this line is related to creating datasets\n# You probably never need to do this at home, just rely on the prepared datasets at http://ann-benchmarks.com\n\ndef write_output(train, test, fn, distance, point_type='float', count=100):\n from ann_benchmarks.algorithms.bruteforce import BruteForceBLAS\n n = 0\n f = h5py.File(fn, 'w')\n f.attrs['distance'] = distance\n f.attrs['point_type'] = point_type\n print('train size: %9d * %4d' % train.shape)\n print('test size: %9d * %4d' % test.shape)\n f.create_dataset('train', (len(train), len(train[0])), dtype=train.dtype)[:] = train\n f.create_dataset('test', (len(test), len(test[0])), dtype=test.dtype)[:] = test\n neighbors = f.create_dataset('neighbors', (len(test), count), dtype='i')\n distances = f.create_dataset('distances', (len(test), count), dtype='f')\n bf = BruteForceBLAS(distance, precision=train.dtype)\n bf.fit(train)\n queries = []\n for i, x in enumerate(test):\n if i % 1000 == 0:\n print('%d/%d...' 
% (i, test.shape[0]))\n res = list(bf.query_with_distances(x, count))\n res.sort(key=lambda t: t[-1])\n neighbors[i] = [j for j, _ in res]\n distances[i] = [d for _, d in res]\n f.close()\n\n\ndef train_test_split(X, test_size=10000):\n import sklearn.model_selection\n print('Splitting %d*%d into train/test' % X.shape)\n return sklearn.model_selection.train_test_split(X, test_size=test_size, random_state=1)\n\n\ndef glove(out_fn, d):\n import zipfile\n\n url = 'http://nlp.stanford.edu/data/glove.twitter.27B.zip'\n fn = os.path.join('data', 'glove.twitter.27B.zip')\n download(url, fn)\n with zipfile.ZipFile(fn) as z:\n print('preparing %s' % out_fn)\n z_fn = 'glove.twitter.27B.%dd.txt' % d\n X = []\n for line in z.open(z_fn):\n v = [float(x) for x in line.strip().split()[1:]]\n X.append(numpy.array(v))\n X_train, X_test = train_test_split(X)\n write_output(numpy.array(X_train), numpy.array(X_test), out_fn, 'angular')\n\n\ndef _load_texmex_vectors(f, n, k):\n import struct\n\n v = numpy.zeros((n, k))\n for i in range(n):\n f.read(4) # ignore vec length\n v[i] = struct.unpack('f' * k, f.read(k*4))\n\n return v\n\n\ndef _get_irisa_matrix(t, fn):\n import struct\n m = t.getmember(fn)\n f = t.extractfile(m)\n k, = struct.unpack('i', f.read(4))\n n = m.size // (4 + 4*k)\n f.seek(0)\n return _load_texmex_vectors(f, n, k)\n\n\ndef sift(out_fn):\n import tarfile\n\n url = 'ftp://ftp.irisa.fr/local/texmex/corpus/sift.tar.gz'\n fn = os.path.join('data', 'sift.tar.tz')\n download(url, fn)\n with tarfile.open(fn, 'r:gz') as t:\n train = _get_irisa_matrix(t, 'sift/sift_base.fvecs')\n test = _get_irisa_matrix(t, 'sift/sift_query.fvecs')\n write_output(train, test, out_fn, 'euclidean')\n\n\ndef gist(out_fn):\n import tarfile\n\n url = 'ftp://ftp.irisa.fr/local/texmex/corpus/gist.tar.gz'\n fn = os.path.join('data', 'gist.tar.tz')\n download(url, fn)\n with tarfile.open(fn, 'r:gz') as t:\n train = _get_irisa_matrix(t, 'gist/gist_base.fvecs')\n test = _get_irisa_matrix(t, 'gist/gist_query.fvecs')\n write_output(train, test, out_fn, 'euclidean')\n\n\ndef _load_mnist_vectors(fn):\n import gzip\n import struct\n\n print('parsing vectors in %s...' 
% fn)\n f = gzip.open(fn)\n type_code_info = {\n 0x08: (1, \"!B\"),\n 0x09: (1, \"!b\"),\n 0x0B: (2, \"!H\"),\n 0x0C: (4, \"!I\"),\n 0x0D: (4, \"!f\"),\n 0x0E: (8, \"!d\")\n }\n magic, type_code, dim_count = struct.unpack(\"!hBB\", f.read(4))\n assert magic == 0\n assert type_code in type_code_info\n\n dimensions = [struct.unpack(\"!I\", f.read(4))[0] for i in range(dim_count)]\n\n entry_count = dimensions[0]\n entry_size = numpy.product(dimensions[1:])\n\n b, format_string = type_code_info[type_code]\n vectors = []\n for i in range(entry_count):\n vectors.append([struct.unpack(format_string, f.read(b))[0] for j in range(entry_size)])\n return numpy.array(vectors)\n\n\ndef mnist(out_fn):\n download('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', 'mnist-train.gz')\n download('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', 'mnist-test.gz')\n train = _load_mnist_vectors('mnist-train.gz')\n test = _load_mnist_vectors('mnist-test.gz')\n write_output(train, test, out_fn, 'euclidean')\n\n\ndef fashion_mnist(out_fn):\n download('http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz', 'fashion-mnist-train.gz')\n download('http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz', 'fashion-mnist-test.gz')\n train = _load_mnist_vectors('fashion-mnist-train.gz')\n test = _load_mnist_vectors('fashion-mnist-test.gz')\n write_output(train, test, out_fn, 'euclidean')\n\n\ndef transform_bag_of_words(filename, n_dimensions, out_fn):\n import gzip\n from scipy.sparse import lil_matrix\n from sklearn.feature_extraction.text import TfidfTransformer\n from sklearn import random_projection\n with gzip.open(filename, 'rb') as f:\n file_content = f.readlines()\n entries = int(file_content[0])\n words = int(file_content[1])\n file_content = file_content[3:] # strip first three entries\n print(\"building matrix...\")\n A = lil_matrix((entries, words))\n for e in file_content:\n doc, word, cnt = [int(v) for v in e.strip().split()]\n A[doc - 1, word - 1] = cnt\n print(\"normalizing matrix entries with tfidf...\")\n B = TfidfTransformer().fit_transform(A)\n print(\"reducing dimensionality...\")\n C = random_projection.GaussianRandomProjection(n_components = n_dimensions).fit_transform(B)\n X_train, X_test = train_test_split(C)\n write_output(numpy.array(X_train), numpy.array(X_test), out_fn, 'angular')\n\n\ndef nytimes(out_fn, n_dimensions):\n fn = 'nytimes_%s.txt.gz' % n_dimensions\n download('https://archive.ics.uci.edu/ml/machine-learning-databases/bag-of-words/docword.nytimes.txt.gz', fn)\n transform_bag_of_words(fn, n_dimensions, out_fn)\n\n\ndef random(out_fn, n_dims, n_samples, centers, distance):\n import sklearn.datasets\n\n X, _ = sklearn.datasets.make_blobs(n_samples=n_samples, n_features=n_dims, centers=centers, random_state=1)\n X_train, X_test = train_test_split(X, test_size=0.1)\n write_output(X_train, X_test, out_fn, distance)\n\ndef random_bitstring(out_fn, n_dims, n_samples, n_queries):\n import sklearn.datasets\n\n Y, _ = sklearn.datasets.make_blobs(n_samples=n_samples, n_features=n_dims, centers=n_queries, random_state=1)\n X = numpy.zeros((n_samples, n_dims), dtype=numpy.bool)\n for i, vec in enumerate(Y):\n X[i] = numpy.array([v > 0 for v in vec], dtype=numpy.bool)\n\n X_train, X_test = train_test_split(X, test_size=n_queries)\n write_output(X_train, X_test, out_fn, 'hamming', 'bit')\n\n\ndef word2bits(out_fn, path, fn):\n import tarfile\n local_fn = fn + '.tar.gz'\n url = 
'http://web.stanford.edu/~maxlam/word_vectors/compressed/%s/%s.tar.gz' % (path, fn)\n    download(url, local_fn)\n    print('parsing vectors in %s...' % local_fn)\n    with tarfile.open(local_fn, 'r:gz') as t:\n        f = t.extractfile(fn)\n        n_words, k = [int(z) for z in next(f).strip().split()]\n        X = numpy.zeros((n_words, k), dtype=numpy.bool)\n        for i in range(n_words):\n            X[i] = numpy.array([float(z) > 0 for z in next(f).strip().split()[1:]], dtype=numpy.bool)\n\n        X_train, X_test = train_test_split(X, test_size=1000)\n        write_output(X_train, X_test, out_fn, 'hamming', 'bit')\n\ndef sift_hamming(out_fn, fn):\n    import tarfile\n    local_fn = fn + '.tar.gz'\n    url = 'http://sss.projects.itu.dk/ann-benchmarks/datasets/%s.tar.gz' % fn\n    download(url, local_fn)\n    print('parsing vectors in %s...' % local_fn)\n    with tarfile.open(local_fn, 'r:gz') as t:\n        f = t.extractfile(fn)\n        lines = f.readlines()\n        X = numpy.zeros((len(lines), 256), dtype=numpy.bool)\n        for i, line in enumerate(lines):\n            X[i] = numpy.array([int(x) > 0 for x in line.decode().strip()], dtype=numpy.bool)\n        X_train, X_test = train_test_split(X, test_size=1000)\n        write_output(X_train, X_test, out_fn, 'hamming', 'bit')\n\ndef lastfm(out_fn, n_dimensions, test_size=50000):\n    # This tests out ANN methods for retrieval on simple matrix factorization based\n    # recommendation algorithms. The idea is that the query/test vectors are user factors\n    # and the train set contains the item factors from the matrix factorization model.\n\n    # Since the predictor is a dot product, we transform the factors first as described in this\n    # paper: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/XboxInnerProduct.pdf\n    # This hopefully replicates the experiments done in this post:\n    # http://www.benfrederickson.com/approximate-nearest-neighbours-for-recommender-systems/\n\n    # The dataset is from \"Last.fm Dataset - 360K users\":\n    # http://www.dtic.upf.edu/~ocelma/MusicRecommendationDataset/lastfm-360K.html\n\n    # this requires the implicit package to generate the factors (on my desktop/gpu this only\n    # takes 4-5 seconds to train - but could take 1-2 minutes on a laptop)\n    from implicit.datasets.lastfm import get_lastfm\n    from implicit.approximate_als import augment_inner_product_matrix\n    import implicit\n\n    # train an als model on the lastfm data\n    _, _, play_counts = get_lastfm()\n    model = implicit.als.AlternatingLeastSquares(factors=n_dimensions)\n    model.fit(implicit.nearest_neighbours.bm25_weight(play_counts, K1=100, B=0.8))\n\n    # transform item factors so that each one has the same norm, and transform the user\n    # factors by appending a zero column\n    _, item_factors = augment_inner_product_matrix(model.item_factors)\n    user_factors = numpy.append(model.user_factors,\n                                numpy.zeros((model.user_factors.shape[0], 1)),\n                                axis=1)\n\n    # only query the first 50k users (speeds things up significantly without changing results)\n    user_factors = user_factors[:test_size]\n\n    # after that transformation a cosine lookup will return the same results as the inner product\n    # on the untransformed data\n    write_output(item_factors, user_factors, out_fn, 'angular')\n\n\nDATASETS = {\n    'fashion-mnist-784-euclidean': fashion_mnist,\n    'gist-960-euclidean': gist,\n    'glove-25-angular': lambda out_fn: glove(out_fn, 25),\n    'glove-50-angular': lambda out_fn: glove(out_fn, 50),\n    'glove-100-angular': lambda out_fn: glove(out_fn, 100),\n    'glove-200-angular': lambda out_fn: glove(out_fn, 200),\n    'mnist-784-euclidean': mnist,\n    'random-xs-20-euclidean': lambda out_fn: random(out_fn, 20, 
10000, 100, 'euclidean'),\n 'random-s-100-euclidean': lambda out_fn: random(out_fn, 100, 100000, 1000, 'euclidean'),\n 'random-xs-20-angular': lambda out_fn: random(out_fn, 20, 10000, 100, 'angular'),\n 'random-s-100-angular': lambda out_fn: random(out_fn, 100, 100000, 1000, 'angular'),\n 'random-xs-16-hamming': lambda out_fn: random_bitstring(out_fn, 16, 10000, 100),\n 'random-s-128-hamming': lambda out_fn: random_bitstring(out_fn, 128, 50000, 1000),\n 'random-l-256-hamming': lambda out_fn: random_bitstring(out_fn, 256, 100000, 1000),\n 'sift-128-euclidean': sift,\n 'nytimes-256-angular': lambda out_fn: nytimes(out_fn, 256),\n 'nytimes-16-angular': lambda out_fn: nytimes(out_fn, 16),\n 'word2bits-800-hamming': lambda out_fn: word2bits(out_fn, '400K', 'w2b_bitlevel1_size800_vocab400K'),\n 'lastfm-64-dot': lambda out_fn: lastfm(out_fn, 64),\n 'sift-256-hamming': lambda out_fn: sift_hamming(out_fn, 'sift.hamming.256'),\n}\n"
] |
[
[
"numpy.product",
"sklearn.random_projection.GaussianRandomProjection",
"numpy.array",
"sklearn.feature_extraction.text.TfidfTransformer",
"numpy.zeros",
"scipy.sparse.lil_matrix"
]
] |
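The ann-benchmarks-style snippet in the record above builds each dataset by downloading raw vectors, splitting them with train_test_split, and writing train/test along with exact nearest-neighbor ground truth (the neighbors and distances arrays that bf.query_with_distances fills in at the top). The following is a minimal, hedged sketch of that ground-truth step: brute_force_ground_truth is a hypothetical helper, and plain Euclidean distance with a small count is an assumption, since the real pipeline delegates this work to its bf index object and also handles the angular and hamming metrics listed in DATASETS.

import numpy

def brute_force_ground_truth(train, test, count=10):
    # For each test vector, rank all train vectors by exact Euclidean
    # distance and keep the count closest, mirroring the neighbors and
    # distances arrays the snippet above stores in the output file.
    neighbors = numpy.zeros((len(test), count), dtype=int)
    distances = numpy.zeros((len(test), count))
    for i, x in enumerate(test):
        d = numpy.linalg.norm(train - x, axis=1)
        order = numpy.argsort(d)[:count]
        neighbors[i], distances[i] = order, d[order]
    return neighbors, distances

# toy usage: 100 base vectors, 5 queries, 20 dimensions
rng = numpy.random.RandomState(1)
nn, dist = brute_force_ground_truth(rng.rand(100, 20), rng.rand(5, 20))

This quadratic scan is exactly why precomputing ground truth is worthwhile: each approximate index in the benchmark is then scored against the stored exact answers instead of recomputing them on every run.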
fpvc040/OverlapPredator
|
[
"16b2007711126dc337d48049b0c1a56c23f4bc78"
] |
[
"Fast_GR_cupoch/point_cloud_alignment.py"
] |
[
"import numpy as np\nimport copy\nimport time\nimport open3d as o3d\n\ndef preprocess_point_cloud(pcd, voxel_size):\n print(\":: Downsample with a voxel size %.3f.\" % voxel_size)\n pcd_down = pcd.voxel_down_sample(voxel_size)\n\n radius_normal = voxel_size * 2\n print(\":: Estimate normal with search radius %.3f.\" % radius_normal)\n pcd_down.estimate_normals(\n o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))\n\n radius_feature = voxel_size * 5\n print(\":: Compute FPFH feature with search radius %.3f.\" % radius_feature)\n pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(\n pcd_down,\n o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))\n return pcd_down, pcd_fpfh\n\n\ndef prepare_dataset(voxel_size):\n print(\":: Load two point clouds and disturb initial pose.\")\n source = o3d.io.read_point_cloud(\"../assets/1.pcd\")\n target = o3d.io.read_point_cloud(\"../assets/2.pcd\")\n trans_init = np.asarray([[0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]])\n source.transform(trans_init)\n #draw_registration_result(source, target, np.identity(4))\n\n source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)\n target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)\n return source, target, source_down, target_down, source_fpfh, target_fpfh\n\n\nvoxel_size = 0.1 # means 5cm for this dataset\nsource, target, source_down, target_down, source_fpfh, target_fpfh = prepare_dataset(\n voxel_size)\n\ndef execute_fast_global_registration(source_down, target_down, source_fpfh,\n target_fpfh, voxel_size):\n distance_threshold = voxel_size * 0.5\n print(\":: Apply fast global registration with distance threshold %.3f\" \\\n % distance_threshold)\n result = o3d.pipelines.registration.registration_fast_based_on_feature_matching(\n source_down, target_down, source_fpfh, target_fpfh,\n o3d.pipelines.registration.FastGlobalRegistrationOption(\n maximum_correspondence_distance=distance_threshold))\n return result\n\n\ndef refine_registration(source, target, source_fpfh, target_fpfh, voxel_size, result):\n distance_threshold = voxel_size * 0.4\n print(\":: Point-to-plane ICP registration is applied on original point\")\n print(\" clouds to refine the alignment. This time we use a strict\")\n print(\" distance threshold %.3f.\" % distance_threshold)\n result = o3d.pipelines.registration.registration_icp(\n source, target, distance_threshold, result.transformation,\n o3d.pipelines.registration.TransformationEstimationPointToPoint())\n return result\n\n\nstart = time.time()\nresult_fast = execute_fast_global_registration(source_down, target_down,\n source_fpfh, target_fpfh,\n voxel_size)\nprint(\"Fast global registration took %.3f sec.\\n\" % (time.time() - start))\nprint(result_fast.transformation)\n#result_icp = refine_registration(source, target, source_fpfh, target_fpfh,\n# voxel_size, result_fast)\n#print(result_icp.transformation)\n"
] |
[
[
"numpy.asarray"
]
] |
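In the registration script above, the initial disturbance trans_init is a pure permutation of the coordinate axes, so the coarse alignment starts from a genuine rotation with no scaling or reflection. The following check is illustrative only and is not part of the original file; T and R are local names introduced here.

import numpy as np

# Rotation block of the trans_init used above; it cyclically permutes
# the x, y, and z axes of the source cloud.
T = np.asarray([[0.0, 0.0, 1.0, 0.0],
                [1.0, 0.0, 0.0, 0.0],
                [0.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 1.0]])
R = T[:3, :3]
# A proper rotation is orthogonal (R @ R.T == I) and has determinant +1;
# a cyclic axis permutation satisfies both, so distances are preserved.
assert np.allclose(R @ R.T, np.eye(3))
assert np.isclose(np.linalg.det(R), 1.0)

Because the disturbance preserves distances, the voxel_size-derived thresholds in the script (0.5 * voxel_size for fast global registration, 0.4 * voxel_size for the ICP refinement) remain meaningful on the transformed cloud.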
alexisicte/aviate
|
[
"91e357ed9cb234c878a12a8f18391c3c33f6b357"
] |
[
"venv/lib/python3.8/site-packages/mne/channels/channels.py"
] |
[
"# Authors: Alexandre Gramfort <[email protected]>\n# Matti Hämäläinen <[email protected]>\n# Denis Engemann <[email protected]>\n# Andrew Dykstra <[email protected]>\n# Teon Brooks <[email protected]>\n# Daniel McCloy <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os\nimport os.path as op\nimport sys\nfrom collections import OrderedDict\nfrom copy import deepcopy\nfrom functools import partial\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom ..defaults import HEAD_SIZE_DEFAULT, _handle_default\nfrom ..transforms import _frame_to_str\nfrom ..utils import (verbose, logger, warn,\n _check_preload, _validate_type, fill_doc, _check_option)\nfrom ..io.compensator import get_current_comp\nfrom ..io.constants import FIFF\nfrom ..io.meas_info import anonymize_info, Info, MontageMixin, create_info\nfrom ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type,\n _check_excludes_includes, _contains_ch_type,\n channel_indices_by_type, pick_channels, _picks_to_idx,\n _get_channel_types)\nfrom ..io.write import DATE_NONE\nfrom ..io._digitization import _get_data_as_dict_from_dig\n\n\ndef _get_meg_system(info):\n \"\"\"Educated guess for the helmet type based on channels.\"\"\"\n have_helmet = True\n for ch in info['chs']:\n if ch['kind'] == FIFF.FIFFV_MEG_CH:\n # Only take first 16 bits, as higher bits store CTF grad comp order\n coil_type = ch['coil_type'] & 0xFFFF\n nmag = np.sum(\n [c['kind'] == FIFF.FIFFV_MEG_CH for c in info['chs']])\n if coil_type == FIFF.FIFFV_COIL_NM_122:\n system = '122m'\n break\n elif coil_type // 1000 == 3: # All Vectorview coils are 30xx\n system = '306m'\n break\n elif (coil_type == FIFF.FIFFV_COIL_MAGNES_MAG or\n coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD):\n system = 'Magnes_3600wh' if nmag > 150 else 'Magnes_2500wh'\n break\n elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:\n system = 'CTF_275'\n break\n elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:\n system = 'KIT'\n # Our helmet does not match very well, so let's just create it\n have_helmet = False\n break\n elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:\n system = 'BabySQUID'\n break\n elif coil_type == FIFF.FIFFV_COIL_ARTEMIS123_GRAD:\n system = 'ARTEMIS123'\n have_helmet = False\n break\n else:\n system = 'unknown'\n have_helmet = False\n return system, have_helmet\n\n\ndef _get_ch_type(inst, ch_type, allow_ref_meg=False):\n \"\"\"Choose a single channel type (usually for plotting).\n\n Usually used in plotting to plot a single datatype, e.g. look for mags,\n then grads, then ... to plot.\n \"\"\"\n if ch_type is None:\n allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd',\n 'fnirs_cw_amplitude', 'fnirs_od', 'hbo', 'hbr',\n 'ecog', 'seeg']\n allowed_types += ['ref_meg'] if allow_ref_meg else []\n for type_ in allowed_types:\n if isinstance(inst, Info):\n if _contains_ch_type(inst, type_):\n ch_type = type_\n break\n elif type_ in inst:\n ch_type = type_\n break\n else:\n raise RuntimeError('No plottable channel types found')\n return ch_type\n\n\n@verbose\ndef equalize_channels(instances, copy=True, verbose=None):\n \"\"\"Equalize channel picks and ordering across multiple MNE-Python objects.\n\n First, all channels that are not common to each object are dropped. Then,\n using the first object in the list as a template, the channels of each\n object are re-ordered to match the template. 
The end result is that all\n    given objects define the same channels, in the same order.\n\n    Parameters\n    ----------\n    instances : list\n        A list of MNE-Python objects to equalize the channels for. Objects can\n        be of type Raw, Epochs, Evoked, AverageTFR, Forward, Covariance,\n        CrossSpectralDensity or Info.\n    copy : bool\n        When dropping and/or re-ordering channels, an object will be copied\n        when this parameter is set to ``True`` (the default). When set to\n        ``False``, the dropping and re-ordering of channels happens in-place.\n\n        .. versionadded:: 0.20.0\n    %(verbose)s\n\n    Returns\n    -------\n    equalized_instances : list\n        A list of MNE-Python objects that have the same channels defined in the\n        same order.\n\n    Notes\n    -----\n    This function operates in-place only when ``copy=False``.\n    \"\"\"\n    from ..cov import Covariance\n    from ..io.base import BaseRaw\n    from ..io.meas_info import Info\n    from ..epochs import BaseEpochs\n    from ..evoked import Evoked\n    from ..forward import Forward\n    from ..time_frequency import _BaseTFR, CrossSpectralDensity\n\n    # Instances need to have a `ch_names` attribute and a `pick_channels`\n    # method that supports `ordered=True`.\n    allowed_types = (BaseRaw, BaseEpochs, Evoked, _BaseTFR, Forward,\n                     Covariance, CrossSpectralDensity, Info)\n    allowed_types_str = (\"Raw, Epochs, Evoked, TFR, Forward, Covariance, \"\n                         \"CrossSpectralDensity or Info\")\n    for inst in instances:\n        _validate_type(inst, allowed_types, \"Instances to be modified\",\n                       allowed_types_str)\n\n    chan_template = instances[0].ch_names\n    logger.info('Identifying common channels ...')\n    channels = [set(inst.ch_names) for inst in instances]\n    common_channels = set(chan_template).intersection(*channels)\n    all_channels = set(chan_template).union(*channels)\n    dropped = list(set(all_channels - common_channels))\n\n    # Preserve the order of chan_template\n    order = np.argsort([chan_template.index(ch) for ch in common_channels])\n    common_channels = np.array(list(common_channels))[order].tolist()\n\n    # Update all instances to match the common_channels list\n    reordered = False\n    equalized_instances = []\n    for inst in instances:\n        # Only perform picking when needed\n        if inst.ch_names != common_channels:\n            if copy:\n                inst = inst.copy()\n            inst.pick_channels(common_channels, ordered=True)\n            if len(inst.ch_names) == len(common_channels):\n                reordered = True\n        equalized_instances.append(inst)\n\n    if dropped:\n        logger.info('Dropped the following channels:\\n%s' % dropped)\n    elif reordered:\n        logger.info('Channels have been re-ordered.')\n\n    return equalized_instances\n\n\nclass ContainsMixin(object):\n    \"\"\"Mixin class for Raw, Evoked, Epochs.\"\"\"\n\n    def __contains__(self, ch_type):\n        \"\"\"Check channel type membership.\n\n        Parameters\n        ----------\n        ch_type : str\n            Channel type to check for. Can be e.g. 
'meg', 'eeg', 'stim', etc.\n\n Returns\n -------\n in : bool\n Whether or not the instance contains the given channel type.\n\n Examples\n --------\n Channel type membership can be tested as::\n\n >>> 'meg' in inst # doctest: +SKIP\n True\n >>> 'seeg' in inst # doctest: +SKIP\n False\n\n \"\"\"\n if ch_type == 'meg':\n has_ch_type = (_contains_ch_type(self.info, 'mag') or\n _contains_ch_type(self.info, 'grad'))\n else:\n has_ch_type = _contains_ch_type(self.info, ch_type)\n return has_ch_type\n\n @property\n def compensation_grade(self):\n \"\"\"The current gradient compensation grade.\"\"\"\n return get_current_comp(self.info)\n\n @fill_doc\n def get_channel_types(self, picks=None, unique=False, only_data_chs=False):\n \"\"\"Get a list of channel type for each channel.\n\n Parameters\n ----------\n %(picks_all)s\n unique : bool\n Whether to return only unique channel types. Default is ``False``.\n only_data_chs : bool\n Whether to ignore non-data channels. Default is ``False``.\n\n Returns\n -------\n channel_types : list\n The channel types.\n \"\"\"\n return _get_channel_types(self.info, picks=picks, unique=unique,\n only_data_chs=only_data_chs)\n\n @fill_doc\n def get_montage(self):\n \"\"\"Get a DigMontage from instance.\n\n Returns\n -------\n %(montage)s\n \"\"\"\n from ..channels.montage import make_dig_montage\n if self.info['dig'] is None:\n return None\n # obtain coord_frame, and landmark coords\n # (nasion, lpa, rpa, hsp, hpi) from DigPoints\n montage_bunch = _get_data_as_dict_from_dig(self.info['dig'])\n coord_frame = _frame_to_str.get(montage_bunch.coord_frame)\n\n # get the channel names and chs data structure\n ch_names, chs = self.info['ch_names'], self.info['chs']\n picks = pick_types(self.info, meg=False, eeg=True,\n seeg=True, ecog=True)\n\n # channel positions from dig do not match ch_names one to one,\n # so use loc[:3] instead\n ch_pos = {ch_names[ii]: chs[ii]['loc'][:3] for ii in picks}\n\n # create montage\n montage = make_dig_montage(\n ch_pos=ch_pos,\n coord_frame=coord_frame,\n nasion=montage_bunch.nasion,\n lpa=montage_bunch.lpa,\n rpa=montage_bunch.rpa,\n hsp=montage_bunch.hsp,\n hpi=montage_bunch.hpi,\n )\n return montage\n\n\n# XXX Eventually de-duplicate with _kind_dict of mne/io/meas_info.py\n_human2fiff = {'ecg': FIFF.FIFFV_ECG_CH,\n 'eeg': FIFF.FIFFV_EEG_CH,\n 'emg': FIFF.FIFFV_EMG_CH,\n 'eog': FIFF.FIFFV_EOG_CH,\n 'exci': FIFF.FIFFV_EXCI_CH,\n 'ias': FIFF.FIFFV_IAS_CH,\n 'misc': FIFF.FIFFV_MISC_CH,\n 'resp': FIFF.FIFFV_RESP_CH,\n 'seeg': FIFF.FIFFV_SEEG_CH,\n 'stim': FIFF.FIFFV_STIM_CH,\n 'syst': FIFF.FIFFV_SYST_CH,\n 'bio': FIFF.FIFFV_BIO_CH,\n 'ecog': FIFF.FIFFV_ECOG_CH,\n 'fnirs_cw_amplitude': FIFF.FIFFV_FNIRS_CH,\n 'fnirs_od': FIFF.FIFFV_FNIRS_CH,\n 'hbo': FIFF.FIFFV_FNIRS_CH,\n 'hbr': FIFF.FIFFV_FNIRS_CH}\n_human2unit = {'ecg': FIFF.FIFF_UNIT_V,\n 'eeg': FIFF.FIFF_UNIT_V,\n 'emg': FIFF.FIFF_UNIT_V,\n 'eog': FIFF.FIFF_UNIT_V,\n 'exci': FIFF.FIFF_UNIT_NONE,\n 'ias': FIFF.FIFF_UNIT_NONE,\n 'misc': FIFF.FIFF_UNIT_V,\n 'resp': FIFF.FIFF_UNIT_NONE,\n 'seeg': FIFF.FIFF_UNIT_V,\n 'stim': FIFF.FIFF_UNIT_NONE,\n 'syst': FIFF.FIFF_UNIT_NONE,\n 'bio': FIFF.FIFF_UNIT_V,\n 'ecog': FIFF.FIFF_UNIT_V,\n 'fnirs_cw_amplitude': FIFF.FIFF_UNIT_V,\n 'fnirs_od': FIFF.FIFF_UNIT_NONE,\n 'hbo': FIFF.FIFF_UNIT_MOL,\n 'hbr': FIFF.FIFF_UNIT_MOL}\n_unit2human = {FIFF.FIFF_UNIT_V: 'V',\n FIFF.FIFF_UNIT_T: 'T',\n FIFF.FIFF_UNIT_T_M: 'T/m',\n FIFF.FIFF_UNIT_MOL: 'M',\n FIFF.FIFF_UNIT_NONE: 'NA',\n FIFF.FIFF_UNIT_CEL: 'C'}\n\n\ndef _check_set(ch, projs, ch_type):\n \"\"\"Ensure 
type change is compatible with projectors.\"\"\"\n new_kind = _human2fiff[ch_type]\n if ch['kind'] != new_kind:\n for proj in projs:\n if ch['ch_name'] in proj['data']['col_names']:\n raise RuntimeError('Cannot change channel type for channel %s '\n 'in projector \"%s\"'\n % (ch['ch_name'], proj['desc']))\n ch['kind'] = new_kind\n\n\nclass SetChannelsMixin(MontageMixin):\n \"\"\"Mixin class for Raw, Evoked, Epochs.\"\"\"\n\n @verbose\n def set_eeg_reference(self, ref_channels='average', projection=False,\n ch_type='auto', forward=None, verbose=None):\n \"\"\"Specify which reference to use for EEG data.\n\n Use this function to explicitly specify the desired reference for EEG.\n This can be either an existing electrode or a new virtual channel.\n This function will re-reference the data according to the desired\n reference.\n\n Parameters\n ----------\n %(set_eeg_reference_ref_channels)s\n %(set_eeg_reference_projection)s\n %(set_eeg_reference_ch_type)s\n %(set_eeg_reference_forward)s\n %(verbose_meth)s\n\n Returns\n -------\n inst : instance of Raw | Epochs | Evoked\n Data with EEG channels re-referenced. If ``ref_channels='average'``\n and ``projection=True`` a projection will be added instead of\n directly re-referencing the data.\n %(set_eeg_reference_see_also_notes)s\n \"\"\"\n from ..io.reference import set_eeg_reference\n return set_eeg_reference(self, ref_channels=ref_channels, copy=False,\n projection=projection, ch_type=ch_type,\n forward=forward)[0]\n\n def _get_channel_positions(self, picks=None):\n \"\"\"Get channel locations from info.\n\n Parameters\n ----------\n picks : str | list | slice | None\n None gets good data indices.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n picks = _picks_to_idx(self.info, picks)\n chs = self.info['chs']\n pos = np.array([chs[k]['loc'][:3] for k in picks])\n n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)\n if n_zero > 1: # XXX some systems have origin (0, 0, 0)\n raise ValueError('Could not extract channel positions for '\n '{} channels'.format(n_zero))\n return pos\n\n def _set_channel_positions(self, pos, names):\n \"\"\"Update channel locations in info.\n\n Parameters\n ----------\n pos : array-like | np.ndarray, shape (n_points, 3)\n The channel positions to be set.\n names : list of str\n The names of the channels to be set.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n if len(pos) != len(names):\n raise ValueError('Number of channel positions not equal to '\n 'the number of names given.')\n pos = np.asarray(pos, dtype=np.float64)\n if pos.shape[-1] != 3 or pos.ndim != 2:\n msg = ('Channel positions must have the shape (n_points, 3) '\n 'not %s.' % (pos.shape,))\n raise ValueError(msg)\n for name, p in zip(names, pos):\n if name in self.ch_names:\n idx = self.ch_names.index(name)\n self.info['chs'][idx]['loc'][:3] = p\n else:\n msg = ('%s was not found in the info. Cannot be updated.'\n % name)\n raise ValueError(msg)\n\n @verbose\n def set_channel_types(self, mapping, verbose=None):\n \"\"\"Define the sensor type of channels.\n\n Parameters\n ----------\n mapping : dict\n A dictionary mapping a channel to a sensor type (str), e.g.,\n ``{'EEG061': 'eog'}``.\n %(verbose_meth)s\n\n Returns\n -------\n inst : instance of Raw | Epochs | Evoked\n The instance (modified in place).\n\n .. versionchanged:: 0.20\n Return the instance.\n\n Notes\n -----\n The following sensor types are accepted:\n\n ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog,\n hbo, hbr, fnirs_cw_amplitude, fnirs_od\n\n .. 
versionadded:: 0.9.0\n \"\"\"\n ch_names = self.info['ch_names']\n\n # first check and assemble clean mappings of index and name\n unit_changes = dict()\n for ch_name, ch_type in mapping.items():\n if ch_name not in ch_names:\n raise ValueError(\"This channel name (%s) doesn't exist in \"\n \"info.\" % ch_name)\n\n c_ind = ch_names.index(ch_name)\n if ch_type not in _human2fiff:\n raise ValueError('This function cannot change to this '\n 'channel type: %s. Accepted channel types '\n 'are %s.'\n % (ch_type,\n \", \".join(sorted(_human2unit.keys()))))\n # Set sensor type\n _check_set(self.info['chs'][c_ind], self.info['projs'], ch_type)\n unit_old = self.info['chs'][c_ind]['unit']\n unit_new = _human2unit[ch_type]\n if unit_old not in _unit2human:\n raise ValueError(\"Channel '%s' has unknown unit (%s). Please \"\n \"fix the measurement info of your data.\"\n % (ch_name, unit_old))\n if unit_old != _human2unit[ch_type]:\n this_change = (_unit2human[unit_old], _unit2human[unit_new])\n if this_change not in unit_changes:\n unit_changes[this_change] = list()\n unit_changes[this_change].append(ch_name)\n self.info['chs'][c_ind]['unit'] = _human2unit[ch_type]\n if ch_type in ['eeg', 'seeg', 'ecog']:\n coil_type = FIFF.FIFFV_COIL_EEG\n elif ch_type == 'hbo':\n coil_type = FIFF.FIFFV_COIL_FNIRS_HBO\n elif ch_type == 'hbr':\n coil_type = FIFF.FIFFV_COIL_FNIRS_HBR\n elif ch_type == 'fnirs_cw_amplitude':\n coil_type = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE\n elif ch_type == 'fnirs_od':\n coil_type = FIFF.FIFFV_COIL_FNIRS_OD\n else:\n coil_type = FIFF.FIFFV_COIL_NONE\n self.info['chs'][c_ind]['coil_type'] = coil_type\n msg = \"The unit for channel(s) {0} has changed from {1} to {2}.\"\n for this_change, names in unit_changes.items():\n warn(msg.format(\", \".join(sorted(names)), *this_change))\n return self\n\n @fill_doc\n def rename_channels(self, mapping):\n \"\"\"Rename channels.\n\n Parameters\n ----------\n %(rename_channels_mapping)s\n\n Returns\n -------\n inst : instance of Raw | Epochs | Evoked\n The instance (modified in place).\n\n .. versionchanged:: 0.20\n Return the instance.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n rename_channels(self.info, mapping)\n return self\n\n @verbose\n def plot_sensors(self, kind='topomap', ch_type=None, title=None,\n show_names=False, ch_groups=None, to_sphere=True,\n axes=None, block=False, show=True, sphere=None,\n verbose=None):\n \"\"\"Plot sensor positions.\n\n Parameters\n ----------\n kind : str\n Whether to plot the sensors as 3d, topomap or as an interactive\n sensor selection dialog. Available options 'topomap', '3d',\n 'select'. If 'select', a set of channels can be selected\n interactively by using lasso selector or clicking while holding\n control key. The selected channels are returned along with the\n figure instance. Defaults to 'topomap'.\n ch_type : None | str\n The channel type to plot. Available options 'mag', 'grad', 'eeg',\n 'seeg', 'ecog', 'all'. If ``'all'``, all the available mag, grad,\n eeg, seeg and ecog channels are plotted. If None (default), then\n channels are chosen in the order given above.\n title : str | None\n Title for the figure. If None (default), equals to ``'Sensor\n positions (%%s)' %% ch_type``.\n show_names : bool | array of str\n Whether to display all channel names. If an array, only the channel\n names in the array are shown. Defaults to False.\n ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None\n Channel groups for coloring the sensors. 
If None (default), default\n coloring scheme is used. If 'position', the sensors are divided\n into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If\n array, the channels are divided by picks given in the array.\n\n .. versionadded:: 0.13.0\n to_sphere : bool\n Whether to project the 3d locations to a sphere. When False, the\n sensor array appears similar as to looking downwards straight above\n the subject's head. Has no effect when kind='3d'. Defaults to True.\n\n .. versionadded:: 0.14.0\n axes : instance of Axes | instance of Axes3D | None\n Axes to draw the sensors to. If ``kind='3d'``, axes must be an\n instance of Axes3D. If None (default), a new axes will be created.\n\n .. versionadded:: 0.13.0\n block : bool\n Whether to halt program execution until the figure is closed.\n Defaults to False.\n\n .. versionadded:: 0.13.0\n show : bool\n Show figure if True. Defaults to True.\n %(topomap_sphere_auto)s\n %(verbose_meth)s\n\n Returns\n -------\n fig : instance of Figure\n Figure containing the sensor topography.\n selection : list\n A list of selected channels. Only returned if ``kind=='select'``.\n\n See Also\n --------\n mne.viz.plot_layout\n\n Notes\n -----\n This function plots the sensor locations from the info structure using\n matplotlib. For drawing the sensors using mayavi see\n :func:`mne.viz.plot_alignment`.\n\n .. versionadded:: 0.12.0\n \"\"\"\n from ..viz.utils import plot_sensors\n return plot_sensors(self.info, kind=kind, ch_type=ch_type, title=title,\n show_names=show_names, ch_groups=ch_groups,\n to_sphere=to_sphere, axes=axes, block=block,\n show=show, sphere=sphere, verbose=verbose)\n\n @verbose\n def anonymize(self, daysback=None, keep_his=False, verbose=None):\n \"\"\"Anonymize measurement information in place.\n\n Parameters\n ----------\n %(anonymize_info_parameters)s\n %(verbose)s\n\n Returns\n -------\n inst : instance of Raw | Epochs | Evoked\n The modified instance.\n\n Notes\n -----\n %(anonymize_info_notes)s\n\n .. versionadded:: 0.13.0\n \"\"\"\n anonymize_info(self.info, daysback=daysback, keep_his=keep_his,\n verbose=verbose)\n self.set_meas_date(self.info['meas_date']) # unify annot update\n return self\n\n def set_meas_date(self, meas_date):\n \"\"\"Set the measurement start date.\n\n Parameters\n ----------\n meas_date : datetime | float | tuple | None\n The new measurement date.\n If datetime object, it must be timezone-aware and in UTC.\n A tuple of (seconds, microseconds) or float (alias for\n ``(meas_date, 0)``) can also be passed and a datetime\n object will be automatically created. If None, will remove\n the time reference.\n\n Returns\n -------\n inst : instance of Raw | Epochs | Evoked\n The modified raw instance. Operates in place.\n\n See Also\n --------\n mne.io.Raw.anonymize\n\n Notes\n -----\n If you want to remove all time references in the file, call\n :func:`mne.io.anonymize_info(inst.info) <mne.io.anonymize_info>`\n after calling ``inst.set_meas_date(None)``.\n\n .. 
versionadded:: 0.20\n \"\"\"\n from ..annotations import _handle_meas_date\n meas_date = _handle_meas_date(meas_date)\n self.info['meas_date'] = meas_date\n\n # clear file_id and meas_id if needed\n if meas_date is None:\n for key in ('file_id', 'meas_id'):\n value = self.info.get(key)\n if value is not None:\n assert 'msecs' not in value\n value['secs'] = DATE_NONE[0]\n value['usecs'] = DATE_NONE[1]\n # The following copy is needed for a test CTF dataset\n # otherwise value['machid'][:] = 0 would suffice\n _tmp = value['machid'].copy()\n _tmp[:] = 0\n value['machid'] = _tmp\n\n if hasattr(self, 'annotations'):\n self.annotations._orig_time = meas_date\n return self\n\n\nclass UpdateChannelsMixin(object):\n \"\"\"Mixin class for Raw, Evoked, Epochs, AverageTFR.\"\"\"\n\n @verbose\n def pick_types(self, meg=None, eeg=False, stim=False, eog=False,\n ecg=False, emg=False, ref_meg='auto', misc=False,\n resp=False, chpi=False, exci=False, ias=False, syst=False,\n seeg=False, dipole=False, gof=False, bio=False, ecog=False,\n fnirs=False, csd=False, include=(), exclude='bads',\n selection=None, verbose=None):\n \"\"\"Pick some channels by type and names.\n\n Parameters\n ----------\n meg : bool | str\n If True include MEG channels. If string it can be 'mag', 'grad',\n 'planar1' or 'planar2' to select only magnetometers, all\n gradiometers, or a specific type of gradiometer.\n eeg : bool\n If True include EEG channels.\n stim : bool\n If True include stimulus channels.\n eog : bool\n If True include EOG channels.\n ecg : bool\n If True include ECG channels.\n emg : bool\n If True include EMG channels.\n ref_meg : bool | str\n If True include CTF / 4D reference channels. If 'auto', reference\n channels are included if compensations are present and ``meg`` is\n not False. Can also be the string options for the ``meg``\n parameter.\n misc : bool\n If True include miscellaneous analog channels.\n resp : bool\n If True include response-trigger channel. For some MEG systems this\n is separate from the stim channel.\n chpi : bool\n If True include continuous HPI coil channels.\n exci : bool\n Flux excitation channel used to be a stimulus channel.\n ias : bool\n Internal Active Shielding data (maybe on Triux only).\n syst : bool\n System status channel information (on Triux systems only).\n seeg : bool\n Stereotactic EEG channels.\n dipole : bool\n Dipole time course channels.\n gof : bool\n Dipole goodness of fit channels.\n bio : bool\n Bio channels.\n ecog : bool\n Electrocorticography channels.\n fnirs : bool | str\n Functional near-infrared spectroscopy channels. If True include all\n fNIRS channels. If False (default) include none. If string it can\n be 'hbo' (to include channels measuring oxyhemoglobin) or 'hbr' (to\n include channels measuring deoxyhemoglobin).\n csd : bool\n EEG-CSD channels.\n include : list of str\n List of additional channels to include. If empty do not include\n any.\n exclude : list of str | str\n List of channels to exclude. If 'bads' (default), exclude channels\n in ``info['bads']``.\n selection : list of str\n Restrict sensor channels (MEG, EEG) to this list of channel names.\n %(verbose_meth)s\n\n Returns\n -------\n inst : instance of Raw, Epochs, or Evoked\n The modified instance.\n\n See Also\n --------\n pick_channels\n\n Notes\n -----\n .. 
versionadded:: 0.9.0\n \"\"\"\n idx = pick_types(\n self.info, meg=meg, eeg=eeg, stim=stim, eog=eog, ecg=ecg, emg=emg,\n ref_meg=ref_meg, misc=misc, resp=resp, chpi=chpi, exci=exci,\n ias=ias, syst=syst, seeg=seeg, dipole=dipole, gof=gof, bio=bio,\n ecog=ecog, fnirs=fnirs, include=include, exclude=exclude,\n selection=selection)\n\n self._pick_drop_channels(idx)\n\n # remove dropped channel types from reject and flat\n if getattr(self, 'reject', None) is not None:\n # use list(self.reject) to avoid RuntimeError for changing\n # dictionary size during iteration\n for ch_type in list(self.reject):\n if ch_type not in self:\n del self.reject[ch_type]\n\n if getattr(self, 'flat', None) is not None:\n for ch_type in list(self.flat):\n if ch_type not in self:\n del self.flat[ch_type]\n\n return self\n\n def pick_channels(self, ch_names, ordered=False):\n \"\"\"Pick some channels.\n\n Parameters\n ----------\n ch_names : list\n The list of channels to select.\n ordered : bool\n If True (default False), ensure that the order of the channels in\n the modified instance matches the order of ``ch_names``.\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n inst : instance of Raw, Epochs, or Evoked\n The modified instance.\n\n See Also\n --------\n drop_channels\n pick_types\n reorder_channels\n\n Notes\n -----\n The channel names given are assumed to be a set, i.e. the order\n does not matter. The original order of the channels is preserved.\n You can use ``reorder_channels`` to set channel order if necessary.\n\n .. versionadded:: 0.9.0\n \"\"\"\n picks = pick_channels(self.info['ch_names'], ch_names, ordered=ordered)\n return self._pick_drop_channels(picks)\n\n @fill_doc\n def pick(self, picks, exclude=()):\n \"\"\"Pick a subset of channels.\n\n Parameters\n ----------\n %(picks_all)s\n exclude : list | str\n Set of channels to exclude, only used when picking based on\n types (e.g., exclude=\"bads\" when picks=\"meg\").\n\n Returns\n -------\n inst : instance of Raw, Epochs, or Evoked\n The modified instance.\n \"\"\"\n picks = _picks_to_idx(self.info, picks, 'all', exclude,\n allow_empty=False)\n return self._pick_drop_channels(picks)\n\n def reorder_channels(self, ch_names):\n \"\"\"Reorder channels.\n\n Parameters\n ----------\n ch_names : list\n The desired channel order.\n\n Returns\n -------\n inst : instance of Raw, Epochs, or Evoked\n The modified instance.\n\n See Also\n --------\n drop_channels\n pick_types\n pick_channels\n\n Notes\n -----\n Channel names must be unique. Channels that are not in ``ch_names``\n are dropped.\n\n .. versionadded:: 0.16.0\n \"\"\"\n _check_excludes_includes(ch_names)\n idx = list()\n for ch_name in ch_names:\n ii = self.ch_names.index(ch_name)\n if ii in idx:\n raise ValueError('Channel name repeated: %s' % (ch_name,))\n idx.append(ii)\n return self._pick_drop_channels(idx)\n\n def drop_channels(self, ch_names):\n \"\"\"Drop channel(s).\n\n Parameters\n ----------\n ch_names : iterable or str\n Iterable (e.g. list) of channel name(s) or channel name to remove.\n\n Returns\n -------\n inst : instance of Raw, Epochs, or Evoked\n The modified instance.\n\n See Also\n --------\n reorder_channels\n pick_channels\n pick_types\n\n Notes\n -----\n .. 
versionadded:: 0.9.0\n \"\"\"\n if isinstance(ch_names, str):\n ch_names = [ch_names]\n\n try:\n all_str = all([isinstance(ch, str) for ch in ch_names])\n except TypeError:\n raise ValueError(\"'ch_names' must be iterable, got \"\n \"type {} ({}).\".format(type(ch_names), ch_names))\n\n if not all_str:\n raise ValueError(\"Each element in 'ch_names' must be str, got \"\n \"{}.\".format([type(ch) for ch in ch_names]))\n\n missing = [ch for ch in ch_names if ch not in self.ch_names]\n if len(missing) > 0:\n msg = \"Channel(s) {0} not found, nothing dropped.\"\n raise ValueError(msg.format(\", \".join(missing)))\n\n bad_idx = [self.ch_names.index(ch) for ch in ch_names\n if ch in self.ch_names]\n idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx)\n return self._pick_drop_channels(idx)\n\n def _pick_drop_channels(self, idx):\n # avoid circular imports\n from ..io import BaseRaw\n from ..time_frequency import AverageTFR, EpochsTFR\n\n msg = 'adding, dropping, or reordering channels'\n if isinstance(self, BaseRaw):\n if self._projector is not None:\n _check_preload(self, f'{msg} after calling .apply_proj()')\n else:\n _check_preload(self, msg)\n\n if getattr(self, 'picks', None) is not None:\n self.picks = self.picks[idx]\n\n if getattr(self, '_read_picks', None) is not None:\n self._read_picks = [r[idx] for r in self._read_picks]\n\n if hasattr(self, '_cals'):\n self._cals = self._cals[idx]\n\n pick_info(self.info, idx, copy=False)\n\n for key in ('_comp', '_projector'):\n mat = getattr(self, key, None)\n if mat is not None:\n setattr(self, key, mat[idx][:, idx])\n\n # All others (Evoked, Epochs, Raw) have chs axis=-2\n axis = -3 if isinstance(self, (AverageTFR, EpochsTFR)) else -2\n if hasattr(self, '_data'): # skip non-preloaded Raw\n self._data = self._data.take(idx, axis=axis)\n else:\n assert isinstance(self, BaseRaw) and not self.preload\n\n self._pick_projs()\n return self\n\n def _pick_projs(self):\n \"\"\"Keep only projectors which apply to at least 1 data channel.\"\"\"\n drop_idx = []\n for idx, proj in enumerate(self.info['projs']):\n if not set(self.info['ch_names']) & set(proj['data']['col_names']):\n drop_idx.append(idx)\n\n for idx in drop_idx:\n logger.info(f\"Removing projector {self.info['projs'][idx]}\")\n\n if drop_idx and hasattr(self, 'del_proj'):\n self.del_proj(drop_idx)\n\n return self\n\n def add_channels(self, add_list, force_update_info=False):\n \"\"\"Append new channels to the instance.\n\n Parameters\n ----------\n add_list : list\n A list of objects to append to self. Must contain all the same\n type as the current object.\n force_update_info : bool\n If True, force the info for objects to be appended to match the\n values in ``self``. This should generally only be used when adding\n stim channels for which important metadata won't be overwritten.\n\n .. 
versionadded:: 0.12\n\n Returns\n -------\n inst : instance of Raw, Epochs, or Evoked\n The modified instance.\n\n See Also\n --------\n drop_channels\n\n Notes\n -----\n If ``self`` is a Raw instance that has been preloaded into a\n :obj:`numpy.memmap` instance, the memmap will be resized.\n \"\"\"\n # avoid circular imports\n from ..io import BaseRaw, _merge_info\n from ..epochs import BaseEpochs\n\n _validate_type(add_list, (list, tuple), 'Input')\n\n # Object-specific checks\n for inst in add_list + [self]:\n _check_preload(inst, \"adding channels\")\n if isinstance(self, BaseRaw):\n con_axis = 0\n comp_class = BaseRaw\n elif isinstance(self, BaseEpochs):\n con_axis = 1\n comp_class = BaseEpochs\n else:\n con_axis = 0\n comp_class = type(self)\n for inst in add_list:\n _validate_type(inst, comp_class, 'All input')\n data = [inst._data for inst in [self] + add_list]\n\n # Make sure that all dimensions other than channel axis are the same\n compare_axes = [i for i in range(data[0].ndim) if i != con_axis]\n shapes = np.array([dat.shape for dat in data])[:, compare_axes]\n for shape in shapes:\n if not ((shapes[0] - shape) == 0).all():\n raise AssertionError('All data dimensions except channels '\n 'must match, got %s != %s'\n % (shapes[0], shape))\n del shapes\n\n # Create final data / info objects\n infos = [self.info] + [inst.info for inst in add_list]\n new_info = _merge_info(infos, force_update_to_first=force_update_info)\n\n # Now update the attributes\n if isinstance(self._data, np.memmap) and con_axis == 0 and \\\n sys.platform != 'darwin': # resizing not available--no mremap\n # Use a resize and fill in other ones\n out_shape = (sum(d.shape[0] for d in data),) + data[0].shape[1:]\n n_bytes = np.prod(out_shape) * self._data.dtype.itemsize\n self._data.flush()\n self._data.base.resize(n_bytes)\n self._data = np.memmap(self._data.filename, mode='r+',\n dtype=self._data.dtype, shape=out_shape)\n assert self._data.shape == out_shape\n assert self._data.nbytes == n_bytes\n offset = len(data[0])\n for d in data[1:]:\n this_len = len(d)\n self._data[offset:offset + this_len] = d\n offset += this_len\n else:\n self._data = np.concatenate(data, axis=con_axis)\n self.info = new_info\n if isinstance(self, BaseRaw):\n self._cals = np.concatenate([getattr(inst, '_cals')\n for inst in [self] + add_list])\n # We should never use these since data are preloaded, let's just\n # set it to something large and likely to break (2 ** 31 - 1)\n extra_idx = [2147483647] * sum(info['nchan'] for info in infos[1:])\n assert all(len(r) == infos[0]['nchan'] for r in self._read_picks)\n self._read_picks = [\n np.concatenate([r, extra_idx]) for r in self._read_picks]\n assert all(len(r) == self.info['nchan'] for r in self._read_picks)\n return self\n\n\nclass InterpolationMixin(object):\n \"\"\"Mixin class for Raw, Evoked, Epochs.\"\"\"\n\n @verbose\n def interpolate_bads(self, reset_bads=True, mode='accurate',\n origin='auto', method=None, verbose=None):\n \"\"\"Interpolate bad MEG and EEG channels.\n\n Operates in place.\n\n Parameters\n ----------\n reset_bads : bool\n If True, remove the bads from info.\n mode : str\n Either ``'accurate'`` or ``'fast'``, determines the quality of the\n Legendre polynomial expansion used for interpolation of channels\n using the minimum-norm method.\n origin : array-like, shape (3,) | str\n Origin of the sphere in the head coordinate frame and in meters.\n Can be ``'auto'`` (default), which means a head-digitization-based\n origin fit.\n\n .. 
versionadded:: 0.17\n method : dict\n Method to use for each channel type.\n Currently only the key \"eeg\" has multiple options:\n\n - ``\"spline\"`` (default)\n Use spherical spline interpolation.\n - ``\"MNE\"``\n Use minimum-norm projection to a sphere and back.\n This is the method used for MEG channels.\n\n The value for \"meg\" is \"MNE\", and the value for\n \"fnirs\" is \"nearest\". The default (None) is thus an alias for::\n\n method=dict(meg=\"MNE\", eeg=\"spline\", fnirs=\"nearest\")\n\n .. versionadded:: 0.21\n %(verbose_meth)s\n\n Returns\n -------\n inst : instance of Raw, Epochs, or Evoked\n The modified instance.\n\n Notes\n -----\n .. versionadded:: 0.9.0\n \"\"\"\n from ..bem import _check_origin\n from .interpolation import _interpolate_bads_eeg,\\\n _interpolate_bads_meeg, _interpolate_bads_nirs\n\n _check_preload(self, \"interpolation\")\n method = _handle_default('interpolation_method', method)\n for key in method:\n _check_option('method[key]', key, ('meg', 'eeg', 'fnirs'))\n _check_option(\"method['eeg']\", method['eeg'], ('spline', 'MNE'))\n _check_option(\"method['meg']\", method['meg'], ('MNE',))\n _check_option(\"method['fnirs']\", method['fnirs'], ('nearest',))\n\n if len(self.info['bads']) == 0:\n warn('No bad channels to interpolate. Doing nothing...')\n return self\n logger.info('Interpolating bad channels')\n origin = _check_origin(origin, self.info)\n if method['eeg'] == 'spline':\n _interpolate_bads_eeg(self, origin=origin)\n eeg_mne = False\n else:\n eeg_mne = True\n _interpolate_bads_meeg(self, mode=mode, origin=origin, eeg=eeg_mne)\n _interpolate_bads_nirs(self)\n\n if reset_bads is True:\n self.info['bads'] = []\n\n return self\n\n\n@fill_doc\ndef rename_channels(info, mapping):\n \"\"\"Rename channels.\n\n .. warning:: The channel names must have at most 15 characters\n\n Parameters\n ----------\n info : dict\n Measurement info to modify.\n %(rename_channels_mapping)s\n \"\"\"\n _validate_type(info, Info, 'info')\n info._check_consistency()\n bads = list(info['bads']) # make our own local copies\n ch_names = list(info['ch_names'])\n\n # first check and assemble clean mappings of index and name\n if isinstance(mapping, dict):\n orig_names = sorted(list(mapping.keys()))\n missing = [orig_name not in ch_names for orig_name in orig_names]\n if any(missing):\n raise ValueError(\"Channel name(s) in mapping missing from info: \"\n \"%s\" % np.array(orig_names)[np.array(missing)])\n new_names = [(ch_names.index(ch_name), new_name)\n for ch_name, new_name in mapping.items()]\n elif callable(mapping):\n new_names = [(ci, mapping(ch_name))\n for ci, ch_name in enumerate(ch_names)]\n else:\n raise ValueError('mapping must be callable or dict, not %s'\n % (type(mapping),))\n\n # check we got all strings out of the mapping\n for new_name in new_names:\n _validate_type(new_name[1], 'str', 'New channel mappings')\n\n bad_new_names = [name for _, name in new_names if len(name) > 15]\n if len(bad_new_names):\n raise ValueError('Channel names cannot be longer than 15 '\n 'characters. 
These channel names are not '\n                         'valid : %s' % bad_new_names)\n\n    # do the remapping locally\n    for c_ind, new_name in new_names:\n        for bi, bad in enumerate(bads):\n            if bad == ch_names[c_ind]:\n                bads[bi] = new_name\n        ch_names[c_ind] = new_name\n\n    # check that all the channel names are unique\n    if len(ch_names) != len(np.unique(ch_names)):\n        raise ValueError('New channel names are not unique, renaming failed')\n\n    # do the remapping in info\n    info['bads'] = bads\n    for ch, ch_name in zip(info['chs'], ch_names):\n        ch['ch_name'] = ch_name\n    info._update_redundant()\n    info._check_consistency()\n\n\ndef _recursive_flatten(cell, dtype):\n    \"\"\"Unpack mat files in Python.\"\"\"\n    if len(cell) > 0:\n        while not isinstance(cell[0], dtype):\n            cell = [c for d in cell for c in d]\n    return cell\n\n\n@fill_doc\ndef read_ch_adjacency(fname, picks=None):\n    \"\"\"Parse FieldTrip neighbors .mat file.\n\n    More information on these neighbor definitions can be found on the related\n    `FieldTrip documentation pages\n    <http://www.fieldtriptoolbox.org/template/neighbours/>`__.\n\n    Parameters\n    ----------\n    fname : str\n        The file name. Example: 'neuromag306mag', 'neuromag306planar',\n        'ctf275', 'biosemi64', etc.\n    %(picks_all)s\n        Picks must match the template.\n\n    Returns\n    -------\n    ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels)\n        The adjacency matrix.\n    ch_names : list\n        The list of channel names present in adjacency matrix.\n\n    See Also\n    --------\n    find_ch_adjacency\n\n    Notes\n    -----\n    This function is closely related to :func:`find_ch_adjacency`. If you\n    don't know the correct file for the neighbor definitions,\n    :func:`find_ch_adjacency` can compute the adjacency matrix from 2d\n    sensor locations.\n    \"\"\"\n    from scipy.io import loadmat\n    if not op.isabs(fname):\n        templates_dir = op.realpath(op.join(op.dirname(__file__),\n                                            'data', 'neighbors'))\n        templates = os.listdir(templates_dir)\n        for f in templates:\n            if f == fname:\n                break\n            if f == fname + '_neighb.mat':\n                fname += '_neighb.mat'\n                break\n        else:\n            raise ValueError('I do not know about this neighbor '\n                             'template: \"{}\"'.format(fname))\n\n        fname = op.join(templates_dir, fname)\n\n    nb = loadmat(fname)['neighbours']\n    ch_names = _recursive_flatten(nb['label'], str)\n    picks = _picks_to_idx(len(ch_names), picks)\n    neighbors = [_recursive_flatten(c, str) for c in\n                 nb['neighblabel'].flatten()]\n    assert len(ch_names) == len(neighbors)\n    adjacency = _ch_neighbor_adjacency(ch_names, neighbors)\n    # picking before constructing matrix is buggy\n    adjacency = adjacency[picks][:, picks]\n    ch_names = [ch_names[p] for p in picks]\n    return adjacency, ch_names\n\n\ndef _ch_neighbor_adjacency(ch_names, neighbors):\n    \"\"\"Compute sensor adjacency matrix.\n\n    Parameters\n    ----------\n    ch_names : list of str\n        The channel names.\n    neighbors : list of list\n        A list of lists of channel names. 
The neighbors to\n which the channels in ch_names are connected with.\n Must be of the same length as ch_names.\n\n Returns\n -------\n ch_adjacency : scipy.sparse matrix\n The adjacency matrix.\n \"\"\"\n if len(ch_names) != len(neighbors):\n raise ValueError('`ch_names` and `neighbors` must '\n 'have the same length')\n set_neighbors = {c for d in neighbors for c in d}\n rest = set_neighbors - set(ch_names)\n if len(rest) > 0:\n raise ValueError('Some of your neighbors are not present in the '\n 'list of channel names')\n\n for neigh in neighbors:\n if (not isinstance(neigh, list) and\n not all(isinstance(c, str) for c in neigh)):\n raise ValueError('`neighbors` must be a list of lists of str')\n\n ch_adjacency = np.eye(len(ch_names), dtype=bool)\n for ii, neigbs in enumerate(neighbors):\n ch_adjacency[ii, [ch_names.index(i) for i in neigbs]] = True\n ch_adjacency = sparse.csr_matrix(ch_adjacency)\n return ch_adjacency\n\n\ndef find_ch_adjacency(info, ch_type):\n \"\"\"Find the adjacency matrix for the given channels.\n\n This function tries to infer the appropriate adjacency matrix template\n for the given channels. If a template is not found, the adjacency matrix\n is computed using Delaunay triangulation based on 2d sensor locations.\n\n Parameters\n ----------\n info : instance of Info\n The measurement info.\n ch_type : str | None\n The channel type for computing the adjacency matrix. Currently\n supports 'mag', 'grad', 'eeg' and None. If None, the info must contain\n only one channel type.\n\n Returns\n -------\n ch_adjacency : scipy.sparse.csr_matrix, shape (n_channels, n_channels)\n The adjacency matrix.\n ch_names : list\n The list of channel names present in adjacency matrix.\n\n See Also\n --------\n read_ch_adjacency\n\n Notes\n -----\n .. versionadded:: 0.15\n\n Automatic detection of an appropriate adjacency matrix template only\n works for MEG data at the moment. This means that the adjacency matrix\n is always computed for EEG data and never loaded from a template file. 
If\n    you want to load a template for a given montage use\n    :func:`read_ch_adjacency` directly.\n    \"\"\"\n    if ch_type is None:\n        picks = channel_indices_by_type(info)\n        if sum([len(p) != 0 for p in picks.values()]) != 1:\n            raise ValueError('info must contain only one channel type if '\n                             'ch_type is None.')\n        ch_type = channel_type(info, 0)\n    else:\n        _check_option('ch_type', ch_type, ['mag', 'grad', 'eeg'])\n    (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,\n     has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,\n     has_eeg_coils_and_meg, has_eeg_coils_only,\n     has_neuromag_122_grad, has_csd_coils) = _get_ch_info(info)\n    conn_name = None\n    if has_vv_mag and ch_type == 'mag':\n        conn_name = 'neuromag306mag'\n    elif has_vv_grad and ch_type == 'grad':\n        conn_name = 'neuromag306planar'\n    elif has_neuromag_122_grad:\n        conn_name = 'neuromag122'\n    elif has_4D_mag:\n        if 'MEG 248' in info['ch_names']:\n            idx = info['ch_names'].index('MEG 248')\n            grad = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_GRAD\n            mag = info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG\n            if ch_type == 'grad' and grad:\n                conn_name = 'bti248grad'\n            elif ch_type == 'mag' and mag:\n                conn_name = 'bti248'\n        elif 'MEG 148' in info['ch_names'] and ch_type == 'mag':\n            idx = info['ch_names'].index('MEG 148')\n            if info['chs'][idx]['coil_type'] == FIFF.FIFFV_COIL_MAGNES_MAG:\n                conn_name = 'bti148'\n    elif has_CTF_grad and ch_type == 'mag':\n        if info['nchan'] < 100:\n            conn_name = 'ctf64'\n        elif info['nchan'] > 200:\n            conn_name = 'ctf275'\n        else:\n            conn_name = 'ctf151'\n    elif n_kit_grads > 0:\n        from ..io.kit.constants import KIT_NEIGHBORS\n        conn_name = KIT_NEIGHBORS.get(info['kit_system_id'])\n\n    if conn_name is not None:\n        logger.info('Reading adjacency matrix for %s.' % conn_name)\n        return read_ch_adjacency(conn_name)\n    logger.info('Could not find an adjacency matrix for the data. '\n                'Computing adjacency based on Delaunay triangulations.')\n    return _compute_ch_adjacency(info, ch_type)\n\n\ndef _compute_ch_adjacency(info, ch_type):\n    \"\"\"Compute channel adjacency matrix using Delaunay triangulations.\n\n    Parameters\n    ----------\n    info : instance of Info\n        The measurement info.\n    ch_type : str\n        The channel type for computing the adjacency matrix. Currently\n        supports 'mag', 'grad' and 'eeg'.\n\n    Returns\n    -------\n    ch_adjacency : scipy.sparse matrix, shape (n_channels, n_channels)\n        The adjacency matrix.\n    ch_names : list\n        The list of channel names present in adjacency matrix.\n    \"\"\"\n    from scipy.spatial import Delaunay\n    from .. import spatial_tris_adjacency\n    from ..channels.layout import _find_topomap_coords, _pair_grad_sensors\n    combine_grads = (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in\n                     np.unique([ch['coil_type'] for ch in info['chs']]))\n\n    picks = dict(_picks_by_type(info, exclude=[]))[ch_type]\n    ch_names = [info['ch_names'][pick] for pick in picks]\n    if combine_grads:\n        pairs = _pair_grad_sensors(info, topomap_coords=False, exclude=[])\n        if len(pairs) != len(picks):\n            raise RuntimeError('Cannot find a pair for some of the '\n                               'gradiometers. 
Cannot compute adjacency '\n 'matrix.')\n # only for one of the pair\n xy = _find_topomap_coords(info, picks[::2], sphere=HEAD_SIZE_DEFAULT)\n else:\n xy = _find_topomap_coords(info, picks, sphere=HEAD_SIZE_DEFAULT)\n tri = Delaunay(xy)\n neighbors = spatial_tris_adjacency(tri.simplices)\n\n if combine_grads:\n ch_adjacency = np.eye(len(picks), dtype=bool)\n for idx, neigbs in zip(neighbors.row, neighbors.col):\n for ii in range(2): # make sure each pair is included\n for jj in range(2):\n ch_adjacency[idx * 2 + ii, neigbs * 2 + jj] = True\n ch_adjacency[idx * 2 + ii, idx * 2 + jj] = True # pair\n ch_adjacency = sparse.csr_matrix(ch_adjacency)\n else:\n ch_adjacency = sparse.lil_matrix(neighbors)\n ch_adjacency.setdiag(np.repeat(1, ch_adjacency.shape[0]))\n ch_adjacency = ch_adjacency.tocsr()\n\n return ch_adjacency, ch_names\n\n\ndef fix_mag_coil_types(info, use_cal=False):\n \"\"\"Fix magnetometer coil types.\n\n Parameters\n ----------\n info : dict\n The info dict to correct. Corrections are done in-place.\n use_cal : bool\n If True, further refine the check for old coil types by checking\n ``info['chs'][ii]['cal']``.\n\n Notes\n -----\n This function changes magnetometer coil types 3022 (T1: SQ20483N) and\n 3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition\n records in the info structure.\n\n Neuromag Vectorview systems can contain magnetometers with two\n different coil sizes (3022 and 3023 vs. 3024). The systems\n incorporating coils of type 3024 were introduced last and are used at\n the majority of MEG sites. At some sites with 3024 magnetometers,\n the data files have still defined the magnetometers to be of type\n 3022 to ensure compatibility with older versions of Neuromag software.\n In the MNE software as well as in the present version of Neuromag\n software coil type 3024 is fully supported. Therefore, it is now safe\n to upgrade the data files to use the true coil type.\n\n .. note:: The effect of the difference between the coil sizes on the\n current estimates computed by the MNE software is very small.\n Therefore the use of ``fix_mag_coil_types`` is not mandatory.\n \"\"\"\n old_mag_inds = _get_T1T2_mag_inds(info, use_cal)\n\n for ii in old_mag_inds:\n info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_VV_MAG_T3\n logger.info('%d of %d magnetometer types replaced with T3.' %\n (len(old_mag_inds), len(pick_types(info, meg='mag'))))\n info._check_consistency()\n\n\ndef _get_T1T2_mag_inds(info, use_cal=False):\n \"\"\"Find T1/T2 magnetometer coil types.\"\"\"\n picks = pick_types(info, meg='mag')\n old_mag_inds = []\n # From email exchanges, systems with the larger T2 coil only use the cal\n # value of 2.09e-11. Newer T3 magnetometers use 4.13e-11 or 1.33e-10\n # (Triux). 
So we can use a simple check for > 3e-11.\n    for ii in picks:\n        ch = info['chs'][ii]\n        if ch['coil_type'] in (FIFF.FIFFV_COIL_VV_MAG_T1,\n                               FIFF.FIFFV_COIL_VV_MAG_T2):\n            if use_cal:\n                if ch['cal'] > 3e-11:\n                    old_mag_inds.append(ii)\n            else:\n                old_mag_inds.append(ii)\n    return old_mag_inds\n\n\ndef _get_ch_info(info):\n    \"\"\"Get channel info for inferring acquisition device.\"\"\"\n    chs = info['chs']\n    # Only take first 16 bits, as higher bits store CTF comp order\n    coil_types = {ch['coil_type'] & 0xFFFF for ch in chs}\n    channel_types = {ch['kind'] for ch in chs}\n\n    has_vv_mag = any(k in coil_types for k in\n                     [FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,\n                      FIFF.FIFFV_COIL_VV_MAG_T3])\n    has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,\n                                                FIFF.FIFFV_COIL_VV_PLANAR_T2,\n                                                FIFF.FIFFV_COIL_VV_PLANAR_T3])\n    has_neuromag_122_grad = any(k in coil_types\n                                for k in [FIFF.FIFFV_COIL_NM_122])\n\n    is_old_vv = ' ' in chs[0]['ch_name']\n\n    has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types\n    ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,\n                       FIFF.FIFFV_COIL_CTF_REF_GRAD,\n                       FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)\n    has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or\n                    (FIFF.FIFFV_MEG_CH in channel_types and\n                     any(k in ctf_other_types for k in coil_types)))\n    # hack due to MNE-C bug in IO of CTF\n    # only take first 16 bits, as higher bits store CTF comp order\n    n_kit_grads = sum(ch['coil_type'] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD\n                      for ch in chs)\n\n    has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,\n                       n_kit_grads])\n    has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and\n                     FIFF.FIFFV_EEG_CH in channel_types)\n    has_eeg_coils_and_meg = has_eeg_coils and has_any_meg\n    has_eeg_coils_only = has_eeg_coils and not has_any_meg\n    has_csd_coils = (FIFF.FIFFV_COIL_EEG_CSD in coil_types and\n                     FIFF.FIFFV_EEG_CH in channel_types)\n\n    return (has_vv_mag, has_vv_grad, is_old_vv, has_4D_mag, ctf_other_types,\n            has_CTF_grad, n_kit_grads, has_any_meg, has_eeg_coils,\n            has_eeg_coils_and_meg, has_eeg_coils_only, has_neuromag_122_grad,\n            has_csd_coils)\n\n\ndef make_1020_channel_selections(info, midline=\"z\"):\n    \"\"\"Return dict mapping from ROI names to lists of picks for 10/20 setups.\n\n    This passes through all channel names, and uses a simple heuristic to\n    separate channel names into three Region of Interest-based selections:\n    Left, Midline and Right. The heuristic is that channels ending on any of\n    the characters in ``midline`` are filed under that heading, otherwise those\n    ending in odd numbers under \"Left\", those in even numbers under \"Right\".\n    Other channels are ignored. This is appropriate for 10/20 files, but not\n    for other channel naming conventions.\n    If an info object is provided, lists are sorted from posterior to anterior.\n\n    Parameters\n    ----------\n    info : instance of Info\n        Where to obtain the channel names from. The picks will\n        be in relation to the position in ``info[\"ch_names\"]``. If possible,\n        these lists will be sorted by the y value position of the channel\n        locations, i.e., from back to front.\n    midline : str\n        Names ending in any of these characters are stored under the\n        ``Midline`` key. Defaults to 'z'. 
Note that capitalization is ignored.\n\n Returns\n -------\n selections : dict\n A dictionary mapping from ROI names to lists of picks (integers).\n \"\"\"\n _validate_type(info, \"info\")\n\n try:\n from .layout import find_layout\n layout = find_layout(info)\n pos = layout.pos\n ch_names = layout.names\n except RuntimeError: # no channel positions found\n ch_names = info[\"ch_names\"]\n pos = None\n\n selections = dict(Left=[], Midline=[], Right=[])\n for pick, channel in enumerate(ch_names):\n last_char = channel[-1].lower() # in 10/20, last char codes hemisphere\n if last_char in midline:\n selection = \"Midline\"\n elif last_char.isdigit():\n selection = \"Left\" if int(last_char) % 2 else \"Right\"\n else: # ignore the channel\n continue\n selections[selection].append(pick)\n\n if pos is not None:\n # sort channels from front to center\n # (y-coordinate of the position info in the layout)\n selections = {selection: np.array(picks)[pos[picks, 1].argsort()]\n for selection, picks in selections.items()}\n\n return selections\n\n\ndef combine_channels(inst, groups, method='mean', keep_stim=False,\n drop_bad=False):\n \"\"\"Combine channels based on specified channel grouping.\n\n Parameters\n ----------\n inst : instance of Raw, Epochs, or Evoked\n An MNE-Python object to combine the channels for. The object can be of\n type Raw, Epochs, or Evoked.\n groups : dict\n Specifies which channels are aggregated into a single channel, with\n aggregation method determined by the ``method`` parameter. One new\n pseudo-channel is made per dict entry; the dict values must be lists of\n picks (integer indices of ``ch_names``). For example::\n\n groups=dict(Left=[1, 2, 3, 4], Right=[5, 6, 7, 8])\n\n Note that within a dict entry all channels must have the same type.\n method : str | callable\n Which method to use to combine channels. If a :class:`str`, must be one\n of 'mean', 'median', or 'std' (standard deviation). If callable, the\n callable must accept one positional input (data of shape ``(n_channels,\n n_times)``, or ``(n_epochs, n_channels, n_times)``) and return an\n :class:`array <numpy.ndarray>` of shape ``(n_times,)``, or ``(n_epochs,\n n_times)``. For example with an instance of Raw or Evoked::\n\n method = lambda data: np.mean(data, axis=0)\n\n Another example with an instance of Epochs::\n\n method = lambda data: np.median(data, axis=1)\n\n Defaults to ``'mean'``.\n keep_stim : bool\n If ``True``, include stimulus channels in the resulting object.\n Defaults to ``False``.\n drop_bad : bool\n If ``True``, drop channels marked as bad before combining. Defaults to\n ``False``.\n\n Returns\n -------\n combined_inst : instance of Raw, Epochs, or Evoked\n An MNE-Python object of the same type as the input ``inst``, containing\n one virtual channel for each group in ``groups`` (and, if ``keep_stim``\n is ``True``, also containing stimulus channels).\n \"\"\"\n from ..io import BaseRaw, RawArray\n from .. 
import BaseEpochs, EpochsArray, Evoked, EvokedArray\n\n ch_axis = 1 if isinstance(inst, BaseEpochs) else 0\n ch_idx = list(range(inst.info['nchan']))\n ch_names = inst.info['ch_names']\n ch_types = inst.get_channel_types()\n inst_data = inst.data if isinstance(inst, Evoked) else inst.get_data()\n groups = OrderedDict(deepcopy(groups))\n\n # Convert string values of ``method`` into callables\n # XXX Possibly de-duplicate with _make_combine_callable of mne/viz/utils.py\n if isinstance(method, str):\n method_dict = {key: partial(getattr(np, key), axis=ch_axis)\n for key in ('mean', 'median', 'std')}\n try:\n method = method_dict[method]\n except KeyError:\n raise ValueError('\"method\" must be a callable, or one of \"mean\", '\n f'\"median\", or \"std\"; got \"{method}\".')\n\n # Instantiate channel info and data\n new_ch_names, new_ch_types, new_data = [], [], []\n if not isinstance(keep_stim, bool):\n raise TypeError('\"keep_stim\" must be of type bool, not '\n f'{type(keep_stim)}.')\n if keep_stim:\n stim_ch_idx = list(pick_types(inst.info, meg=False, stim=True))\n if stim_ch_idx:\n new_ch_names = [ch_names[idx] for idx in stim_ch_idx]\n new_ch_types = [ch_types[idx] for idx in stim_ch_idx]\n new_data = [np.take(inst_data, idx, axis=ch_axis)\n for idx in stim_ch_idx]\n else:\n warn('Could not find stimulus channels.')\n\n # Get indices of bad channels\n ch_idx_bad = []\n if not isinstance(drop_bad, bool):\n raise TypeError('\"drop_bad\" must be of type bool, not '\n f'{type(drop_bad)}.')\n if drop_bad and inst.info['bads']:\n ch_idx_bad = pick_channels(ch_names, inst.info['bads'])\n\n # Check correctness of combinations\n for this_group, this_picks in groups.items():\n # Check if channel indices are out of bounds\n if not all(idx in ch_idx for idx in this_picks):\n raise ValueError('Some channel indices are out of bounds.')\n # Check if heterogeneous sensor type combinations\n this_ch_type = np.array(ch_types)[this_picks]\n if len(set(this_ch_type)) > 1:\n types = ', '.join(set(this_ch_type))\n raise ValueError('Cannot combine sensors of different types; '\n f'\"{this_group}\" contains types {types}.')\n # Remove bad channels\n these_bads = [idx for idx in this_picks if idx in ch_idx_bad]\n this_picks = [idx for idx in this_picks if idx not in ch_idx_bad]\n if these_bads:\n logger.info('Dropped the following channels in group '\n f'{this_group}: {these_bads}')\n # Check if combining less than 2 channel\n if len(set(this_picks)) < 2:\n warn(f'Less than 2 channels in group \"{this_group}\" when '\n f'combining by method \"{method}\".')\n # If all good create more detailed dict without bad channels\n groups[this_group] = dict(picks=this_picks, ch_type=this_ch_type[0])\n\n # Combine channels and add them to the new instance\n for this_group, this_group_dict in groups.items():\n new_ch_names.append(this_group)\n new_ch_types.append(this_group_dict['ch_type'])\n this_picks = this_group_dict['picks']\n this_data = np.take(inst_data, this_picks, axis=ch_axis)\n new_data.append(method(this_data))\n new_data = np.swapaxes(new_data, 0, ch_axis)\n info = create_info(sfreq=inst.info['sfreq'], ch_names=new_ch_names,\n ch_types=new_ch_types)\n if isinstance(inst, BaseRaw):\n combined_inst = RawArray(new_data, info, first_samp=inst.first_samp,\n verbose=inst.verbose)\n elif isinstance(inst, BaseEpochs):\n combined_inst = EpochsArray(new_data, info, events=inst.events,\n tmin=inst.times[0], verbose=inst.verbose)\n elif isinstance(inst, Evoked):\n combined_inst = EvokedArray(new_data, info, 
tmin=inst.times[0],\n verbose=inst.verbose)\n\n return combined_inst\n"
] |
[
[
"numpy.swapaxes",
"numpy.take",
"numpy.abs",
"numpy.unique",
"numpy.asarray",
"numpy.memmap",
"scipy.spatial.Delaunay",
"scipy.io.loadmat",
"scipy.sparse.csr_matrix",
"numpy.concatenate",
"numpy.prod",
"numpy.repeat",
"numpy.array",
"numpy.sum",
"scipy.sparse.lil_matrix"
]
] |
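A minimal usage sketch for the `combine_channels` helper in the cell above, assuming `mne` and `numpy` are installed; the channel names, sampling rate, and random data are illustrative only, not taken from the dump:

import numpy as np
import mne

# four synthetic EEG channels, 5 s at 100 Hz
info = mne.create_info(['C3', 'C4', 'O1', 'O2'], sfreq=100., ch_types='eeg')
raw = mne.io.RawArray(np.random.randn(4, 500), info)

# one virtual channel per dict entry; values are integer picks into ch_names
combined = mne.channels.combine_channels(
    raw, dict(Central=[0, 1], Occipital=[2, 3]), method='mean')
print(combined.ch_names)  # ['Central', 'Occipital']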
laitalaj/cvpce
|
[
"7509e7d7783039f39a88edc6e411333bcf6fb2af",
"7509e7d7783039f39a88edc6e411333bcf6fb2af",
"7509e7d7783039f39a88edc6e411333bcf6fb2af"
] |
[
"cvpce/planogram_adapters.py",
"cvpce/models/proposals.py",
"cvpce/cli/datasets/gp.py"
] |
[
"import json\n\nimport torch\nimport networkx as nx\n\ndef _get_object(planogram, graph, node):\n return planogram['objects'][graph.nodes[node]['ogg']]\n\ndef _process_dir(d): # Flip for compliance w/ detections\n res = d.upper()\n if 'N' in res:\n return res.replace('N', 'S')\n if 'S' in res:\n return res.replace('S', 'N')\n return res\n\ndef read_tonioni_planogram(planogram_path):\n with open(planogram_path, 'r') as planogram_file:\n planogram = json.load(planogram_file)\n\n g = nx.DiGraph()\n western_nodes = set()\n southern_nodes = set()\n for i, entry in enumerate(planogram['graph']):\n g.add_node(i, ogg=entry['ogg'])\n g.add_edges_from((i, j, {'dir': _process_dir(k)}) for k, j in entry.items() if j >= 0 and k != 'ogg')\n if entry['w'] == -1:\n western_nodes.add(i)\n if entry['n'] == -1: # Flip for compliance w/ detections, TODO: clean this stuff up a bit\n southern_nodes.add(i)\n\n rows = {w: [] for w in western_nodes}\n cols = {s: [] for s in southern_nodes}\n for w, r in rows.items():\n prev = -1\n nxt = [w]\n while len(nxt):\n if len(nxt) > 1: raise RuntimeError(f'Multiple nodes east from {prev}: {nxt} (file: {planogram_path})')\n nxt = nxt[0]\n g.nodes[nxt]['row'] = w\n r.append(nxt)\n prev = nxt\n nxt = [e for e in g[prev] if g[prev][e]['dir'] == 'E']\n for s, c in cols.items():\n prev = -1\n nxt = [s]\n while len(nxt):\n if len(nxt) > 1: raise RuntimeError(f'Multiple nodes north from {prev}: {nxt} (file: {planogram_path})')\n nxt = nxt[0]\n g.nodes[nxt]['col'] = s\n c.append(nxt)\n prev = nxt\n nxt = [n for n in g[prev] if g[prev][n]['dir'] == 'N']\n\n row_y = {w: float('-inf') for w in rows}\n col_x = {s: float('-inf') for s in cols}\n for r in rows.values():\n baseline = 0\n x = 0\n for p in r:\n col = g.nodes[p]['col']\n if col_x[col] > float('-inf'):\n baseline = col_x[col] - x\n break\n x += _get_object(planogram, g, p)['width']\n x = baseline\n for p in r:\n col = g.nodes[p]['col']\n col_x[col] = max(x, col_x[col])\n x += _get_object(planogram, g, p)['width']\n for c in cols.values():\n baseline = 0\n y = 0\n for p in c:\n row = g.nodes[p]['row']\n if row_y[row] > float('-inf'):\n baseline = row_y[row] - y\n break\n y += _get_object(planogram, g, p)['height']\n y = baseline\n for p in c:\n row = g.nodes[p]['row']\n row_y[row] = max(y, row_y[row])\n y += _get_object(planogram, g, p)['height']\n\n for r in rows.values():\n x = col_x[g.nodes[r[0]]['col']] + _get_object(planogram, g, r[0])['width']\n for p in r[1:]:\n col = g.nodes[p]['col']\n if x > col_x[col]:\n col_x[col] = x\n else:\n x = col_x[col]\n x += _get_object(planogram, g, p)['width']\n for c in cols.values():\n y = row_y[g.nodes[c[0]]['row']] + _get_object(planogram, g, c[0])['height']\n for p in c[1:]:\n row = g.nodes[p]['row']\n if y > row_y[row]:\n row_y[row] = y\n else:\n y = row_y[row]\n y += _get_object(planogram, g, p)['height']\n\n for n, node in g.nodes.items():\n obj = _get_object(planogram, g, n)\n x1 = col_x[node['col']]\n y1 = row_y[node['row']] - obj['height']\n x2 = x1 + obj['width']\n y2 = row_y[node['row']]\n node['pos'] = (x1, y1, x2, y2)\n\n node_range = range(len(planogram['graph']))\n boxes = torch.tensor([g.nodes[i]['pos'] for i in node_range], dtype=torch.float)\n for i in g:\n label = _get_object(planogram, g, i)['img_path']\n #label = f'{g.nodes[i][\"row\"]}/{g.nodes[i][\"col\"]}'\n del g.nodes[i]['pos'], g.nodes[i]['row'], g.nodes[i]['col'], g.nodes[i]['ogg']\n g.nodes[i]['label'] = label.split('.')[0]\n labels = [g.nodes[i]['label'] for i in node_range]\n\n return boxes, labels, g\n",
"import torch\nfrom torch import nn\nfrom torch.nn import functional as nnf\nfrom torchvision import models as tmodels\nfrom torchvision.models.detection import RetinaNet\nfrom torchvision.models.detection.backbone_utils import BackboneWithFPN\nfrom torchvision.ops.feature_pyramid_network import ExtraFPNBlock, LastLevelP6P7\nfrom torchvision.ops.misc import FrozenBatchNorm2d\n\nfrom .. import utils\n\nclass StateLoggingLayer(ExtraFPNBlock):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.p6p7 = LastLevelP6P7(in_channels, out_channels)\n def forward(self, p, c, names):\n p, names = self.p6p7(p, c, names)\n print(f\"names: {names}, c: {len(c)}, p: {len(p)}\")\n print(', '.join(f'C{i}: {c[i].shape}' for i in range(len(c))))\n print(', '.join(f'P{i}: {p[i].shape}' for i in range(len(p))))\n return p, names\n\nclass LoggingTransform(nn.Module):\n def __init__(self, transform):\n super().__init__()\n self.transform = transform\n def postprocess(self, result, image_shapes, original_image_sizes):\n return self.base_transform.postprocess(result, image_shapes, original_image_sizes)\n def forward(self, images, targets):\n def pretty_targets(t):\n return {k: v.shape if torch.is_tensor(v) else v for k, v in t.items()}\n print(f'Before: {[i.shape for i in images]}, {[pretty_targets(t) for t in targets]}')\n images, targets = self.transform(images, targets)\n for t in images.tensors:\n utils.show(t)\n print(f'After: {images.tensors.shape} w/ {images.image_sizes}, {[pretty_targets(t) for t in targets]}')\n return images, targets\n\nclass SizeCapturingTransform(nn.Module):\n def __init__(self, base_transform):\n super().__init__()\n self.base_transform = base_transform\n self.image_sizes = None\n def postprocess(self, result, image_shapes, original_image_sizes):\n return self.base_transform.postprocess(result, image_shapes, original_image_sizes)\n def forward(self, images, targets):\n image_list, targets = self.base_transform(images, targets)\n self.image_sizes = image_list.image_sizes\n return image_list, targets\n\nclass GaussianLayerBlock(nn.Module):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, 3, padding=1)\n self.norm = nn.BatchNorm2d(out_channels)\n self.activation = nn.ReLU()\n\n nn.init.kaiming_normal_(self.conv.weight, nonlinearity='relu')\n nn.init.constant_(self.conv.bias, 0)\n def forward(self, x):\n x = self.conv(x)\n x = self.norm(x)\n return self.activation(x)\n\nclass GaussianLayer(nn.Module):\n def __init__(self, c_channels, p_channels):\n super().__init__()\n self.lateral = nn.Conv2d(c_channels, p_channels, 1)\n self.block1 = GaussianLayerBlock(p_channels, p_channels//2)\n self.block2 = GaussianLayerBlock(p_channels//2, p_channels//4)\n self.up = nn.Upsample(scale_factor=2)\n\n nn.init.xavier_normal_(self.lateral.weight)\n nn.init.constant_(self.lateral.bias, 0)\n def forward(self, x, p):\n x = self.lateral(x) + self.up(p)\n x = self.block1(x)\n x = self.block2(x)\n return self.up(x)\n\nclass GaussianSubnetBlock(nn.Module):\n def __init__(self, in_channels, out_channels, kernel, tanh=False):\n super().__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, kernel, padding=1 if kernel > 1 else 0)\n self.activation = nn.Tanh() if tanh else nn.ReLU()\n\n if tanh:\n nn.init.xavier_normal_(self.conv.weight, gain=nn.init.calculate_gain('tanh'))\n else:\n nn.init.kaiming_normal_(self.conv.weight, nonlinearity='relu')\n nn.init.constant_(self.conv.bias, 0)\n def forward(self, x):\n x = 
self.conv(x)\n return self.activation(x)\n\nclass GaussianSubnet(nn.Module):\n def __init__(self, in_channels, tanh=False):\n super().__init__()\n self.blocks = nn.Sequential(\n GaussianSubnetBlock(in_channels, in_channels//2, 3),\n GaussianSubnetBlock(in_channels//2, in_channels//2, 3),\n GaussianSubnetBlock(in_channels//2, in_channels//4, 3),\n GaussianSubnetBlock(in_channels//4, in_channels//4, 1),\n GaussianSubnetBlock(in_channels//4, 1, 1, tanh),\n )\n def forward(self, x):\n return self.blocks.forward(x)\n\nclass BackboneWithFPNAndGaussians(BackboneWithFPN): # todo: gaussian layer on-off switch\n # see https://github.com/pytorch/vision/blob/master/torchvision/models/detection/backbone_utils.py#L11\n def __init__(self, backbone, extra_fpn_block=LastLevelP6P7, tanh=False):\n return_layers = {f'layer{i + 1}': str(i) for i in range(4)}\n\n fpn_layers = [2, 3, 4]\n in_channels_list = [(backbone.inplanes // 8) * 2 ** (i - 1) for i in fpn_layers]\n\n out_channels = 256\n extra_blocks = extra_fpn_block(out_channels, out_channels)\n\n super().__init__(backbone, return_layers, in_channels_list, out_channels, extra_blocks)\n\n c2_channels = 256\n self.gaussian_layer = GaussianLayer(c2_channels, out_channels)\n self.gaussian_subnet = GaussianSubnet(out_channels//4, tanh)\n self.gaussians = None\n def get_gaussians(self):\n g = self.gaussians\n self.gaussians = None #making sure these are used only once and the memory is cleared\n return g\n def forward(self, x):\n x = self.body(x)\n\n _, first_features = x.popitem(last = False)\n p = self.fpn(x) # missing the c2 = first feature maps at this point, as expected by RetinaNet\n\n gl_features = self.gaussian_layer(first_features, next(iter(p.values())))\n self.gaussians = self.gaussian_subnet(gl_features)\n\n return p\n\ndef gaussian_loss(predictions, targets, sizes, tanh=False, negative_threshold=0.0, positive_threshold=0.1, min_negatives=1000, negatives_per_positive=3):\n batch_targets = torch.full_like(predictions, -1) if tanh else torch.zeros_like(predictions) # can't be done on dataset level due to \"unpredictability\" -> probably could tune RetinaNet a bit\n for i, target_and_size in enumerate(zip(targets, sizes)):\n target, size = target_and_size\n target = target[None, None]\n size = tuple(s // 2 for s in size)\n target = nnf.interpolate(target, size=size, mode='bilinear', align_corners=False)\n batch_targets[i, 0, :size[0], :size[1]] = target\n\n negative_mask = batch_targets <= negative_threshold\n positive_mask = batch_targets >= positive_threshold\n\n se = nnf.mse_loss(predictions, batch_targets, reduction='none')\n positive_se = se[positive_mask]\n negative_se = se[negative_mask]\n\n top_negatives = max(min_negatives, negatives_per_positive * len(positive_se))\n top_indices = negative_se.argsort(descending = True)[:top_negatives]\n\n return (positive_se.sum() + negative_se[top_indices].sum()) / (len(positive_se) + len(top_indices))\n\nclass GaussianLayerNetwork(RetinaNet):\n def __init__(self, resnet, num_classes, extra_fpn_block=LastLevelP6P7, transform_wrapper=SizeCapturingTransform,\n gaussian_loss_params={}, tanh=False, detections_per_img=1000, **kwargs):\n # detections_per_img: 1000 > 576 in SKU110K train, 718 in val, 533 in test > 300 (default)\n super().__init__(BackboneWithFPNAndGaussians(resnet, extra_fpn_block, tanh=tanh), num_classes, detections_per_img=detections_per_img, **kwargs)\n self.transform = transform_wrapper(self.transform)\n self.gaussian_loss_params = gaussian_loss_params\n def compute_loss(self, targets, 
head_outputs, anchors):\n loss = super().compute_loss(targets, head_outputs, anchors)\n\n predicted_gaussians = self.backbone.get_gaussians()\n loss['gaussian'] = gaussian_loss(predicted_gaussians, [t['gaussians'] for t in targets], self.transform.image_sizes, **self.gaussian_loss_params)\n\n return loss\n def forward(self, images, targets = None):\n res = super().forward(images, targets)\n if not self.training:\n for r, g in zip(res, self.backbone.get_gaussians()):\n r['gaussians'] = g\n return res\n\ndef gln_backbone(trainable_layers=5, pretrained=True):\n backbone = tmodels.resnet50(pretrained=pretrained, norm_layer=FrozenBatchNorm2d)\n\n layers_to_train = ['layer4', 'layer3', 'layer2', 'layer1', 'conv1'][:trainable_layers]\n for name, parameter in backbone.named_parameters():\n if all([not name.startswith(layer) for layer in layers_to_train]):\n parameter.requires_grad_(False)\n\n return backbone\n\ndef state_logging_gln(num_classes = 1, trainable_layers=5):\n model = GaussianLayerNetwork(\n gln_backbone(trainable_layers),\n num_classes,\n extra_fpn_block=StateLoggingLayer,\n transform_wrapper=lambda t: SizeCapturingTransform(LoggingTransform(t))\n )\n return model\n\ndef gln(num_classes = 1, trainable_layers=4, pretrained_backbone=True, tanh=False, gaussian_loss_params = {}, detections_per_img=1000):\n return GaussianLayerNetwork(gln_backbone(trainable_layers, pretrained_backbone), num_classes, tanh=tanh, gaussian_loss_params=gaussian_loss_params, detections_per_img=detections_per_img)\n",
"import random\nimport re\nimport os\nimport shutil\n\nimport click\nimport networkx as nx\nimport torch\nfrom torchvision import ops as tvops\nfrom matplotlib import pyplot as plt\n\nfrom ... import datautils, utils, production\nfrom ...defaults import GP_ROOT, GP_TEST_DIR, GP_TEST_VALIDATION_SET, GP_ANN_DIR, GP_BASELINE_ANN_FILE, GP_TRAIN_FOLDERS, GP_PLANO_DIR\n\[email protected]()\ndef gp():\n '''\n Commands for Grocery Products dataset.\n\n This command group contains commands for visualizing various aspects of the\n Grocery Products dataset (George et al. 2014)\n and the annotations, planograms and extra data in GP-180 (Tonioni et al. 2017).\n\n The actual commands under this don't contain help texts,\n sorry about that!\n I'll try to have time to add those in the future.\n '''\n pass\n\[email protected]()\[email protected](\n '--imgs',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n default=GP_TEST_DIR, show_default=True\n)\[email protected](\n '--annotations',\n type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True),\n default=GP_BASELINE_ANN_FILE, show_default=True\n)\ndef visualize_baseline(imgs, annotations):\n data = datautils.GPBaselineDataset(imgs, annotations)\n img, anns = random.choice(data)\n utils.show(img,\n groundtruth=tvops.box_convert(anns['boxes'], 'xyxy', 'xywh')\n )\n\[email protected]()\[email protected](\n '--img-dir',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n multiple=True,\n default=GP_TRAIN_FOLDERS, show_default=True\n)\[email protected]('--only', type=str, multiple=True)\ndef visualize_train(img_dir, only):\n data = datautils.GroceryProductsDataset(img_dir, only=only if len(only) else None, include_annotations=True, include_masks=True)\n img, gen_img, hier, ann = random.choice(data)\n print(' - '.join(hier))\n print(ann)\n mask = utils.scale_from_tanh(gen_img[3])\n utils.show_multiple([utils.scale_from_tanh(img), utils.scale_from_tanh(gen_img[:3]), torch.stack((mask, mask, mask))])\n\[email protected]()\[email protected](\n '--imgs',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n default=GP_TEST_DIR, show_default=True\n)\[email protected](\n '--annotations',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n default=GP_ANN_DIR, show_default=True\n)\[email protected]('--store', type=int)\[email protected]('--image', type=int)\ndef visualize_test(imgs, annotations, store, image):\n dataset = datautils.GroceryProductsTestSet(imgs, annotations)\n if store is None or image is None:\n img, anns, boxes = random.choice(dataset)\n else:\n idx = dataset.get_index_for(store, image)\n if idx is None:\n print(f'No image or annotations for store {store}, image {image}')\n return\n img, anns, boxes = dataset[idx]\n utils.show(img, groundtruth=tvops.box_convert(boxes, 'xyxy', 'xywh'), groundtruth_labels=anns)\n\[email protected]()\[email protected](\n '--img-dir',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n multiple=True,\n default=GP_TRAIN_FOLDERS, show_default=True\n)\[email protected](\n '--test-imgs',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n default=GP_TEST_DIR, show_default=True\n)\[email protected](\n '--annotations',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n default=GP_ANN_DIR, show_default=True\n)\[email protected](\n '--planograms',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, 
readable=True),\n default=GP_PLANO_DIR, show_default=True\n)\ndef visualize_planoset(img_dir, test_imgs, annotations, planograms):\n data = datautils.PlanogramTestSet(test_imgs, annotations, planograms)\n rebuildset = datautils.GroceryProductsDataset(img_dir, include_annotations=True, resize=False)\n img, anns, boxes, plano = random.choice(data)\n\n fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(6, 8)) if img.shape[2] >= img.shape[1] else plt.subplots(1, 3, figsize=(6, 6))\n fig.set_dpi(300)\n\n centres = torch.tensor([[(x1 + x2) / 2, -(y1 + y2) / 2] for x1, y1, x2, y2 in plano['boxes']])\n nx.draw(plano['graph'], pos={i: (x.item(), y.item()) for i, (x, y) in enumerate(centres)}, ax=ax1, with_labels=True)\n utils.build_rebuild(plano['boxes'], plano['labels'], rebuildset, ax=ax2)\n utils.build_fig(img, ax=ax3)\n plt.show()\n\[email protected]()\[email protected](\n '--train-imgs',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n multiple=True,\n default=GP_TRAIN_FOLDERS, show_default=True\n)\[email protected](\n '--test-imgs',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n default=GP_TEST_DIR, show_default=True\n)\[email protected](\n '--annotations',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n default=GP_ANN_DIR, show_default=True\n)\ndef visualize(train_imgs, test_imgs, annotations):\n ann_re = re.compile(r'^(Food/)?(.*?)\\..*$')\n def shorten_ann(ann):\n try:\n return ann_re.match(ann).group(2)\n except AttributeError:\n print(f'Malformed annotation: {ann}')\n return ann\n\n train_set = datautils.GroceryProductsDataset(train_imgs, include_annotations=True, random_crop=False, resize=False)\n test_set = datautils.GroceryProductsTestSet(test_imgs, annotations)\n test_imgs, test_anns, test_boxes = zip(*[random.choice(test_set) for _ in range(2)])\n test_boxes = [tvops.box_convert(boxes, 'xyxy', 'xywh') for boxes in test_boxes]\n\n uniq_anns = set(test_anns[0]) | set(test_anns[1])\n train_imgs = []\n train_anns = []\n for ann in uniq_anns:\n idx = train_set.index_for_ann(ann)\n if idx is None: continue\n img, _, _, ann = train_set[idx]\n train_imgs.append(img)\n train_anns.append(ann)\n if len(train_imgs) < 8:\n more_imgs, _, _, more_anns = zip(*[random.choice(train_set) for _ in range(8 - len(train_imgs))])\n train_imgs += more_imgs\n train_anns += more_anns\n\n test_anns = [[shorten_ann(ann) for ann in anns] for anns in test_anns]\n train_anns = [shorten_ann(ann) for ann in train_anns]\n\n utils.draw_dataset_sample(test_imgs, test_boxes, test_anns, train_imgs, train_anns)\n\[email protected]()\[email protected](\n '--img-dir',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n multiple=True,\n default=GP_TRAIN_FOLDERS, show_default=True\n)\[email protected]('--only', type=str, multiple=True)\ndef train_distribution(img_dir, only):\n if not len(only): only = None\n data = datautils.GroceryProductsDataset(img_dir, only=only, random_crop=False)\n dist, leaf = utils.gp_distribution(data)\n for h, c in dist.items():\n print(f'{h}: {c} ({leaf[h]})')\n\[email protected]()\[email protected](\n '--imgs',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n default=GP_TEST_DIR, show_default=True\n)\[email protected](\n '--annotations',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n default=GP_ANN_DIR, show_default=True\n)\[email protected]('--only', type=click.Choice(('none', 'test', 'val', 'keep2', 
'skip2')), default='none', show_default=True)\ndef test_distribution(imgs, annotations, only):\n only_list = None\n skip_list = None\n if only == 'test':\n skip_list = GP_TEST_VALIDATION_SET\n elif only == 'val':\n only_list = GP_TEST_VALIDATION_SET\n elif only == 'keep2':\n only_list = 2\n elif only == 'skip2':\n skip_list = 2\n\n data = datautils.GroceryProductsTestSet(imgs, annotations, only=only_list, skip=skip_list)\n dist, leaf = utils.gp_test_distribution(data)\n for h, c in dist.items():\n print(f'{h}: {c} ({leaf[h]})')\n utils.plot_gp_distribution(dist, leaf)\n\[email protected]()\[email protected](\n '--source-dir',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n default=utils.rel_path(*GP_ROOT, 'Training', 'Food'), show_default=True\n)\[email protected](\n '--out-dir', type=click.Path(exists=False), default=utils.rel_path(*GP_ROOT, 'Training', 'Food_Fixed'), show_default=True\n)\[email protected]('--dry-run/--no-dry-run', default=False, show_default=True)\ndef fix(source_dir, out_dir, dry_run):\n renamed_re = re.compile(r'food_(\\d+).jpg')\n to_search = [source_dir]\n hierarchies = [[]]\n print('Fixing GP...')\n while len(to_search):\n current_path = to_search.pop()\n current_hierarchy = hierarchies.pop()\n print(f'{current_path}...')\n\n files = []\n for entry in os.scandir(current_path):\n if entry.is_dir(follow_symlinks=False): # not following symlinks here to avoid possibily infinite looping\n to_search.append(entry.path)\n hierarchies.append(current_hierarchy + [entry.name])\n elif entry.is_file():\n match = renamed_re.match(entry.name)\n if match is None: continue\n files.append((int(match.group(1)), entry))\n\n if not files: continue\n\n _, files = zip(*sorted(files))\n new_names = sorted([f'{i}.jpg' for i in range(1, len(files))]) # the original annotations have JPGs and jpegs, but Tonioni's use only jpgs\n\n out_path = os.path.join(out_dir, *current_hierarchy)\n if dry_run:\n i = 0\n else:\n os.makedirs(out_path)\n print(f'{\"(Not) \" if dry_run else \"\"}Copying {len(files) - 1} files to {out_path}...')\n for f, new in zip(files[1:], new_names): # the first entry is always garbage\n if dry_run:\n if i == 0:\n print(f'{f.path} -> {os.path.join(out_path, new)}')\n i = 25\n else:\n i -= 1\n else:\n shutil.copy(f.path, os.path.join(out_path, new))\n print('Done!')\n\[email protected]()\[email protected](\n '--imgs',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n default=GP_TEST_DIR, show_default=True\n)\[email protected](\n '--annotations',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n default=GP_ANN_DIR, show_default=True\n)\[email protected](\n '--planograms',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n default=GP_PLANO_DIR, show_default=True\n)\ndef planogram_test(imgs, annotations, planograms):\n data = datautils.PlanogramTestSet(imgs, annotations, planograms, only=GP_TEST_VALIDATION_SET)\n img, anns, boxes, plano = random.choice(data)\n _, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 8))\n utils.draw_planogram(plano['boxes'], plano['labels'], ax=ax1)\n centres = torch.tensor([[(x1 + x2) / 2, (y1 + y2) / 2] for x1, y1, x2, y2 in plano['boxes']])\n nx.draw(plano['graph'], pos={i: (x.item(), y.item()) for i, (x, y) in enumerate(centres)}, ax=ax2, with_labels=True)\n #utils.build_fig(img, groundtruth=boxes, groundtruth_labels=anns, ax=ax3)\n utils.build_fig(img, ax=ax3) # TODO: Jostain syystä ei toimi groundtruthien piirto täs 
.__.\n plt.show()\n\n comparator = production.PlanogramComparator()\n res = comparator.compare(plano, {'boxes': boxes, 'labels': anns})\n print(res)\n\[email protected]()\[email protected](\n '--img-dir',\n type=click.Path(exists=True, file_okay=False, dir_okay=True, readable=True),\n multiple=True,\n default=GP_TRAIN_FOLDERS, show_default=True\n)\[email protected]('--only', type=str, multiple=True)\ndef mask_test(img_dir, only):\n data = datautils.GroceryProductsDataset(img_dir, only=only if len(only) else None)\n img, _, _ = random.choice(data)\n img = utils.scale_from_tanh(img)\n mask = utils.build_mask(img)\n utils.show_multiple([img, torch.stack((mask, mask, mask))])\n"
] |
[
[
"torch.tensor"
],
[
"torch.nn.init.calculate_gain",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.init.xavier_normal_",
"torch.zeros_like",
"torch.is_tensor",
"torch.nn.Tanh",
"torch.nn.functional.mse_loss",
"torch.nn.Upsample",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.full_like",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
],
[
"torch.stack",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"torch.tensor"
]
] |
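A smoke-test sketch for the `gln` constructor in `cvpce/models/proposals.py` above; it assumes the repo is on the import path with a torchvision build compatible with the one the repo targets, and the input size and `trainable_layers=0` are arbitrary choices here:

import torch
from cvpce.models.proposals import gln

model = gln(num_classes=1, trainable_layers=0, pretrained_backbone=False)
model.eval()  # in eval mode, forward() also attaches the predicted 'gaussians' map per image
with torch.no_grad():
    detections = model([torch.rand(3, 256, 256)])  # one dummy RGB image
print(detections[0]['boxes'].shape, detections[0]['gaussians'].shape)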
BoweiY/P-CNN
|
[
"99cb38c3217bce215afefc39228e8d63f30aef9f"
] |
[
"lib/datasets/voc_eval.py"
] |
[
"# --------------------------------------------------------\n# Fast/er R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Bharath Hariharan\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport xml.etree.ElementTree as ET\nimport os\nimport pickle\nimport numpy as np\n\ndef parse_rec(filename):\n \"\"\" Parse a PASCAL VOC xml file \"\"\"\n tree = ET.parse(filename)\n objects = []\n for obj in tree.findall('object'):\n obj_struct = {}\n obj_struct['name'] = obj.find('name').text\n obj_struct['pose'] = obj.find('pose').text\n # obj_struct['truncated'] = int(obj.find('truncated').text)\n # obj_struct['difficult'] = int(obj.find('difficult').text)\n bbox = obj.find('bndbox')\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\n int(bbox.find('ymin').text),\n int(bbox.find('xmax').text),\n int(bbox.find('ymax').text)]\n objects.append(obj_struct)\n\n return objects\n\n\ndef voc_ap(rec, prec, use_07_metric=False):\n \"\"\" ap = voc_ap(rec, prec, [use_07_metric])\n Compute VOC AP given precision and recall.\n If use_07_metric is true, uses the\n VOC 07 11 point method (default:False).\n \"\"\"\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n\ndef voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n cachedir,\n ovthresh=0.5,\n use_07_metric=False):\n \"\"\"rec, prec, ap = voc_eval(detpath,\n annopath,\n imagesetfile,\n classname,\n [ovthresh],\n [use_07_metric])\n\n Top level function that does the PASCAL VOC evaluation.\n\n detpath: Path to detections\n detpath.format(classname) should produce the detection results file.\n annopath: Path to annotations\n annopath.format(imagename) should be the xml annotations file.\n imagesetfile: Text file containing the list of images, one image per line.\n classname: Category name (duh)\n cachedir: Directory for caching the annotations\n [ovthresh]: Overlap threshold (default = 0.5)\n [use_07_metric]: Whether to use VOC07's 11 point AP computation\n (default False)\n \"\"\"\n # assumes detections are in detpath.format(classname)\n # assumes annotations are in annopath.format(imagename)\n # assumes imagesetfile is a text file with each line an image name\n # cachedir caches the annotations in a pickle file\n\n # first load gt\n if not os.path.isdir(cachedir):\n os.mkdir(cachedir)\n cachefile = os.path.join(cachedir, 'annots.pkl')\n\n # read list of images\n with open(imagesetfile, 'r') as f:\n lines = f.readlines()\n imagenames = [x.strip() for x in lines]\n\n# # spz修改 #\n# # if not os.path.isfile(cachefile):\n# # load annotations\n recs = {}\n for i, imagename in enumerate(imagenames):\n recs[imagename] = parse_rec(annopath.format(imagename))\n if i % 100 == 0:\n print('Reading annotation for {:d}/{:d}'.format(\n i + 1, len(imagenames)))\n # 
save\n print('Saving cached annotations to {:s}'.format(cachefile))\n with open(cachefile, 'wb') as f:\n pickle.dump(recs, f)\n# # else:\n# # # load\n# # with open(cachefile, 'rb') as f:\n# # try:\n# # recs = pickle.load(f)\n# # except:\n# # recs = pickle.load(f, encoding='bytes')\n\n # extract gt objects for this class\n class_recs = {}\n npos = 0\n for imagename in imagenames:\n R = [obj for obj in recs[imagename] if obj['name'] == classname]\n bbox = np.array([x['bbox'] for x in R])\n # difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n difficult = np.array([0 for x in R]).astype(np.bool)\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {'bbox': bbox,\n 'difficult': difficult,\n 'det': det}\n\n # read dets\n detfile = detpath.format(classname)\n with open(detfile, 'r') as f:\n lines = f.readlines()\n\n if len(lines) == 0:\n # No detection examples\n return 0, 0, 0, 0, npos\n\n splitlines = [x.strip().split(' ') for x in lines]\n image_ids = [x[0] for x in splitlines]\n confidence = np.array([float(x[1]) for x in splitlines])\n BB = np.array([[float(z) for z in x[2:]] for x in splitlines])\n\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n\n if BB.shape[0] > 0:\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n BB = BB[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down dets and mark TPs and FPs\n for d in range(nd):\n R = class_recs[image_ids[d]]\n bb = BB[d, :].astype(float)\n ovmax = -np.inf\n BBGT = R['bbox'].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n # intersection\n ixmin = np.maximum(BBGT[:, 0], bb[0])\n iymin = np.maximum(BBGT[:, 1], bb[1])\n ixmax = np.minimum(BBGT[:, 2], bb[2])\n iymax = np.minimum(BBGT[:, 3], bb[3])\n iw = np.maximum(ixmax - ixmin + 1., 0.)\n ih = np.maximum(iymax - iymin + 1., 0.)\n inters = iw * ih\n\n # union\n uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +\n (BBGT[:, 2] - BBGT[:, 0] + 1.) *\n (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)\n\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax]:\n tp[d] = 1.\n R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = voc_ap(rec, prec, use_07_metric)\n\n return rec, prec, ap\n"
] |
[
[
"numpy.maximum",
"numpy.minimum",
"numpy.arange",
"numpy.cumsum",
"numpy.sort",
"numpy.finfo",
"numpy.concatenate",
"numpy.max",
"numpy.argmax",
"numpy.where",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
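A quick sanity check for `voc_ap` from the cell above; the precision/recall values are made up, and the import path simply mirrors the row's `file_path` column (run it from the repo root):

import numpy as np
from lib.datasets.voc_eval import voc_ap

rec = np.array([0.1, 0.4, 0.8])
prec = np.array([1.0, 0.8, 0.5])
print(voc_ap(rec, prec, use_07_metric=False))  # exact area under the PR envelope
print(voc_ap(rec, prec, use_07_metric=True))   # VOC07 11-point approximation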
leeminsu87/KoBERT
|
[
"8ebf43f2a1fa2c4deb6b7a63a089c5904c8feda3"
] |
[
"kobert/pytorch_kobert.py"
] |
[
"# coding=utf-8\n# Copyright 2019 SK T-Brain Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom zipfile import ZipFile\nimport torch\nfrom transformers import BertModel\nimport gluonnlp as nlp\n\nfrom kobert import download, get_tokenizer\n\n\ndef get_pytorch_kobert_model(ctx=\"cpu\", cachedir=\".cache\"):\n def get_kobert_model(model_path, vocab_file, ctx=\"cpu\"):\n bertmodel = BertModel.from_pretrained(model_path, return_dict=False)\n device = torch.device(ctx)\n bertmodel.to(device)\n bertmodel.eval()\n vocab_b_obj = nlp.vocab.BERTVocab.from_sentencepiece(\n vocab_file, padding_token=\"[PAD]\"\n )\n return bertmodel, vocab_b_obj\n\n pytorch_kobert = {\n \"url\": \"s3://skt-lsl-nlp-model/KoBERT/models/kobert_v1.zip\",\n \"chksum\": \"411b242919\", # 411b2429199bc04558576acdcac6d498\n }\n\n # download model\n model_info = pytorch_kobert\n model_path, is_cached = download(\n model_info[\"url\"], model_info[\"chksum\"], cachedir=cachedir\n )\n cachedir_full = os.path.expanduser(cachedir)\n zipf = ZipFile(os.path.expanduser(model_path))\n zipf.extractall(path=cachedir_full)\n model_path = os.path.join(os.path.expanduser(cachedir), \"kobert_from_pretrained\")\n # download vocab\n vocab_path = get_tokenizer()\n return get_kobert_model(model_path, vocab_path, ctx)\n\n\nif __name__ == \"__main__\":\n import torch\n from kobert import get_pytorch_kobert_model\n\n input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])\n input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])\n token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])\n model, vocab = get_pytorch_kobert_model()\n sequence_output, pooled_output = model(input_ids, input_mask, token_type_ids)\n print(pooled_output.shape)\n print(vocab)\n print(sequence_output[0])\n"
] |
[
[
"torch.device",
"torch.LongTensor"
]
] |
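The KoBERT cell above already ends with a `__main__` demo for the model itself; the sketch below pairs it with the SentencePiece tokenizer shipped by the same package (it downloads the weights on first use and assumes `gluonnlp` is installed; the sample sentence is arbitrary):

import gluonnlp as nlp
from kobert import get_tokenizer
from kobert.pytorch_kobert import get_pytorch_kobert_model

model, vocab = get_pytorch_kobert_model()
sp = nlp.data.BERTSPTokenizer(get_tokenizer(), vocab, lower=False)
print(sp('한국어 모델을 공유합니다.'))  # SentencePiece pieces for a Korean sentence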
zzz2010/Contrib
|
[
"d351d83da718145cef9f6c98598f7fedc027efe5",
"d351d83da718145cef9f6c98598f7fedc027efe5"
] |
[
"biggan/paddorch/paddorch/linalg.py",
"MARS/test_single_stream.py"
] |
[
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom paddle.common_ops_import import *\nfrom paddle.fluid.layer_helper import LayerHelper\nfrom paddle.fluid.data_feeder import check_variable_and_dtype, check_type\nfrom paddle.fluid.framework import in_dygraph_mode, _varbase_creator\n\nfrom paddle.fluid.layers import transpose # DEFINE_ALIAS\n\n__all__ = [\n 'matmul',\n 'dot',\n # 'einsum',\n 'norm',\n 'transpose',\n 'dist',\n 't',\n 'cross',\n 'cholesky',\n # 'tensordot',\n 'bmm',\n 'histogram'\n]\n\n\ndef matmul(x, y, transpose_x=False, transpose_y=False, name=None):\n \"\"\"\n Applies matrix multiplication to two tensors. `matmul` follows\n the complete broadcast rules,\n and its behavior is consistent with `np.matmul`.\n\n Currently, the input tensors' number of dimensions can be any, `matmul` can be used to\n achieve the `dot`, `matmul` and `batchmatmul`.\n\n The actual behavior depends on the shapes of :math:`x`, :math:`y` and the\n flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically:\n\n - If a transpose flag is specified, the last two dimensions of the tensor\n are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor\n is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas\n for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`.\n\n The multiplication behavior depends on the dimensions of `x` and `y`. Specifically:\n\n - If both tensors are 1-dimensional, the dot product result is obtained.\n\n - If both tensors are 2-dimensional, the matrix-matrix product is obtained.\n\n - If the `x` is 1-dimensional and the `y` is 2-dimensional,\n a `1` is prepended to its dimension in order to conduct the matrix multiply.\n After the matrix multiply, the prepended dimension is removed.\n\n - If the `x` is 2-dimensional and `y` is 1-dimensional,\n the matrix-vector product is obtained.\n\n - If both arguments are at least 1-dimensional and at least one argument\n is N-dimensional (where N > 2), then a batched matrix multiply is obtained.\n If the first argument is 1-dimensional, a 1 is prepended to its dimension\n in order to conduct the batched matrix multiply and removed after.\n If the second argument is 1-dimensional, a 1 is appended to its\n dimension for the purpose of the batched matrix multiple and removed after.\n The non-matrix (exclude the last two dimensions) dimensions are\n broadcasted according the broadcast rule.\n For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor,\n out will be a (j, k, n, p) tensor.\n\n Args:\n x (Tensor): The input tensor which is a Tensor.\n y (Tensor): The input tensor which is a Tensor.\n transpose_x (bool): Whether to transpose :math:`x` before multiplication.\n transpose_y (bool): Whether to transpose :math:`y` before multiplication.\n name(str|None): A name for this layer(optional). 
If set None, the layer\n will be named automatically.\n\n Returns:\n Tensor: The output Tensor.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n paddle.disable_static()\n # vector * vector\n x_data = np.random.random([10]).astype(np.float32)\n y_data = np.random.random([10]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.matmul(x, y)\n print(z.numpy().shape)\n # [1]\n\n # matrix * vector\n x_data = np.random.random([10, 5]).astype(np.float32)\n y_data = np.random.random([5]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.matmul(x, y)\n print(z.numpy().shape)\n # [10]\n\n # batched matrix * broadcasted vector\n x_data = np.random.random([10, 5, 2]).astype(np.float32)\n y_data = np.random.random([2]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.matmul(x, y)\n print(z.numpy().shape)\n # [10, 5]\n\n # batched matrix * batched matrix\n x_data = np.random.random([10, 5, 2]).astype(np.float32)\n y_data = np.random.random([10, 2, 5]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.matmul(x, y)\n print(z.numpy().shape)\n # [10, 5, 5]\n\n # batched matrix * broadcasted matrix\n x_data = np.random.random([10, 1, 5, 2]).astype(np.float32)\n y_data = np.random.random([1, 3, 2, 5]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.matmul(x, y)\n print(z.numpy().shape)\n # [10, 3, 5, 5]\n\n \"\"\"\n op_type = 'matmul_v2'\n if in_dygraph_mode():\n op = getattr(core.ops, op_type)\n return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y)\n\n attrs = {\n 'trans_x': transpose_x,\n 'trans_y': transpose_y,\n }\n\n def __check_input(x, y):\n var_names = {'x': x, 'y': y}\n for name, val in var_names.items():\n check_variable_and_dtype(val, name, ['float32', 'float64'],\n 'matmul')\n\n __check_input(x, y)\n\n helper = LayerHelper('matmul_v2', **locals())\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n helper.append_op(\n type='matmul_v2',\n inputs={'X': x,\n 'Y': y},\n outputs={'Out': out},\n attrs=attrs)\n return out\n\n\ndef norm(x, p='fro', axis=None, keepdim=False, name=None):\n \"\"\"\n\t:alias_main: paddle.norm\n\t:alias: paddle.norm,paddle.tensor.norm,paddle.tensor.linalg.norm\n\n Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean\n or 2-norm, and in general the p-norm for p > 0) of a given tensor.\n\n Args:\n x (Tensor): The input tensor could be N-D tensor, and the input data\n type could be float32 or float64.\n p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`,\n `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm.\n Default value is `fro`.\n axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int\n or list(int)/tuple(int) with only one element, the vector norm is computed over the axis.\n If `axis < 0`, the dimension to norm operation is rank(input) + axis.\n If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis.\n Defalut value is `None`.\n keepdim (bool, optional): Whether to reserve the reduced dimension in the\n output Tensor. The result tensor will have fewer dimension\n than the :attr:`input` unless :attr:`keepdim` is true, default\n value is False.\n name (str, optional): The default value is None. 
Normally there is no need for\n user to set this property. For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: results of norm operation on the specified axis of input tensor,\n it's data type is the same as input's Tensor.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n paddle.disable_static()\n shape=[2, 3, 4]\n np_input = np.arange(24).astype('float32') - 12\n np_input = np_input.reshape(shape)\n x = paddle.to_tensor(np_input)\n #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]]\n # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]]\n\n # compute frobenius norm along last two dimensions.\n out_fro = paddle.norm(x, p='fro', axis=[0,1])\n # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535]\n\n # compute 2-order vector norm along last dimension.\n out_pnorm = paddle.norm(x, p=2, axis=-1)\n #out_pnorm.numpy(): [[21.118711 13.190906 5.477226]\n # [ 3.7416575 11.224972 19.131126]]\n\n # compute 2-order norm along [0,1] dimension.\n out_pnorm = paddle.norm(x, p=2, axis=[0,1])\n #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535]\n\n # compute inf-order norm\n out_pnorm = paddle.norm(x, p=np.inf)\n #out_pnorm.numpy() = [12.]\n out_pnorm = paddle.norm(x, p=np.inf, axis=0)\n #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]]\n\n # compute -inf-order norm\n out_pnorm = paddle.norm(x, p=-np.inf)\n #out_pnorm.numpy(): [0.]\n out_pnorm = paddle.norm(x, p=-np.inf, axis=0)\n #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]]\n \"\"\"\n\n def frobenius_norm(input, dim=None, keepdim=False, name=None):\n \"\"\"\n The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`.\n Args:\n input (Variable): Tensor, data type float32, float64.\n dim (list, optional): None for last two dimensions.\n keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.\n \"\"\"\n if dim is not None and not (isinstance(dim, list) and len(dim) == 2):\n raise ValueError(\n \"The dim of frobenius norm op should be None or two elements list!\"\n )\n if in_dygraph_mode():\n if dim is None:\n return core.ops.frobenius_norm(input, 'keep_dim', keepdim,\n 'reduce_all', True)\n return core.ops.frobenius_norm(input, 'dim', dim, 'keep_dim',\n keepdim, 'reduce_all', False)\n attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}\n if dim is None:\n attrs['reduce_all'] = True\n check_variable_and_dtype(input, 'input', ['float32', 'float64'],\n 'frobenius_norm')\n\n helper = LayerHelper('frobenius_norm', **locals())\n out = helper.create_variable_for_type_inference(\n dtype=helper.input_dtype())\n\n helper.append_op(\n type='frobenius_norm',\n inputs={'X': input},\n outputs={'Out': out},\n attrs=attrs)\n return out\n\n def vector_norm(input,\n porder=None,\n axis=None,\n keepdim=False,\n asvector=False,\n name=None):\n \"\"\"\n Calculate the p-order vector norm for certain dimension of Tensor `input`.\n Args:\n input (Variable): Tensor, data type float32, float64.\n porder (float, optional): None for porder=2.0.\n axis (int, optional): None for last dimension.\n keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.\n \"\"\"\n if in_dygraph_mode():\n if axis is None: axis = -1\n return core.ops.p_norm(input, 'porder', porder, 'axis', axis,\n 'keepdim', keepdim, 'asvector', asvector)\n if porder is not None:\n check_type(porder, 'porder', (float, int), 'p_norm')\n if axis is not None:\n check_type(axis, 'axis', (int), 
'p_norm')\n        check_variable_and_dtype(input, 'input', ['float32', 'float64'],\n                                 'p_norm')\n\n        attrs = {\n            'axis': axis if axis is not None else -1,\n            'porder': float(porder) if porder is not None else 2.0,\n            'keepdim': keepdim,\n            'asvector': asvector,\n            'epsilon': 1e-12,\n        }\n        helper = LayerHelper('p_norm', **locals())\n        out = helper.create_variable_for_type_inference(\n            dtype=helper.input_dtype())\n\n        helper.append_op(\n            type='p_norm',\n            inputs={'X': input},\n            outputs={'Out': out},\n            attrs=attrs)\n        return out\n\n
    def inf_norm(input,\n                 porder=None,\n                 axis=axis,\n                 keepdim=False,\n                 asvector=False,\n                 name=None):\n        helper = LayerHelper('frobenius_norm', **locals())\n        out = helper.create_variable_for_type_inference(\n            dtype=helper.input_dtype())\n        helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out})\n        reduce_out = helper.create_variable_for_type_inference(\n            dtype=helper.input_dtype())\n\n        reduce_all = True if axis == None or axis == [] or asvector == True else False\n        axis = axis if axis != None and axis != [] else [0]\n\n        reduce_type = 'reduce_max' if porder == float(\n            'inf') else 'reduce_min'  # np.float() is removed in modern NumPy; the builtin float is equivalent here\n        helper.append_op(\n            type=reduce_type,\n            inputs={'X': out},\n            outputs={'Out': reduce_out},\n            attrs={'dim': axis,\n                   'keep_dim': keepdim,\n                   'reduce_all': reduce_all})\n\n        return reduce_out\n\n
    def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None):\n        block = LayerHelper('norm', **locals())\n        out = block.create_variable_for_type_inference(\n            dtype=block.input_dtype())\n        abs_out = block.create_variable_for_type_inference(\n            dtype=block.input_dtype())\n        block.append_op(\n            type='abs', inputs={'X': input}, outputs={'Out': abs_out})\n        pow_out = block.create_variable_for_type_inference(\n            dtype=block.input_dtype())\n\n        block.append_op(\n            type='pow',\n            inputs={'X': abs_out},\n            outputs={'Out': pow_out},\n            attrs={'factor': porder})\n        sum_out = block.create_variable_for_type_inference(\n            dtype=block.input_dtype())\n        block.append_op(\n            type='reduce_sum',\n            inputs={'X': pow_out},\n            outputs={'Out': sum_out},\n            attrs={\n                'dim': axis,\n                'keep_dim': keepdim,\n                'reduce_all': True if axis is None else False\n            })\n        block.append_op(\n            type='pow',\n            inputs={'X': sum_out},\n            outputs={'Out': out},\n            attrs={'factor': float(1. / porder)})\n        return out\n\n
    if axis is None and p is not None:\n        if isinstance(p, str):\n            if p == \"fro\":\n                return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)\n            else:\n                raise ValueError(\n                    \"only valid string values are 'fro', found {}\".format(p))\n        elif isinstance(p, (int, float)):\n            return vector_norm(\n                x,\n                porder=p,\n                axis=axis,\n                keepdim=keepdim,\n                asvector=True,\n                name=name)\n        else:\n            raise ValueError(\"only valid p type is string or float, found {}\".\n                             format(type(p)))\n\n    if isinstance(axis, tuple):\n        axis = list(axis)\n    if isinstance(axis, list) and len(axis) == 1:\n        axis = axis[0]\n\n    # calculate vector norm, where axis is int or list with only one integer\n    if isinstance(axis, int):\n        if isinstance(p, str):\n            if p == \"fro\":\n                return vector_norm(\n                    x,\n                    porder=2,\n                    axis=axis,\n                    keepdim=keepdim,\n                    asvector=False,\n                    name=name)\n\n            else:\n                raise ValueError(\n                    \"only valid string values are 'fro', found {}\".format(p))\n        elif isinstance(p, (int, float)):\n            return vector_norm(\n                x,\n                axis=axis,\n                porder=p,\n                keepdim=keepdim,\n                asvector=False,\n                name=name)\n        else:\n            raise ValueError(\n                \"unsupported p for p-order vector norm, 
except float, found {}\".\n format(p))\n # calculate matrix norm, where axis is list with two integers\n elif isinstance(axis, list) and len(axis) == 2:\n if p == \"fro\":\n return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)\n elif p == np.inf or p == -np.inf:\n return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name)\n elif p == 0:\n raise ValueError(\n \"just suport axis type int or list (length of list <=1) if p = 0, found {}\".\n format(axis))\n else:\n return p_matrix_norm(\n x, porder=p, axis=axis, keepdim=keepdim, name=name)\n else:\n raise ValueError(\n \"except axis type int or list (length of list <=2), found {}\".\n format(axis))\n\n\ndef dist(x, y, p=2):\n \"\"\"\n\t:alias_main: paddle.dist\n\t:alias: paddle.dist,paddle.tensor.dist,paddle.tensor.linalg.dist\n\n This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure\n of distance. The shapes of x and y must be broadcastable. The definition is as follows, for\n details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_:\n\n - Each input has at least one dimension.\n - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist.\n\n Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be\n obtained as follows:\n\n 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the\n tensor with fewer dimensions.\n\n For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the\n dimension of y.\n\n x (4-D Tensor): 8 x 1 x 6 x 1\n\n y (4-D Tensor): 1 x 7 x 1 x 5\n\n 2. Determine the size of each dimension of the output z: choose the maximum value from the\n two input dimensions.\n\n z (4-D Tensor): 8 x 7 x 6 x 5\n\n If the number of dimensions of the two inputs are the same, the size of the output can be\n directly determined in step 2. When p takes different values, the norm formula is as follows:\n\n When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z.\n\n .. math::\n\n ||z||_{0}=\\lim_{p \\\\rightarrow 0}\\sum_{i=1}^{m}|z_i|^{p}\n\n When p = inf, the inf-norm of z is the maximum element of z.\n\n .. math::\n\n ||z||_\\infty=\\max_i |z_i|\n\n When p = -inf, the negative-inf-norm of z is the minimum element of z.\n\n .. math::\n\n ||z||_{-\\infty}=\\min_i |z_i|\n\n Otherwise, the p-norm of z follows the formula,\n\n .. math::\n\n ||z||_{p}=(\\sum_{i=1}^{m}|z_i|^p)^{\\\\frac{1}{p}}\n\n Args:\n x (Variable): 1-D to 6-D Tensor, its data type is float32 or float64.\n y (Variable): 1-D to 6-D Tensor, its data type is float32 or float64.\n p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2.\n\n Returns:\n Variable: Tensor that is the p-norm of (x - y).\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n with fluid.dygraph.guard():\n x = fluid.dygraph.to_variable(np.array([[3, 3],[3, 3]]).astype(np.float32))\n y = fluid.dygraph.to_variable(np.array([[3, 3],[3, 1]]).astype(np.float32))\n out = paddle.dist(x, y, 0)\n print(out.numpy()) # out = [1.]\n\n out = paddle.dist(x, y, 2)\n print(out.numpy()) # out = [2.]\n\n out = paddle.dist(x, y, float(\"inf\"))\n print(out.numpy()) # out = [2.]\n\n out = paddle.dist(x, y, float(\"-inf\"))\n print(out.numpy()) # out = [0.]\n \"\"\"\n check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist')\n check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist')\n check_type(p, 'p', (float, int), 'dist')\n helper = LayerHelper(\"dist\", **locals())\n out = helper.create_variable_for_type_inference(x.dtype)\n\n inputs = {\"X\": [x], \"Y\": [y]}\n outputs = {'Out': [out]}\n attrs = {\"p\": float(p)}\n helper.append_op(\n type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs)\n return out\n\n\ndef dot(x, y, name=None):\n \"\"\"\n This operator calculates inner product for vectors.\n\n .. note::\n Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix\n is the batch dimension, which means that the vectors of multiple batches are dotted.\n\n Parameters:\n x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64``\n y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64``\n name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`\n\n Returns:\n Variable: the calculated result Tensor.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n paddle.disable_static()\n x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)\n y_data = np.random.uniform(1, 3, [10]).astype(np.float32)\n x = paddle.to_tensor(x_data)\n y = paddle.to_tensor(y_data)\n z = paddle.dot(x, y)\n print(z.numpy())\n\n \"\"\"\n op_type = 'dot'\n # skip var type check in dygraph mode to improve efficiency\n if in_dygraph_mode():\n op = getattr(core.ops, op_type)\n return op(x, y)\n\n assert x is not None, 'x cannot be None in {}'.format(op_type)\n assert y is not None, 'y cannot be None in {}'.format(op_type)\n\n check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],\n op_type)\n check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'],\n op_type)\n\n helper = LayerHelper(op_type, **locals())\n if name is None:\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n else:\n out = helper.create_variable(\n name=name, dtype=x.dtype, persistable=False)\n helper.append_op(\n type=\"dot\", inputs={'X': x,\n 'Y': y}, attrs={}, outputs={\"Out\": out})\n return out\n\n\ndef t(input, name=None):\n \"\"\"\n\t:alias_main: paddle.t\n\t:alias: paddle.t,paddle.tensor.t,paddle.tensor.linalg.t\n\n Transpose <=2-D tensor.\n 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to\n the fluid.layers.transpose function which perm dimensions set 0 and 1.\n\n Args:\n input (Variable): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32.\n name(str, optional): The default value is None. Normally there is no need for\n user to set this property. 
For more information, please refer to :ref:`api_guide_Name`\n Returns:\n Variable: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64.\n\n For Example:\n .. code-block:: text\n # Example 1 (0-D tensor)\n x = tensor([0.79])\n paddle.t(x) = tensor([0.79])\n # Example 2 (1-D tensor)\n x = tensor([0.79, 0.84, 0.32])\n paddle.t(x) = tensor([0.79, 0.84, 0.32])\n\n # Example 3 (2-D tensor)\n x = tensor([0.79, 0.84, 0.32],\n [0.64, 0.14, 0.57])\n paddle.t(x) = tensor([0.79, 0.64],\n [0.84, 0.14],\n [0.32, 0.57])\n\n Examples:\n .. code-block:: python\n import paddle\n import paddle.fluid as fluid\n x = fluid.data(name='x', shape=[2, 3],\n dtype='float32')\n x_transposed = paddle.t(x)\n print x_transposed.shape\n #(3L, 2L)\n \"\"\"\n if len(input.shape) > 2:\n raise ValueError(\n \"Input(input) only support N-D (N<=2) tensor, but received \"\n \"length of Input(input) is %s. Perhaps you can use paddle.\"\n \"tensor.transpose() instead.\" % len(input.shape))\n if in_dygraph_mode():\n if len(input.shape) == 1:\n return input\n # 2-D tensor\n perm = [1, 0]\n out, _ = core.ops.transpose2(input, 'axis', perm)\n return out\n\n check_variable_and_dtype(\n input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],\n 'transpose')\n\n helper = LayerHelper('t', **locals())\n out = helper.create_variable_for_type_inference(input.dtype)\n input_shape = helper.create_variable_for_type_inference(input.dtype)\n if len(input.shape) == 1:\n out = input\n else:\n helper.append_op(\n type='transpose2',\n inputs={'X': [input]},\n outputs={'Out': [out],\n 'XShape': [input_shape]},\n attrs={'axis': [1, 0]})\n return out\n\n\ndef cross(x, y, axis=None, name=None):\n \"\"\"\n\t:alias_main: paddle.cross\n\t:alias: paddle.cross,paddle.tensor.cross,paddle.tensor.linalg.cross\n\n Computes the cross product between two tensors along an axis.\n Inputs must have the same shape, and the length of their axes should be equal to 3.\n If `axis` is not given, it defaults to the first axis found with the length 3.\n\n Args:\n x (Variable): The first input tensor variable.\n y (Variable): The second input tensor variable.\n axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3.\n name (str, optional): The default value is None. Normally there is no need for\n user to set this property. For more information, please refer to :ref:`api_guide_Name`\n\n Returns:\n Variable: A Tensor with same data type as `x`.\n\n Examples:\n .. code-block:: python\n import paddle\n from paddle import to_variable\n import numpy as np\n\n paddle.disable_static()\n\n data_x = np.array([[1.0, 1.0, 1.0],\n [2.0, 2.0, 2.0],\n [3.0, 3.0, 3.0]])\n data_y = np.array([[1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0]])\n x = to_variable(data_x)\n y = to_variable(data_y)\n\n z1 = paddle.cross(x, y)\n print(z1.numpy())\n # [[-1. -1. -1.]\n # [ 2. 2. 2.]\n # [-1. -1. -1.]]\n\n z2 = paddle.cross(x, y, axis=1)\n print(z2.numpy())\n # [[0. 0. 0.]\n # [0. 0. 0.]\n # [0. 0. 
0.]]\n \"\"\"\n if in_dygraph_mode():\n if axis is not None:\n return core.ops.cross(x, y, 'dim', axis)\n else:\n return core.ops.cross(x, y)\n\n helper = LayerHelper(\"cross\", **locals())\n out = helper.create_variable_for_type_inference(x.dtype)\n attrs = dict()\n attrs['dim'] = axis\n\n helper.append_op(\n type='cross',\n inputs={'X': x,\n 'Y': y},\n outputs={'Out': out},\n attrs=attrs)\n return out\n\n\ndef cholesky(x, upper=False, name=None):\n \"\"\"\n Computes the Cholesky decomposition of one symmetric positive-definite\n matrix or batches of symmetric positive-definite matrice.\n\n If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` ,\n and the returned matrix :math:`U` is upper-triangular. Otherwise, the\n decomposition has the form :math:`A = LL^{T}` , and the returned matrix\n :math:`L` is lower-triangular.\n\n Args:\n x (Variable): The input tensor. Its shape should be `[*, M, M]`,\n where * is zero or more batch dimensions, and matrices on the\n inner-most 2 dimensions all should be symmetric positive-definite.\n Its data type should be float32 or float64.\n upper (bool): The flag indicating whether to return upper or lower\n triangular matrices. Default: False.\n\n Returns:\n Variable: A Tensor with same shape and data type as `x`. It represents \\\n triangular matrices generated by Cholesky decomposition.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import numpy as np\n\n paddle.disable_static()\n a = np.random.rand(3, 3)\n a_t = np.transpose(a, [1, 0])\n x_data = np.matmul(a, a_t) + 1e-03\n x = paddle.to_tensor(x_data)\n out = paddle.cholesky(x, upper=False)\n print(out.numpy())\n # [[1.190523 0. 0. ]\n # [0.9906703 0.27676893 0. ]\n # [1.25450498 0.05600871 0.06400121]]\n\n \"\"\"\n if in_dygraph_mode():\n return core.ops.cholesky(x, \"upper\", upper)\n check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')\n check_type(upper, 'upper', bool, 'cholesky')\n helper = LayerHelper('cholesky', **locals())\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n helper.append_op(\n type='cholesky',\n inputs={'X': [x]},\n outputs={'Out': out},\n attrs={'upper': upper})\n return out\n\n\ndef bmm(x, y, name=None):\n \"\"\"\n\t:alias_main: paddle.bmm\n\t:alias: paddle.bmm,paddle.tensor.bmm,paddle.tensor.linalg.bmm\n\n Applies batched matrix multiplication to two tensors.\n\n Both of the two input tensors must be three-dementional and share the same batch size.\n\n if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor.\n\n Args:\n x (Variable): The input variable which is a Tensor or LoDTensor.\n y (Variable): The input variable which is a Tensor or LoDTensor.\n name(str|None): A name for this layer(optional). If set None, the layer\n will be named automatically.\n\n Returns:\n Variable: The product Tensor (or LoDTensor) variable.\n\n Examples:\n import paddle\n\n paddle.disable_static()\n\n # In imperative mode:\n # size x: (2, 2, 3) and y: (2, 3, 2)\n x = paddle.to_tensor([[[1.0, 1.0, 1.0],\n [2.0, 2.0, 2.0]],\n [[3.0, 3.0, 3.0],\n [4.0, 4.0, 4.0]]])\n y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],\n [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]])\n out = paddle.bmm(x, y)\n #output size: (2, 2, 2)\n #output value:\n #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]]\n out_np = out.numpy()\n \"\"\"\n x_shape = x.shape\n y_shape = y.shape\n if not len(x_shape) == len(y_shape) == 3:\n raise ValueError(\n \"x and y should be 3-dimensional. 
But received x's dimention: {}, y's dimention: {}\".\n format(x_shape, y_shape))\n if x_shape[2] != y_shape[1]:\n raise ValueError(\n \"x's width must be equal with y's height. But received x's shape: {}, y's shape: {}\".\n format(x_shape, y_shape))\n helper = LayerHelper('bmm', **locals())\n if in_dygraph_mode():\n return core.ops.bmm(x, y)\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out})\n return out\n\n\ndef histogram(input, bins=100, min=0, max=0):\n \"\"\"\n Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max.\n If min and max are both zero, the minimum and maximum values of the data are used.\n\n Args:\n input (Variable): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor\n should be float32, float64, int32, int64.\n bins (int): number of histogram bins\n min (int): lower end of the range (inclusive)\n max (int): upper end of the range (inclusive)\n\n Returns:\n Variable: Tensor or LoDTensor calculated by histogram layer. The data type is int64.\n\n Code Example 1:\n .. code-block:: python\n import paddle\n import numpy as np\n startup_program = paddle.static.Program()\n train_program = paddle.static.Program()\n with paddle.static.program_guard(train_program, startup_program):\n inputs = paddle.data(name='input', dtype='int32', shape=[2,3])\n output = paddle.histogram(inputs, bins=5, min=1, max=5)\n place = paddle.CPUPlace()\n exe = paddle.static.Executor(place)\n exe.run(startup_program)\n img = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int32)\n res = exe.run(train_program,\n feed={'input': img},\n fetch_list=[output])\n print(np.array(res[0])) # [0,3,0,2,1]\n\n Code Example 2:\n .. code-block:: python\n import paddle\n paddle.disable_static(paddle.CPUPlace())\n inputs = paddle.to_tensor([1, 2, 1])\n result = paddle.histogram(inputs, bins=4, min=0, max=3)\n print(result) # [0, 2, 1, 0]\n paddle.enable_static()\n \"\"\"\n if in_dygraph_mode():\n return core.ops.histogram(input, \"bins\", bins, \"min\", min, \"max\", max)\n\n helper = LayerHelper('histogram', **locals())\n check_variable_and_dtype(\n input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram')\n out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)\n helper.append_op(\n type='histogram',\n inputs={'X': input},\n outputs={'Out': out},\n attrs={'bins': bins,\n 'min': min,\n 'max': max})\n return out",
"#coding=utf-8\n# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\nimport getpass\nimport os\nimport socket\nimport numpy as np\nfrom PIL import Image, ImageFilter\nimport argparse\nimport time\nimport sys\nimport pdb\nimport math\n\nfrom utils import *\nfrom dataset.dataset import *\nfrom dataset.preprocess_data import *\nfrom models.model import generate_model\nfrom opts import parse_opts\n\nimport paddle\nimport paddle.fluid as fluid\n \nif __name__==\"__main__\":\n opt = parse_opts()\n print(opt)\n \n opt.arch = '{}-{}'.format(opt.model, opt.model_depth)\n\n with fluid.dygraph.guard(place = fluid.CUDAPlace(0)):\n print(\"Preprocessing validation data ...\")\n test_data = globals()['{}_test'.format(opt.dataset)](split = opt.split, train = 0, opt = opt)\n test_dataloader = paddle.batch(test_data, batch_size=opt.batch_size, drop_last=False)\n \n if opt.modality=='Flow': opt.input_channels = 2\n else: opt.input_channels = 3\n \n # Loading model and checkpoint\n model,_ = generate_model(opt)\n if opt.modality=='RGB' and opt.RGB_resume_path!='':\n para_dict, _ = fluid.dygraph.load_dygraph(opt.RGB_resume_path)\n model.set_dict(para_dict)\n if opt.modality=='Flow' and opt.Flow_resume_path!='':\n para_dict, _ = fluid.dygraph.load_dygraph(opt.Flow_resume_path)\n model.set_dict(para_dict)\n model.eval()\n accuracies = AverageMeter()\n clip_accuracies = AverageMeter()\n \n #Path to store results\n result_path = \"{}/{}/\".format(opt.result_path, opt.dataset)\n if not os.path.exists(result_path):\n os.makedirs(result_path) \n\n for i, data in enumerate(test_dataloader()):\n #输入视频图像、光流\n # pdb.set_trace()\n clip = np.array([x[0] for x in data]).astype('float32') \n # #输入视频图像、光流的标签 \n targets = np.array([x[1] for x in data]).astype('int')\n clip = np.squeeze(clip)\n if opt.modality == 'Flow':\n inputs = np.zeros((int(clip.shape[1]/opt.sample_duration), 2, opt.sample_duration, opt.sample_size, opt.sample_size),dtype=np.float32)\n else:\n inputs = np.zeros((int(clip.shape[1]/opt.sample_duration), 3, opt.sample_duration, opt.sample_size, opt.sample_size),dtype=np.float32)\n for k in range(inputs.shape[0]):\n inputs[k,:,:,:,:] = clip[:,k*opt.sample_duration:(k+1)*opt.sample_duration,:,:] \n #将视频图像和光流分离开\n inputs = fluid.dygraph.base.to_variable(inputs)\n targets = fluid.dygraph.base.to_variable(targets)\n outputs= model(inputs)\n preds = fluid.layers.reduce_mean(outputs, dim=0, keep_dim=True)\n # pdb.set_trace()\n acc = calculate_accuracy(preds, targets) \n accuracies.update(acc[0], targets.shape[0]) \n \n print(\"Video accuracy = \", accuracies.avg)"
] |
[
[
"numpy.float"
],
[
"numpy.squeeze",
"numpy.array"
]
] |
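A minimal dygraph sketch of how the linalg helpers in the entry above (norm, dist, bmm) surface as public paddle.* calls; it assumes a Paddle build of the same vintage as the snippet (paddle.disable_static() and paddle.to_tensor available), and the tensor values are taken from the dist docstring:

import numpy as np
import paddle

paddle.disable_static()

x = paddle.to_tensor(np.array([[3., 3.], [3., 3.]], dtype=np.float32))
y = paddle.to_tensor(np.array([[3., 3.], [3., 1.]], dtype=np.float32))

# p-norm of (x - y); routes through the dist op appended above
print(paddle.dist(x, y, p=2).numpy())                # [2.]

# Frobenius norm over both axes (the frobenius_norm branch of norm)
print(paddle.norm(x, p='fro', axis=[0, 1]).numpy())  # 6.0

# batched matmul: (b, m, k) x (b, k, n) -> (b, m, n)
a = paddle.to_tensor(np.ones((2, 2, 3), dtype=np.float32))
b = paddle.to_tensor(np.ones((2, 3, 2), dtype=np.float32))
print(paddle.bmm(a, b).shape)                        # [2, 2, 2]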
huggingface/optimum-graphcore
|
[
"356832240a3ae7744ea909a39c949d4067d01ba6"
] |
[
"optimum/graphcore/models/t5/modeling_t5.py"
] |
[
"# Copyright 2022 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch.utils.checkpoint import checkpoint\n\nimport poptorch\nfrom optimum.utils import logging\nfrom transformers import T5ForConditionalGeneration\nfrom transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput\nfrom transformers.models.t5.modeling_t5 import __HEAD_MASK_WARNING_MSG, T5LayerNorm, T5Stack\n\nfrom ...generation_utils import IPUGenerationMixin\nfrom ...modeling_utils import (\n GenerationMethodsMixin,\n PipelineMixin,\n SerializedLinear,\n SharedEmbedding,\n get_layer_ipu,\n recomputation_checkpoint,\n register,\n)\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass T5StackWithoutPositionBiasSharing(T5Stack):\n def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:\n if encoder_attention_mask.dim() == 3:\n encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]\n if encoder_attention_mask.dim() == 2:\n encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]\n # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition\n # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow\n # /transformer/transformer_layers.py#L270\n # encoder_extended_attention_mask = (encoder_extended_attention_mask ==\n # encoder_extended_attention_mask.transpose(-1, -2))\n encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility\n\n # Always use -1e4 to avoid NaN issues.\n encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4\n return encoder_extended_attention_mask\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n inputs_embeds=None,\n head_mask=None,\n cross_attn_head_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n # Model parallel\n if self.model_parallel:\n torch.cuda.set_device(self.first_device)\n self.embed_tokens = self.embed_tokens.to(self.first_device)\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n err_msg_prefix = \"decoder_\" if self.is_decoder else \"\"\n raise ValueError(\n f\"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time\"\n )\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n err_msg_prefix = \"decoder_\" if self.is_decoder else \"\"\n raise ValueError(f\"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds\")\n\n if inputs_embeds is None:\n assert self.embed_tokens is not None, \"You have to initialize the model with valid token embeddings\"\n inputs_embeds = self.embed_tokens(input_ids)\n\n batch_size, seq_length = input_shape\n\n # required mask seq length can be calculated via length of past\n mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length\n\n if use_cache is True:\n assert self.is_decoder, f\"`use_cache` can only be set to `True` if {self} is used as a decoder\"\n\n if attention_mask is None:\n attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)\n if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:\n encoder_seq_length = encoder_hidden_states.shape[1]\n encoder_attention_mask = torch.ones(\n batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long\n )\n\n # initialize past_key_values with `None` if past does not exist\n if past_key_values is None:\n past_key_values = [None] * len(self.block)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if 
self.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n head_mask = self.get_head_mask(head_mask, self.config.num_layers)\n cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers)\n present_key_value_states = () if use_cache else None\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n all_cross_attentions = () if (output_attentions and self.is_decoder) else None\n position_bias = None\n encoder_decoder_position_bias = None\n\n hidden_states = self.dropout(inputs_embeds)\n\n for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):\n layer_head_mask = head_mask[i]\n cross_attn_layer_head_mask = cross_attn_head_mask[i]\n # Model parallel\n if self.model_parallel:\n torch.cuda.set_device(hidden_states.device)\n # Ensure that attention_mask is always on the same device as hidden_states\n if attention_mask is not None:\n attention_mask = attention_mask.to(hidden_states.device)\n if position_bias is not None:\n position_bias = position_bias.to(hidden_states.device)\n if encoder_hidden_states is not None:\n encoder_hidden_states = encoder_hidden_states.to(hidden_states.device)\n if encoder_extended_attention_mask is not None:\n encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device)\n if encoder_decoder_position_bias is not None:\n encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device)\n if layer_head_mask is not None:\n layer_head_mask = layer_head_mask.to(hidden_states.device)\n if cross_attn_layer_head_mask is not None:\n cross_attn_layer_head_mask = cross_attn_layer_head_mask.to(hidden_states.device)\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warn(\n \"`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return tuple(module(*inputs, use_cache, output_attentions))\n\n return custom_forward\n\n layer_outputs = checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n extended_attention_mask,\n position_bias,\n encoder_hidden_states,\n encoder_extended_attention_mask,\n encoder_decoder_position_bias,\n layer_head_mask,\n cross_attn_layer_head_mask,\n None, # past_key_value is always None with gradient checkpointing\n )\n else:\n layer_outputs = layer_module(\n hidden_states,\n attention_mask=extended_attention_mask,\n position_bias=position_bias,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n encoder_decoder_position_bias=encoder_decoder_position_bias,\n layer_head_mask=layer_head_mask,\n cross_attn_layer_head_mask=cross_attn_layer_head_mask,\n past_key_value=past_key_value,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n\n # layer_outputs is a tuple with:\n # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights)\n if use_cache is False:\n layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]\n\n hidden_states, present_key_value_state = layer_outputs[:2]\n\n if self.is_decoder and encoder_hidden_states is not None:\n encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]\n # append next layer key value states\n if use_cache:\n present_key_value_states = present_key_value_states + (present_key_value_state,)\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[3],)\n if self.is_decoder:\n all_cross_attentions = all_cross_attentions + (layer_outputs[5],)\n\n # Model Parallel: If it's the last layer for that device, put things on the next device\n if self.model_parallel:\n for k, v in self.device_map.items():\n if i == v[-1] and \"cuda:\" + str(k) != self.last_device:\n hidden_states = hidden_states.to(\"cuda:\" + str(k + 1))\n\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.dropout(hidden_states)\n\n # Add last layer\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n present_key_value_states,\n all_hidden_states,\n all_attentions,\n all_cross_attentions,\n ]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=present_key_value_states,\n hidden_states=all_hidden_states,\n attentions=all_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\n@register(T5ForConditionalGeneration)\nclass PipelinedT5ForConditionalGeneration(\n GenerationMethodsMixin, T5ForConditionalGeneration, PipelineMixin, IPUGenerationMixin\n):\n @property\n def is_encoder_and_decoder_embeddings_computation_shared(self):\n return isinstance(self.shared, SharedEmbedding)\n\n def encoder_and_decoder_embeddings_computation(self, use_shared_embedding: bool):\n \"\"\"Sets the T5ForConditionalGeneration shared embedding layer to SharedEmbedding that combines the computation under one layer.\n\n Args:\n use_shared_embedding: whether to use SharedEmbedding or not.\n \"\"\"\n\n if use_shared_embedding:\n if isinstance(self.shared, SharedEmbedding):\n logger.warning(\"encoder and decoder embeddings computation is already shared\")\n else:\n self.shared = 
SharedEmbedding(self.shared)\n else:\n if isinstance(self.shared, nn.Embedding):\n logger.warning(\"encoder and decoder embeddings computation is not shared\")\n else:\n self.shared = self.shared.shared\n\n def scale_down_weights(self, factor: float = 1, restore: bool = False):\n self.lm_scale_modifier = 1 if not restore else None\n # self.lm_scale_modifier = nn.Parameter(torch.ones(self.config.d_model, dtype=torch.float16)) if not restore else None\n\n emb_scaling = 1 / 32.0 * factor\n att_v_scaling = 1 / 4.0 * factor\n att_o_scaling = 1 / 8.0 * factor\n ff_wi_scaling = 1 / 4.0 * factor\n ff_wo_scaling = 1 / 4.0 * factor\n ff_ln_scaling = 1 / 2.0 * factor\n\n if restore:\n emb_scaling = 1 / emb_scaling\n att_v_scaling = 1 / att_v_scaling\n att_o_scaling = 1 / att_o_scaling\n ff_wi_scaling = 1 / ff_wi_scaling\n ff_wo_scaling = 1 / ff_wo_scaling\n ff_ln_scaling = 1 / ff_ln_scaling\n\n with torch.no_grad():\n self.shared.weight *= emb_scaling\n for unit in self.encoder.block:\n unit.layer[0].SelfAttention.v.weight *= att_v_scaling\n unit.layer[0].SelfAttention.o.weight *= att_o_scaling\n unit.layer[1].DenseReluDense.wi.weight *= ff_wi_scaling\n unit.layer[1].DenseReluDense.wo.weight *= ff_wo_scaling\n unit.layer[1].layer_norm.weight *= ff_ln_scaling\n for unit in self.decoder.block:\n unit.layer[0].SelfAttention.v.weight *= att_v_scaling\n unit.layer[0].SelfAttention.o.weight *= att_o_scaling\n unit.layer[1].EncDecAttention.v.weight *= att_v_scaling\n unit.layer[1].EncDecAttention.o.weight *= att_o_scaling\n unit.layer[2].DenseReluDense.wi.weight *= ff_wi_scaling\n unit.layer[2].DenseReluDense.wo.weight *= ff_wo_scaling\n unit.layer[2].layer_norm.weight *= ff_ln_scaling\n\n if not restore:\n self.lm_scale_modifier /= emb_scaling\n\n def parallelize(self):\n \"\"\"\n Transform the model to run in an IPU pipeline.\n - Adds pipeline stages to the model\n - (If enabled) Replaces the shared embedding with a SerializedEmbedding\n - Adds recomputation checkpoints\n\n Recommended usage:\n ```\n model = PipelinedT5ForConditionalGeneration(config).parallelize().half()\n ```\n \"\"\"\n layer_ipu = get_layer_ipu(self.ipu_config.layers_per_ipu)\n\n logger.info(\"-------------------- Device Allocation --------------------\")\n logger.info(\"Embedding --> IPU 0\")\n\n if self.ipu_config.embedding_serialization_factor > 1:\n serialized_lm_head = SerializedLinear(\n self.config.d_model,\n self.shared.num_embeddings,\n self.ipu_config.embedding_serialization_factor,\n bias=False,\n mode=poptorch.MatMulSerializationMode.OutputChannels,\n )\n serialized_lm_head.load_state_dict(self.lm_head.state_dict())\n self.lm_head = serialized_lm_head\n # TODO: is it needed to check?\n if self.config.tie_word_embeddings:\n self.tie_weights()\n\n # self.scale_down_weights(factor=1)\n self.encoder_and_decoder_embeddings_computation(True)\n self.shared = poptorch.BeginBlock(self.shared, \"Embedding\", ipu_id=0)\n\n # Use a custom T5Stack implementation because sharing the position bias causes OOM error\n self.encoder.__class__ = T5StackWithoutPositionBiasSharing\n self.decoder.__class__ = T5StackWithoutPositionBiasSharing\n\n # Enable autocast for T5LayerNorm because computation cannot happen in fp16\n for mod in self.modules():\n if isinstance(mod, T5LayerNorm):\n mod.forward = poptorch.autocast(enabled=True)(mod.forward)\n\n for index, layer in enumerate(self.encoder.block):\n ipu = layer_ipu[index]\n if self.ipu_config.recompute_checkpoint_every_layer and index != self.config.num_layers - 1:\n 
recomputation_checkpoint(layer)\n self.encoder.block[index] = poptorch.BeginBlock(layer, f\"Encoder{index}\", ipu_id=ipu)\n logger.info(f\"Encoder {index:<2} --> IPU {ipu}\")\n\n self.encoder.final_layer_norm = poptorch.BeginBlock(\n self.encoder.final_layer_norm, \"Encoder Stack Final LayerNorm\", ipu_id=ipu\n )\n\n shift = len(self.encoder.block)\n for index, layer in enumerate(self.decoder.block):\n ipu = layer_ipu[index + shift]\n if self.ipu_config.recompute_checkpoint_every_layer and index != self.config.num_layers - 1:\n recomputation_checkpoint(layer)\n self.decoder.block[index] = poptorch.BeginBlock(layer, f\"Decoder{index}\", ipu_id=ipu)\n logger.info(f\"Decoder {index:<2} --> IPU {ipu}\")\n\n self.decoder.final_layer_norm = poptorch.BeginBlock(\n self.decoder.final_layer_norm, \"Decoder Stack Final LayerNorm\", ipu_id=ipu\n )\n\n logger.info(\"LM Head Output --> IPU 0\")\n self.lm_head = poptorch.BeginBlock(self.lm_head, \"LM Head Output\", ipu_id=0)\n logger.info(\"-----------------------------------------------------------\")\n return self\n\n def deparallelize(self):\n \"\"\"\n Undo the changes to the model done by `parallelize`.\n You should call this before doing `save_pretrained` so that the `model.state_dict` is\n fully compatible with `transformers.T5ForConditionalGeneration`.\n \"\"\"\n # T5ForConditionalGeneration has a deparallelize method, so make sure that the PipelineMixin one is used here.\n PipelineMixin.deparallelize(self)\n\n self.encoder_and_decoder_embeddings_computation(False)\n # self.scale_down_weights(factor=1, restore=True)\n\n self.encoder.__class__ = T5Stack\n self.decoder.__class__ = T5Stack\n\n for mod in self.modules():\n if isinstance(mod, T5LayerNorm):\n mod.forward = T5LayerNorm.forward.__get__(mod, T5LayerNorm)\n\n return self\n\n def _forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=False,\n ):\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.is_encoder_and_decoder_embeddings_computation_shared:\n inputs_embeds, decoder_inputs_embeds = self.shared(\n input_ids=input_ids,\n decoder_input_ids=decoder_input_ids,\n )\n if inputs_embeds is not None:\n input_ids = None\n if decoder_inputs_embeds is not None:\n decoder_input_ids = None\n\n # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask\n if head_mask is not None and decoder_head_mask is None:\n if self.config.num_layers == self.config.num_decoder_layers:\n warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)\n decoder_head_mask = head_mask\n\n # Encode if needed (training, first prediction pass)\n if encoder_outputs is None:\n # Convert encoder inputs in embeddings if needed\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n 
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n hidden_states = encoder_outputs[0]\n\n if self.model_parallel:\n torch.cuda.set_device(self.decoder.first_device)\n\n if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:\n # get decoder inputs from shifting lm labels to the right\n decoder_input_ids = self._shift_right(labels)\n\n # Set device for model parallelism\n if self.model_parallel:\n torch.cuda.set_device(self.decoder.first_device)\n hidden_states = hidden_states.to(self.decoder.first_device)\n if decoder_input_ids is not None:\n decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)\n if attention_mask is not None:\n attention_mask = attention_mask.to(self.decoder.first_device)\n if decoder_attention_mask is not None:\n decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)\n\n # Decode\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n inputs_embeds=decoder_inputs_embeds,\n past_key_values=past_key_values,\n encoder_hidden_states=hidden_states,\n encoder_attention_mask=attention_mask,\n head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = decoder_outputs[0]\n\n # Set device for model parallelism\n if self.model_parallel:\n torch.cuda.set_device(self.encoder.first_device)\n self.lm_head = self.lm_head.to(self.encoder.first_device)\n sequence_output = sequence_output.to(self.lm_head.weight.device)\n\n if self.config.tie_word_embeddings:\n # Rescale output before projecting on vocab\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586\n sequence_output = sequence_output * (self.model_dim**-0.5)\n\n lm_scale_modifier = getattr(self, \"lm_scale_modifier\", None)\n if lm_scale_modifier is not None:\n sequence_output = sequence_output * lm_scale_modifier\n\n lm_logits = self.lm_head(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = nn.CrossEntropyLoss(ignore_index=-100)\n loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))\n # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666\n\n if not return_dict:\n output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs\n return ((loss,) + output) if loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=loss,\n logits=lm_logits,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n def train(self, mode: bool = True) -> \"PipelinedT5ForConditionalGeneration\":\n mod = super(T5ForConditionalGeneration, self).train(mode=mode)\n mod.forward = mod._forward_for_train if mode else mod._forward_for_generate\n return mod\n\n def _forward_for_train(self, input_ids, attention_mask, decoder_input_ids, labels=None):\n outputs = self._forward(\n input_ids=input_ids,\n attention_mask=attention_mask,\n 
decoder_input_ids=decoder_input_ids,\n labels=labels,\n use_cache=False,\n return_dict=False,\n )\n # Only returning the loss to make the communication between the host and the device faster.\n return outputs[0:1]\n\n def _forward_for_generate(self, encoder_outputs, decoder_input_ids, attention_mask, labels=None):\n outputs = super().forward(\n encoder_outputs=encoder_outputs,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n return_dict=False,\n use_cache=False,\n labels=labels,\n )\n # Only returning the loss (if labels is provided) and the logits.\n if labels is None:\n return outputs[:1]\n return outputs[:2]\n\n forward = _forward_for_train\n"
] |
[
[
"torch.no_grad",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.cuda.set_device"
]
] |
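The invert_attention_mask override in T5StackWithoutPositionBiasSharing above always scales masked positions by -1e4 instead of the dtype minimum, to stay NaN-free in fp16 on IPU. Its 2-D branch can be checked standalone in plain PyTorch, no poptorch required (toy mask values, not taken from the entry):

import torch

mask = torch.tensor([[1, 1, 0, 0]])             # 2-D padding mask
ext = mask[:, None, None, :].to(torch.float16)  # broadcast to [batch, 1, 1, seq]
ext = (1.0 - ext) * -1e4                        # kept -> -0.0, masked -> -1e4
print(ext)
# tensor([[[[    -0.,     -0., -10000., -10000.]]]], dtype=torch.float16)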
fuine/nDES
|
[
"895671d8982ff408725ebab6f83fc1b2c42680cc"
] |
[
"gpu_utils/benchmark_selective_matmul.py"
] |
[
"import torch\nfrom timerit import Timerit\nimport numpy as np\nfrom numpy.random import shuffle\nfrom gpu_utils import poor_selective_matmul, create_sorted_weights_for_matmul\n\nnp.random.seed(42)\n\n\ndef benchmark_torch():\n device = torch.device(\"cuda:0\")\n t1 = Timerit(num=200, verbose=2)\n total = 0\n for timer in t1:\n matrix = torch.rand((50000, 4000), device=device)\n srt_idx = np.arange(0, 4000)\n shuffle(srt_idx)\n sorting_idx = torch.tensor(srt_idx[:2000], device=device)\n vec = torch.rand((2000,), device=device)\n result = torch.zeros((50000,), device=device)\n with timer:\n result = matrix[:, sorting_idx].matmul(vec)\n total += result.sum().item()\n print(\"t1.total_time = %r\" % (t1.total_time,))\n print(total)\n\n\ndef benchmark_cuda():\n device = torch.device(\"cuda:0\")\n t1 = Timerit(num=200, verbose=2)\n total = 0\n for timer in t1:\n matrix = torch.rand((50000, 4000), device=device)\n srt_idx = np.arange(0, 4000)\n shuffle(srt_idx)\n sorting_idx = torch.tensor(srt_idx, dtype=torch.int32, device=device)\n vec = torch.rand((2000,), device=device)\n result = torch.zeros((50000,), device=device)\n with timer:\n result = poor_selective_matmul(\n matrix, sorting_idx.argsort().int(), vec, result, 2000\n )\n # result = poor_selective_matmul(matrix, sorting_idx.int(), vec, result, 2000)\n total += result.sum().item()\n print(\"t1.total_time = %r\" % (t1.total_time,))\n print(total)\n\n\ndef benchmark_cuda_weights():\n device = torch.device(\"cuda:0\")\n t1 = Timerit(num=200, verbose=2)\n total = 0\n for timer in t1:\n matrix = torch.rand((50000, 4000), device=device)\n srt_idx = np.arange(0, 4000)\n shuffle(srt_idx)\n sorting_idx = torch.tensor(srt_idx, dtype=torch.int32, device=device)\n vec = torch.rand((2000,), device=device)\n result = torch.zeros((50000,), device=device)\n with timer:\n sorted_weights = torch.zeros((4000,), device=device)\n sorted_weights = create_sorted_weights_for_matmul(\n vec, sorting_idx, sorted_weights, 2000\n )\n result = matrix.matmul(sorted_weights)\n total += result.sum().item()\n print(\"t1.total_time = %r\" % (t1.total_time,))\n print(total)\n\n\nif __name__ == \"__main__\":\n print(\"CUDA weights\")\n benchmark_cuda_weights()\n print(\"Poor CUDA\")\n benchmark_cuda()\n print(\"Torch version\")\n benchmark_torch()\n"
] |
[
[
"numpy.random.seed",
"torch.zeros",
"numpy.arange",
"numpy.random.shuffle",
"torch.tensor",
"torch.rand",
"torch.device"
]
] |
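The benchmark_cuda_weights variant above avoids the column gather entirely: it scatters the 2000 selected weights into a dense 4000-vector and runs one full matmul. The equivalence with the gather-then-matmul baseline is easy to verify in pure PyTorch at toy sizes, with no gpu_utils kernels needed:

import torch

torch.manual_seed(42)
matrix = torch.rand(8, 10)
idx = torch.randperm(10)[:5]       # the selected (shuffled) columns
vec = torch.rand(5)

ref = matrix[:, idx].matmul(vec)   # gather-then-matmul, as in benchmark_torch

dense = torch.zeros(10)            # scatter-then-matmul, as in benchmark_cuda_weights
dense[idx] = vec
out = matrix.matmul(dense)

print(torch.allclose(ref, out))    # True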
DominickZhang/NAS-FCOS
|
[
"1f7281478430eaed028e2cc2dfa8be226c63939b"
] |
[
"maskrcnn_benchmark/nas/modeling/layer_factory.py"
] |
[
"\"\"\"Different custom layers\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom maskrcnn_benchmark.layers.dcn_v2 import DCN\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, bias=False, dilation=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n groups=groups, padding=dilation, dilation=dilation, bias=bias)\n\n\ndef conv1x1(in_planes, out_planes, stride=1, bias=False):\n \"1x1 convolution\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,\n padding=0, bias=bias)\n\n\nOPS = {\n 'skip_connect': lambda C, stride, affine, repeats=1: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine),\n 'sep_conv_3x3': lambda C, stride, affine, repeats=1: SepConv(C, C, 3, stride, 1, affine=affine, repeats=repeats),\n 'sep_conv_3x3_dil3': lambda C, stride, affine, repeats=1: SepConv(C, C, 3, stride, 3, \n affine=affine, dilation=3, repeats=repeats),\n 'sep_conv_5x5_dil6': lambda C, stride, affine, repeats=1: SepConv(C, C, 5, stride, 12, \n affine=affine, dilation=6, repeats=repeats),\n 'def_conv_3x3': lambda C, stride, affine, repeats=1: DefConv(C, C, 3),\n}\n\n\nAGG_OPS = {\n 'psum' : lambda C, stride, affine, repeats=1: ParamSum(C),\n 'cat' : lambda C, stride, affine, repeats=1: ConcatReduce(C, affine=affine, repeats=repeats),\n }\n\n\nHEAD_OPS = {\n 'skip_connect': lambda C, stride, affine, repeats=1: Identity() if stride == 1 else GN_FactorizedReduce(C, C, affine=affine),\n 'sep_conv_3x3': lambda C, stride, affine, repeats=1: GN_SepConv(C, C, 3, stride, 1, affine=affine, repeats=repeats),\n 'conv1x1': lambda C, stride, affine, repeats=1: nn.Sequential(\n conv1x1(C, C, stride=stride),\n nn.GroupNorm(32, C),\n nn.ReLU(inplace=False)),\n 'conv3x3': lambda C, stride, affine, repeats=1: nn.Sequential(\n conv3x3(C, C, stride=stride),\n nn.GroupNorm(32, C),\n nn.ReLU(inplace=False)),\n 'sep_conv_3x3_dil3': lambda C, stride, affine, repeats=1: GN_SepConv(C, C, 3, stride, 3,\n affine=affine, dilation=3, repeats=repeats),\n 'def_conv_3x3': lambda C, stride, affine, repeats=1: GN_DefConv(C, C, 3),\n }\n\ndef conv_bn(C_in, C_out, kernel_size, stride, padding, affine=True):\n return nn.Sequential(\n nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding,\n bias=False),\n nn.BatchNorm2d(C_out, affine=affine)\n )\n \ndef conv_bn_relu(C_in, C_out, kernel_size, stride, padding, affine=True):\n return nn.Sequential(\n nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding,\n bias=False),\n nn.BatchNorm2d(C_out, affine=affine),\n nn.ReLU(inplace=False),\n )\n\ndef conv_bn_relu6(inp, oup, stride):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 3, stride, 1, bias=False),\n nn.BatchNorm2d(oup),\n nn.ReLU6(inplace=True)\n )\n\ndef conv_1x1_bn_relu6(inp, oup):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n nn.ReLU6(inplace=True)\n )\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, oup, stride, expand_ratio):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n self.use_res_connect = self.stride == 1 and inp == oup\n\n self.conv = nn.Sequential(\n # pw\n nn.Conv2d(inp, inp * expand_ratio, 1, 1, 0, bias=False),\n nn.BatchNorm2d(inp * expand_ratio),\n nn.ReLU6(inplace=True),\n # dw\n nn.Conv2d(inp * expand_ratio, inp * expand_ratio, 3, stride, 1, groups=inp * expand_ratio, bias=False),\n nn.BatchNorm2d(inp * expand_ratio),\n nn.ReLU6(inplace=True),\n # pw-linear\n 
nn.Conv2d(inp * expand_ratio, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n )\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\nclass GAPConv1x1(nn.Module):\n \"\"\"Global Average Pooling + conv1x1\"\"\"\n def __init__(self, C_in, C_out):\n super(GAPConv1x1, self).__init__()\n self.conv1x1 = conv_bn_relu(C_in, C_out, 1, stride=1, padding=0)\n\n def forward(self, x):\n size = x.size()[2:]\n out = x.mean(2, keepdim=True).mean(3, keepdim=True)\n out = self.conv1x1(out)\n out = nn.functional.interpolate(out, size=size, mode='bilinear', align_corners=False)\n return out\n\n\nclass DilConv(nn.Module):\n\n def __init__(self, C_in, C_out, kernel_size, stride, padding,\n dilation, affine=True):\n super(DilConv, self).__init__()\n self.op = nn.Sequential(\n nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=C_in,\n bias=False),\n nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_out, affine=affine),\n nn.ReLU(inplace=True),\n )\n\n def forward(self, x):\n return self.op(x)\n\n\nclass SepConv(nn.Module):\n\n def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation=1, affine=True, repeats=1):\n super(SepConv, self).__init__()\n if C_in != C_out:\n assert repeats == 1, \"SepConv with C_in != C_out must have only 1 repeat\"\n basic_op = lambda: nn.Sequential(\n nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=C_in, bias=False),\n nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),\n nn.BatchNorm2d(C_out, affine=affine),\n nn.ReLU(inplace=True))\n self.op = nn.Sequential()\n for idx in range(repeats):\n self.op.add_module('sep_{}'.format(idx), \n basic_op())\n\n def forward(self, x):\n return self.op(x)\n\n\nclass GN_SepConv(nn.Module):\n\n def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation=1, affine=True, repeats=1):\n super(GN_SepConv, self).__init__()\n if C_in != C_out:\n assert repeats == 1, \"SepConv with C_in != C_out must have only 1 repeat\"\n basic_op = lambda: nn.Sequential(\n nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=C_in, bias=False),\n nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),\n nn.GroupNorm(32, C_out),\n nn.ReLU(inplace=True))\n self.op = nn.Sequential()\n for idx in range(repeats):\n self.op.add_module('sep_{}'.format(idx),\n basic_op())\n\n def forward(self, x):\n return self.op(x)\n\n\nclass Identity(nn.Module):\n\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n\n\nclass Zero(nn.Module):\n\n def __init__(self, stride):\n super(Zero, self).__init__()\n self.stride = stride\n\n def forward(self, x):\n if self.stride == 1:\n return x.mul(0.)\n return x[:, :, ::self.stride, ::self.stride].mul(0.)\n\n\nclass FactorizedReduce(nn.Module):\n\n def __init__(self, C_in, C_out, affine=True):\n super(FactorizedReduce, self).__init__()\n assert C_out % 2 == 0\n self.relu = nn.ReLU(inplace=False)\n self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2,\n padding=0, bias=False)\n self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2,\n padding=0, bias=False)\n self.bn = nn.BatchNorm2d(C_out, affine=affine)\n\n def forward(self, x):\n x = self.relu(x)\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)\n out = self.bn(out)\n return out\n\n\nclass GN_FactorizedReduce(nn.Module):\n\n def __init__(self, 
C_in, C_out, affine=True):\n super(GN_FactorizedReduce, self).__init__()\n assert C_out % 2 == 0\n self.relu = nn.ReLU(inplace=False)\n self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2,\n padding=0, bias=False)\n self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2,\n padding=0, bias=False)\n self.gn = nn.GroupNorm(32, C_out)\n\n def forward(self, x):\n x = self.relu(x)\n out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)\n out = self.gn(out)\n return out\n\n\nclass DefConv(nn.Module):\n\n def __init__(self, C_in, C_out, ksize):\n super(DefConv, self).__init__()\n self.dcn = nn.Sequential(DCN(C_in, C_out, ksize, stride=1, padding=ksize // 2, deformable_groups=2),\n nn.BatchNorm2d(C_out),\n nn.ReLU(inplace=True))\n\n def forward(self, x):\n return self.dcn(x)\n\n\nclass GN_DefConv(nn.Module):\n\n def __init__(self, C_in, C_out, ksize):\n super(GN_DefConv, self).__init__()\n self.dcn = nn.Sequential(DCN(C_in, C_out, ksize, stride=1, padding=ksize // 2, deformable_groups=2),\n nn.GroupNorm(32, C_out),\n nn.ReLU(inplace=True))\n\n def forward(self, x):\n return self.dcn(x)\n\n\ndef resize(x1, x2, largest=True):\n if largest:\n if x1.size()[2:] > x2.size()[2:]:\n x2 = nn.Upsample(size=x1.size()[2:], mode='bilinear')(x2)\n elif x1.size()[2:] < x2.size()[2:]:\n x1 = nn.Upsample(size=x2.size()[2:], mode='bilinear')(x1)\n return x1, x2\n else:\n raise NotImplementedError\n\n\nclass ParamSum(nn.Module):\n\n def __init__(self, C):\n super(ParamSum, self).__init__()\n self.a = nn.Parameter(torch.ones(C))\n self.b = nn.Parameter(torch.ones(C))\n\n def forward(self, x, y):\n bsize = x.size(0)\n x, y = resize(x, y)\n return (self.a.expand(bsize, -1)[:, :, None, None] * x +\n self.b.expand(bsize, -1)[:, :, None, None] * y)\n\n\nclass ConcatReduce(nn.Module):\n \n def __init__(self, C, affine=True, repeats=1):\n super(ConcatReduce, self).__init__()\n self.conv1x1 = nn.Sequential(\n nn.BatchNorm2d(2 * C, affine=affine),\n nn.ReLU(inplace=False),\n nn.Conv2d(2 * C, C, 1, stride=1, groups=C, padding=0, bias=False)\n )\n\n def forward(self, x, y):\n x, y = resize(x, y)\n z = torch.cat([x, y], 1)\n return self.conv1x1(z)"
] |
[
[
"torch.nn.Sequential",
"torch.nn.ReLU6",
"torch.ones",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.nn.GroupNorm",
"torch.nn.ReLU"
]
] |
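Every key in the OPS / AGG_OPS / HEAD_OPS registries above is a factory with the signature (C, stride, affine, repeats=1), which is what lets search code assemble candidate cells by name. A short usage sketch, assuming the module imports cleanly (the top-level DCN import needs the compiled dcn_v2 extension, so def_conv_3x3 is avoided here):

import torch
from maskrcnn_benchmark.nas.modeling.layer_factory import OPS, AGG_OPS

op = OPS['sep_conv_3x3'](64, 1, True)   # depthwise-separable 3x3, stride 1
x = torch.randn(2, 64, 32, 32)
print(op(x).shape)                      # torch.Size([2, 64, 32, 32])

# Aggregation ops consume two feature maps; resize() upsamples the smaller one.
agg = AGG_OPS['psum'](64, 1, True)
y = torch.randn(2, 64, 16, 16)
print(agg(x, y).shape)                  # torch.Size([2, 64, 32, 32])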
stnava/superiq
|
[
"a13befe5f525bbef02cd095031952db62c5d054e"
] |
[
"applications/evaluate_OASIS_TRT_20_BF.py"
] |
[
"import os\n# set number of threads - this should be optimized for your compute instance\nmynt=\"16\"\nos.environ[\"TF_NUM_INTEROP_THREADS\"] = mynt\nos.environ[\"TF_NUM_INTRAOP_THREADS\"] = mynt\nos.environ[\"ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS\"] = mynt\n\nimport os.path\nfrom os import path\nimport glob as glob\n\nimport math\nimport tensorflow\nimport ants\nimport antspynet\nimport tensorflow as tf\nimport glob\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom superiq import super_resolution_segmentation_per_label\nfrom superiq import ljlf_parcellation\nfrom superiq import images_to_list\nfrom superiq import check_for_labels_in_image\nfrom superiq import sort_library_by_similarity\nfrom superiq import basalforebrain_segmentation\nfrom superiq import native_to_superres_ljlf_segmentation\n\ndef splitall(path):\n allparts = []\n while 1:\n parts = os.path.split(path)\n if parts[0] == path: # sentinel for absolute paths\n allparts.insert(0, parts[0])\n break\n elif parts[1] == path: # sentinel for relative paths\n allparts.insert(0, parts[1])\n break\n else:\n path = parts[0]\n allparts.insert(0, parts[1])\n return allparts\n\n# get data from here https://ndownloader.figshare.com/files/26224727\ntdir = \"/Users/stnava/data/superiq_data_resources/\"\nif ( not path. exists( tdir ) ):\n\traise RuntimeError('Failed to find the data directory')\n\nbrains = glob.glob(tdir+\"segmentation_libraries/OASIS30/Brains/*\")\nbrains.sort()\nbrainsSeg = glob.glob(tdir+\"segmentation_libraries/OASIS30/Segmentations/*\")\nbrainsSeg.sort()\ntemplatefilename = tdir + \"template/adni_template.nii.gz\"\ntemplatesegfilename = tdir + \"template/adni_template_dkt_labels.nii.gz\"\n\nif not 'low_volume' in locals():\n low_volume=False;\n\nnhigh=20\nif low_volume: # these subjects have very low volume BF relative to others\n brains=brains[nhigh:len(brains)]\n brainsSeg=brainsSeg[nhigh:len(brainsSeg)]\nelse:\n brains=brains[0:nhigh]\n brainsSeg=brainsSeg[0:nhigh]\n\n\nseg_params={\n 'submask_dilation': 8,\n 'reg_iterations': [100, 100, 20],\n 'searcher': 1,\n 'radder': 2,\n 'syn_sampling': 32,\n 'syn_metric': 'mattes',\n 'max_lab_plus_one': True, 'verbose': True}\n\nseg_params_sr={\n 'submask_dilation': seg_params['submask_dilation']*1,\n 'reg_iterations': seg_params['reg_iterations'],\n 'searcher': seg_params['searcher'],\n 'radder': seg_params['radder'],\n 'syn_sampling': seg_params['syn_sampling'],\n 'syn_metric': seg_params['syn_metric'],\n 'max_lab_plus_one': True, 'verbose': True}\n\nsr_params={\"upFactor\": [2,2,2], \"dilation_amount\": seg_params[\"submask_dilation\"], \"verbose\":True}\nmdl = tf.keras.models.load_model(\"models/SEGSR_32_ANINN222_3.h5\")\n\n# store output data\nbrainName = []\n# the three types of output which we will compute in series\ndicevalNativeSeg = []\ndicevalSRNativeSeg = []\ndicevalSRSeg = []\ndicevalSRSeg2 = []\n\nbrainsTest = glob.glob(tdir+\"segmentation_libraries/OASISTRT20/Brains/*\")\nbrainsTestSeg = glob.glob(tdir+\"segmentation_libraries/OASISTRT20/Segmentations/*\")\nbrainsTest.sort()\nbrainsTestSeg.sort()\nfor k in range( len(brainName), len( brainsTest ) ):\n localid=os.path.splitext( os.path.splitext( os.path.basename( brainsTest[k]) )[0])[0]\n original_image = ants.image_read(brainsTest[k])\n testseg = ants.image_read(brainsTestSeg[k])\n # map to basal forebrain\n testseg = ants.mask_image( testseg, testseg, [91,92], binarize=True )\n print( str(k) + \" \" + localid)\n # first - create a SR version of the image and the ground truth\n\n # now 
segment it with the library\n wlab = [75,76]\n sloop = native_to_superres_ljlf_segmentation(\n target_image = original_image,\n segmentation_numbers = wlab,\n template = ants.image_read(templatefilename),\n template_segmentation = ants.image_read(templatesegfilename),\n library_intensity=images_to_list(brains),\n library_segmentation=images_to_list(brainsSeg),\n seg_params = seg_params,\n sr_params = sr_params,\n sr_model = mdl )\n\n mypt = 0.5\n srGroundTruthNN = ants.resample_image_to_target( testseg, sloop['srOnNativeSeg']['super_resolution'] , interp_type='nearestNeighbor' )\n srsegLJLF = ants.threshold_image( sloop['srSeg']['probsum'], mypt, math.inf )\n nativejlf = ants.mask_image( sloop['nativeSeg']['segmentation'], sloop['nativeSeg']['segmentation'], wlab, binarize=True)\n nativeOverlapSloop = ants.label_overlap_measures( testseg, nativejlf )\n nativejlfsr = ants.mask_image( sloop['srOnNativeSeg']['super_resolution_segmentation'], sloop['srOnNativeSeg']['super_resolution_segmentation'], wlab, binarize=True)\n srOnNativeOverlapSloop = ants.label_overlap_measures( srGroundTruthNN, nativejlfsr )\n srjlf = ants.mask_image( sloop['srSeg']['segmentation'], sloop['srSeg']['segmentation'], wlab, binarize=True )\n srOverlapSloop = ants.label_overlap_measures( srGroundTruthNN, srjlf )\n srOverlap2 = ants.label_overlap_measures( srGroundTruthNN, srsegLJLF )\n # collect the 3 evaluation results - ready for data frame\n brainName.append( localid )\n dicevalNativeSeg.append(nativeOverlapSloop[\"MeanOverlap\"][0])\n dicevalSRNativeSeg.append( srOnNativeOverlapSloop[\"MeanOverlap\"][0])\n dicevalSRSeg.append( srOverlapSloop[\"MeanOverlap\"][0])\n dicevalSRSeg2.append( srOverlap2[\"MeanOverlap\"][0])\n print( brainName[k] + \": N: \" + str(dicevalNativeSeg[k]) + \" SRN: \" + str(dicevalSRNativeSeg[k])+ \" SRN: \" + str(dicevalSRSeg[k]) )\n ################################################################################\n dict = {\n 'name': brainName,\n 'diceNativeSeg': dicevalNativeSeg,\n 'diceSRNativeSeg': dicevalSRNativeSeg,\n 'diceSRSeg': dicevalSRSeg }\n df = pd.DataFrame(dict)\n df.to_csv('./bf_sr_eval_TRT20_via_OASIS30.csv' )\n ################################################################################\n\n# these are the outputs you would write out, along with label geometry for each segmentation\nants.image_write( sloop['srOnNativeSeg']['super_resolution'], '/tmp/tempI.nii.gz' )\nants.image_write( srGroundTruthNN, '/tmp/tempGT.nii.gz' )\nants.image_write( sloop['srSeg']['segmentation'], '/tmp/tempSRSeg.nii.gz' )\nants.image_write( sloop['nativeSeg']['segmentation'], '/tmp/tempORSeg.nii.gz' )\n"
] |
[
[
"tensorflow.keras.models.load_model",
"pandas.DataFrame"
]
] |
co2meal/-bnpy-dev
|
[
"8f297d8f3e4a56088d7755134c329f63a550be9e"
] |
[
"bnpy/deletemove/DCollector.py"
] |
[
"\"\"\"\nFunctions for collecting a target dataset for a delete move.\n\n- addDataFromBatchToPlan\n- getDataSubsetRelevantToPlan\n\"\"\"\n\nimport numpy as np\nimport DeleteLogger\n\n\ndef addDataFromBatchToPlan(Plan, hmodel, Dchunk, LPchunk,\n uIDs=None,\n maxUID=-1,\n batchID=0,\n lapFrac=None,\n isFirstBatch=0,\n isLastBatch=0,\n dtargetMaxSize=1000,\n dtargetMinCount=0.01,\n **kwargs):\n \"\"\" Add relevant data from provided chunk to the planned target set.\n\n Returns\n -------\n Plan : dict, same reference as provided, updated in-place.\n\n Post Conditions\n -------\n Plan dict is updated if current chunk has items to add to target set.\n Updated fields:\n * DTargetData\n * batchIDs\n\n Plan dict will be returned empty if:\n * Target set goes over the budget space of dtargetMaxSize\n * Target set has no items after the last batch.\n \"\"\"\n assert uIDs is not None\n # Remember that recent seqcreate moves\n # can create more states in local params\n # than are currently available in the whole-dataset model,\n # because global step hasn't happened yet.\n assert len(uIDs) >= hmodel.allocModel.K\n assert len(uIDs) >= hmodel.obsModel.K\n\n if isFirstBatch:\n msg = '<<<<<<<<<<<<<<<<<<<< addDataFromBatchToPlan @ lap %6.2f' \\\n % (np.ceil(lapFrac))\n DeleteLogger.log(msg)\n\n relData, relIDs = getDataSubsetRelevantToPlan(\n Dchunk, LPchunk, Plan,\n dtargetMinCount=dtargetMinCount)\n relSize = getSize(relData)\n if relSize < 1:\n msg = ' %6.3f | batch %3d | batch trgtSize 0 | agg trgtSize 0' \\\n % (lapFrac, batchID)\n DeleteLogger.log(msg)\n\n if isLastBatch and not hasValidKey(Plan, 'DTargetData'):\n DeleteLogger.log(\"ABANDONED. No relevant items found.\")\n return dict()\n return Plan\n\n # ---- Add all these docs to the Plan\n batchIDs = [batchID for n in xrange(relSize)]\n if hasValidKey(Plan, 'DTargetData'):\n Plan['DTargetData'].add_data(relData)\n Plan['batchIDs'].extend(batchIDs)\n else:\n Plan['DTargetData'] = relData\n Plan['batchIDs'] = batchIDs\n Plan['dataUnitIDs'] = relIDs\n\n curTargetSize = getSize(Plan['DTargetData'])\n if curTargetSize > dtargetMaxSize:\n for key in Plan.keys():\n del Plan[key]\n msg = ' %6.3f | batch %3d | targetSize %d EXCEEDED BUDGET of %d' \\\n % (lapFrac, batchID, curTargetSize, dtargetMaxSize)\n DeleteLogger.log(msg)\n DeleteLogger.log(\"ABANDONED.\")\n return Plan\n\n if lapFrac is not None:\n msg = ' %6.3f | batch %3d | batch trgtSize %5d | agg trgtSize %5d' \\\n % (lapFrac, batchID, relSize, curTargetSize)\n DeleteLogger.log(msg)\n\n # ---- Track stats specific to chosen subset\n targetLPchunk = hmodel.allocModel.selectSubsetLP(Dchunk, LPchunk, relIDs)\n targetSSchunk = hmodel.get_global_suff_stats(relData, targetLPchunk,\n doPrecompEntropy=1)\n targetSSchunk.uIDs = uIDs.copy()\n\n # ---- targetSS tracks aggregate stats across batches\n if not hasValidKey(Plan, 'targetSS'):\n Kextra = 0\n Plan['targetSS'] = targetSSchunk.copy()\n else:\n Kextra = targetSSchunk.K - Plan['targetSS'].K\n if Kextra > 0:\n Plan['targetSS'].insertEmptyComps(Kextra)\n Plan['targetSS'] += targetSSchunk\n curUIDs = Plan['targetSS'].uIDs\n newUIDs = np.arange(maxUID - Kextra + 1, maxUID + 1)\n Plan['targetSS'].uIDs = np.hstack([curUIDs, newUIDs])\n\n # ---- targetSSByBatch tracks batch-specific stats\n if not hasValidKey(Plan, 'targetSSByBatch'):\n Plan['targetSSByBatch'] = dict()\n Plan['targetSSByBatch'][batchID] = targetSSchunk\n\n if np.allclose(lapFrac, np.ceil(lapFrac)):\n # Update batch-specific info\n # to account for any recent births\n for batchID in 
Plan['targetSSByBatch']:\n Kcur = Plan['targetSSByBatch'][batchID].K\n Kfinal = targetSSchunk.K\n Kextra = Kfinal - Kcur\n if Kextra > 0:\n curUIDs = Plan['targetSSByBatch'][batchID].uIDs\n newUIDs = np.arange(maxUID - Kextra + 1, maxUID + 1)\n newUIDs = np.hstack([curUIDs, newUIDs])\n\n del Plan['targetSSByBatch'][batchID].uIDs\n Plan['targetSSByBatch'][batchID].insertEmptyComps(Kextra)\n Plan['targetSSByBatch'][batchID].uIDs = newUIDs\n\n return Plan\n\n\ndef getDataSubsetRelevantToPlan(Dchunk, LPchunk, Plan,\n dtargetMinCount=0.01):\n \"\"\" Get subset of provided DataObj containing units relevant to the Plan.\n\n Returns\n --------\n relData : None or bnpy.data.DataObj\n relIDs : list of integer ids of relevant units of provided Dchunk\n \"\"\"\n if not hasValidKey(Plan, 'candidateIDs'):\n return None, []\n\n for dd, delCompID in enumerate(Plan['candidateIDs']):\n if 'DocTopicCount' in LPchunk:\n DocTopicCount = LPchunk['DocTopicCount']\n curkeepmask = DocTopicCount[:, delCompID] >= dtargetMinCount\n elif 'respPair' in LPchunk or 'TransCount' in LPchunk:\n curkeepmask = np.zeros(Dchunk.nDoc, dtype=np.int32)\n for n in xrange(Dchunk.nDoc):\n start = Dchunk.doc_range[n]\n stop = Dchunk.doc_range[n + 1]\n Usage_n = np.sum(LPchunk['resp'][start:stop, delCompID])\n curkeepmask[n] = Usage_n >= dtargetMinCount\n else:\n curkeepmask = LPchunk['resp'][:, delCompID] >= dtargetMinCount\n\n # Aggregate current mask with masks for all previous delCompID values\n if dd > 0:\n keepmask = np.logical_or(keepmask, curkeepmask)\n else:\n keepmask = curkeepmask\n\n relUnitIDs = np.flatnonzero(keepmask)\n if len(relUnitIDs) < 1:\n return None, relUnitIDs\n else:\n relData = Dchunk.select_subset_by_mask(relUnitIDs,\n doTrackFullSize=False)\n return relData, relUnitIDs\n\n\ndef hasValidKey(dict, key):\n \"\"\" Return True if key is in dict and not None, False otherwise.\n \"\"\"\n return key in dict and dict[key] is not None\n\n\ndef getSize(Data):\n \"\"\" Return the integer size of the provided dataset.\n \"\"\"\n if Data is None:\n return 0\n elif hasattr(Data, 'nDoc'):\n return Data.nDoc\n else:\n return Data.nObs\n"
] |
[
[
"numpy.hstack",
"numpy.arange",
"numpy.flatnonzero",
"numpy.logical_or",
"numpy.ceil",
"numpy.zeros",
"numpy.sum"
]
] |
quantummind/quantum
|
[
"fd952d0362c5445eef0da4437fb3e5ebb16b7948"
] |
[
"tensorflow_quantum/python/layers/circuit_executors/expectation_test.py"
] |
[
"# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow_quantum.layers.circuit_executors.expectation.\"\"\"\nimport numpy as np\nfrom absl.testing import parameterized\nimport sympy\nimport tensorflow as tf\n\nimport cirq\nfrom tensorflow_quantum.python.layers.circuit_executors import expectation\nfrom tensorflow_quantum.python.differentiators import linear_combination\nfrom tensorflow_quantum.python import util\n\n\ndef _gen_single_bit_rotation_problem(bit, symbols, noisy):\n \"\"\"Generate a toy problem on 1 qubit.\"\"\"\n starting_state = np.random.uniform(0, 2 * np.pi, 3)\n circuit = cirq.Circuit(\n cirq.rx(starting_state[0])(bit),\n cirq.ry(starting_state[1])(bit),\n cirq.rz(starting_state[2])(bit),\n cirq.rz(symbols[2])(bit),\n cirq.ry(symbols[1])(bit),\n cirq.rx(symbols[0])(bit))\n if noisy:\n circuit += cirq.depolarize(0.01)(bit)\n\n return circuit\n\n\nclass ExpectationTest(tf.test.TestCase):\n \"\"\"Basic tests for the expectation layer.\"\"\"\n\n def test_expectation_instantiate(self):\n \"\"\"Test that Expectation instantiates correctly.\"\"\"\n expectation.Expectation()\n expectation.Expectation(backend=None)\n expectation.Expectation(backend='noisy')\n expectation.Expectation(backend='noiseless')\n expectation.Expectation(backend=cirq.Simulator())\n expectation.Expectation(\n differentiator=linear_combination.ForwardDifference())\n\n def test_expectation_instantiate_error(self):\n \"\"\"Test that Expectation errors with bad inputs.\"\"\"\n\n class MySampler(cirq.Sampler):\n \"\"\"Class to test sampler detection in Expectation.\"\"\"\n\n def run_sweep(self):\n \"\"\"do nothing.\"\"\"\n return\n\n with self.assertRaisesRegex(TypeError,\n expected_regex=\"SampledExpectation\"):\n expectation.Expectation(backend=MySampler())\n\n with self.assertRaisesRegex(\n TypeError, expected_regex=\"SimulatesExpectationValues or None\"):\n expectation.Expectation(backend='junk')\n\n with self.assertRaisesRegex(\n TypeError, expected_regex=\"tfq.differentiators.Differentiator\"):\n expectation.Expectation(differentiator='junk')\n\n def test_expectation_type_inputs_error(self):\n \"\"\"Test that expectation errors within Keras call.\"\"\"\n\n bit = cirq.GridQubit(0, 0)\n test_pstring = cirq.Z(bit)\n test_psum = cirq.PauliSum.from_pauli_strings([test_pstring])\n reg_circuit = cirq.Circuit(cirq.H(bit))\n\n with self.assertRaisesRegex(Exception,\n expected_regex=\"Unknown initializer\"):\n expectation.Expectation()(reg_circuit,\n operators=test_psum,\n initializer='junk')\n\n with self.assertRaisesRegex(Exception,\n expected_regex=\"repetitions not provided\"):\n expectation.Expectation(backend='noisy')(reg_circuit,\n operators=test_psum)\n\n with self.assertRaisesRegex(Exception,\n expected_regex=\"cannot be parsed\"):\n expectation.Expectation(backend='noisy')(reg_circuit,\n operators=test_psum,\n repetitions='junk')\n\n with 
self.assertRaisesRegex(Exception, expected_regex=\"noiseless\"):\n expectation.Expectation(backend='noiseless')(reg_circuit,\n operators=test_psum,\n repetitions=1)\n\n def test_expectation_op_error(self):\n \"\"\"Test that expectation errors within underlying ops correctly.\"\"\"\n\n bit = cirq.GridQubit(0, 0)\n symbol = sympy.Symbol('alpha')\n test_pstring = cirq.Z(bit)\n test_psum = cirq.PauliSum.from_pauli_strings([test_pstring])\n symb_circuit = cirq.Circuit(cirq.H(bit)**symbol)\n reg_circuit = cirq.Circuit(cirq.H(bit))\n\n with self.assertRaisesRegex(Exception,\n expected_regex=\"Could not find symbol\"):\n # No symbol matchups.\n expectation.Expectation()([symb_circuit], operators=test_psum)\n\n with self.assertRaisesRegex(Exception,\n expected_regex=\"Unparseable proto\"):\n # Proto is unparseable.\n expectation.Expectation()([reg_circuit],\n operators=tf.convert_to_tensor(\n [['bad_operator']]))\n\n with self.assertRaisesRegex(Exception, expected_regex=\"rank 2\"):\n # Operators has wrong rank.\n expectation.Expectation()([reg_circuit],\n operators=util.convert_to_tensor(\n [test_psum]))\n\n with self.assertRaisesRegex(Exception, expected_regex=\"rank 2\"):\n # symbol_values has wrong rank.\n expectation.Expectation()([symb_circuit],\n symbol_names=[symbol],\n symbol_values=[0.5],\n operators=test_psum)\n\n with self.assertRaisesRegex(Exception, expected_regex=\"do not match.\"):\n # Wrong batch size for pauli operators.\n expectation.Expectation()(symb_circuit,\n symbol_names=[symbol],\n operators=[[test_psum], [test_psum]])\n\n with self.assertRaisesRegex(Exception, expected_regex=\"do not match.\"):\n # Wrong batch_size for symbol values.\n expectation.Expectation()([symb_circuit],\n symbol_names=[symbol],\n symbol_values=np.zeros((3, 1)),\n operators=test_psum)\n\n def test_static_cases(self):\n \"\"\"Run inputs through in complex cases.\"\"\"\n\n bit = cirq.GridQubit(0, 0)\n symbol = sympy.Symbol('alpha')\n test_pstring = cirq.Z(bit)\n test_psum = cirq.PauliSum.from_pauli_strings([test_pstring])\n symb_circuit = cirq.Circuit(cirq.H(bit)**symbol)\n reg_circuit = cirq.Circuit(cirq.H(bit))\n\n # Passing a 2d operators input requires a 1d circuit input.\n expectation.Expectation()([reg_circuit, reg_circuit],\n operators=[[test_psum, test_psum],\n [test_psum, test_psum]])\n\n # Passing 2d operators along with other inputs.\n expectation.Expectation()([symb_circuit, symb_circuit],\n symbol_names=[symbol],\n operators=[[test_psum, test_psum],\n [test_psum, test_psum]])\n expectation.Expectation()([symb_circuit, symb_circuit],\n symbol_names=[symbol],\n symbol_values=[[0.5], [0.8]],\n operators=[[test_psum, test_psum],\n [test_psum, test_psum]])\n\n # Ensure tiling up of circuits works as expected.\n expectation.Expectation()(reg_circuit, operators=test_psum)\n expectation.Expectation()(reg_circuit, operators=[test_psum, test_psum])\n\n # Ensure tiling up of symbol_values works as expected.\n expectation.Expectation()(symb_circuit,\n symbol_names=[symbol],\n symbol_values=[[0.5], [0.8]],\n operators=test_psum)\n expectation.Expectation()(symb_circuit,\n symbol_names=[symbol],\n symbol_values=[[0.5]],\n operators=test_psum)\n\n def test_static_cases_noisy(self):\n \"\"\"Test that the noisy trajectory backend works in complex cases.\"\"\"\n bit = cirq.GridQubit(0, 0)\n symbol = sympy.Symbol('alpha')\n test_pstring = cirq.Z(bit)\n test_psum = cirq.PauliSum.from_pauli_strings([test_pstring])\n symb_circuit = cirq.Circuit(cirq.H(bit)**symbol)\n reg_circuit = cirq.Circuit(cirq.H(bit))\n\n 
# Passing a 2d operators input requires a 1d circuit input.\n expectation.Expectation(backend='noisy')(\n [reg_circuit, reg_circuit],\n operators=[[test_psum, test_psum], [test_psum, test_psum]],\n repetitions=1)\n\n # Passing 2d operators along with other inputs.\n expectation.Expectation(backend='noisy')(\n [symb_circuit, symb_circuit],\n symbol_names=[symbol],\n operators=[[test_psum, test_psum], [test_psum, test_psum]],\n repetitions=1)\n expectation.Expectation(backend='noisy')(\n [symb_circuit, symb_circuit],\n symbol_names=[symbol],\n symbol_values=[[0.5], [0.8]],\n operators=[[test_psum, test_psum], [test_psum, test_psum]],\n repetitions=1)\n\n # Ensure tiling up of circuits works as expected.\n expectation.Expectation(backend='noisy')(reg_circuit,\n operators=test_psum,\n repetitions=1)\n expectation.Expectation(backend='noisy')(\n reg_circuit, operators=[test_psum, test_psum], repetitions=1)\n\n # Ensure tiling up of symbol_values works as expected.\n expectation.Expectation(backend='noisy')(symb_circuit,\n symbol_names=[symbol],\n symbol_values=[[0.5], [0.8]],\n operators=test_psum,\n repetitions=1)\n expectation.Expectation(backend='noisy')(symb_circuit,\n symbol_names=[symbol],\n symbol_values=[[0.5]],\n operators=test_psum,\n repetitions=1)\n\n # Test multiple operators with integer valued repetition.\n expectation.Expectation(backend='noisy')(\n symb_circuit,\n symbol_names=[symbol],\n symbol_values=[[0.5]],\n operators=[-1.0 * cirq.Z(bit),\n cirq.X(bit) + 2.0 * cirq.Z(bit)],\n repetitions=1)\n expectation.Expectation(backend='noisy')(\n symb_circuit,\n symbol_names=[symbol],\n symbol_values=[[0.5]],\n operators=[-1.0 * cirq.Z(bit),\n cirq.X(bit) + 2.0 * cirq.Z(bit)],\n repetitions=[5, 1])\n\n # Test 2d repetitions.\n expectation.Expectation(backend='noisy')(\n [symb_circuit, symb_circuit],\n symbol_names=[symbol],\n symbol_values=[[0.5], [0.4]],\n operators=[[\n -1.0 * cirq.Z(bit),\n cirq.X(bit) + 2.0 * cirq.Z(bit),\n cirq.Z(bit)\n ], [cirq.Z(bit), cirq.Z(bit), cirq.Z(bit)]],\n repetitions=[[1, 2, 3], [4, 5, 6]])\n\n def test_expectation_simple_tf_train(self):\n \"\"\"Train a layer using standard tf (not keras).\n This is a subtle test that will work since we don't use keras compile.\n \"\"\"\n bit = cirq.GridQubit(0, 0)\n circuit = \\\n cirq.Circuit(cirq.rx(sympy.Symbol('theta'))(bit))\n op = cirq.Z(bit)\n layer = expectation.Expectation()\n optimizer = tf.optimizers.Adam(learning_rate=0.05)\n for _ in range(200):\n with tf.GradientTape() as tape:\n circuit_out = layer(circuit,\n symbol_names=['theta'],\n operators=op)\n mse = tf.square(tf.reduce_sum(tf.subtract(circuit_out, -1)))\n grads = tape.gradient(mse, layer.trainable_weights)\n optimizer.apply_gradients(zip(grads, layer.trainable_weights))\n self.assertAllClose(mse.numpy(), 0, atol=1e-3)\n\n\nclass ExpectationFunctionalTests(parameterized.TestCase, tf.test.TestCase):\n \"\"\"Test hybrid/integrated models that include an expectation layer.\"\"\"\n\n @parameterized.parameters([\n {\n 'backend': 'noisy'\n },\n {\n 'backend': None # old API usage\n }\n ])\n def test_simple_param_value_input(self, backend):\n \"\"\"Train a densely connected hybrid model.\n\n This model will put a qubit in the zero or one state from a random state\n given the input zero or one. 
This tests the input signature:\n Expectation([input_value_batch]).\n \"\"\"\n noisy = backend == 'noisy'\n bit = cirq.GridQubit(0, 0)\n symbols = sympy.symbols('x y z')\n circuit = _gen_single_bit_rotation_problem(bit, symbols, noisy)\n\n inputs = tf.keras.Input(shape=(1,), dtype=tf.dtypes.float64)\n datum = tf.keras.Input(shape=(), dtype=tf.dtypes.string)\n l1 = tf.keras.layers.Dense(10)(inputs)\n l2 = tf.keras.layers.Dense(3)(l1)\n reps = 1000 if noisy else None\n outputs = expectation.Expectation(backend=backend)(\n datum,\n symbol_names=symbols,\n operators=cirq.Z(bit),\n symbol_values=l2,\n repetitions=reps)\n model = tf.keras.Model(inputs=[datum, inputs], outputs=outputs)\n\n data_in = np.array([[1], [0]], dtype=np.float32)\n data_out = np.array([[1], [-1]], dtype=np.float32)\n\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.05),\n loss=tf.keras.losses.mean_squared_error)\n\n circuits = util.convert_to_tensor([circuit, circuit])\n\n history = model.fit(x=[circuits, data_in], y=data_out, epochs=100)\n tol = 5e-2 if noisy else 1e-3\n self.assertAllClose(history.history['loss'][-1], 0, atol=tol)\n\n @parameterized.parameters([\n {\n 'backend': 'noisy'\n },\n {\n 'backend': None # old API usage\n }\n ])\n def test_simple_op_input(self, backend):\n \"\"\"Test a simple operator input\n\n Learn qubit in the z+ state using two different measurement operators.\n This tests input signature Expectation([operator_batch])\n \"\"\"\n noisy = backend == 'noisy'\n bit = cirq.GridQubit(0, 0)\n symbols = sympy.symbols('x, y, z')\n\n circuits = util.convert_to_tensor(\n [_gen_single_bit_rotation_problem(bit, symbols, noisy)] * 2)\n\n data_out = tf.convert_to_tensor(np.array([[1], [1]]))\n ops = util.convert_to_tensor([[cirq.Z(bit)], [cirq.Z(bit)]])\n\n circuit_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string)\n op_input = tf.keras.Input(shape=(1,), dtype=tf.dtypes.string)\n\n reps = 1000 if noisy else None\n output = expectation.Expectation(backend=backend)(\n circuit_input,\n symbol_names=symbols,\n operators=op_input,\n initializer=tf.keras.initializers.RandomNormal(),\n repetitions=reps)\n\n model = tf.keras.Model(inputs=[circuit_input, op_input], outputs=output)\n\n model.compile(\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.05),\n loss=tf.keras.losses.mean_squared_error,\n )\n history = model.fit(x=[circuits, ops],\n y=data_out,\n batch_size=2,\n epochs=200)\n tol = 5e-2 if noisy else 1e-3\n self.assertAllClose(history.history['loss'][-1], 0, atol=tol)\n\n @parameterized.parameters([\n {\n 'backend': 'noisy'\n },\n {\n 'backend': None # old api usage.\n },\n {\n 'backend': cirq.Simulator()\n }\n ])\n def test_simple_op_and_param_input(self, backend):\n \"\"\"Test a simple operator and parameter input.\n\n Train a NN to put a qubit in the z+ or x+ states based on a classical\n binary input. 
This tests the input signature:\n Expectation([value_batch, operator_batch]).\n \"\"\"\n noisy = backend == 'noisy'\n bit = cirq.GridQubit(0, 0)\n symbols = sympy.symbols('x, y, z')\n ops = util.convert_to_tensor([[cirq.Z(bit)], [cirq.X(bit)]])\n circuits = util.convert_to_tensor(\n [_gen_single_bit_rotation_problem(bit, symbols, noisy)] * 2)\n data_in = np.array([[1], [0]])\n data_out = np.array([[1], [1]])\n\n data_inp = tf.keras.Input(shape=(1), dtype=tf.dtypes.float32)\n op_inp = tf.keras.Input(shape=(1,), dtype=tf.dtypes.string)\n circuit_inp = tf.keras.Input(shape=(), dtype=tf.dtypes.string)\n dense_1 = tf.keras.layers.Dense(10)(data_inp)\n dense_2 = tf.keras.layers.Dense(3)(dense_1)\n reps = 1000 if noisy else None\n circuit_output = expectation.Expectation(backend=backend)(\n circuit_inp,\n symbol_names=symbols,\n symbol_values=dense_2,\n operators=op_inp,\n repetitions=reps)\n\n functional_model = tf.keras.Model(\n inputs=[data_inp, op_inp, circuit_inp], outputs=[circuit_output])\n\n functional_model.compile(\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.05),\n loss=tf.keras.losses.mean_squared_error)\n history = functional_model.fit(x=[data_in, ops, circuits],\n y=data_out,\n batch_size=2,\n epochs=100)\n tol = 5e-2 if noisy else 1e-3\n self.assertAllClose(history.history['loss'][-1], 0, atol=tol)\n\n @parameterized.parameters([\n {\n 'backend': 'noisy'\n },\n {\n 'backend': None # old api usage.\n }\n ])\n def test_dnn_qnn_dnn(self, backend):\n \"\"\"Train a fully hybrid network using an Expectation layer.\n\n Train the network to output +-5 given an input of 1 or 0. This tests\n that everything works when Expectation layer is a middle layers.\n \"\"\"\n noisy = backend == 'noisy'\n bit = cirq.GridQubit(0, 0)\n symbols = sympy.symbols('x, y, z')\n circuits = util.convert_to_tensor(\n [_gen_single_bit_rotation_problem(bit, symbols, noisy)] * 2)\n data_in = np.array([[1], [0]], dtype=np.float32)\n data_out = np.array([[5], [-5]], dtype=np.float32)\n\n classical_input = tf.keras.Input(shape=(1,))\n circuit_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string)\n d1 = tf.keras.layers.Dense(10)(classical_input)\n d2 = tf.keras.layers.Dense(3)(d1)\n reps = 1000 if noisy else None\n quantum = expectation.Expectation(backend=backend)(\n circuit_input,\n symbol_names=symbols,\n symbol_values=d2,\n operators=cirq.Z(bit),\n repetitions=reps)\n d3 = tf.keras.layers.Dense(1)(quantum)\n\n model = tf.keras.Model(inputs=[circuit_input, classical_input],\n outputs=d3)\n\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.05),\n loss=tf.keras.losses.mean_squared_error)\n history = model.fit(x=[circuits, data_in],\n y=data_out,\n batch_size=2,\n epochs=300)\n tol = 5e-2 if noisy else 1e-3\n self.assertAllClose(history.history['loss'][-1], 0, atol=tol)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"tensorflow.convert_to_tensor",
"tensorflow.keras.Input",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.initializers.RandomNormal",
"tensorflow.test.main",
"tensorflow.keras.Model",
"tensorflow.subtract",
"tensorflow.keras.optimizers.Adam",
"tensorflow.optimizers.Adam",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"tensorflow.GradientTape"
]
] |
Rapixar/hivemind
|
[
"bb759e23eec9c3d63b2763428bcdb8687ee055d3"
] |
[
"hivemind/server/task_pool.py"
] |
[
"\"\"\"\nTask pool is responsible for receiving tasks and grouping them together for processing (but not processing itself)\n\"\"\"\nimport ctypes\nimport multiprocessing as mp\nimport multiprocessing.context\nimport os\nimport threading\nimport time\nimport uuid\nfrom collections import namedtuple\nfrom concurrent.futures import Future\nfrom queue import Empty\nfrom typing import List, Tuple, Dict, Any, Generator\n\nimport torch\n\nfrom hivemind.utils import MPFuture, get_logger\n\nlogger = get_logger(__name__)\nTask = namedtuple(\"Task\", (\"future\", \"args\"))\n\n\nclass TaskPoolBase(mp.context.ForkProcess):\n \"\"\" A pool that accepts tasks and forms batches for parallel processing, interacts with Runtime \"\"\"\n\n def __init__(self, process_func: callable, daemon=True):\n super().__init__(daemon=daemon)\n self.process_func = process_func\n self._priority = mp.Value(ctypes.c_double, 1.0) # higher priority = the more urgent to process this pool\n\n def run(self):\n raise NotImplementedError()\n\n def submit_task(self, *args: torch.Tensor) -> Future:\n raise NotImplementedError()\n\n def iterate_minibatches(self, *args, **kwargs) -> Generator[List[Task], None, None]:\n raise NotImplementedError()\n\n @property\n def priority(self):\n return self._priority.value\n\n @priority.setter\n def priority(self, value):\n self._priority.value = float(value)\n\n @property\n def empty(self):\n raise NotImplementedError()\n\n\nclass TaskPool(TaskPoolBase):\n \"\"\"\n Request aggregator that accepts processing requests, groups them into batches, waits for Runtime\n to process these batches and dispatches results back to request sources. Operates as a background process.\n\n :param process_func: function to be applied to every formed batch; called by Runtime\n Note that process_func should accept only positional args (Tensors) and return a flat tuple of Tensors\n :param max_batch_size: process at most this many inputs in a batch (task contains have one or several inputs)\n :param min_batch_size: process at least this many inputs in a batch, otherwise wait for more\n :param timeout: wait for a subsequent task for at most this many seconds\n :param pool_size: store at most this many unprocessed tasks in a queue\n :param prefetch_batches: prepare up to this many *batches* in background for faster off-loading to runtime\n :param uid: pool identifier used for shared array allocation\n :param start: if True, start automatically at the end of __init__\n \"\"\"\n\n def __init__(self, process_func: callable, max_batch_size: int, min_batch_size=1,\n timeout=None, pool_size=None, prefetch_batches=1, uid=None, daemon=True, start=False):\n super().__init__(process_func, daemon=daemon)\n self.min_batch_size, self.max_batch_size, self.timeout = min_batch_size, max_batch_size, timeout\n self.uid = uid or uuid.uuid4()\n self.prefetch_batches = prefetch_batches\n\n # interaction with ConnectionHandlers\n self.tasks = mp.Queue(maxsize=pool_size or 0)\n self.undispatched_task_timestamps = mp.SimpleQueue()\n\n # interaction with Runtime\n self.batch_receiver, self.batch_sender = mp.Pipe(duplex=False) # send/recv arrays that contain batch inputs\n self.outputs_receiver, self.outputs_sender = mp.Pipe(duplex=False) # send/recv arrays that contain outputs\n\n if start:\n self.start()\n\n def submit_task(self, *args: torch.Tensor) -> Future:\n \"\"\" Add task to this pool's queue, return Future for its output \"\"\"\n future1, future2 = MPFuture.make_pair()\n task = Task(future1, args)\n if self.get_task_size(task) > 
self.max_batch_size:\n exc = ValueError(f\"Task size greater than max_batch_size ({self.max_batch_size}), it can't be processed\")\n future2.set_exception(exc)\n else:\n self.tasks.put(task)\n self.undispatched_task_timestamps.put(time.time())\n return future2\n\n def iterate_minibatches(self, *args, **kwargs):\n \"\"\" Form minibatches by grouping one or more tasks together up to self.max_batch_size \"\"\"\n batch = []\n total_size = 0\n\n while True:\n if total_size >= self.min_batch_size and self.tasks.empty():\n yield batch\n batch = []\n total_size = 0\n try:\n logger.debug(f\"{self.uid} getting next task\")\n task = self.tasks.get(timeout=self.timeout)\n except Empty:\n logger.warning(f\"Timeout reached but batch doesn't contain >={self.min_batch_size} elements yet\")\n continue\n\n task_size = self.get_task_size(task)\n\n if total_size + task_size > self.max_batch_size:\n yield batch\n batch = []\n total_size = 0\n\n if task.future.set_running_or_notify_cancel():\n batch.append(task)\n total_size += task_size\n\n def run(self, *args, **kwargs):\n torch.set_num_threads(1)\n logger.info(f'{self.uid} starting, pid={os.getpid()}')\n pending_batches = {} # Dict[batch uuid, List[MPFuture]] for each batch currently in runtime\n output_thread = threading.Thread(target=self._pool_output_loop, args=[pending_batches],\n name=f'{self.uid}_output')\n try:\n output_thread.start()\n self._pool_input_loop(pending_batches, *args, **kwargs)\n except BaseException as e:\n # terminate output loop\n self.outputs_sender.send(e)\n output_thread.join()\n raise e\n\n def _pool_input_loop(self, pending_batches: Dict[Any, List[Task]], *args, **kwargs):\n \"\"\" Infinite loop: aggregate tasks into batches and send them to runtime \"\"\"\n prev_num_tasks = 0 # number of tasks currently in shared buffer\n batch_index = max(pending_batches.keys(), default=0)\n batch_iterator = self.iterate_minibatches(*args, **kwargs)\n\n while True:\n # SIDE-EFFECT - compute pool priority from timestamp of earliest undispatched task\n # assumes that tasks are processed in the same order as they are created\n for skip_i in range(prev_num_tasks):\n finished_task_timestamp = self.undispatched_task_timestamps.get() # earlier timestamp = higher priority\n if skip_i == prev_num_tasks - 1:\n self.priority = finished_task_timestamp\n\n logger.debug(f\"{self.uid} getting next batch\")\n batch_tasks = next(batch_iterator)\n # save batch futures, _output_loop will deliver on them later\n pending_batches[batch_index] = batch_tasks\n\n logger.debug(f\"{self.uid}, batch {batch_index}: aggregating inputs\")\n # find or create shared arrays for current batch size\n batch_inputs = [\n torch.cat([task.args[i] for task in batch_tasks]).share_memory_()\n for i in range(len(batch_tasks[0].args))\n ]\n\n logger.debug(f\"{self.uid}, batch {batch_index}: sending to runtime\")\n self.batch_sender.send((batch_index, batch_inputs))\n logger.debug(f\"{self.uid}, batch {batch_index}: sent to runtime\")\n prev_num_tasks = len(batch_tasks)\n batch_index += 1\n\n def _pool_output_loop(self, pending_batches: Dict[Any, List[Task]]):\n \"\"\" Infinite loop: receive results from runtime and dispatch them to task Futures \"\"\"\n\n while True:\n logger.debug(f\"{self.uid} waiting for results from runtime\")\n payload = self.outputs_receiver.recv()\n if isinstance(payload, BaseException):\n raise payload\n else:\n batch_index, batch_outputs = payload\n logger.debug(f\"{self.uid}, batch {batch_index}: got results\")\n\n # split batch into partitions for individual 
tasks\n batch_tasks = pending_batches.pop(batch_index)\n task_sizes = [self.get_task_size(task) for task in batch_tasks]\n outputs_per_task = zip(*(torch.split_with_sizes(array, task_sizes, dim=0) for array in batch_outputs))\n logger.debug(f\"{self.uid}, batch {batch_index}: sending outputs to handlers\")\n\n # dispatch results to futures\n for task, task_outputs in zip(batch_tasks, outputs_per_task):\n task.future.set_result(tuple(task_outputs))\n\n @property\n def empty(self):\n return not self.batch_receiver.poll()\n\n def load_batch_to_runtime(self, timeout=None, device=None) -> Tuple[Any, List[torch.Tensor]]:\n \"\"\" receive next batch of numpy arrays \"\"\"\n if not self.batch_receiver.poll(timeout):\n raise TimeoutError()\n\n batch_index, batch_inputs = self.batch_receiver.recv()\n batch_inputs = [tensor.to(device, non_blocking=True) for tensor in batch_inputs]\n return batch_index, batch_inputs\n\n def send_outputs_from_runtime(self, batch_index: int, batch_outputs: List[torch.Tensor]):\n \"\"\" send results for a processed batch, previously loaded through load_batch_to_runtime \"\"\"\n batch_outputs = [tensor.to(device='cpu').share_memory_() for tensor in batch_outputs]\n self.outputs_sender.send((batch_index, batch_outputs))\n\n def get_task_size(self, task: Task) -> int:\n \"\"\" compute task processing complexity (used for batching); defaults to batch size \"\"\"\n return len(task.args[0]) if task.args else 1\n"
] |
[
[
"torch.split_with_sizes",
"torch.set_num_threads",
"torch.cat"
]
] |
fairscape/hctsa-analysis-replication-code
|
[
"16148637cad2d360065ecb8815f4962e6848213b"
] |
[
"merge-normalize-dataframe.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nfrom sklearn.svm import l1_min_c\nimport time\nimport json\n\nhr = pd.read_csv('/data/HR_daily_samples.csv')\nhr = hr.add_prefix('HR_')\n\nprint('Read in HR')\n\nsp = pd.read_csv('/data/SPO2_daily_samples.csv')\nsp = sp.add_prefix('SP_')\n\nprint('Read in SP')\n\n\nsp['day'] = np.ceil(sp['SP_time'] / 60 / 60 / 24)\nhr['day'] = np.ceil(hr['HR_time'] / 60 / 60 / 24)\nsp = sp.rename(columns={'SP_id': 'id'})\nhr = hr.rename(columns={'HR_id': 'id'})\n\ndf = pd.merge(hr, sp, on=['id','day'])\nmat_hr = pd.read_csv('./randomDailySample_matlab_HR.csv')\nmat_hr = mat_hr.add_prefix('HR_')\nmat_hr = mat_hr.rename(columns={'HR_id': 'id', 'HR_time': 'time'})\nmat_sp = pd.read_csv('./randomDailySample_matlab_SP.csv')\nmat_sp = mat_sp.add_prefix('SP_')\nmat_sp = mat_sp.rename(columns={'SP_id': 'id', 'SP_time': 'time'})\ndf = pd.merge(df, mat_hr, on=['id','time'])\ndf = pd.merge(df, mat_sp, on=['id','time'])\n\n#Optional: Include if interested in performance on just older babies\n# grouped = df.groupby(\"id\")\n# too_short = list(grouped['day'].max().index[grouped['day'].max() < 7])\n# df = df[~df.id.isin(too_short)]\n\n\ndf = df.replace([np.inf, -np.inf], np.nan)\ndf = df.select_dtypes(exclude=['object'])\ndf = df.loc[:,df.isnull().mean() < .035]\n\nprint('Building Cleaned Dataframe')\n\nX = df.dropna()\nids = X['id']\ntime = X['time']\nX = X.drop(['id','time'],axis = 1)\nX = X.loc[:,X.std() != 0]\n\ndef normalize(x):\n #Normalizes column onto scale of 0-1\n return 1 / ( 1 + np.exp( - ( x - x.median() ) / ( 1.35 * (x.quantile(.75) - x.quantile(.25)) ) ) )\n\nX = X.transform(normalize)\nX = X.loc[:, X.isnull().sum() == 0]\n\n#Drop some specific columns unfit for modelling\nbad_index = []\nbad_column = []\nfor i in range(len(list(X.columns))):\n column = list(X.columns)[i]\n if 'MD.pNN.pnn' in column:\n bad_index.append(i)\n bad_column.append(column)\n if 'PH.' in column and 'res.acl' in column:\n bad_index.append(i)\n bad_column.append(column)\n if 'EN.PermEn.3' in column:\n bad_index.append(i)\n bad_column.append(column)\n if 'SY.LocalGlobal' in column and 'skew' in column:\n bad_index.append(i)\n bad_column.append(column)\n if 'SY.LocalGlobal' in column and 'ac1' in column:\n bad_index.append(i)\n bad_column.append(column)\n if 'SY.LocalGlobal' in column and 'kurtosis' in column:\n bad_index.append(i)\n bad_column.append(column)\n if 'Time' in column or 'day' in column:\n bad_index.append(i)\n bad_column.append(column)\nX = X.drop(norm.columns[bad_index],axis=1)\nX['time'] = time\nX['id'] = ids\nX.to_csv('/outputs/normalized_data.csv',index = False)\n"
] |
[
[
"numpy.ceil",
"pandas.merge",
"pandas.read_csv"
]
] |
gbetegon88/dataiku-contrib
|
[
"4683a9e08a3706a163810bf104bf6464025e235e"
] |
[
"deeplearning-image-gpu/custom-recipes/deeplearning-image-extract/recipe.py"
] |
[
"import dataiku\nimport pandas as pd\nfrom dataiku.customrecipe import *\nfrom keras.models import load_model, Model\nimport numpy as np\nimport json\nimport os\nimport glob\nimport dl_image_toolbox_utils as utils\nimport constants\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\n###################################################################################################################\n## LOADING ALL REQUIRED INFO AND \n## SETTING VARIABLES\n###################################################################################################################\n\nrecipe_config = get_recipe_config()\nextract_layer_index = int(recipe_config[\"extract_layer_index\"])\nshould_use_gpu = recipe_config.get('should_use_gpu', False)\n\n# gpu\nutils.load_gpu_options(should_use_gpu, recipe_config['list_gpu'], recipe_config['gpu_allocation'])\n\n# Plugin parameters\nimage_folder_input_name = get_input_names_for_role('image_folder')[0]\nimage_folder = dataiku.Folder(image_folder_input_name)\nutils.check_managed_folder_filesystem(image_folder)\nimage_folder_path = image_folder.get_path()\n\nmodel_folder_input_name = get_input_names_for_role('model_folder')[0]\nmodel_folder = dataiku.Folder(model_folder_input_name)\nutils.check_managed_folder_filesystem(model_folder)\nmodel_folder_path = model_folder.get_path()\n\noutput_name = get_output_names_for_role('feature_dataset')[0]\noutput_dataset = dataiku.Dataset(output_name)\n\n\n\n# Model\nmodel_and_pp = utils.load_instantiate_keras_model_preprocessing(model_folder_path, goal=constants.SCORING)\nmodel = model_and_pp[\"model\"]\npreprocessing = model_and_pp[\"preprocessing\"]\n\nmodel = Model(inputs=model.input, outputs=model.layers[extract_layer_index].output)\nmodel_input_shape = utils.get_model_input_shape(model, model_folder_path)\n\n# Image paths\nimages_paths = os.listdir(image_folder_path)\n\n###################################################################################################################\n## EXTRACTING FEATURES\n###################################################################################################################\n\n# Helper for predicting\ndef get_predictions():\n batch_size = 100\n n = 0\n results = {\"prediction\": [], \"error\": []}\n num_images = len(images_paths)\n while True:\n if (n * batch_size) >= num_images:\n break\n\n next_batch_list = []\n error_indices = []\n for index_in_batch, i in enumerate(range(n*batch_size, min((n + 1)*batch_size, num_images))):\n img_path = images_paths[i]\n try:\n preprocssed_img = utils.preprocess_img(utils.get_file_path(image_folder_path, img_path), model_input_shape, preprocessing)\n next_batch_list.append(preprocssed_img)\n except IOError as e:\n print(\"Cannot read the image '{}', skipping it. 
Error: {}\".format(img_path, e))\n error_indices.append(index_in_batch)\n next_batch = np.array(next_batch_list)\n\n prediction_batch = model.predict(next_batch).tolist()\n error_batch = [0] * len(prediction_batch)\n\n for err_index in error_indices:\n prediction_batch.insert(err_index, None)\n error_batch.insert(err_index, 1)\n\n results[\"prediction\"].extend(prediction_batch)\n results[\"error\"].extend(error_batch)\n n+=1\n print(\"{} images treated, out of {}\".format(min(n * batch_size, num_images), num_images))\n return results\n\n# Make the predictions\nprint(\"------ \\n Info: Start predicting \\n ------\")\npredictions = get_predictions()\nprint(\"------ \\n Info: Finished predicting \\n ------\")\n\n###################################################################################################################\n## SAVING RESULTS\n###################################################################################################################\n \n# Prepare results\noutput = pd.DataFrame()\noutput[\"images\"] = images_paths\nprint(\"------->\" + str(output))\noutput[\"prediction\"] = predictions[\"prediction\"]\noutput[\"error\"] = predictions[\"error\"]\n\n# Write to output dataset \nprint(\"------ \\n Info: Writing to output dataset \\n ------\")\noutput_dataset.write_with_schema(pd.DataFrame(output))\nprint(\"------ \\n Info: END of recipe \\n ------\")"
] |
[
[
"numpy.array",
"pandas.DataFrame"
]
] |
makeesyai/makeesy-deep-learning
|
[
"172f8a4301d6b60927824a56648d60559ba3f14e"
] |
[
"self_attention/deconstructing_multihead_attn.py"
] |
[
"from scipy.special import softmax\nimport numpy as np\n\n\"\"\"\nRNN and LSTM\nhttps://colab.research.google.com/github/mrm8488/shared_colab_notebooks/blob/master/basic_self_attention_.ipynb\n\nAttentions\nhttps://www.youtube.com/watch?v=S27pHKBEp30\n\nPosition Embeddings\nhttps://www.youtube.com/watch?v=dichIcUZfOw\n\n@ symbol\nhttps://www.python.org/dev/peps/pep-0465/#semantics\n\n\"\"\"\nx = np.array([\n [1, 0, 1, 0], # Input 1\n [0, 2, 0, 2], # Input 2\n [1, 1, 1, 1], # Input 3\n [1, 2, 1, 2], # Input 4\n [2, 2, 2, 2], # Input 5\n ])\nseql, emb = x.shape\n\nw_query = np.array([\n [1, 0, 1, 1, 0, 1],\n [1, 0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0, 1],\n [0, 1, 1, 0, 1, 1]\n])\n\nw_key = np.array([\n [0, 0, 1, 0, 0, 1],\n [1, 1, 0, 1, 1, 0],\n [0, 1, 0, 0, 1, 0],\n [1, 1, 0, 1, 1, 0]\n])\nw_value = np.array([\n [0, 2, 0, 0, 2, 0],\n [0, 3, 0, 0, 3, 0],\n [1, 0, 3, 1, 0, 3],\n [1, 1, 0, 1, 1, 0]\n])\nkey = []\nquery = []\nvalue = []\n\n# Generate Query, Key, and Value\nfor i in range(len(x)):\n # The out dim: 1X4 @ 4X3 = 1X3 = array(3)\n query_i = x[i] @ w_query\n key_i = x[i] @ w_key\n value_i = x[i] @ w_value\n query.append(query_i)\n key.append(key_i)\n value.append(value_i)\n\n# print(query)\n# print(key)\n# print(value)\n# exit()\nheads = 2\nhead_dim = 3\n\n# Convert list into numpy array\nquery = np.stack(query).reshape((seql, heads, head_dim))\nkey = np.stack(key).reshape((seql, heads, head_dim))\nvalue = np.stack(value).reshape((seql, heads, head_dim))\nquery = np.transpose(query, (1, 0, 2))\nkey = np.transpose(key, (1, 0, 2))\nvalue = np.transpose(value, (1, 0, 2))\n\n# Transpose key again to get relevance score per head\nkey = np.transpose(key, (0, 2, 1))\n\n# Generate the relevance score\nrelevance = query @ key\n# Apply softmax to get probability scores of relevance\nrelevance_scores = softmax(relevance, axis=-1)\nprint(relevance_scores.round(decimals=2))\nexit()\nout = relevance_scores @ value\nprint(out)\n"
] |
[
[
"numpy.array",
"scipy.special.softmax",
"numpy.stack",
"numpy.transpose"
]
] |
amasotti/homer-skipgram-lemmata
|
[
"56c59a53b0c9f2c61354259e739dcfad6d443624"
] |
[
"skipgram_homer.py"
] |
[
"# -*- coding: utf-8 -*-\n__author__ = 'Antonio Masotti'\n__date__ = 'january 2021'\n\n\"\"\"\nAll the main functions needed to train the model.\nDefined here to make train_skipgram.py more readable\n\n\"\"\"\n\n# Imports\nimport json\nimport random\n\nimport matplotlib.pyplot as plt # for loss plotting\nimport numpy as np\nfrom tqdm import tqdm, trange\n#from torch.utils.tensorboard import SummaryWriter\n# intern imports\nfrom utils.dataset import make_batch\nfrom utils.utils import print_test\n\n#writer = SummaryWriter(comment=\"Testing\",log_dir=\"data/assets/\")\n\n# -------------------------------------------------------------------------\n# LOAD RAW DATA AND CREATE DATASET\n# -------------------------------------------------------------------------\n\n\ndef skip_gram_dataset(corpus, word2index, fp, window=5):\n \"\"\"\n Given a corpus, a window_size and a dictionary with mappings word : index, it returns\n a long list of lists that can be used to train the Skip Gram version of the\n Word2Vec model\n \"\"\"\n print(\"Creating Skipgram Dataset\")\n dataset = []\n # loop over each sentence\n for sentence in tqdm(corpus, desc=\"Sententence in Corpus\"):\n # take each word as target separately\n for center_word in range(len(sentence)):\n # loop in the window and be careful to not jump out of the boundaries :)\n for j in range(max(center_word - window, 0), min(center_word + window, len(sentence))):\n # jump the center word\n if j != center_word:\n # append the context words in tuples\n dataset.append(\n [word2index[sentence[center_word]], word2index[sentence[j]]])\n np.save(fp, dataset, allow_pickle=True)\n return dataset\n\n\ndef load_corpus(fp):\n corpus = np.load(fp, allow_pickle=True)\n print(\"Corpus loaded ...\")\n return corpus.tolist()\n\n\ndef load_dataset(fp):\n print(\"Loading Dataset ...\")\n skipDataset = np.load(fp, allow_pickle=True)\n return skipDataset.tolist()\n\n\ndef load_vocab(fp):\n with open(fp, \"r\", encoding=\"utf-8\") as vocab_json:\n vocab = json.load(vocab_json)\n return vocab\n\n\ndef lookup_tables(path):\n with open(path, \"r\", encoding=\"utf-8\") as fp:\n word2index = json.load(fp)\n index2word = {i: w for w, i in word2index.items()}\n return word2index, index2word\n\n\n# -------------------------------------------------------------------------\n# TRAINING PHASE\n# -------------------------------------------------------------------------\n# For testing while training\nTEST_WORDS = [\"λίσσομαι\", \"θεός\", \"θεά\", \"ἔρχομαι\",\n \"βαίνω\", \"θάλασσα\", \"θυμός\", \"ἔρος\", \"βούλομαι\"]\n\n\ndef switch_phase(dataset, params, vocab, train_bar, phase=\"train\"):\n dataset.set_split(phase)\n print(\n f'DATASET SUBSET LOADED : {dataset._target_split} with size : {len(dataset)}')\n print('Whole Dataset size: ', dataset.data_size)\n print('Size of the vocabulary: ', len(vocab), '\\n\\n')\n\n Loader = make_batch(dataset=dataset,\n device=params.device,\n batch_size=params.batch,\n shuffle=params.shuffle,\n drop_last=params.drop_last)\n if phase == \"train\":\n train_bar.reset(total=dataset._target_size / params.batch)\n return Loader\n\n\ndef train_model(model, dataset, vocab, optimizer, scheduler, word2index, index2word, params, paths, plot=True):\n # Set progress bars\n epoch_bar = tqdm(desc=\"Epochs Routine\",\n total=params.epochs, position=0, leave=True)\n train_bar = tqdm(desc=\"Training phase\", total=dataset.train_size /\n params.batch, position=1, leave=True)\n\n # Loss\n losses_train = []\n losses_val = []\n best_loss = [2.42]\n # batch_counter = 0 # as 
x_axis in tensorboard\n\n for epoch in trange(params.epochs):\n Loader = switch_phase(dataset=dataset, params=params,\n vocab=vocab, phase=\"train\", train_bar=train_bar)\n\n # Training batches\n for idx, (center, context) in enumerate(Loader):\n # Set training mode on\n model.train()\n\n loss = model(center, context)\n losses_train.append(loss.item())\n #writer.add_scalar('train loss', loss.item(), batch_counter)\n #batch_counter += 1\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n train_bar.set_postfix(epoch=epoch, loss=loss.item())\n train_bar.update()\n if idx % params.show_stats_after == 0:\n print_test(model=model, words=TEST_WORDS,\n w2i=word2index, i2w=index2word, epoch=epoch, save=True, n=7, metrics=\"cosine\")\n model.save(fp=paths.model, losses=losses_train,\n check_loss=best_loss)\n model.save_embeddings(paths.embeddings)\n\n # Validation and lr adjustment\n Loader = switch_phase(dataset=dataset, params=params,\n vocab=vocab, phase=\"val\", train_bar=train_bar)\n val_bar = tqdm(desc=\"Validation phase\", total=dataset.train_size /\n params.batch, position=1, leave=True)\n\n # set eval mode on\n model.eval()\n for idx, (center, context) in enumerate(Loader):\n loss = model(center, context)\n losses_val.append(loss.item())\n #writer.add_scalar('validation loss', loss.item(), batch_counter)\n #batch_counter += 1\n scheduler.step(losses_val[-1])\n\n if idx % 200 == 0:\n val_bar.set_postfix(loss=loss.item(), epoch=epoch)\n val_bar.update(n=200)\n\n # after both train and val\n epoch_bar.update()\n\n if plot:\n plot = plot_loss(losses=losses_train, path=paths.plots)\n # writer.close()\n model.save(fp=paths.model, losses=losses_train, check_loss=best_loss)\n\n# ---------------------------------------------------------------------------------\n# PLOTS AND STATS\n# ------------------------------------------------------------------------------------\n\n\ndef plot_some(data):\n if len(data) < 1000:\n return data\n else:\n random_idx = []\n i = 0\n while i < 700:\n r = random.randint(0, len(data)-1)\n if r not in random_idx:\n random_idx.append(r)\n i += 1\n return [data[j] for j in sorted(random_idx)]\n\n\ndef plot_loss(losses, path):\n plt.figure(figsize=(100, 100))\n plt.xlabel(\"batches\")\n plt.ylabel(\"batch_loss\")\n plt.title(\"loss vs #batch -- Training\")\n plt.plot(plot_some(losses))\n plt.savefig(path)\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"numpy.save",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
FighterLYL/autograd
|
[
"2c675bb44d706f9d30993e6389b18ccafe7213e4"
] |
[
"examples/lstm.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import print_function\nimport autograd.numpy as np\nimport autograd.numpy.random as npr\nfrom autograd import value_and_grad\nfrom autograd.util import quick_grad_check\nfrom scipy.optimize import minimize\nfrom six.moves import range\n\nclass WeightsParser(object):\n \"\"\"A helper class to index into a parameter vector.\"\"\"\n def __init__(self):\n self.idxs_and_shapes = {}\n self.num_weights = 0\n\n def add_shape(self, name, shape):\n start = self.num_weights\n self.num_weights += np.prod(shape)\n self.idxs_and_shapes[name] = (slice(start, self.num_weights), shape)\n\n def get(self, vect, name):\n idxs, shape = self.idxs_and_shapes[name]\n return np.reshape(vect[idxs], shape)\n\ndef sigmoid(x):\n return 0.5*(np.tanh(x) + 1.0) # Output ranges from 0 to 1.\n\ndef activations(weights, *args):\n cat_state = np.concatenate(args + (np.ones((args[0].shape[0],1)),), axis=1)\n return np.dot(cat_state, weights)\n\ndef logsumexp(X, axis=1):\n max_X = np.max(X)\n return max_X + np.log(np.sum(np.exp(X - max_X), axis=axis, keepdims=True))\n\ndef build_lstm(input_size, state_size, output_size):\n \"\"\"Builds functions to compute the output of an LSTM.\"\"\"\n parser = WeightsParser()\n parser.add_shape('init_cells', (1, state_size))\n parser.add_shape('init_hiddens', (1, state_size))\n parser.add_shape('change', (input_size + state_size + 1, state_size))\n parser.add_shape('forget', (input_size + 2 * state_size + 1, state_size))\n parser.add_shape('ingate', (input_size + 2 * state_size + 1, state_size))\n parser.add_shape('outgate', (input_size + 2 * state_size + 1, state_size))\n parser.add_shape('predict', (state_size + 1, output_size))\n\n def update_lstm(input, hiddens, cells, forget_weights, change_weights,\n ingate_weights, outgate_weights):\n \"\"\"One iteration of an LSTM layer.\"\"\"\n change = np.tanh(activations(change_weights, input, hiddens))\n forget = sigmoid(activations(forget_weights, input, cells, hiddens))\n ingate = sigmoid(activations(ingate_weights, input, cells, hiddens))\n cells = cells * forget + ingate * change\n outgate = sigmoid(activations(outgate_weights, input, cells, hiddens))\n hiddens = outgate * np.tanh(cells)\n return hiddens, cells\n\n def hiddens_to_output_probs(predict_weights, hiddens):\n output = activations(predict_weights, hiddens)\n return output - logsumexp(output) # Normalize log-probs.\n\n def outputs(weights, inputs):\n \"\"\"Outputs normalized log-probabilities of each character, plus an\n extra one at the end.\"\"\"\n forget_weights = parser.get(weights, 'forget')\n change_weights = parser.get(weights, 'change')\n ingate_weights = parser.get(weights, 'ingate')\n outgate_weights = parser.get(weights, 'outgate')\n predict_weights = parser.get(weights, 'predict')\n num_sequences = inputs.shape[1]\n hiddens = np.repeat(parser.get(weights, 'init_hiddens'), num_sequences, axis=0)\n cells = np.repeat(parser.get(weights, 'init_cells'), num_sequences, axis=0)\n\n output = [hiddens_to_output_probs(predict_weights, hiddens)]\n for input in inputs: # Iterate over time steps.\n hiddens, cells = update_lstm(input, hiddens, cells, forget_weights,\n change_weights, ingate_weights, outgate_weights)\n output.append(hiddens_to_output_probs(predict_weights, hiddens))\n return output\n\n def log_likelihood(weights, inputs, targets):\n logprobs = outputs(weights, inputs)\n loglik = 0.0\n num_time_steps, num_examples, _ = inputs.shape\n for t in range(num_time_steps):\n loglik += np.sum(logprobs[t] * targets[t])\n 
return loglik / (num_time_steps * num_examples)\n\n return outputs, log_likelihood, parser.num_weights\n\ndef string_to_one_hot(string, maxchar):\n \"\"\"Converts an ASCII string to a one-of-k encoding.\"\"\"\n ascii = np.array([ord(c) for c in string]).T\n return np.array(ascii[:,None] == np.arange(maxchar)[None, :], dtype=int)\n\ndef one_hot_to_string(one_hot_matrix):\n return \"\".join([chr(np.argmax(c)) for c in one_hot_matrix])\n\ndef build_dataset(filename, sequence_length, alphabet_size, max_lines=-1):\n \"\"\"Loads a text file, and turns each line into an encoded sequence.\"\"\"\n with open(filename) as f:\n content = f.readlines()\n content = content[:max_lines]\n content = [line for line in content if len(line) > 2] # Remove blank lines\n seqs = np.zeros((sequence_length, len(content), alphabet_size))\n for ix, line in enumerate(content):\n padded_line = (line + \" \" * sequence_length)[:sequence_length]\n seqs[:, ix, :] = string_to_one_hot(padded_line, alphabet_size)\n return seqs\n\nif __name__ == '__main__':\n npr.seed(1)\n input_size = output_size = 128 # The first 128 ASCII characters are the common ones.\n state_size = 40\n seq_length = 30\n param_scale = 0.01\n train_iters = 100\n\n train_inputs = build_dataset('lstm.py', seq_length, input_size, max_lines=60)\n\n pred_fun, loglike_fun, num_weights = build_lstm(input_size, state_size, output_size)\n\n def print_training_prediction(weights):\n print(\"Training text Predicted text\")\n logprobs = np.asarray(pred_fun(weights, train_inputs))\n for t in range(logprobs.shape[1]):\n training_text = one_hot_to_string(train_inputs[:,t,:])\n predicted_text = one_hot_to_string(logprobs[:,t,:])\n print(training_text.replace('\\n', ' ') + \"|\" + predicted_text.replace('\\n', ' '))\n\n # Wrap function to only have one argument, for scipy.minimize.\n def training_loss(weights):\n return -loglike_fun(weights, train_inputs, train_inputs)\n\n def callback(weights):\n print(\"Train loss:\", training_loss(weights))\n print_training_prediction(weights)\n\n # Build gradient of loss function using autograd.\n training_loss_and_grad = value_and_grad(training_loss)\n\n init_weights = npr.randn(num_weights) * param_scale\n # Check the gradients numerically, just to be safe\n quick_grad_check(training_loss, init_weights)\n\n print(\"Training LSTM...\")\n result = minimize(training_loss_and_grad, init_weights, jac=True, method='CG',\n options={'maxiter':train_iters}, callback=callback)\n trained_weights = result.x\n\n print(\"\\nGenerating text from LSTM model...\")\n num_letters = 30\n for t in range(20):\n text = \"\"\n for i in range(num_letters):\n seqs = string_to_one_hot(text, output_size)[:, np.newaxis, :]\n logprobs = pred_fun(trained_weights, seqs)[-1].ravel()\n text += chr(npr.choice(len(logprobs), p=np.exp(logprobs)))\n print(text)\n"
] |
[
[
"scipy.optimize.minimize"
]
] |
brb-reallife/Stocksera
|
[
"726f969ce74f8e8916ae0942c669be2c16b4d6b1"
] |
[
"scheduled_tasks/economy/get_upcoming_events_date.py"
] |
[
"import os\nimport sys\nimport json\nimport sqlite3\nimport tabula\nimport pandas as pd\nfrom datetime import datetime, timedelta\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '../..'))\n\nfrom scheduled_tasks.economy.ychart_connection import ychart_data\n\nconn = sqlite3.connect(r\"database/database.db\", check_same_thread=False)\ndb = conn.cursor()\n\ncurrent_date = datetime.utcnow()\n\n\ndef get_next_retail_sales_date():\n \"\"\"\n Get next retail sales release date\n \"\"\"\n df = tabula.read_pdf(r\"https://www.census.gov/retail/marts/www/martsdates.pdf\", pages=1)[0]\n df[\"Release Date\"] = pd.to_datetime(df[\"Release Date\"], errors='coerce')\n df = df[df[\"Release Date\"] >= current_date].iloc[0]\n df['Release Date'] = df['Release Date'].strftime('%Y-%m-%d')\n return df\n\n\ndef get_next_initial_jobless_date():\n \"\"\"\n Get next initial jobless claim date\n \"\"\"\n url = \"https://ycharts.com/indicators/us_initial_claims_for_unemployment_insurance\"\n release_date = ychart_data(url)[3].iloc[3][1].split(\",\")[0]\n release_date = str(datetime.strptime(release_date, \"%b %d %Y\")).split()[0]\n return release_date\n\n\ndef get_next_cpi_date():\n \"\"\"\n Get next CPI release date\n \"\"\"\n df = pd.read_html(r\"https://www.bls.gov/schedule/news_release/cpi.htm\")[0][:-1]\n df[\"Release Date\"] = pd.to_datetime(df[\"Release Date\"], errors='coerce')\n df = df[df[\"Release Date\"] >= current_date].iloc[0]\n df['Release Date'] = df['Release Date'].strftime('%Y-%m-%d')\n return df\n\n\ndef to_week_day(date):\n \"\"\"\n Get the next closest weekday\n Parameters\n ----------\n date : datetime\n Date to find the next closest weekday\n \"\"\"\n if date.weekday() in {5, 6}:\n date += timedelta(days=-date.weekday() + 7)\n return str(date.date())\n\n\ndef get_next_rrp_treasury_date(date):\n return to_week_day(date)\n\n\ndef get_holidays():\n \"\"\"\n Get holidays in US when stock market is closed\n \"\"\"\n holidays_df = pd.read_html(r\"https://www.sec.gov/edgar/filer-information/calendar\")[0]\n holidays_df[\"Date\"] = pd.to_datetime(holidays_df[\"Date\"])\n print(holidays_df)\n return holidays_df\n\n\ndef main():\n get_next_initial_jobless_date()\n db.execute(\"SELECT record_date from reverse_repo ORDER BY record_date DESC LIMIT 1\")\n record_date = db.fetchone()\n rrp_treasury_date = get_next_rrp_treasury_date(datetime.strptime(record_date[0], \"%Y-%m-%d\") + timedelta(days=1))\n retail_df = get_next_retail_sales_date()\n cpi_df = get_next_cpi_date()\n ijp_date = get_next_initial_jobless_date()\n\n with open(r\"database/economic_date.json\", \"w\") as r:\n information = {\n \"Retail Sales\": {\"Ref Month\": retail_df[\"Data Month\"], \"Release Date\": retail_df[\"Release Date\"]},\n \"Inflation\": {\"Ref Month\": cpi_df[\"Reference Month\"], \"Release Date\": cpi_df[\"Release Date\"]},\n \"Initial Jobless Claims\": {\"Release Date\": ijp_date},\n \"Daily Treasury\": {\"Release Date\": rrp_treasury_date},\n \"Reverse Repo\": {\"Release Date\": rrp_treasury_date},\n }\n json.dump(information, r, indent=4)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"pandas.to_datetime",
"pandas.read_html"
]
] |
jkurian49/hate-speech-detection
|
[
"22e12eb4fecdef857b1cc5d94dea76d66241608e"
] |
[
"hate_speech_detection.py"
] |
[
    "#Author: Tommy Pawelski\n#Created: July 13th 2018\n\nimport pandas as pd\nimport numpy as np\nimport nltk\nimport string\nimport csv\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import VotingClassifier\nfrom mlxtend.classifier import StackingCVClassifier\n\n### LOAD IN ORIGINAL DATASET\n#print(\"LOADING ORIGINAL DATASET\")\n##read in each of the feature csv files\n#class_labels = pd.read_csv('feature datasets/labels.csv',encoding='utf-8')\n#weighted_tfidf_score = pd.read_csv('feature datasets/tfidf_scores.csv',encoding='utf-8')\n#sentiment_scores = pd.read_csv('feature datasets/sentiment_scores.csv',encoding='utf-8')\n#dependency_features = pd.read_csv('feature datasets/dependency_features.csv',encoding='utf-8')\n#char_bigrams = pd.read_csv('feature datasets/char_bigram_features.csv',encoding='utf-8')\n#word_bigrams = pd.read_csv('feature datasets/word_bigram_features.csv',encoding='utf-8')\n#tfidf_sparse_matrix = pd.read_csv('feature datasets/tfidf_features.csv',encoding='utf-8')\n#\n##merge all feature data sets based on 'index' column sentiment_scores, dependency_features, char_bigrams, word_bigrams\n#df_list=[class_labels, weighted_tfidf_score,sentiment_scores, dependency_features, char_bigrams, word_bigrams, tfidf_sparse_matrix]\n#master = df_list[0]\n#for df in df_list[1:]:\n#    master = master.merge(df, on='index')\n#\n#master.columns.values\n##ignore first two columns (index and tweet)\n#\n#y=master.iloc[:,2] #class labels\n#X=master.iloc[:,3:] #all features\n#\n#\n##create train and test sets: 80% train, 20% test\n#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n##########################################################################################\n#NOW WE CAN START MODELING\nfrom sklearn.metrics import roc_curve, auc, roc_auc_score, f1_score\nfrom sklearn import model_selection\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn import svm\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom xgboost import XGBClassifier\nimport lightgbm as lgb\n\n#Create a base training set to benchmark our performance (train set with hatespeech dictionary weighted tif-df score as only feature)\n#x_base = pd.DataFrame(X_train.loc[:,'weighted_TFIDF_scores'])\n#x_base_test = pd.DataFrame(X_test.loc[:,'weighted_TFIDF_scores'])\n#\n## created scaled version of training and test data\n#from sklearn.preprocessing import StandardScaler\n#scaler = StandardScaler()\n#scaler.fit(X_train)\n#X_train_scale = scaler.transform(X_train)\n#X_test_scale = scaler.transform(X_test)\n\n#initialize models\n#lr = LogisticRegressionCV(multi_class='multinomial', solver='lbfgs')\n#gb = GradientBoostingClassifier(n_estimators=500, learning_rate=.025)\nxgb = XGBClassifier(learning_rate=.025, max_features=100)\n#mlp = MLPClassifier(solver='lbfgs',hidden_layer_sizes=(80,40,40,10), activation='relu', random_state=1,learning_rate='adaptive', alpha=1e-6)\n#rf= RandomForestClassifier(n_estimators=100, max_features=500)\n# 80,50,50,20\n\n#asses model performances using 5-fold cross validation and f1-score micro aveage as metric\n#print(\"baseline model f1-score = \", cross_val_score(lr,x_base, y_train,cv=5).mean()) #benchmark model: linear regression using just tfidf score (weighted with hate dict)\n#print(\"gb cross validation f1-score = \", cross_val_score(gb,x_base, y_train,cv=5,scoring=\"f1_micro\").mean()) #gradient boost with just tf-df score\n#print(\"rf cross validation f1-score = \", cross_val_score(rf,X_train,y_train,cv=5,scoring=\"f1_micro\").mean()) #random forest with full train set (all features)\n#print(\"xgb cross validation f1-score = \", cross_val_score(xgb,X_train,y_train,cv=5,scoring=\"f1_micro\").mean()) #xgboost with full train set (all features)\n#print(\"mlp cross validation f1-score = \", cross_val_score(mlp,X_train,y_train,cv=5,scoring=\"f1_micro\").mean())\n\n#meta classifier ensemble\n#stack = StackingCVClassifier(classifiers=[mlp, xgb, rf], cv=2,meta_classifier=lr, use_probas=True)\n#stack.fit(X_train.values, y_train.values)\n#pred2=stack.predict(X_test.values)\n#print ('fscore:{0:.3f}'.format(f1_score(y_test, pred2, average='micro')))\n\n#######################################################################################\n\n## LOAD IN NEW DATASET OF EDGE CASES\nprint(\"LOADING NEW DATASET\")\n#read in each of the feature csv files\nclass_labels = pd.read_csv('new feature datasets/labels-combined.csv')\nweighted_tfidf_score = pd.read_csv('new feature datasets/tfidf_scores-combined.csv')\nsentiment_scores = pd.read_csv('new feature datasets/sentiment_scores-combined.csv')\ndependency_features = pd.read_csv('new feature datasets/dependency_features-combined.csv')\nchar_bigrams = pd.read_csv('new feature datasets/char_bigram_features-combined.csv')\nword_bigrams = pd.read_csv('new feature datasets/word_bigram_features-combined.csv')\ntfidf_sparse_matrix = pd.read_csv('new feature datasets/tfidf_features-combined.csv')\n#merge all feature data sets based on 'index' column sentiment_scores, dependency_features, char_bigrams, word_bigrams\ndf_list2=[class_labels, weighted_tfidf_score, dependency_features, char_bigrams, word_bigrams, tfidf_sparse_matrix]\nmaster2 = df_list2[0]\nfor df in df_list2[1:]:\n    master2 = master2.merge(df, on='index')\n\nmaster2.columns.values\n#ignore first two columns (index and tweet)\n\ny=master2.iloc[:,2] #class labels\nX=master2.iloc[:,3:] #all features\n\n#create train and test sets: 80% train, 20% test\nX_edge_train, X_edge_test, y_edge_train, y_edge_test = train_test_split(X, y, test_size=0.2)\nX_edge_test.to_csv(r'X_edge_test.csv')\n\n#######################################################################################\n\n# TEST ON TEST SET\nprint(\"TESTING MODEL\")\n#initialize ensembles\nestimators=[]\n#estimators.append(('mlp', mlp))\n#estimators.append(('rf', rf))\nestimators.append(('xgb', xgb))\n\n#voting ensemble\nensemble = VotingClassifier(estimators, voting='soft',weights=[1])\nensemble.fit(X_edge_train, y_edge_train)\npred = ensemble.predict(X_edge_test)\nprint ('fscore:{0:.3f}'.format(f1_score(y_edge_test, pred, average='micro')))\n\nfrom sklearn.metrics import confusion_matrix\nconfusion_lr = confusion_matrix(y_edge_test, pred)\nprint(confusion_lr)\n\nfor row_index, (input, prediction, label) in enumerate(zip (X_edge_test, pred, y_edge_test)):\n  if prediction != label and label == 2:\n    print('Row', row_index, 'has been classified as ', prediction, 'and should be ', label)\n    tweet = class_labels._get_value(row_index,'tweet')\n    print('Tweet: ', tweet)\n\n\n####################################################################################################################\n#REPORT AND PLOT MICRO-AVERAGE ROC AUC FOR EACH MODEL\n#from sklearn.preprocessing import label_binarize\n#import matplotlib.pyplot as plt\n#from itertools import cycle\n#from sklearn.multiclass import OneVsRestClassifier\n#from scipy import interp\n## Binarize the output\n#y = label_binarize(y, classes=[0, 1, 2])\n#n_classes = y.shape[1]\n#\n#X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n## Compute micro-average ROC curve and ROC area\n##classifier = OneVsRestClassifier(lr)\n##classifier2= OneVsRestClassifier(gb)\n#classifier3 = OneVsRestClassifier(xgb)\n##y_score = classifier.fit(x_base, y_train).decision_function(x_base_test)\n##y_score2= classifier2.fit(X_train, y_train).decision_function(X_test)\n#y_score3= classifier3.fit(X_train, y_train).predict_proba(X_test)\n## Compute ROC curve and ROC area for each class\n##fpr = dict()\n##tpr = dict()\n##roc_auc = dict()\n##fpr2 = dict()\n##tpr2 = dict()\n##roc_auc2 = dict()\n#fpr3 = dict()\n#tpr3 = dict()\n#roc_auc3 = dict()\n#for i in range(n_classes):\n##    fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])\n##    roc_auc[i] = auc(fpr[i], tpr[i])\n##    fpr2[i], tpr2[i], _ = roc_curve(y_test[:, i], y_score2[:, i])\n##    roc_auc2[i] = auc(fpr2[i], tpr2[i])\n#    fpr3[i], tpr3[i], _ = roc_curve(y_test[:, i], y_score3[:, i])\n#    roc_auc3[i] = auc(fpr3[i], tpr3[i])\n#\n#\n##fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\n##roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n##fpr2[\"micro\"], tpr2[\"micro\"], _ = roc_curve(y_test.ravel(), y_score2.ravel())\n##roc_auc2[\"micro\"] = auc(fpr2[\"micro\"], tpr2[\"micro\"])\n#fpr3[\"micro\"], tpr3[\"micro\"], _ = roc_curve(y_test.ravel(), y_score3.ravel())\n#roc_auc3[\"micro\"] = auc(fpr3[\"micro\"], tpr3[\"micro\"])\n#\n## Compute macro-average ROC curve and ROC area\n#\n## First aggregate all false positive rates\n##all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))\n##all_fpr2 = np.unique(np.concatenate([fpr2[i] for i in range(n_classes)]))\n#all_fpr3 = np.unique(np.concatenate([fpr3[i] for i in range(n_classes)]))\n## Then interpolate all ROC curves at this points\n##mean_tpr = np.zeros_like(all_fpr)\n##mean_tpr2 = np.zeros_like(all_fpr2)\n#mean_tpr3 = np.zeros_like(all_fpr3)\n#for i in range(n_classes):\n##    mean_tpr += interp(all_fpr, fpr[i], tpr[i])\n##    mean_tpr2 += interp(all_fpr2, fpr2[i], tpr2[i])\n#    mean_tpr3 += interp(all_fpr3, fpr3[i], tpr3[i])\n## Finally average it and compute AUC\n##mean_tpr /= n_classes\n##mean_tpr2 /= n_classes\n#mean_tpr3 /= n_classes\n#\n##fpr[\"micro\"] = all_fpr\n##tpr[\"micro\"] = mean_tpr\n##roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n##\n##\n##fpr2[\"micro\"] = all_fpr2\n##tpr2[\"micro\"] = mean_tpr2\n##roc_auc2[\"micro\"] = auc(fpr2[\"micro\"], tpr2[\"micro\"])\n#\n#fpr3[\"micro\"] = all_fpr3\n#tpr3[\"micro\"] = mean_tpr3\n#roc_auc3[\"micro\"] = auc(fpr3[\"micro\"], tpr3[\"micro\"])\n#\n## Plot all ROC curves (cite: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html)\n#plt.figure()\n##plt.plot(fpr[\"micro\"], tpr[\"micro\"],\n##         label='Base model micro-average ROC curve (area = {0:0.2f})'\n##               ''.format(roc_auc[\"micro\"]),\n##         color='#EB7D3C', linestyle=':', linewidth=4)\n##plt.plot(fpr2[\"micro\"], tpr2[\"micro\"],\n##         label='LR micro-average ROC curve (area = {0:0.2f})'\n##               ''.format(roc_auc2[\"micro\"]),\n##         color='#4674C1', linestyle=':', linewidth=4)\n#plt.plot(fpr3[\"micro\"], tpr3[\"micro\"],\n#         label='XGBoost micro-average ROC curve (area = {0:0.2f})'\n#               ''.format(roc_auc3[\"micro\"]),\n#         color='#72AC48', linestyle=':', linewidth=4)\n#plt.plot([0, 1], [0, 1], 'k--', lw=2)\n#plt.xlim([0.0, 1.0])\n#plt.ylim([0.0, 1.05])\n#plt.xlabel('False Positive Rate')\n#plt.ylabel('True Positive Rate')\n#plt.title('Micro-Average ROC ')\n#plt.legend(loc='upper center', bbox_to_anchor=(0.5, -.15),fancybox=True)\n#plt.save(\"ROC_curve.png\")\n#plt.show()\n#\n############### ROC values for just hate speech labels (class = 0)\n#plt.figure()\n#lw = 2\n##plt.plot(fpr[0], tpr[0], color='#EB7D3C',\n##         lw=lw, label='Base model ROC curve (area = %0.2f)' % roc_auc[0])\n##plt.plot(fpr2[0], tpr2[0], color='#4674C1',\n##         lw=lw, label='LR ROC curve (area = %0.2f)' % roc_auc2[0])\n#plt.plot(fpr3[0], tpr3[0], color='#72AC48',\n#         lw=lw, label='XGBoost ROC curve (area = %0.2f)' % roc_auc3[0])\n#plt.plot([0, 1], [0, 1], color='black', lw=lw, linestyle='--')\n#plt.xlim([0.0, 1.0])\n#plt.ylim([0.0, 1.05])\n#plt.xlabel('False Positive Rate')\n#plt.ylabel('True Positive Rate')\n#plt.title('ROC for \"Hatespeech\" Label')\n#plt.legend(loc='upper center', bbox_to_anchor=(0.5, -.15),fancybox=True)\n#plt.save(\"ROC_hate_speech_only.png\")\n#plt.show()\n"
] |
[
[
"pandas.read_csv",
"sklearn.metrics.confusion_matrix",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.VotingClassifier",
"sklearn.metrics.f1_score"
]
] |
shenweihai1/rolis-eurosys2022
|
[
"59b3fd58144496a9b13415e30b41617b34924323"
] |
[
"third-party/paxos/scripts/figure.py"
] |
[
    "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nxxx = [1, 2, 4, 8, 16, 32]\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import FuncFormatter\n\nSTR_CONCURRENT_REQS_PER_SERVER = \"Concurrent reqs/server\"\nSTR_THROUGHPUT = \"Throughput\"\nSTR_NUMBER_OF_SERVERS = \"Number of servers\"\nSTR_THROUGHPUT_NEW_ORDER = \"Throughput (New-order/s)\"\nSTR_CPU_UTILIZATION = \"CPU Utilization\"\nSTR_LATENCY_MS = \"Latency(ms)\"\nSTR_ATTEMPT_NEW_ORDER = \"Attempts(new-order/s)\"\nSTR_NUMBER_OF_TRIES_PER_COMMIT = \"Number of tries per commit\"\nSTR_LATENCY_MS_IN_LOG_SCALE = \"Latency(ms) in log scale\"\nSTR_COMMIT_RATE = \"commit rate\"\n\n\nmpl.rcParams['font.sans-serif'] = ['SimHei']\nSTR_CONCURRENT_REQS_PER_SERVER = u\"并发请求数/服务器\"\nSTR_THROUGHPUT = u\"吞吐量\"\nSTR_NUMBER_OF_SERVERS = u\"服务器数量\"\nSTR_THROUGHPUT_NEW_ORDER = u\"吞吐量(New-order/s)\"\nSTR_CPU_UTILIZATION = u\"CPU使用率\"\nSTR_LATENCY_MS = u\"延迟(ms)\"\nSTR_ATTEMPT_NEW_ORDER = u\"尝试次数(New-order/s)\"\nSTR_NUMBER_OF_TRIES_PER_COMMIT = u\"每次成功提交需要尝试次数\"\nSTR_LATENCY_MS_IN_LOG_SCALE = u\"延迟(ms)\"\nSTR_COMMIT_RATE = u\"提交成功率\"\n\n\nROCOCO = \"Rococo\"\nROCOCO = \"DepTran\"\n\nSHOW = True\nSHOW = False\nX_LOG_SCALE = False\n\n\neb_dis = 0.3\nfig_scale = 5.0/8\n#mpl.rcParams['xtick.direction'] = 'out'\n#mpl.rcParams['ytick.direction'] = 'out'\nmpl.rcParams['font.size'] = 12.5\nmpl.rcParams['legend.fontsize'] = 12.5\nmpl.rcParams['lines.linewidth'] = 3\nmpl.rcParams['lines.markersize'] = 14 * fig_scale\nmpl.rcParams['axes.grid'] = True\n\n#txt_legends = [\"OCC\", ROCOCO, \"2PL-T\", \"2PL-WD\", \"2PL-WW\"]\ntxt_legends = [\"OCC\", ROCOCO, \"2PL\", \"RO6\"]\nline_styles = [\"v-\", \"cx--\", \"mo:\", \"kp-.\", \"yd-.\"]\ncolors = ['#F84E1A', 'black', '#1B77F9', '#535353', '#A2DCFD','black', 'blue', 'green', 'red', 'black',\"cyan\", 'magenta', 'yellow']\n\nbar_colors = [\n    '#A2DCFD', '#1B77F9', 'blue', # blue\n    '#FFA07A', '#DC143C', '#8B0000', # red\n    '#DCDCDC', '#696969', 'black',\n    '#98FB98', '#3CB371', '#006400', # green\n    '#535353', 'black', 'green',\n    'red', 'black', \"cyan\",\n    'magenta', 'yellow']\n\n#bar_hatches = [\"OOO\", \"///\", \"\\\\\\\\\\\\\", \"...\", \"xxx\", \"ooo\", \"\\\\|\\\\\", \"***\", \"/|/\", \"|||\",]\nbar_hatches = [\n\n    \"///\", \"///\", \"///\",\n    \"\", \"\", \"\",\n    \"\\\\\\\\\\\\\", \"\\\\\\\\\\\\\", \"\\\\\\\\\\\\\",\n    \"...\", \"xxx\", \"ooo\",\n    \"\\\\|\\\\\", \"***\", \"/|/\", \"|||\",]\n\ndef flat_log(x, pos):\n    return '%d' % x\n\ndef sort_legend(ax, ys):\n    order = range(0, len(ys))\n    sum_list = [sum(points) for points in ys]\n    i = 0\n    while (i < len(ys) - 1):\n        j = i\n        while (j < len(ys) - 1):\n            if (sum_list[j] < sum_list[j + 1]):\n                tmp = sum_list[j]\n                sum_list[j] = sum_list[j + 1]\n                sum_list[j + 1] = tmp\n                tmp2 = order[j]\n                order[j] = order[j + 1]\n                order[j + 1] = tmp2\n            j += 1\n        i += 1\n    handles, labels = ax.get_legend_handles_labels()\n    new_handles = []\n    new_labels = []\n    for index in order:\n        new_handles.append(handles[index])\n        new_labels.append(labels[index])\n    return new_handles, new_labels\n\ndef micro_tp(xs, ys, figname):\n    fig, ax = plt.subplots(figsize=(12 * fig_scale, 25.0 / 4 * fig_scale))\n    ax.spines['right'].set_visible(False)\n    ax.spines['top'].set_visible(False)\n    ax.xaxis.set_ticks_position('bottom')\n    ax.yaxis.set_ticks_position('left')\n\n    width = 0.4 / fig_scale\n    ind = np.arange(len(xs))\n\n    #legends=[ [\"OCC 50%\", \"OCC 90%\", \"OCC 99%\"],\n    #    [\"DepTran 50%\", \"DepTran 90%\", \"DepTran 99%\"],\n    #    [\"2PL-P 50%\", \"2PL-P 90%\", \"2PL-P 99%\"]]\n\n    #ax.bar(ind, ys, width, color = '#1B77F9')\n    ax.bar(ind, ys, width, color = 'black')\n\n    plt.xlim(-width/2, len(xs))\n    ys_max = 0.0\n    for k in ys:\n        if ys_max < k:\n            ys_max = k\n    plt.ylim(0, ys_max * 1.2)\n    #plt.xticks(xs+3, xs)\n    xs=['1 RPC', ' 1 RPC\\n+1 DB', ' 3 RPC\\n+ 3 DB', 'OCC', '2PL', ROCOCO]\n\n    #plt.legend(ncol=3, loc=\"upper center\", mode=\"expand\", bbox_to_anchor=(0., 1.1, 1, 0.1))\n    #plt.xlabel(\"\")\n    plt.ylabel(STR_THROUGHPUT)\n    ax.set_xticks(ind +width/2)\n    plt.setp(ax.set_xticklabels(xs), fontsize=14)\n\n    plt.savefig(figname, bbox_inches=\"tight\")\n    if SHOW: plt.show()\n    pass\n\ndef tpcc_sc_tp(val, figname):\n    fig, ax = plt.subplots(figsize=(8 * fig_scale, 5 * fig_scale))\n    ax.spines['right'].set_visible(False)\n    ax.spines['top'].set_visible(False)\n    ax.xaxis.set_ticks_position('bottom')\n    ax.yaxis.set_ticks_position('left')\n\n    #val_occ = [ v1/(v2+0.0) for v1, v2 in zip(val[0], val[1])]\n    #val_2pl = [ v1/(v2+0.0) for v1, v2 in zip(val[2], val[1])]\n    #plt.plot(xxx, val_occ, line_styles[0], label=txt_legends[0], color=colors[0])\n    #plt.plot(xxx, val_2pl, line_styles[2], label=txt_legends[2], color=colors[0])\n\n    for i in range(0, len(val)):\n        plt.plot(xxx, val[i], line_styles[i], label=txt_legends[i], color=colors[i])\n\n    handles, labels = sort_legend(ax, val)\n    plt.legend(handles, labels, ncol=1, loc=\"best\")\n    plt.xlabel(STR_NUMBER_OF_SERVERS)\n    plt.ylabel(STR_THROUGHPUT_NEW_ORDER)\n#    plt.ylim(0,400)\n    #plt.xticks(np.arange(len(txt_sizes)), txt_sizes)\n    plt.savefig(figname, bbox_inches=\"tight\")\n    if SHOW: plt.show()\n\ndef tpcc_sc_cpu(val, figname):\n    fig, ax = plt.subplots(figsize=(8 * fig_scale, 5 * fig_scale))\n    ax.spines['right'].set_visible(False)\n    ax.spines['top'].set_visible(False)\n    ax.xaxis.set_ticks_position('bottom')\n    ax.yaxis.set_ticks_position('left')\n\n    #val_occ = [ v1/(v2+0.0) for v1, v2 in zip(val[0], val[1])]\n    #val_2pl = [ v1/(v2+0.0) for v1, v2 in zip(val[2], val[1])]\n    #plt.plot(xxx, val_occ, line_styles[0], label=txt_legends[0], color=colors[0])\n    #plt.plot(xxx, val_2pl, line_styles[2], label=txt_legends[2], color=colors[0])\n\n    for i in range(0, len(val)):\n        plt.plot(xxx, val[i], line_styles[i], label=txt_legends[i], color=colors[i])\n\n    handles, labels = sort_legend(ax, val)\n    plt.legend(handles, labels, ncol=1, loc=\"best\")\n    plt.xlabel(STR_NUMBER_OF_SERVERS)\n    plt.ylabel(STR_CPU_UTILIZATION)\n#    plt.ylim(0,400)\n    #plt.xticks(np.arange(len(txt_sizes)), txt_sizes)\n    plt.savefig(figname, bbox_inches=\"tight\")\n    if SHOW: plt.show()\n\ndef tpcc_ct_tp(val, figname):\n    fig, ax = plt.subplots(figsize=(8 * fig_scale, 5 * fig_scale))\n    ax.spines['right'].set_visible(False)\n    ax.spines['top'].set_visible(False)\n    ax.xaxis.set_ticks_position('bottom')\n    ax.yaxis.set_ticks_position('left')\n\n    if X_LOG_SCALE: ax.set_xscale('log')\n\n    #val_occ = [ v1/(v2+0.0) for v1, v2 in zip(val[0], val[1])]\n    #val_2pl = [ v1/(v2+0.0) for v1, v2 in zip(val[2], val[1])]\n    #plt.plot(xxx, val_occ, line_styles[0], label=txt_legends[0], color=colors[0])\n    #plt.plot(xxx, val_2pl, line_styles[2], label=txt_legends[2], color=colors[0])\n\n    for i in range(0, len(val)):\n        plt.plot(xxx, val[i], line_styles[i], label=txt_legends[i], color=colors[i])\n\n    handles, labels = sort_legend(ax, val)\n    plt.legend(handles, labels, ncol=1, loc=\"best\")\n    plt.xlabel(STR_CONCURRENT_REQS_PER_SERVER)\n    plt.ylabel(STR_THROUGHPUT_NEW_ORDER)\n#    plt.ylim(0,400)\n    #plt.xticks(np.arange(len(txt_sizes)), txt_sizes)\n    plt.savefig(figname, bbox_inches=\"tight\")\n    if SHOW: plt.show()\n\ndef tpcc_ct_cpu(val, figname):\n    fig, ax = plt.subplots(figsize=(8 * fig_scale, 5 * fig_scale))\n    ax.spines['right'].set_visible(False)\n    ax.spines['top'].set_visible(False)\n    ax.xaxis.set_ticks_position('bottom')\n    ax.yaxis.set_ticks_position('left')\n\n    #val_occ = [ v1/(v2+0.0) for v1, v2 in zip(val[0], val[1])]\n    #val_2pl = [ v1/(v2+0.0) for v1, v2 in zip(val[2], val[1])]\n    #plt.plot(xxx, val_occ, line_styles[0], label=txt_legends[0], color=colors[0])\n    #plt.plot(xxx, val_2pl, line_styles[2], label=txt_legends[2], color=colors[0])\n\n    for i in range(0, len(val)):\n        plt.plot(xxx, val[i], line_styles[i], label=txt_legends[i], color=colors[i])\n\n    handles, labels = sort_legend(ax, val)\n    plt.legend(handles, labels, ncol=1, loc=\"best\")\n    plt.xlabel(STR_CONCURRENT_REQS_PER_SERVER)\n    plt.ylabel(STR_CPU_UTILIZATION)\n#    plt.ylim(0,400)\n    #plt.xticks(np.arange(len(txt_sizes)), txt_sizes)\n    plt.savefig(figname, bbox_inches=\"tight\")\n    if SHOW: plt.show()\n\ndef tpcc_ct_nt_eb(val_50, val_90, val_99, figname):\n    fig, ax = plt.subplots(figsize=(8 * fig_scale, 5 * fig_scale))\n    ax.spines['right'].set_visible(False)\n    ax.spines['top'].set_visible(False)\n    ax.xaxis.set_ticks_position('none')\n    ax.yaxis.set_ticks_position('left')\n    # ax.set_yscale('log')\n\n    xs = np.arange(1, 21, 1)\n    # width = 2\n\n    for i in range(0, len(val_50)):\n        v50 = [v for x, v in zip(xxx, val_50[i]) if x <= 20]\n        v90 = [v for x, v in zip(xxx, val_90[i]) if x <= 20]\n        v99 = [v for x, v in zip(xxx, val_99[i]) if x <= 20]\n\n        yerr1 = [(v1-v2) for v1, v2 in zip(v90, v50)]\n        yerr2 = [(v1-v2) for v1, v2 in zip(v99, v90)]\n\n        #plt.plot(v90, label=txt_legends[i])\n        plt.errorbar(xs + i*eb_dis, v90, yerr=[yerr1, yerr2], label=txt_legends[i], elinewidth=3)\n\n    plt.xlim(1, 21)\n    #plt.ylim(0,10)\n    #plt.xticks(xs+3, xs)\n\n    #plt.legend(ncol=3, loc=\"upper center\", mode=\"expand\", bbox_to_anchor=(0., 1.1, 1, 0.1))\n    handles, labels = sort_legend(ax, val_90)\n    plt.legend(handles, labels, ncol=1, loc=\"best\")\n    plt.xlabel(STR_CONCURRENT_REQS_PER_SERVER)\n    plt.ylabel(STR_NUMBER_OF_TRIES_PER_COMMIT)\n    plt.savefig(figname, bbox_inches=\"tight\")\n    if SHOW: plt.show()\n    pass\n\ndef tpcc_ct_lt_eb(val_50, val_90, val_99, figname):\n    fig, ax = plt.subplots(figsize=(8 * fig_scale, 5 * fig_scale))\n    ax.spines['right'].set_visible(False)\n    ax.spines['top'].set_visible(False)\n    ax.xaxis.set_ticks_position('none')\n    ax.yaxis.set_ticks_position('left')\n    ax.set_yscale('log')\n    y_formatter = FuncFormatter(flat_log)\n    ax.yaxis.set_major_formatter(y_formatter)\n    #ax.set_xscale('log')\n\n    maxx1 = 20\n    maxx2 = 100\n    xs = np.arange(1, maxx1+1, 1)\n    tmp = np.arange(maxx1+10, maxx2+1, 10)\n    xs = np.concatenate((xs, tmp))\n\n    # width = 2\n\n    for i in range(0, len(val_50)):\n        v50 = [v for x, v in zip(xxx, val_50[i]) if x <= maxx2]\n        v90 = [v for x, v in zip(xxx, val_90[i]) if x <= maxx2]\n        v99 = [v for x, v in zip(xxx, val_99[i]) if x <= maxx2]\n\n        yerr1 = [(v1-v2) for v1, v2 in zip(v90, v50)]\n        yerr2 = [(v1-v2) for v1, v2 in zip(v99, v90)]\n\n        #print(len(xs))\n        #print(len(v90))\n        #plt.plot(v90, label=txt_legends[i])\n        plt.errorbar(xs + i*eb_dis, v90, yerr=[yerr1, yerr2], label=txt_legends[i], elinewidth=3, color=colors[i])\n\n    plt.xlim(1, maxx2+1)\n    #plt.ylim(0,10)\n    #plt.xticks(xs+3, xs)\n\n    #plt.legend(ncol=3, loc=\"upper center\", mode=\"expand\", bbox_to_anchor=(0., 1.1, 1, 0.1))\n    handles, labels = sort_legend(ax, val_90)\n    plt.legend(handles, labels, ncol=1, loc=\"best\")\n    plt.xlabel(STR_CONCURRENT_REQS_PER_SERVER)\n    plt.ylabel(STR_LATENCY_MS)\n    plt.savefig(figname, bbox_inches=\"tight\")\n    if SHOW: plt.show()\n    pass\n\ndef tpcc_ct_lt_bar(val_min, val_50, val_90, val_99, figname):\n    fig, ax = plt.subplots(figsize=(8 * fig_scale, 5 * fig_scale))\n    ax.spines['right'].set_visible(False)\n    ax.spines['top'].set_visible(False)\n    ax.xaxis.set_ticks_position('none')\n    ax.yaxis.set_ticks_position('left')\n    ax.set_yscale('log')\n\n    # 10, 20, 30, ... , to 100.\n    xs = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100];\n    xs = np.arange(10, 101, 10)\n    width = 2\n\n    legends=[ [\"OCC 50%\", \"OCC 90%\", \"OCC 99%\"],\n        [\"DepTran 50%\", \"DepTran 90%\", \"DepTran 99%\"],\n        [\"2PL-P 50%\", \"2PL-P 90%\", \"2PL-P 99%\"]]\n\n    for i in range(0, len(val_50)):\n        v50 = [v for x, v in zip(xxx, val_50[i]) if x % 10 == 0]\n        v90 = [v for x, v in zip(xxx, val_90[i]) if x % 10 == 0]\n        v99 = [v for x, v in zip(xxx, val_99[i]) if x % 10 == 0]\n        vmin = [v for x, v in zip(xxx, val_min[i]) if x % 10 == 0]\n        bottom = np.zeros(len(xs))\n\n        yerr1 = [(v1-v2) for v1, v2 in zip(v50, vmin)]\n        yerr2 = [0] * len(v50)\n\n        plt.bar(xs+i*width, v50, bottom=bottom, color=bar_colors[i*3+0], hatch=bar_hatches[i*3+0], width=width, log=True, label=legends[i][0], yerr=[yerr1, yerr2])\n        bottom+=v50\n        plt.bar(xs+i*width, v90, bottom=bottom, color=bar_colors[i*3+1], hatch=bar_hatches[i*3+1], width=width, log=True, label=legends[i][1])\n        bottom+=v90\n        plt.bar(xs+i*width, v99, bottom=bottom, color=bar_colors[i*3+2], hatch=bar_hatches[i*3+2], width=width, log=True, label=legends[i][2])\n        bottom+=v99\n\n    plt.xlim(9,110)\n    plt.ylim(0,10)\n    plt.xticks(xs+3, xs)\n\n    plt.legend(ncol=3, loc=\"upper center\", mode=\"expand\", bbox_to_anchor=(0., 1.1, 1, 0.1))\n    plt.xlabel(STR_CONCURRENT_REQS_PER_SERVER)\n    plt.ylabel(STR_LATENCY_MS_IN_LOG_SCALE)\n    plt.savefig(figname, bbox_inches=\"tight\")\n    if SHOW: plt.show()\n    pass\n\ndef tpcc_ct_nt(val, figname):\n\n#    txt_sizes = [\"1K\", \"4K\", \"16K\", \"64K\", \"256K\", \"1M\", \"4M\", \"16M\"]\n    fig, ax = plt.subplots(figsize=(8 * fig_scale, 5 * fig_scale))\n    ax.spines['right'].set_visible(False)\n    ax.spines['top'].set_visible(False)\n    ax.xaxis.set_ticks_position('bottom')\n    ax.yaxis.set_ticks_position('left')\n\n    for i in range(0, len(val)):\n        plt.plot(xxx, val[i], line_styles[i], label=txt_legends[i], color=colors[i])\n\n    handles, labels = sort_legend(ax, val)\n    plt.legend(handles, labels, ncol=1, loc=\"upper left\")\n    plt.xlabel(STR_CONCURRENT_REQS_PER_SERVER)\n    plt.ylabel(STR_NUMBER_OF_TRIES_PER_COMMIT)\n#    plt.ylim(0,400)\n    #plt.xticks(np.arange(len(txt_sizes)), txt_sizes)\n    plt.savefig(figname, bbox_inches=\"tight\")\n    if SHOW: plt.show()\n\ndef tpcc_ct_lt(val, figname):\n\n#    txt_sizes = [\"1K\", \"4K\", \"16K\", \"64K\", \"256K\", \"1M\", \"4M\", \"16M\"]\n    fig, ax = plt.subplots(figsize=(8 * fig_scale, 5 * fig_scale))\n    ax.spines['right'].set_visible(False)\n    ax.spines['top'].set_visible(False)\n    ax.xaxis.set_ticks_position('bottom')\n    ax.yaxis.set_ticks_position('left')\n\n    for i in range(0, len(val)):\n        plt.plot(xxx, val[i], line_styles[i], label=txt_legends[i], color=colors[i])\n\n    handles, labels = sort_legend(ax, val)\n    plt.legend(handles, labels, ncol=1, loc=\"upper left\")\n    plt.xlabel(STR_CONCURRENT_REQS_PER_SERVER)\n    plt.ylabel(STR_LATENCY_MS)\n#    plt.ylim(0,400)\n    #plt.xticks(np.arange(len(txt_sizes)), txt_sizes)\n    plt.savefig(figname, bbox_inches=\"tight\")\n    if SHOW: plt.show()\n\ndef tpcc_ct_at(val, figname):\n\n    fig, ax = plt.subplots(figsize=(8 * fig_scale, 5 * fig_scale))\n    ax.spines['right'].set_visible(False)\n    ax.spines['top'].set_visible(False)\n    ax.xaxis.set_ticks_position('bottom')\n    ax.yaxis.set_ticks_position('left')\n\n    for i in range(0, len(val)):\n        plt.plot(xxx, val[i], line_styles[i], label=txt_legends[i], color=colors[i])\n\n    handles, labels = sort_legend(ax, val)\n    plt.legend(handles, labels, ncol=1, loc=\"upper left\")\n    plt.xlabel(STR_CONCURRENT_REQS_PER_SERVER)\n    plt.ylabel(STR_ATTEMPT_NEW_ORDER)\n#    plt.ylim(0,400)\n    #plt.xticks(np.arange(len(txt_sizes)), txt_sizes)\n    plt.savefig(figname, bbox_inches=\"tight\")\n    if SHOW: plt.show()\n\ndef tpcc_ct_cr(val, figname):\n\n    fig, ax = plt.subplots(figsize=(8 * fig_scale, 5 * fig_scale))\n    ax.spines['right'].set_visible(False)\n    ax.spines['top'].set_visible(False)\n    ax.xaxis.set_ticks_position('bottom')\n    ax.yaxis.set_ticks_position('left')\n\n    if X_LOG_SCALE: ax.set_xscale('log')\n\n    for i in range(0, len(val)):\n        plt.plot(xxx, val[i], line_styles[i], label=txt_legends[i], color=colors[i])\n\n    handles, labels = sort_legend(ax, val)\n    plt.legend(handles, labels, ncol=1, loc=\"best\")\n    plt.xlabel(STR_CONCURRENT_REQS_PER_SERVER)\n    plt.ylabel(STR_COMMIT_RATE)\n    plt.ylim(0,1.2)\n    #plt.xticks(np.arange(len(txt_sizes)), txt_sizes)\n    plt.savefig(figname, bbox_inches=\"tight\")\n    if SHOW: plt.show()\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylim",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"matplotlib.ticker.FuncFormatter",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel"
]
] |
utiasSTARS/manipulator_learning
|
[
"9a0e0c66c0a3c07124331f010bd04bb52eaf95bb"
] |
[
"manipulator_learning/learning/imitation/collect_utils.py"
] |
[
"import h5py\nimport numpy as np\nimport os\nimport shutil\n\n\nclass HDF5Dataset:\n def __init__(self, data_dir, example_obs):\n if type(example_obs) == dict:\n self.dict_obs = True\n self.names = list(example_obs.keys())\n self.img_names = []\n for n in self.names:\n if len(example_obs[n].shape) > 1: # treat as image\n self.img_names.append(n)\n self.names.remove(n)\n else:\n self.dict_obs = False\n\n # generate config\n self.data_dir = data_dir\n self.data_file = data_dir + '/obs.hdf5'\n if os.path.exists(self.data_file):\n with h5py.File(self.data_file, 'r') as f:\n self.num_trajs = f['config'].attrs['num_trajs']\n self.timesteps_per_traj = list(f['config'].attrs['timesteps_per_traj'])\n self.total_timesteps = f['config'].attrs['total_timesteps']\n else:\n self.num_trajs = 0\n self.timesteps_per_traj = []\n self.total_timesteps = 0\n self.num_traj_timesteps = 0\n\n def save_dataset_info_to_file(self):\n print(\"Saving dataset info to file.\")\n with h5py.File(self.data_file, 'a') as f:\n config = f.require_group('config')\n config.attrs.create('num_trajs', data=self.num_trajs)\n config.attrs.create('total_timesteps', data=self.total_timesteps)\n config.attrs.create('timesteps_per_traj', data=np.array(self.timesteps_per_traj))\n\n def delete(self, index):\n if self.num_trajs > 0:\n if self.dict_obs:\n for i_n in self.img_names:\n dir = self.data_dir + '/' + i_n + '/run_' + str(index).zfill(4)\n shutil.rmtree(dir)\n with h5py.File(self.data_file, 'a') as f:\n for n in self.names:\n del f[n + '/run_' + str(self.num_trajs).zfill(4)]\n\n else:\n with h5py.File(self.data_file, 'a') as f:\n del f['run_' + str(self.num_trajs).zfill(4)]\n\n self.num_trajs -= 1\n self.total_timesteps -= self.timesteps_per_traj.pop(index)\n self.save_dataset_info_to_file()\n print(\"Demo %d deleted\" % index)\n else:\n print(\"No demos to delete.\")\n\n def add_traj_to_dataset(self, traj_data):\n pass\n\n def get_full_dataset(self):\n pass\n\n def get_data_run_timestep(self, run, ts):\n pass\n\n def get_data_index(self, index):\n pass"
] |
[
[
"numpy.array"
]
] |
donlee90/cs231n
|
[
"f3dbf849d2f40efbefe8d63bc8c95c295cd0eaf4"
] |
[
"assignment3/cs231n/classifiers/rnn.py"
] |
[
    "from builtins import range\nfrom builtins import object\nimport numpy as np\n\nfrom cs231n.layers import *\nfrom cs231n.rnn_layers import *\n\n\nclass CaptioningRNN(object):\n    \"\"\"\n    A CaptioningRNN produces captions from image features using a recurrent\n    neural network.\n\n    The RNN receives input vectors of size D, has a vocab size of V, works on\n    sequences of length T, has an RNN hidden dimension of H, uses word vectors\n    of dimension W, and operates on minibatches of size N.\n\n    Note that we don't use any regularization for the CaptioningRNN.\n    \"\"\"\n\n    def __init__(self, word_to_idx, input_dim=512, wordvec_dim=128,\n                 hidden_dim=128, cell_type='rnn', dtype=np.float32):\n        \"\"\"\n        Construct a new CaptioningRNN instance.\n\n        Inputs:\n        - word_to_idx: A dictionary giving the vocabulary. It contains V entries,\n          and maps each string to a unique integer in the range [0, V).\n        - input_dim: Dimension D of input image feature vectors.\n        - wordvec_dim: Dimension W of word vectors.\n        - hidden_dim: Dimension H for the hidden state of the RNN.\n        - cell_type: What type of RNN to use; either 'rnn' or 'lstm'.\n        - dtype: numpy datatype to use; use float32 for training and float64 for\n          numeric gradient checking.\n        \"\"\"\n        if cell_type not in {'rnn', 'lstm'}:\n            raise ValueError('Invalid cell_type \"%s\"' % cell_type)\n\n        self.cell_type = cell_type\n        self.dtype = dtype\n        self.word_to_idx = word_to_idx\n        self.idx_to_word = {i: w for w, i in word_to_idx.items()}\n        self.params = {}\n\n        vocab_size = len(word_to_idx)\n\n        self._null = word_to_idx['<NULL>']\n        self._start = word_to_idx.get('<START>', None)\n        self._end = word_to_idx.get('<END>', None)\n\n        # Initialize word vectors\n        self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)\n        self.params['W_embed'] /= 100\n\n        # Initialize CNN -> hidden state projection parameters\n        self.params['W_proj'] = np.random.randn(input_dim, hidden_dim)\n        self.params['W_proj'] /= np.sqrt(input_dim)\n        self.params['b_proj'] = np.zeros(hidden_dim)\n\n        # Initialize parameters for the RNN\n        dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]\n        self.params['Wx'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)\n        self.params['Wx'] /= np.sqrt(wordvec_dim)\n        self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)\n        self.params['Wh'] /= np.sqrt(hidden_dim)\n        self.params['b'] = np.zeros(dim_mul * hidden_dim)\n\n        # Initialize output to vocab weights\n        self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)\n        self.params['W_vocab'] /= np.sqrt(hidden_dim)\n        self.params['b_vocab'] = np.zeros(vocab_size)\n\n        # Cast parameters to correct dtype\n        for k, v in self.params.items():\n            self.params[k] = v.astype(self.dtype)\n\n\n    def loss(self, features, captions):\n        \"\"\"\n        Compute training-time loss for the RNN. We input image features and\n        ground-truth captions for those images, and use an RNN (or LSTM) to compute\n        loss and gradients on all parameters.\n\n        Inputs:\n        - features: Input image features, of shape (N, D)\n        - captions: Ground-truth captions; an integer array of shape (N, T) where\n          each element is in the range 0 <= y[i, t] < V\n\n        Returns a tuple of:\n        - loss: Scalar loss\n        - grads: Dictionary of gradients parallel to self.params\n        \"\"\"\n        # Cut captions into two pieces: captions_in has everything but the last word\n        # and will be input to the RNN; captions_out has everything but the first\n        # word and this is what we will expect the RNN to generate. These are offset\n        # by one relative to each other because the RNN should produce word (t+1)\n        # after receiving word t. The first element of captions_in will be the START\n        # token, and the first element of captions_out will be the first word.\n        captions_in = captions[:, :-1]\n        captions_out = captions[:, 1:]\n\n        # You'll need this\n        mask = (captions_out != self._null)\n\n        # Weight and bias for the affine transform from image features to initial\n        # hidden state\n        W_proj, b_proj = self.params['W_proj'], self.params['b_proj']\n\n        # Word embedding matrix\n        W_embed = self.params['W_embed']\n\n        # Input-to-hidden, hidden-to-hidden, and biases for the RNN\n        Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']\n\n        # Weight and bias for the hidden-to-vocab transformation.\n        W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']\n\n        loss, grads = 0.0, {}\n        ############################################################################\n        # TODO: Implement the forward and backward passes for the CaptioningRNN. #\n        # In the forward pass you will need to do the following: #\n        # (1) Use an affine transformation to compute the initial hidden state #\n        #     from the image features. This should produce an array of shape (N, H)#\n        # (2) Use a word embedding layer to transform the words in captions_in #\n        #     from indices to vectors, giving an array of shape (N, T, W). #\n        # (3) Use either a vanilla RNN or LSTM (depending on self.cell_type) to #\n        #     process the sequence of input word vectors and produce hidden state #\n        #     vectors for all timesteps, producing an array of shape (N, T, H). #\n        # (4) Use a (temporal) affine transformation to compute scores over the #\n        #     vocabulary at every timestep using the hidden states, giving an #\n        #     array of shape (N, T, V). #\n        # (5) Use (temporal) softmax to compute loss using captions_out, ignoring #\n        #     the points where the output word is <NULL> using the mask above. #\n        # #\n        # In the backward pass you will need to compute the gradient of the loss #\n        # with respect to all model parameters. Use the loss and grads variables #\n        # defined above to store loss and gradients; grads[k] should give the #\n        # gradients for self.params[k]. #\n        ############################################################################\n        h0, cache_aff = affine_forward(features, W_proj, b_proj)\n        x, cache_emb = word_embedding_forward(captions_in, W_embed)\n\n        if self.cell_type == 'lstm':\n            h, cache_lstm = lstm_forward(x, h0, Wx, Wh, b)\n        else:\n            h, cache_rnn = rnn_forward(x, h0, Wx, Wh, b)\n\n        preds, cache_temp = temporal_affine_forward(h, W_vocab, b_vocab)\n        loss, dpreds = temporal_softmax_loss(preds, captions_out, mask)\n\n        grads = {}\n        dh, dW_vocab, db_vocab = temporal_affine_backward(dpreds, cache_temp)\n\n        if self.cell_type == 'lstm':\n            dx, dh0, dWx, dWh, db = lstm_backward(dh, cache_lstm)\n        else:\n            dx, dh0, dWx, dWh, db = rnn_backward(dh, cache_rnn)\n\n        dW_embed = word_embedding_backward(dx, cache_emb)\n        _, dW_proj, db_proj = affine_backward(dh0, cache_aff)\n\n        grads['W_vocab'] = dW_vocab\n        grads['b_vocab'] = db_vocab\n        grads['Wx'] = dWx\n        grads['Wh'] = dWh\n        grads['b'] = db\n        grads['W_embed'] = dW_embed\n        grads['W_proj'] = dW_proj\n        grads['b_proj'] = db_proj\n        \n        ############################################################################\n        #                             END OF YOUR CODE                             #\n        ############################################################################\n\n        return loss, grads\n\n\n    def sample(self, features, max_length=30):\n        \"\"\"\n        Run a test-time forward pass for the model, sampling captions for input\n        feature vectors.\n\n        At each timestep, we embed the current word, pass it and the previous hidden\n        state to the RNN to get the next hidden state, use the hidden state to get\n        scores for all vocab words, and choose the word with the highest score as\n        the next word. The initial hidden state is computed by applying an affine\n        transform to the input image features, and the initial word is the <START>\n        token.\n\n        For LSTMs you will also have to keep track of the cell state; in that case\n        the initial cell state should be zero.\n\n        Inputs:\n        - features: Array of input image features of shape (N, D).\n        - max_length: Maximum length T of generated captions.\n\n        Returns:\n        - captions: Array of shape (N, max_length) giving sampled captions,\n          where each element is an integer in the range [0, V). The first element\n          of captions should be the first sampled word, not the <START> token.\n        \"\"\"\n        N = features.shape[0]\n        captions = self._null * np.ones((N, max_length), dtype=np.int32)\n\n        # Unpack parameters\n        W_proj, b_proj = self.params['W_proj'], self.params['b_proj']\n        W_embed = self.params['W_embed']\n        Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']\n        W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']\n\n        ###########################################################################\n        # TODO: Implement test-time sampling for the model. You will need to #\n        # initialize the hidden state of the RNN by applying the learned affine #\n        # transform to the input image features. The first word that you feed to #\n        # the RNN should be the <START> token; its value is stored in the #\n        # variable self._start. At each timestep you will need to do to: #\n        # (1) Embed the previous word using the learned word embeddings #\n        # (2) Make an RNN step using the previous hidden state and the embedded #\n        #     current word to get the next hidden state. #\n        # (3) Apply the learned affine transformation to the next hidden state to #\n        #     get scores for all words in the vocabulary #\n        # (4) Select the word with the highest score as the next word, writing it #\n        #     to the appropriate slot in the captions variable #\n        # #\n        # For simplicity, you do not need to stop generating after an <END> token #\n        # is sampled, but you can if you want to. #\n        # #\n        # HINT: You will not be able to use the rnn_forward or lstm_forward #\n        # functions; you'll need to call rnn_step_forward or lstm_step_forward in #\n        # a loop. #\n        ###########################################################################\n        h, _ = affine_forward(features, W_proj, b_proj)\n        x = np.array([W_embed[self._start]] * N)\n        c = np.zeros(h.shape)\n\n        for t in range(max_length):\n            if self.cell_type == 'lstm':\n                h, c, _ = lstm_step_forward(x, h, c, Wx, Wh, b)\n            else:\n                h, _ = rnn_step_forward(x, h, Wx, Wh, b)\n            preds, _ = affine_forward(h, W_vocab, b_vocab)\n            words = np.argmax(preds, axis=1)\n            captions[:, t] = words\n            x = W_embed[words]\n        ############################################################################\n        #                             END OF YOUR CODE                             #\n        ############################################################################\n        return captions\n"
] |
[
[
"numpy.sqrt",
"numpy.ones",
"numpy.argmax",
"numpy.random.randn",
"numpy.array",
"numpy.zeros"
]
] |
tcstewar/fcg
|
[
"7d0692234fb6ff31fa8823e64b72fce264241eea"
] |
[
"fcg/html.py"
] |
[
"import numpy as np\nimport fcg\n\ndef create_color(seed):\n rng = np.random.RandomState(seed=seed)\n return '#%02x%02x%02x' % (rng.uniform(64,255), rng.uniform(64,255), rng.uniform(64,255))\n\ndef as_span(s):\n return '<span style=\"background-color: %s;\">%s</span>' % (create_color(id(s)), s._id)\ndef as_div(s):\n return '<div style=\"background-color: %s;\">%s</div>' % (create_color(id(s)), s._id)\n\ndef html_structures(w):\n html = []\n\n style = \"\"\"\n.fcg tr, .fcg td, .fcg th {border: 0px solid black;}\ntable.fcg {display: inline-block;}\n\n \"\"\"\n\n html.append('<style>%s</style>' % style)\n\n for s in sorted(w.structures, key=lambda x: x._id):\n items = []\n items.append('<tr><th colspan=\"2\"><center>%s</center></th></tr>' % s._id)\n for k, v in s.extract():\n if isinstance(v, fcg.Structure):\n v = as_div(v)\n if isinstance(v, list):\n v = list(v)\n for i, vv in enumerate(v):\n if isinstance(vv, fcg.Structure):\n v[i] = as_span(vv)\n v = '[%s]' % ','.join(v)\n\n items.append('<tr><th>%s</th><td>%s</td></tr>' % (k, v))\n c = create_color(id(s))\n html.append('<table class=\"fcg\" style=\"background-color: %s;\">%s</table>' % (c, ''.join(items)))\n return ''.join(html)\n"
] |
[
[
"numpy.random.RandomState"
]
] |
CiceroAraujo/SB
|
[
"637cc4bc63c952f058c316b2b1fbfbb5cd6250c8"
] |
[
"packs/solvers/solvers_trilinos/solvers_tril.py"
] |
[
"from PyTrilinos import Epetra, AztecOO\nimport numpy as np\nimport scipy.sparse as sp\n# IFPACK.PrintSparsity(Matrix, \"matrix.ps\")\n\n\nclass solverTril:\n\n def __init__(self, p=1):\n self._comm = Epetra.PyComm()\n self._params = dict()\n # self.set_parameters()\n\n def solve_linear_problem(self, A, b, x=None, its=1000, tolerance=1e-10):\n '''\n resolve o problema Ax = b\n input:\n A: matriz quadrada esparsa do scipy\n b = termo fonte\n x: chute inicial\n its: numero maximo de iteracoes\n tolerance: tolerancia para o residuo\n output:\n res: informa se o residuo foi menor que a tolerancia\n x2: vetor resposta\n '''\n comm = self._comm\n n = len(b)\n std_map = Epetra.Map(n, 0, comm)\n x2 = Epetra.Vector(std_map)\n if x:\n x2[:] = x[:]\n b2 = Epetra.Vector(std_map)\n b2[:] = b[:]\n A2 = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)\n indices = sp.find(A)\n A2.InsertGlobalValues(indices[0], indices[1], indices[2])\n irr = A2.FillComplete()\n linearProblem = Epetra.LinearProblem(A2, x2, b2)\n solver = AztecOO.AztecOO(linearProblem)\n solver.SetAztecOption(AztecOO.AZ_output, AztecOO.AZ_warnings)\n # solver.SetParameters(self._params)\n solver.Iterate(its, tolerance)\n x2 = np.array(x2)\n res = solver.ScaledResidual() < tolerance\n return x2\n\n def set_parameters(self, params=None):\n if params:\n pass\n else:\n params = {'Solver': 'GMRES',\n 'Precond': 'Jacobi'}\n\n self._params.update(params)\n\n'''\n>>> solver.SetAztecOption(AztecOO.AZ_precond, AztecOO.AZ_dom_decomp)\n>>> solver.SetAztecOption(AztecOO.AZ_subdomain_solve, AztecOO.AZ_ilu)\n>>> solver.SetAztecOption(AztecOO.AZ_overalp, 1)\n>>> solver.SetAztecOption(AztecOO.AZ_graph_fill, 1)\n\n>>> solver.SetParameters({\"precond\": \"dom_decomp\",\n... \"subdomain_solve\": \"ilu\",\n... \"overlap\": 1,\n... \"graph_fill\": 1})\n\n'''\n"
] |
[
[
"numpy.array",
"scipy.sparse.find"
]
] |
NunoEdgarGFlowHub/zipline
|
[
"459366c17ac918576daf21ba9cf5706399eea988"
] |
[
"zipline/data/data_portal.py"
] |
[
    "#\n# Copyright 2016 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom operator import mul\n\nimport bcolz\nfrom logbook import Logger\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.tslib import normalize_date\nfrom six import iteritems\nfrom six.moves import reduce\n\nfrom zipline.assets import Asset, Future, Equity\nfrom zipline.data.us_equity_pricing import NoDataOnDate\nfrom zipline.data.us_equity_loader import (\n    USEquityDailyHistoryLoader,\n    USEquityMinuteHistoryLoader,\n)\n\nfrom zipline.utils.math_utils import (\n    nansum,\n    nanmean,\n    nanstd\n)\nfrom zipline.utils.memoize import remember_last, weak_lru_cache\nfrom zipline.errors import (\n    NoTradeDataAvailableTooEarly,\n    NoTradeDataAvailableTooLate,\n    HistoryWindowStartsBeforeData,\n)\n\nlog = Logger('DataPortal')\n\nBASE_FIELDS = frozenset([\n    \"open\", \"high\", \"low\", \"close\", \"volume\", \"price\", \"last_traded\"\n])\n\nOHLCV_FIELDS = frozenset([\n    \"open\", \"high\", \"low\", \"close\", \"volume\"\n])\n\nOHLCVP_FIELDS = frozenset([\n    \"open\", \"high\", \"low\", \"close\", \"volume\", \"price\"\n])\n\nHISTORY_FREQUENCIES = set([\"1m\", \"1d\"])\n\n\nclass DailyHistoryAggregator(object):\n    \"\"\"\n    Converts minute pricing data into a daily summary, to be used for the\n    last slot in a call to history with a frequency of `1d`.\n\n    This summary is the same as a daily bar rollup of minute data, with the\n    distinction that the summary is truncated to the `dt` requested.\n    i.e. the aggregation slides forward during a the course of simulation day.\n\n    Provides aggregation for `open`, `high`, `low`, `close`, and `volume`.\n    The aggregation rules for each price type is documented in their respective\n\n    \"\"\"\n\n    def __init__(self, market_opens, minute_reader):\n        self._market_opens = market_opens\n        self._minute_reader = minute_reader\n\n        # The caches are structured as (date, market_open, entries), where\n        # entries is a dict of asset -> (last_visited_dt, value)\n        #\n        # Whenever an aggregation method determines the current value,\n        # the entry for the respective asset should be overwritten with a new\n        # entry for the current dt.value (int) and aggregation value.\n        #\n        # When the requested dt's date is different from date the cache is\n        # flushed, so that the cache entries do not grow unbounded.\n        #\n        # Example cache:\n        # cache = (date(2016, 3, 17),\n        #          pd.Timestamp('2016-03-17 13:31', tz='UTC'),\n        #          {\n        #              1: (1458221460000000000, np.nan),\n        #              2: (1458221460000000000, 42.0),\n        #          })\n        self._caches = {\n            'open': None,\n            'high': None,\n            'low': None,\n            'close': None,\n            'volume': None\n        }\n\n        # The int value is used for deltas to avoid extra computation from\n        # creating new Timestamps.\n        self._one_min = pd.Timedelta('1 min').value\n\n    def _prelude(self, dt, field):\n        date = dt.date()\n        dt_value = dt.value\n        cache = self._caches[field]\n        if cache is None or cache[0] != date:\n            market_open = self._market_opens.loc[date]\n            cache = self._caches[field] = (dt.date(), market_open, {})\n\n        _, market_open, entries = cache\n        market_open = market_open.tz_localize('UTC')\n        if dt != market_open:\n            prev_dt = dt_value - self._one_min\n        else:\n            prev_dt = None\n        return market_open, prev_dt, dt_value, entries\n\n    def opens(self, assets, dt):\n        \"\"\"\n        The open field's aggregation returns the first value that occurs\n        for the day, if there has been no data on or before the `dt` the open\n        is `nan`.\n\n        Once the first non-nan open is seen, that value remains constant per\n        asset for the remainder of the day.\n\n        Returns\n        -------\n        np.array with dtype=float64, in order of assets parameter.\n        \"\"\"\n        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open')\n\n        opens = []\n        normalized_date = normalize_date(dt)\n\n        for asset in assets:\n            if not asset._is_alive(normalized_date, True):\n                opens.append(np.NaN)\n                continue\n\n            if prev_dt is None:\n                val = self._minute_reader.get_value(asset, dt, 'open')\n                entries[asset] = (dt_value, val)\n                opens.append(val)\n                continue\n            else:\n                try:\n                    last_visited_dt, first_open = entries[asset]\n                    if last_visited_dt == dt_value:\n                        opens.append(first_open)\n                        continue\n                    elif not pd.isnull(first_open):\n                        opens.append(first_open)\n                        entries[asset] = (dt_value, first_open)\n                        continue\n                    else:\n                        after_last = pd.Timestamp(\n                            last_visited_dt + self._one_min, tz='UTC')\n                        window = self._minute_reader.load_raw_arrays(\n                            ['open'],\n                            after_last,\n                            dt,\n                            [asset],\n                        )[0]\n                        nonnan = window[~pd.isnull(window)]\n                        if len(nonnan):\n                            val = nonnan[0]\n                        else:\n                            val = np.nan\n                        entries[asset] = (dt_value, val)\n                        opens.append(val)\n                        continue\n                except KeyError:\n                    window = self._minute_reader.load_raw_arrays(\n                        ['open'],\n                        market_open,\n                        dt,\n                        [asset],\n                    )[0]\n                    nonnan = window[~pd.isnull(window)]\n                    if len(nonnan):\n                        val = nonnan[0]\n                    else:\n                        val = np.nan\n                    entries[asset] = (dt_value, val)\n                    opens.append(val)\n                    continue\n        return np.array(opens)\n\n    def highs(self, assets, dt):\n        \"\"\"\n        The high field's aggregation returns the largest high seen between\n        the market open and the current dt.\n        If there has been no data on or before the `dt` the high is `nan`.\n\n        Returns\n        -------\n        np.array with dtype=float64, in order of assets parameter.\n        \"\"\"\n        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high')\n\n        highs = []\n        normalized_date = normalize_date(dt)\n\n        for asset in assets:\n            if not asset._is_alive(normalized_date, True):\n                highs.append(np.NaN)\n                continue\n\n            if prev_dt is None:\n                val = self._minute_reader.get_value(asset, dt, 'high')\n                entries[asset] = (dt_value, val)\n                highs.append(val)\n                continue\n            else:\n                try:\n                    last_visited_dt, last_max = entries[asset]\n                    if last_visited_dt == dt_value:\n                        highs.append(last_max)\n                        continue\n                    elif last_visited_dt == prev_dt:\n                        curr_val = self._minute_reader.get_value(\n                            asset, dt, 'high')\n                        if pd.isnull(curr_val):\n                            val = last_max\n                        elif pd.isnull(last_max):\n                            val = curr_val\n                        else:\n                            val = max(last_max, curr_val)\n                        entries[asset] = (dt_value, val)\n                        highs.append(val)\n                        continue\n                    else:\n                        after_last = pd.Timestamp(\n                            last_visited_dt + self._one_min, tz='UTC')\n                        window = self._minute_reader.load_raw_arrays(\n                            ['high'],\n                            after_last,\n                            dt,\n                            [asset],\n                        )[0].T\n                        val = max(last_max, np.nanmax(window))\n                        entries[asset] = (dt_value, val)\n                        highs.append(val)\n                        continue\n                except KeyError:\n                    window = self._minute_reader.load_raw_arrays(\n                        ['high'],\n                        market_open,\n                        dt,\n                        [asset],\n                    )[0].T\n                    val = np.nanmax(window)\n                    entries[asset] = (dt_value, val)\n                    highs.append(val)\n                    continue\n        return np.array(highs)\n\n    def lows(self, assets, dt):\n        \"\"\"\n        The low field's aggregation returns the smallest low seen between\n        the market open and the current dt.\n        If there has been no data on or before the `dt` the low is `nan`.\n\n        Returns\n        -------\n        np.array with dtype=float64, in order of assets parameter.\n        \"\"\"\n        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')\n\n        lows = []\n        normalized_date = normalize_date(dt)\n\n        for asset in assets:\n            if not asset._is_alive(normalized_date, True):\n                lows.append(np.NaN)\n                continue\n\n            if prev_dt is None:\n                val = self._minute_reader.get_value(asset, dt, 'low')\n                entries[asset] = (dt_value, val)\n                lows.append(val)\n                continue\n            else:\n                try:\n                    last_visited_dt, last_min = entries[asset]\n                    if last_visited_dt == dt_value:\n                        lows.append(last_min)\n                        continue\n                    elif last_visited_dt == prev_dt:\n                        curr_val = self._minute_reader.get_value(\n                            asset, dt, 'low')\n                        val = np.nanmin([last_min, curr_val])\n                        entries[asset] = (dt_value, val)\n                        lows.append(val)\n                        continue\n                    else:\n                        after_last = pd.Timestamp(\n                            last_visited_dt + self._one_min, tz='UTC')\n                        window = self._minute_reader.load_raw_arrays(\n                            ['low'],\n                            after_last,\n                            dt,\n                            [asset],\n                        )[0].T\n                        window_min = np.nanmin(window)\n                        if pd.isnull(window_min):\n                            val = last_min\n                        else:\n                            val = min(last_min, window_min)\n                        entries[asset] = (dt_value, val)\n                        lows.append(val)\n                        continue\n                except KeyError:\n                    window = self._minute_reader.load_raw_arrays(\n                        ['low'],\n                        market_open,\n                        dt,\n                        [asset],\n                    )[0].T\n                    val = np.nanmin(window)\n                    entries[asset] = (dt_value, val)\n                    lows.append(val)\n                    continue\n        return np.array(lows)\n\n    def closes(self, assets, dt):\n        \"\"\"\n        The close field's aggregation returns the latest close at the given\n        dt.\n        If the close for the given dt is `nan`, the most recent non-nan\n        `close` is used.\n        If there has been no data on or before the `dt` the close is `nan`.\n\n        Returns\n        -------\n        np.array with dtype=float64, in order of assets parameter.\n        \"\"\"\n        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close')\n\n        closes = []\n        normalized_dt = normalize_date(dt)\n\n        for asset in assets:\n            if not asset._is_alive(normalized_dt, True):\n                closes.append(np.NaN)\n                continue\n\n            if prev_dt is None:\n                val = self._minute_reader.get_value(asset, dt, 'close')\n                entries[asset] = (dt_value, val)\n                closes.append(val)\n                continue\n            else:\n                try:\n                    last_visited_dt, last_close = entries[asset]\n                    if last_visited_dt == dt_value:\n                        closes.append(last_close)\n                        continue\n                    elif last_visited_dt == prev_dt:\n                        val = self._minute_reader.get_value(\n                            asset, dt, 'close')\n                        if pd.isnull(val):\n                            val = last_close\n                        entries[asset] = (dt_value, val)\n                        closes.append(val)\n                        continue\n                    else:\n                        val = self._minute_reader.get_value(\n                            asset, dt, 'close')\n                        if pd.isnull(val):\n                            val = self.closes(\n                                [asset],\n                                pd.Timestamp(prev_dt, tz='UTC'))[0]\n                        entries[asset] = (dt_value, val)\n                        closes.append(val)\n                        continue\n                except KeyError:\n                    val = self._minute_reader.get_value(\n                        asset, dt, 'close')\n                    if pd.isnull(val):\n                        val = self.closes([asset],\n                                          pd.Timestamp(prev_dt, tz='UTC'))[0]\n                    entries[asset] = (dt_value, val)\n                    closes.append(val)\n                    continue\n        return np.array(closes)\n\n    def volumes(self, assets, dt):\n        \"\"\"\n        The volume field's aggregation returns the sum of all volumes\n        between the market open and the `dt`\n        If there has been no data on or before the `dt` the volume is 0.\n\n        Returns\n        -------\n        np.array with dtype=int64, in order of assets parameter.\n        \"\"\"\n        market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')\n\n        volumes = []\n        normalized_date = normalize_date(dt)\n\n        for asset in assets:\n            if not asset._is_alive(normalized_date, True):\n                volumes.append(0)\n                continue\n\n            if prev_dt is None:\n                val = self._minute_reader.get_value(asset, dt, 'volume')\n                entries[asset] = (dt_value, val)\n                volumes.append(val)\n                continue\n            else:\n                try:\n                    last_visited_dt, last_total = entries[asset]\n                    if last_visited_dt == dt_value:\n                        volumes.append(last_total)\n                        continue\n                    elif last_visited_dt == prev_dt:\n                        val = self._minute_reader.get_value(\n                            asset, dt, 'volume')\n                        val += last_total\n                        entries[asset] = (dt_value, val)\n                        volumes.append(val)\n                        continue\n                    else:\n                        after_last = pd.Timestamp(\n                            last_visited_dt + self._one_min, tz='UTC')\n                        window = self._minute_reader.load_raw_arrays(\n                            ['volume'],\n                            after_last,\n                            dt,\n                            [asset],\n                        )[0]\n                        val = np.nansum(window) + last_total\n                        entries[asset] = (dt_value, val)\n                        volumes.append(val)\n                        continue\n                except KeyError:\n                    window = self._minute_reader.load_raw_arrays(\n                        ['volume'],\n                        market_open,\n                        dt,\n                        [asset],\n                    )[0]\n                    val = np.nansum(window)\n                    entries[asset] = (dt_value, val)\n                    volumes.append(val)\n                    continue\n        return np.array(volumes)\n\n\nclass DataPortal(object):\n    \"\"\"Interface to all of the data that a zipline simulation needs.\n\n    This is used by the simulation runner to answer questions about the data,\n    like getting the prices of assets on a given day or to service history\n    calls.\n\n    Parameters\n    ----------\n    env : TradingEnvironment\n        The trading environment for the simulation. This includes the trading\n        calendar and benchmark data.\n    first_trading_day : pd.Timestamp\n        The first trading day for the simulation.\n    equity_daily_reader : BcolzDailyBarReader, optional\n        The daily bar reader for equities. This will be used to service\n        daily data backtests or daily history calls in a minute backetest.\n        If a daily bar reader is not provided but a minute bar reader is,\n        the minutes will be rolled up to serve the daily requests.\n    equity_minute_reader : BcolzMinuteBarReader, optional\n        The minute bar reader for equities. This will be used to service\n        minute data backtests or minute history calls. This can be used\n        to serve daily calls if no daily bar reader is provided.\n    future_daily_reader : BcolzDailyBarReader, optional\n        The daily bar ready for futures. This will be used to service\n        daily data backtests or daily history calls in a minute backetest.\n        If a daily bar reader is not provided but a minute bar reader is,\n        the minutes will be rolled up to serve the daily requests.\n    future_minute_reader : BcolzFutureMinuteBarReader, optional\n        The minute bar reader for futures. This will be used to service\n        minute data backtests or minute history calls. This can be used\n        to serve daily calls if no daily bar reader is provided.\n    adjustment_reader : SQLiteAdjustmentWriter, optional\n        The adjustment reader. This is used to apply splits, dividends, and\n        other adjustment data to the raw data from the readers.\n    \"\"\"\n    def __init__(self,\n                 asset_finder,\n                 trading_schedule,\n                 first_trading_day,\n                 equity_daily_reader=None,\n                 equity_minute_reader=None,\n                 future_daily_reader=None,\n                 future_minute_reader=None,\n                 adjustment_reader=None):\n\n        self.trading_schedule = trading_schedule\n        self.asset_finder = asset_finder\n\n        self.views = {}\n\n        self._carrays = {\n            'open': {},\n            'high': {},\n            'low': {},\n            'close': {},\n            'volume': {},\n            'sid': {},\n        }\n\n        self._adjustment_reader = adjustment_reader\n\n        # caches of sid -> adjustment list\n        self._splits_dict = {}\n        self._mergers_dict = {}\n        self._dividends_dict = {}\n\n        # Cache of sid -> the first trading day of an asset.\n        self._asset_start_dates = {}\n        self._asset_end_dates = {}\n\n        # Handle extra sources, like Fetcher.\n        self._augmented_sources_map = {}\n        self._extra_source_df = None\n\n        self._equity_daily_reader = equity_daily_reader\n        if self._equity_daily_reader is not None:\n            self._equity_history_loader = USEquityDailyHistoryLoader(\n                self.trading_schedule,\n                self._equity_daily_reader,\n                self._adjustment_reader\n            )\n        self._equity_minute_reader = equity_minute_reader\n        self._future_daily_reader = future_daily_reader\n        self._future_minute_reader = future_minute_reader\n\n        if self._equity_minute_reader is not None:\n            self._equity_daily_aggregator = DailyHistoryAggregator(\n                self.trading_schedule.schedule.market_open,\n                self._equity_minute_reader)\n            self._equity_minute_history_loader = USEquityMinuteHistoryLoader(\n                self.trading_schedule,\n                self._equity_minute_reader,\n                self._adjustment_reader\n            )\n            self.MINUTE_PRICE_ADJUSTMENT_FACTOR = \\\n                self._equity_minute_reader._ohlc_inverse\n\n        self._first_trading_day = first_trading_day\n\n        # Get the first trading minute\n        self._first_trading_minute, _ = (\n            self.trading_schedule.start_and_end(self._first_trading_day)\n            if self._first_trading_day is not None else (None, None)\n        )\n\n        # Store the locs of the first day and first minute\n        self._first_trading_day_loc = (\n            self.trading_schedule.all_execution_days.get_loc(\n                self.trading_schedule.session_date(self._first_trading_day)\n            )\n            if self._first_trading_day is not None else None\n        )\n        self._first_trading_minute_loc = (\n            self.trading_schedule.all_execution_minutes.get_loc(\n                self._first_trading_minute\n            )\n            if self._first_trading_minute is not None else None\n        )\n\n    def _reindex_extra_source(self, df, source_date_index):\n        return df.reindex(index=source_date_index, method='ffill')\n\n    def handle_extra_source(self, source_df, sim_params):\n        \"\"\"\n        Extra sources always have a sid column.\n\n        We expand the given data (by forward filling) to the full range of\n        the simulation dates, so that lookup is fast during simulation.\n        \"\"\"\n        if source_df is None:\n            return\n\n        # Normalize all the dates in the df\n        source_df.index = source_df.index.normalize()\n\n        # source_df's sid column can either consist of assets we know about\n        # (such as sid(24)) or of assets we don't know about (such as\n        # palladium).\n        #\n        # In both cases, we break up the dataframe into individual dfs\n        # that only contain a single asset's information. ie, if source_df\n        # has data for PALLADIUM and GOLD, we split source_df into two\n        # dataframes, one for each. (same applies if source_df has data for\n        # AAPL and IBM).\n        #\n        # We then take each child df and reindex it to the simulation's date\n        # range by forward-filling missing values. this makes reads simpler.\n        #\n        # Finally, we store the data. For each column, we store a mapping in\n        # self.augmented_sources_map from the column to a dictionary of\n        # asset -> df. In other words,\n        # self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df\n        # holding that data.\n        source_date_index = self.trading_schedule.execution_days_in_range(\n            start=sim_params.period_start,\n            end=sim_params.period_end\n        )\n\n        # Break the source_df up into one dataframe per sid. This lets\n        # us (more easily) calculate accurate start/end dates for each sid,\n        # de-dup data, and expand the data to fit the backtest start/end date.\n        grouped_by_sid = source_df.groupby([\"sid\"])\n        group_names = grouped_by_sid.groups.keys()\n        group_dict = {}\n        for group_name in group_names:\n            group_dict[group_name] = grouped_by_sid.get_group(group_name)\n\n        # This will be the dataframe which we query to get fetcher assets at\n        # any given time. Get's overwritten every time there's a new fetcher\n        # call\n        extra_source_df = pd.DataFrame()\n\n        for identifier, df in iteritems(group_dict):\n            # Before reindexing, save the earliest and latest dates\n            earliest_date = df.index[0]\n            latest_date = df.index[-1]\n\n            # Since we know this df only contains a single sid, we can safely\n            # de-dupe by the index (dt). If minute granularity, will take the\n            # last data point on any given day\n            df = df.groupby(level=0).last()\n\n            # Reindex the dataframe based on the backtest start/end date.\n            # This makes reads easier during the backtest.\n            df = self._reindex_extra_source(df, source_date_index)\n\n            if not isinstance(identifier, Asset):\n                # for fake assets we need to store a start/end date\n                self._asset_start_dates[identifier] = earliest_date\n                self._asset_end_dates[identifier] = latest_date\n\n            for col_name in df.columns.difference(['sid']):\n                if col_name not in self._augmented_sources_map:\n                    self._augmented_sources_map[col_name] = {}\n\n                self._augmented_sources_map[col_name][identifier] = df\n\n            # Append to extra_source_df the reindexed dataframe for the single\n            # sid\n            extra_source_df = extra_source_df.append(df)\n\n        self._extra_source_df = extra_source_df\n\n    def _open_minute_file(self, field, asset):\n        sid_str = str(int(asset))\n\n        try:\n            carray = self._carrays[field][sid_str]\n        except KeyError:\n            carray = self._carrays[field][sid_str] = \\\n                self._get_ctable(asset)[field]\n\n        return carray\n\n    def _get_ctable(self, asset):\n        sid = int(asset)\n\n        if isinstance(asset, Future):\n            if self._future_minute_reader.sid_path_func is not None:\n                path = self._future_minute_reader.sid_path_func(\n                    self._future_minute_reader.rootdir, sid\n                )\n            else:\n                path = \"{0}/{1}.bcolz\".format(\n                    self._future_minute_reader.rootdir, sid)\n        elif isinstance(asset, Equity):\n            if self._equity_minute_reader.sid_path_func is not None:\n                path = self._equity_minute_reader.sid_path_func(\n                    self._equity_minute_reader.rootdir, sid\n                )\n            else:\n                path = \"{0}/{1}.bcolz\".format(\n                    self._equity_minute_reader.rootdir, sid)\n\n        else:\n            # TODO: Figure out if assets should be allowed if neither, and\n            # why this code path is being hit.\n            if self._equity_minute_reader.sid_path_func is not None:\n                path = self._equity_minute_reader.sid_path_func(\n                    self._equity_minute_reader.rootdir, sid\n                )\n            else:\n                path = \"{0}/{1}.bcolz\".format(\n                    self._equity_minute_reader.rootdir, sid)\n\n        return bcolz.open(path, mode='r')\n\n    def get_last_traded_dt(self, asset, dt, data_frequency):\n        \"\"\"\n        Given an asset and dt, returns the last traded dt from the viewpoint\n        of the given dt.\n\n        If there is a trade on the dt, the answer is dt provided.\n        \"\"\"\n        if data_frequency == 'minute':\n            return self._equity_minute_reader.get_last_traded_dt(asset, dt)\n        elif data_frequency == 'daily':\n            return self._equity_daily_reader.get_last_traded_dt(asset, dt)\n\n    @staticmethod\n    def _is_extra_source(asset, field, map):\n        \"\"\"\n        Internal method that determines if this asset/field combination\n        represents a fetcher value or a regular OHLCVP lookup.\n        \"\"\"\n        # If we have an extra source with a column called \"price\", only look\n        # at it if it's on something like palladium and not AAPL (since our\n        # own price data always wins when dealing with assets).\n\n        return not (field in BASE_FIELDS and isinstance(asset, Asset))\n\n    def _get_fetcher_value(self, asset, field, dt):\n        day = normalize_date(dt)\n\n        try:\n            return \\\n                self._augmented_sources_map[field][asset].loc[day, field]\n        except KeyError:\n            return np.NaN\n\n    def get_spot_value(self, asset, field, dt, data_frequency):\n        \"\"\"\n        Public API method that returns a scalar value representing the value\n        of the desired asset's field at either the given dt.\n\n        Parameters\n        ----------\n        asset : Asset\n            The asset whose data is desired.\n        field : {'open', 'high', 'low', 'close', 'volume',\n                 'price', 'last_traded'}\n            The 
desired field of the asset.\n dt : pd.Timestamp\n The timestamp for the desired value.\n data_frequency : str\n The frequency of the data to query; i.e. whether the data is\n 'daily' or 'minute' bars\n\n Returns\n -------\n value : float, int, or pd.Timestamp\n The spot value of ``field`` for ``asset`` The return type is based\n on the ``field`` requested. If the field is one of 'open', 'high',\n 'low', 'close', or 'price', the value will be a float. If the\n ``field`` is 'volume' the value will be a int. If the ``field`` is\n 'last_traded' the value will be a Timestamp.\n \"\"\"\n if self._is_extra_source(asset, field, self._augmented_sources_map):\n return self._get_fetcher_value(asset, field, dt)\n\n if field not in BASE_FIELDS:\n raise KeyError(\"Invalid column: \" + str(field))\n\n if dt < asset.start_date or \\\n (data_frequency == \"daily\" and dt > asset.end_date) or \\\n (data_frequency == \"minute\" and\n normalize_date(dt) > asset.end_date):\n if field == \"volume\":\n return 0\n elif field != \"last_traded\":\n return np.NaN\n\n if data_frequency == \"daily\":\n day_to_use = dt\n day_to_use = normalize_date(day_to_use)\n return self._get_daily_data(asset, field, day_to_use)\n else:\n if isinstance(asset, Future):\n if field == \"price\":\n return self._get_minute_spot_value_future(\n asset, \"close\", dt)\n else:\n return self._get_minute_spot_value_future(\n asset, field, dt)\n else:\n if field == \"last_traded\":\n return self._equity_minute_reader.get_last_traded_dt(\n asset, dt\n )\n elif field == \"price\":\n return self._get_minute_spot_value(asset, \"close\", dt,\n True)\n else:\n return self._get_minute_spot_value(asset, field, dt)\n\n def get_adjustments(self, assets, field, dt, perspective_dt):\n \"\"\"\n Returns a list of adjustments between the dt and perspective_dt for the\n given field and list of assets\n\n Parameters\n ----------\n assets : list of type Asset, or Asset\n The asset, or assets whose adjustments are desired.\n field : {'open', 'high', 'low', 'close', 'volume', \\\n 'price', 'last_traded'}\n The desired field of the asset.\n dt : pd.Timestamp\n The timestamp for the desired value.\n perspective_dt : pd.Timestamp\n The timestamp from which the data is being viewed back from.\n data_frequency : str\n The frequency of the data to query; i.e. 
whether the data is\n 'daily' or 'minute' bars\n\n Returns\n -------\n adjustments : list[Adjustment]\n The adjustments to that field.\n \"\"\"\n if isinstance(assets, Asset):\n assets = [assets]\n\n adjustment_ratios_per_asset = []\n split_adj_factor = lambda x: x if field != 'volume' else 1.0 / x\n\n for asset in assets:\n adjustments_for_asset = []\n split_adjustments = self._get_adjustment_list(\n asset, self._splits_dict, \"SPLITS\"\n )\n for adj_dt, adj in split_adjustments:\n if dt <= adj_dt <= perspective_dt:\n adjustments_for_asset.append(split_adj_factor(adj))\n elif adj_dt > perspective_dt:\n break\n\n if field != 'volume':\n merger_adjustments = self._get_adjustment_list(\n asset, self._mergers_dict, \"MERGERS\"\n )\n for adj_dt, adj in merger_adjustments:\n if dt <= adj_dt <= perspective_dt:\n adjustments_for_asset.append(adj)\n elif adj_dt > perspective_dt:\n break\n\n dividend_adjustments = self._get_adjustment_list(\n asset, self._dividends_dict, \"DIVIDENDS\",\n )\n for adj_dt, adj in dividend_adjustments:\n if dt <= adj_dt <= perspective_dt:\n adjustments_for_asset.append(adj)\n elif adj_dt > perspective_dt:\n break\n\n ratio = reduce(mul, adjustments_for_asset, 1.0)\n adjustment_ratios_per_asset.append(ratio)\n\n return adjustment_ratios_per_asset\n\n def get_adjusted_value(self, asset, field, dt,\n perspective_dt,\n data_frequency,\n spot_value=None):\n \"\"\"\n Returns a scalar value representing the value\n of the desired asset's field at the given dt with adjustments applied.\n\n Parameters\n ----------\n asset : Asset\n The asset whose data is desired.\n field : {'open', 'high', 'low', 'close', 'volume', \\\n 'price', 'last_traded'}\n The desired field of the asset.\n dt : pd.Timestamp\n The timestamp for the desired value.\n perspective_dt : pd.Timestamp\n The timestamp from which the data is being viewed back from.\n data_frequency : str\n The frequency of the data to query; i.e. whether the data is\n 'daily' or 'minute' bars\n\n Returns\n -------\n value : float, int, or pd.Timestamp\n The value of the given ``field`` for ``asset`` at ``dt`` with any\n adjustments known by ``perspective_dt`` applied. The return type is\n based on the ``field`` requested. If the field is one of 'open',\n 'high', 'low', 'close', or 'price', the value will be a float. If\n the ``field`` is 'volume' the value will be a int. 
If the ``field``\n is 'last_traded' the value will be a Timestamp.\n \"\"\"\n if spot_value is None:\n # if this a fetcher field, we want to use perspective_dt (not dt)\n # because we want the new value as of midnight (fetcher only works\n # on a daily basis, all timestamps are on midnight)\n if self._is_extra_source(asset, field,\n self._augmented_sources_map):\n spot_value = self.get_spot_value(asset, field, perspective_dt,\n data_frequency)\n else:\n spot_value = self.get_spot_value(asset, field, dt,\n data_frequency)\n\n if isinstance(asset, Equity):\n ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]\n spot_value *= ratio\n\n return spot_value\n\n def _get_minute_spot_value_future(self, asset, column, dt):\n # Futures bcolz files have 1440 bars per day (24 hours), 7 days a week.\n # The file attributes contain the \"start_dt\" and \"last_dt\" fields,\n # which represent the time period for this bcolz file.\n\n # The start_dt is midnight of the first day that this future started\n # trading.\n\n # figure out the # of minutes between dt and this asset's start_dt\n start_date = self._get_asset_start_date(asset)\n minute_offset = int((dt - start_date).total_seconds() / 60)\n\n if minute_offset < 0:\n # asking for a date that is before the asset's start date, no dice\n return 0.0\n\n # then just index into the bcolz carray at that offset\n carray = self._open_minute_file(column, asset)\n result = carray[minute_offset]\n\n # if there's missing data, go backwards until we run out of file\n while result == 0 and minute_offset > 0:\n minute_offset -= 1\n result = carray[minute_offset]\n\n if column != 'volume':\n # FIXME switch to a futures reader\n return result * 0.001\n else:\n return result\n\n def _get_minute_spot_value(self, asset, column, dt, ffill=False):\n result = self._equity_minute_reader.get_value(\n asset.sid, dt, column\n )\n\n if column == \"volume\":\n if result == 0:\n return 0\n elif not ffill or not np.isnan(result):\n # if we're not forward filling, or we found a result, return it\n return result\n\n # we are looking for price, and didn't find one. 
have to go hunting.\n last_traded_dt = \\\n self._equity_minute_reader.get_last_traded_dt(asset, dt)\n\n if last_traded_dt is pd.NaT:\n # no last traded dt, bail\n return np.nan\n\n # get the value as of the last traded dt\n result = self._equity_minute_reader.get_value(\n asset.sid,\n last_traded_dt,\n column\n )\n\n if np.isnan(result):\n return np.nan\n\n if dt == last_traded_dt or dt.date() == last_traded_dt.date():\n return result\n\n # the value we found came from a different day, so we have to adjust\n # the data if there are any adjustments on that day barrier\n return self.get_adjusted_value(\n asset, column, last_traded_dt,\n dt, \"minute\", spot_value=result\n )\n\n def _get_daily_data(self, asset, column, dt):\n if column == \"last_traded\":\n last_traded_dt = \\\n self._equity_daily_reader.get_last_traded_dt(asset, dt)\n\n if pd.isnull(last_traded_dt):\n return pd.NaT\n else:\n return last_traded_dt\n elif column in OHLCV_FIELDS:\n # don't forward fill\n try:\n val = self._equity_daily_reader.spot_price(asset, dt, column)\n if val == -1:\n if column == \"volume\":\n return 0\n else:\n return np.nan\n else:\n return val\n except NoDataOnDate:\n return np.nan\n elif column == \"price\":\n found_dt = dt\n while True:\n try:\n value = self._equity_daily_reader.spot_price(\n asset, found_dt, \"close\"\n )\n if value != -1:\n if dt == found_dt:\n return value\n else:\n # adjust if needed\n return self.get_adjusted_value(\n asset, column, found_dt, dt, \"minute\",\n spot_value=value\n )\n else:\n found_dt -= self.trading_schedule.day\n except NoDataOnDate:\n return np.nan\n\n @remember_last\n def _get_days_for_window(self, end_date, bar_count):\n tds = self.trading_schedule.all_execution_days\n end_loc = tds.get_loc(end_date)\n start_loc = end_loc - bar_count + 1\n if start_loc < self._first_trading_day_loc:\n raise HistoryWindowStartsBeforeData(\n first_trading_day=self._first_trading_day.date(),\n bar_count=bar_count,\n suggested_start_day=tds[\n self._first_trading_day_loc + bar_count\n ].date(),\n )\n return tds[start_loc:end_loc + 1]\n\n def _get_history_daily_window(self, assets, end_dt, bar_count,\n field_to_use):\n \"\"\"\n Internal method that returns a dataframe containing history bars\n of daily frequency for the given sids.\n \"\"\"\n days_for_window = self._get_days_for_window(end_dt.date(), bar_count)\n\n if len(assets) == 0:\n return pd.DataFrame(None,\n index=days_for_window,\n columns=None)\n\n future_data = []\n eq_assets = []\n\n for asset in assets:\n if isinstance(asset, Future):\n future_data.append(self._get_history_daily_window_future(\n asset, days_for_window, end_dt, field_to_use\n ))\n else:\n eq_assets.append(asset)\n eq_data = self._get_history_daily_window_equities(\n eq_assets, days_for_window, end_dt, field_to_use\n )\n if future_data:\n # TODO: This case appears to be uncovered by testing.\n data = np.concatenate(eq_data, np.array(future_data).T)\n else:\n data = eq_data\n return pd.DataFrame(\n data,\n index=days_for_window,\n columns=assets\n )\n\n def _get_history_daily_window_future(self, asset, days_for_window,\n end_dt, column):\n # Since we don't have daily bcolz files for futures (yet), use minute\n # bars to calculate the daily values.\n data = []\n data_groups = []\n\n # get all the minutes for the days NOT including today\n for day in days_for_window[:-1]:\n minutes = self.trading_schedule.execution_minutes_for_day(day)\n\n values_for_day = np.zeros(len(minutes), dtype=np.float64)\n\n for idx, minute in enumerate(minutes):\n minute_val = 
self._get_minute_spot_value_future(\n asset, column, minute\n )\n\n values_for_day[idx] = minute_val\n\n data_groups.append(values_for_day)\n\n # get the minutes for today\n last_day_minutes = pd.date_range(\n start=self.trading_schedule.start_and_end(end_dt)[0],\n end=end_dt,\n freq=\"T\"\n )\n\n values_for_last_day = np.zeros(len(last_day_minutes), dtype=np.float64)\n\n for idx, minute in enumerate(last_day_minutes):\n minute_val = self._get_minute_spot_value_future(\n asset, column, minute\n )\n\n values_for_last_day[idx] = minute_val\n\n data_groups.append(values_for_last_day)\n\n for group in data_groups:\n if len(group) == 0:\n continue\n\n if column == 'volume':\n data.append(np.sum(group))\n elif column == 'open':\n data.append(group[0])\n elif column == 'close':\n data.append(group[-1])\n elif column == 'high':\n data.append(np.amax(group))\n elif column == 'low':\n data.append(np.amin(group))\n\n return data\n\n def _get_history_daily_window_equities(\n self, assets, days_for_window, end_dt, field_to_use):\n ends_at_midnight = end_dt.hour == 0 and end_dt.minute == 0\n\n if ends_at_midnight:\n # two cases where we use daily data for the whole range:\n # 1) the history window ends at midnight utc.\n # 2) the last desired day of the window is after the\n # last trading day, use daily data for the whole range.\n return self._get_daily_window_for_sids(\n assets,\n field_to_use,\n days_for_window,\n extra_slot=False\n )\n else:\n # minute mode, requesting '1d'\n daily_data = self._get_daily_window_for_sids(\n assets,\n field_to_use,\n days_for_window[0:-1]\n )\n\n if field_to_use == 'open':\n minute_value = self._equity_daily_aggregator.opens(\n assets, end_dt)\n elif field_to_use == 'high':\n minute_value = self._equity_daily_aggregator.highs(\n assets, end_dt)\n elif field_to_use == 'low':\n minute_value = self._equity_daily_aggregator.lows(\n assets, end_dt)\n elif field_to_use == 'close':\n minute_value = self._equity_daily_aggregator.closes(\n assets, end_dt)\n elif field_to_use == 'volume':\n minute_value = self._equity_daily_aggregator.volumes(\n assets, end_dt)\n\n # append the partial day.\n daily_data[-1] = minute_value\n\n return daily_data\n\n def _handle_history_out_of_bounds(self, bar_count):\n suggested_start_day = (\n self.trading_schedule.all_execution_minutes[\n self._first_trading_minute_loc + bar_count\n ] + self.trading_schedule.day\n ).date()\n\n raise HistoryWindowStartsBeforeData(\n first_trading_day=self._first_trading_day.date(),\n bar_count=bar_count,\n suggested_start_day=suggested_start_day,\n )\n\n def _get_history_minute_window(self, assets, end_dt, bar_count,\n field_to_use):\n \"\"\"\n Internal method that returns a dataframe containing history bars\n of minute frequency for the given sids.\n \"\"\"\n # get all the minutes for this window\n try:\n minutes_for_window = self.trading_schedule.execution_minute_window(\n end_dt, -bar_count\n )\n except KeyError:\n self._handle_history_out_of_bounds(bar_count)\n\n if minutes_for_window[0] < self._first_trading_minute:\n self._handle_history_out_of_bounds(bar_count)\n\n asset_minute_data = self._get_minute_window_for_assets(\n assets,\n field_to_use,\n minutes_for_window,\n )\n\n return pd.DataFrame(\n asset_minute_data,\n index=minutes_for_window,\n columns=assets\n )\n\n def get_history_window(self, assets, end_dt, bar_count, frequency, field,\n ffill=True):\n \"\"\"\n Public API method that returns a dataframe containing the requested\n history window. 
Data is fully adjusted.\n\n Parameters\n ----------\n assets : list of zipline.data.Asset objects\n The assets whose data is desired.\n\n bar_count: int\n The number of bars desired.\n\n frequency: string\n \"1d\" or \"1m\"\n\n field: string\n The desired field of the asset.\n\n ffill: boolean\n Forward-fill missing values. Only has effect if field\n is 'price'.\n\n Returns\n -------\n A dataframe containing the requested data.\n \"\"\"\n if field not in OHLCVP_FIELDS:\n raise ValueError(\"Invalid field: {0}\".format(field))\n\n if frequency == \"1d\":\n if field == \"price\":\n df = self._get_history_daily_window(assets, end_dt, bar_count,\n \"close\")\n else:\n df = self._get_history_daily_window(assets, end_dt, bar_count,\n field)\n elif frequency == \"1m\":\n if field == \"price\":\n df = self._get_history_minute_window(assets, end_dt, bar_count,\n \"close\")\n else:\n df = self._get_history_minute_window(assets, end_dt, bar_count,\n field)\n else:\n raise ValueError(\"Invalid frequency: {0}\".format(frequency))\n\n # forward-fill price\n if field == \"price\":\n if frequency == \"1m\":\n data_frequency = 'minute'\n elif frequency == \"1d\":\n data_frequency = 'daily'\n else:\n raise Exception(\n \"Only 1d and 1m are supported for forward-filling.\")\n\n dt_to_fill = df.index[0]\n\n perspective_dt = df.index[-1]\n assets_with_leading_nan = np.where(pd.isnull(df.iloc[0]))[0]\n for missing_loc in assets_with_leading_nan:\n asset = assets[missing_loc]\n previous_dt = self.get_last_traded_dt(\n asset, dt_to_fill, data_frequency)\n if pd.isnull(previous_dt):\n continue\n previous_value = self.get_adjusted_value(\n asset,\n field,\n previous_dt,\n perspective_dt,\n data_frequency,\n )\n df.iloc[0, missing_loc] = previous_value\n\n df.fillna(method='ffill', inplace=True)\n\n for asset in df.columns:\n if df.index[-1] >= asset.end_date:\n # if the window extends past the asset's end date, set\n # all post-end-date values to NaN in that asset's series\n series = df[asset]\n series[series.index.normalize() > asset.end_date] = np.NaN\n\n return df\n\n def _get_minute_window_for_assets(self, assets, field, minutes_for_window):\n \"\"\"\n Internal method that gets a window of adjusted minute data for an asset\n and specified date range. Used to support the history API method for\n minute bars.\n\n Missing bars are filled with NaN.\n\n Parameters\n ----------\n asset : Asset\n The asset whose data is desired.\n\n field: string\n The specific field to return. \"open\", \"high\", \"close_price\", etc.\n\n minutes_for_window: pd.DateTimeIndex\n The list of minutes representing the desired window. Each minute\n is a pd.Timestamp.\n\n Returns\n -------\n A numpy array with requested values.\n \"\"\"\n if isinstance(assets, Future):\n return self._get_minute_window_for_future([assets], field,\n minutes_for_window)\n else:\n # TODO: Make caller accept assets.\n window = self._get_minute_window_for_equities(assets, field,\n minutes_for_window)\n return window\n\n def _get_minute_window_for_future(self, asset, field, minutes_for_window):\n # THIS IS TEMPORARY. For now, we are only exposing futures within\n # equity trading hours (9:30 am to 4pm, Eastern). 
The easiest way to\n # do this is to simply do a spot lookup for each desired minute.\n return_data = np.zeros(len(minutes_for_window), dtype=np.float64)\n for idx, minute in enumerate(minutes_for_window):\n return_data[idx] = \\\n self._get_minute_spot_value_future(asset, field, minute)\n\n # Note: an improvement could be to find the consecutive runs within\n # minutes_for_window, and use them to read the underlying ctable\n # more efficiently.\n\n # Once futures are on 24-hour clock, then we can just grab all the\n # requested minutes in one shot from the ctable.\n\n # no adjustments for futures, yay.\n return return_data\n\n def _get_minute_window_for_equities(\n self, assets, field, minutes_for_window):\n return self._equity_minute_history_loader.history(assets,\n minutes_for_window,\n field,\n False)\n\n def _apply_all_adjustments(self, data, asset, dts, field,\n price_adj_factor=1.0):\n \"\"\"\n Internal method that applies all the necessary adjustments on the\n given data array.\n\n The adjustments are:\n - splits\n - if field != \"volume\":\n - mergers\n - dividends\n - * 0.001\n - any zero fields replaced with NaN\n - all values rounded to 3 digits after the decimal point.\n\n Parameters\n ----------\n data : np.array\n The data to be adjusted.\n\n asset: Asset\n The asset whose data is being adjusted.\n\n dts: pd.DateTimeIndex\n The list of minutes or days representing the desired window.\n\n field: string\n The field whose values are in the data array.\n\n price_adj_factor: float\n Factor with which to adjust OHLC values.\n Returns\n -------\n None. The data array is modified in place.\n \"\"\"\n self._apply_adjustments_to_window(\n self._get_adjustment_list(\n asset, self._splits_dict, \"SPLITS\"\n ),\n data,\n dts,\n field != 'volume'\n )\n\n if field != 'volume':\n self._apply_adjustments_to_window(\n self._get_adjustment_list(\n asset, self._mergers_dict, \"MERGERS\"\n ),\n data,\n dts,\n True\n )\n\n self._apply_adjustments_to_window(\n self._get_adjustment_list(\n asset, self._dividends_dict, \"DIVIDENDS\"\n ),\n data,\n dts,\n True\n )\n\n if price_adj_factor is not None:\n data *= price_adj_factor\n np.around(data, 3, out=data)\n\n def _get_daily_window_for_sids(\n self, assets, field, days_in_window, extra_slot=True):\n \"\"\"\n Internal method that gets a window of adjusted daily data for a sid\n and specified date range. Used to support the history API method for\n daily bars.\n\n Parameters\n ----------\n asset : Asset\n The asset whose data is desired.\n\n start_dt: pandas.Timestamp\n The start of the desired window of data.\n\n bar_count: int\n The number of days of data to return.\n\n field: string\n The specific field to return. \"open\", \"high\", \"close_price\", etc.\n\n extra_slot: boolean\n Whether to allocate an extra slot in the returned numpy array.\n This extra slot will hold the data for the last partial day. It's\n much better to create it here than to create a copy of the array\n later just to add a slot.\n\n Returns\n -------\n A numpy array with requested values. 
Any missing slots filled with\n nan.\n\n \"\"\"\n bar_count = len(days_in_window)\n # create an np.array of size bar_count\n if extra_slot:\n return_array = np.zeros((bar_count + 1, len(assets)))\n else:\n return_array = np.zeros((bar_count, len(assets)))\n\n if field != \"volume\":\n # volumes default to 0, so we don't need to put NaNs in the array\n return_array[:] = np.NAN\n\n if bar_count != 0:\n data = self._equity_history_loader.history(assets,\n days_in_window,\n field,\n extra_slot)\n if extra_slot:\n return_array[:len(return_array) - 1, :] = data\n else:\n return_array[:len(data)] = data\n return return_array\n\n @staticmethod\n def _apply_adjustments_to_window(adjustments_list, window_data,\n dts_in_window, multiply):\n if len(adjustments_list) == 0:\n return\n\n # advance idx to the correct spot in the adjustments list, based on\n # when the window starts\n idx = 0\n\n while idx < len(adjustments_list) and dts_in_window[0] >\\\n adjustments_list[idx][0]:\n idx += 1\n\n # if we've advanced through all the adjustments, then there's nothing\n # to do.\n if idx == len(adjustments_list):\n return\n\n while idx < len(adjustments_list):\n adjustment_to_apply = adjustments_list[idx]\n\n if adjustment_to_apply[0] > dts_in_window[-1]:\n break\n\n range_end = dts_in_window.searchsorted(adjustment_to_apply[0])\n if multiply:\n window_data[0:range_end] *= adjustment_to_apply[1]\n else:\n window_data[0:range_end] /= adjustment_to_apply[1]\n\n idx += 1\n\n def _get_adjustment_list(self, asset, adjustments_dict, table_name):\n \"\"\"\n Internal method that returns a list of adjustments for the given sid.\n\n Parameters\n ----------\n asset : Asset\n The asset for which to return adjustments.\n\n adjustments_dict: dict\n A dictionary of sid -> list that is used as a cache.\n\n table_name: string\n The table that contains this data in the adjustments db.\n\n Returns\n -------\n adjustments: list\n A list of [multiplier, pd.Timestamp], earliest first\n\n \"\"\"\n if self._adjustment_reader is None:\n return []\n\n sid = int(asset)\n\n try:\n adjustments = adjustments_dict[sid]\n except KeyError:\n adjustments = adjustments_dict[sid] = self._adjustment_reader.\\\n get_adjustments_for_sid(table_name, sid)\n\n return adjustments\n\n def _check_is_currently_alive(self, asset, dt):\n sid = int(asset)\n\n if sid not in self._asset_start_dates:\n self._get_asset_start_date(asset)\n\n start_date = self._asset_start_dates[sid]\n if self._asset_start_dates[sid] > dt:\n raise NoTradeDataAvailableTooEarly(\n sid=sid,\n dt=normalize_date(dt),\n start_dt=start_date\n )\n\n end_date = self._asset_end_dates[sid]\n if self._asset_end_dates[sid] < dt:\n raise NoTradeDataAvailableTooLate(\n sid=sid,\n dt=normalize_date(dt),\n end_dt=end_date\n )\n\n def _get_asset_start_date(self, asset):\n self._ensure_asset_dates(asset)\n return self._asset_start_dates[asset]\n\n def _get_asset_end_date(self, asset):\n self._ensure_asset_dates(asset)\n return self._asset_end_dates[asset]\n\n def _ensure_asset_dates(self, asset):\n sid = int(asset)\n\n if sid not in self._asset_start_dates:\n if self._first_trading_day is not None:\n self._asset_start_dates[sid] = \\\n max(asset.start_date, self._first_trading_day)\n else:\n self._asset_start_dates[sid] = asset.start_date\n\n self._asset_end_dates[sid] = asset.end_date\n\n def get_splits(self, sids, dt):\n \"\"\"\n Returns any splits for the given sids and the given dt.\n\n Parameters\n ----------\n sids : container\n Sids for which we want splits.\n dt : pd.Timestamp\n The date 
for which we are checking for splits. Note: this is\n expected to be midnight UTC.\n\n Returns\n -------\n splits : list[(int, float)]\n List of splits, where each split is a (sid, ratio) tuple.\n \"\"\"\n if self._adjustment_reader is None or not sids:\n return {}\n\n # convert dt to # of seconds since epoch, because that's what we use\n # in the adjustments db\n seconds = int(dt.value / 1e9)\n\n splits = self._adjustment_reader.conn.execute(\n \"SELECT sid, ratio FROM SPLITS WHERE effective_date = ?\",\n (seconds,)).fetchall()\n\n splits = [split for split in splits if split[0] in sids]\n\n return splits\n\n def get_stock_dividends(self, sid, trading_days):\n \"\"\"\n Returns all the stock dividends for a specific sid that occur\n in the given trading range.\n\n Parameters\n ----------\n sid: int\n The asset whose stock dividends should be returned.\n\n trading_days: pd.DatetimeIndex\n The trading range.\n\n Returns\n -------\n list: A list of objects with all relevant attributes populated.\n All timestamp fields are converted to pd.Timestamps.\n \"\"\"\n\n if self._adjustment_reader is None:\n return []\n\n if len(trading_days) == 0:\n return []\n\n start_dt = trading_days[0].value / 1e9\n end_dt = trading_days[-1].value / 1e9\n\n dividends = self._adjustment_reader.conn.execute(\n \"SELECT * FROM stock_dividend_payouts WHERE sid = ? AND \"\n \"ex_date > ? AND pay_date < ?\", (int(sid), start_dt, end_dt,)).\\\n fetchall()\n\n dividend_info = []\n for dividend_tuple in dividends:\n dividend_info.append({\n \"declared_date\": dividend_tuple[1],\n \"ex_date\": pd.Timestamp(dividend_tuple[2], unit=\"s\"),\n \"pay_date\": pd.Timestamp(dividend_tuple[3], unit=\"s\"),\n \"payment_sid\": dividend_tuple[4],\n \"ratio\": dividend_tuple[5],\n \"record_date\": pd.Timestamp(dividend_tuple[6], unit=\"s\"),\n \"sid\": dividend_tuple[7]\n })\n\n return dividend_info\n\n def contains(self, asset, field):\n return field in BASE_FIELDS or \\\n (field in self._augmented_sources_map and\n asset in self._augmented_sources_map[field])\n\n def get_fetcher_assets(self, dt):\n \"\"\"\n Returns a list of assets for the current date, as defined by the\n fetcher data.\n\n Returns\n -------\n list: a list of Asset objects.\n \"\"\"\n # return a list of assets for the current date, as defined by the\n # fetcher source\n if self._extra_source_df is None:\n return []\n\n day = normalize_date(dt)\n\n if day in self._extra_source_df.index:\n assets = self._extra_source_df.loc[day]['sid']\n else:\n return []\n\n if isinstance(assets, pd.Series):\n return [x for x in assets if isinstance(x, Asset)]\n else:\n return [assets] if isinstance(assets, Asset) else []\n\n @weak_lru_cache(20)\n def _get_minute_count_for_transform(self, ending_minute, days_count):\n # cache size picked somewhat loosely. this code exists purely to\n # handle deprecated API.\n\n # bars is the number of days desired. we have to translate that\n # into the number of minutes we want.\n # we get all the minutes for the last (bars - 1) days, then add\n # all the minutes so far today. 
the +2 is to account for ignoring\n # today, and the previous day, in doing the math.\n previous_day = \\\n self.trading_schedule.previous_execution_day(ending_minute)\n days = self.trading_schedule.execution_days_in_range(\n self.trading_schedule.add_execution_days(-days_count + 2,\n previous_day),\n previous_day,\n )\n\n minutes_count = sum(\n 210 if day in self.trading_schedule.early_ends\n else 390 for day in days\n )\n\n # add the minutes for today\n today_open = self.trading_schedule.start_and_end(ending_minute)[0]\n minutes_count += \\\n ((ending_minute - today_open).total_seconds() // 60) + 1\n\n return minutes_count\n\n def get_simple_transform(self, asset, transform_name, dt, data_frequency,\n bars=None):\n if transform_name == \"returns\":\n # returns is always calculated over the last 2 days, regardless\n # of the simulation's data frequency.\n hst = self.get_history_window(\n [asset], dt, 2, \"1d\", \"price\", ffill=True\n )[asset]\n\n return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]\n\n if bars is None:\n raise ValueError(\"bars cannot be None!\")\n\n if data_frequency == \"minute\":\n freq_str = \"1m\"\n calculated_bar_count = self._get_minute_count_for_transform(\n dt, bars\n )\n else:\n freq_str = \"1d\"\n calculated_bar_count = bars\n\n price_arr = self.get_history_window(\n [asset], dt, calculated_bar_count, freq_str, \"price\", ffill=True\n )[asset]\n\n if transform_name == \"mavg\":\n return nanmean(price_arr)\n elif transform_name == \"stddev\":\n return nanstd(price_arr, ddof=1)\n elif transform_name == \"vwap\":\n volume_arr = self.get_history_window(\n [asset], dt, calculated_bar_count, freq_str, \"volume\",\n ffill=True\n )[asset]\n\n vol_sum = nansum(volume_arr)\n\n try:\n ret = nansum(price_arr * volume_arr) / vol_sum\n except ZeroDivisionError:\n ret = np.nan\n\n return ret\n"
] |
[
[
"numpy.nanmax",
"numpy.amax",
"pandas.isnull",
"pandas.Timestamp",
"numpy.isnan",
"numpy.around",
"numpy.amin",
"numpy.nanmin",
"pandas.Timedelta",
"pandas.DataFrame",
"pandas.tslib.normalize_date",
"numpy.nansum",
"numpy.array",
"numpy.sum"
]
] |
H-Park/stable-baselines
|
[
"d056fbf23e25a41bb30b458bb6ebea3864ef2afa"
] |
[
"stable_baselines/common/hvac_action_mask_skeleton.py"
] |
[
"import gym\nimport numpy as np\nfrom gym.spaces import Discrete, MultiDiscrete\n\nimport math\nfrom pprint import pprint as pp\nfrom scipy import linalg\nfrom scipy import integrate\nfrom scipy import interpolate\n\nimport os\nimport warnings\n\nimport stable_baselines.common.hvac_data.hvac_environment as Environment\nimport stable_baselines.common.hvac_data.hvac_params as Parameters\n\n# Action 1: Valve\n# Action 2: Damper\n# Action 3: Fan\n\n#valve: number <-1, -0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1>,\n#damper: number <0.05, 0.2, 0.4, 0.6, 0.8, 1>\n\nclass MultiDiscreteActionMaskEnv(gym.Env):\n metadata = {'render.modes': ['human', 'system', 'none']}\n\n def __init__(self):\n\n #####\n ##### Machine Teaching \n self.action_space = MultiDiscrete([21, 6, 4])\n\n self.observation_shape = (1, 33, 33)\n self.observation_space = gym.spaces.Box(low=0, high=1, shape=self.observation_shape, dtype=np.float16)\n\n self.counter = 0\n self.valid_actions1 = [1] * 21\n self.valid_actions2 = []\n\n for action in self.valid_actions1:\n self.valid_actions2.append([1] * 6)\n\n self.valid_actions3 = []\n\n for i in range(21):\n tmp = [] \n for j in range(6):\n tmp.append([1] * 4)\n self.valid_actions3.append(tmp)\n\n self.valid_actions = [self.valid_actions1, self.valid_actions2, self.valid_actions3]\n print('finished init')\n\n def _update_date_time(self):\n self.time = self.k * self.h # Total time elapesed in seconds\n self.days, remainder = np.divmod(self.time, 24 * 60 * 60)\n self.hours, remainder = np.divmod(remainder, 60 * 60)\n self.minutes = remainder // 60\n\n def _update_grace_period_remaining(self, reset):\n if reset:\n self.grace_left = self.room_params['grace_period']\n else:\n self.grace_left -= self.sim_params['sample_time']\n if self.grace_left < 0:\n self.grace_left = 0\n\n def _update_env(self):\n env_signals = self.env.get_env_signals(self.k)\n # Environment signals\n self.T_out = env_signals['T_out']\n self.n_people = env_signals['n_people']\n self.occupancy = env_signals['n_people'] > 0 # Occupancy sensor\n self.int_load = env_signals['int_load']\n # Update remaining time in grace period\n if (self.k == 0) or (self.T_set_cool != env_signals['T_set_cool']) or (\n self.T_set_heat != env_signals['T_set_heat']):\n # Reset it\n self._update_grace_period_remaining(True)\n else:\n # Decrease it. 
No changes to T-set-cool or T-set-heat\n self._update_grace_period_remaining(False)\n self.T_set_cool = env_signals['T_set_cool']\n self.T_set_heat = env_signals['T_set_heat']\n\n # Add to history\n self.T_set_cool_hist.append(env_signals['T_set_cool'])\n self.T_set_heat_hist.append(env_signals['T_set_heat'])\n self.occupancy_hist.append(env_signals['n_people'] > 0)\n\n # State truncation\n if len(self.T_set_cool_hist) > self.hist_buffer:\n self.T_set_cool_hist = self.T_set_cool_hist[-self.hist_buffer:]\n if len(self.T_set_heat_hist) > self.hist_buffer:\n self.T_set_heat_hist = self.T_set_heat_hist[-self.hist_buffer:]\n if len(self.occupancy_hist) > self.hist_buffer:\n self.occupancy_hist = self.occupancy_hist[-self.hist_buffer:]\n \n def _update_temp(self, control, flow, AHU_temp):\n \n # Applied power\n P_vent = flow * 1005 * 1.205 * control['damper'] / 100 * (AHU_temp - self.T_in)\n \n # limited heating and cooling power from valves\n if (self.T_in > 60):\n maxHeatPower = 0\n else:\n maxHeatPower = self.room_params['maxHeatPower']\n if (self.T_in < 10):\n maxCoolPower = 0\n else:\n maxCoolPower = self.room_params['maxCoolPower']\n\n # Mapping from valve signal to thermal power\n heat_valve_map = max( min( \\\n 100 * (control['heatValve'] - self.room_params['valve_closed_pos']) / (self.room_params['valve_open_pos'] - self.room_params['valve_closed_pos']) \\\n ,100 ), 0)\n cool_valve_map = max( min( \\\n 100 * (control['coolValve'] - self.room_params['valve_closed_pos']) / (self.room_params['valve_open_pos'] - self.room_params['valve_closed_pos']) \\\n ,100 ), 0) \n \n # Heating and cooling power from valves\n Phi = heat_valve_map * maxHeatPower / 100 \\\n - cool_valve_map * maxCoolPower / 100\n\n # add internal loads and thermal heat from people\n Phi += self.int_load + 100 * self.n_people # 100W/person\n Phi += P_vent\n\n # Discrete time thermal equation: Ad * x + Bd * u\n self.T_in, self.T_wall = np.dot(self.Ad,\n np.transpose([self.T_in, self.T_wall])) + np.dot(self.Bd,\n np.transpose(\n [Phi, self.T_out]))\n if self.disturbance:\n self.T_in += np.random.choice([0, -5, 5], p=[0.9998, 0.0001, 0.0001])\n\n def _update_control_hist(self, control):\n # Add to history\n self.heat_valve_hist.append(control['heatValve'])\n self.damper_hist.append(control['damper'])\n self.cool_valve_hist.append(control['coolValve'])\n self.fan_speed_hist.append(control['fanSpeed'])\n\n # State truncation\n if len(self.heat_valve_hist) > self.hist_buffer:\n self.heat_valve_hist = self.heat_valve_hist[-self.hist_buffer:]\n if len(self.damper_hist) > self.hist_buffer:\n self.damper_hist = self.damper_hist[-self.hist_buffer:]\n if len(self.cool_valve_hist) > self.hist_buffer:\n self.cool_valve_hist = self.cool_valve_hist[-self.hist_buffer:]\n if len(self.fan_speed_hist) > self.hist_buffer:\n self.fan_speed_hist = self.fan_speed_hist[-self.hist_buffer:]\n\n def _measure_error_T(self):\n error = 0\n if self.T_in_meas > self.T_set_cool:\n error = self.T_in_meas - self.T_set_cool\n elif self.T_in_meas < self.T_set_heat:\n error = self.T_in_meas - self.T_set_heat\n self.error_T_hist.append(error)\n\n # State truncation\n if len(self.error_T_hist) > self.hist_buffer:\n self.error_T_hist = self.error_T_hist[-self.hist_buffer:]\n\n # Also measure the real error\n real_error = 0\n if self.T_in > self.T_set_cool:\n real_error = self.T_in - self.T_set_cool\n elif self.T_in < self.T_set_heat:\n real_error = self.T_in - self.T_set_heat\n self.error_T_real = real_error\n\n def _measure_temp(self):\n # Low pass filter (9 min) 
for temperature sensor\n self.T_in_meas = self.T_in_meas * self.AdSensor + (1. - self.AdSensor) * self.T_in\n\n def _measure_deltaCO2(self):\n deltaCO2 = self.room_params['CO2_limit'] - self.CO2\n self.delta_co2_hist.append(deltaCO2)\n if len(self.delta_co2_hist) > self.hist_buffer:\n self.delta_co2_hist = self.delta_co2_hist[-self.hist_buffer:]\n\n def _update_co2(self, control, flow):\n # (based on Bonsai pilot 2018)\n # state vector: CO2 concentration (ppm)\n # input vector: CO2new nbrPeople\n A = -flow * control['damper'] / (self.room_params['V'] * 100)\n gen = 1043 * 516 / (24 * 60 * 60) # CO2 generated by people\n B = np.array([flow * control['damper'] / (self.room_params['V'] * 100), gen / self.room_params['V']])\n\n # Discretize every step due to the nonlinear property of the equation\n self.AdCO2 = math.exp(A * self.h_internal)\n\n def f(x):\n return lambda tt: np.dot(math.exp(A * tt), B)[x]\n\n def fq(x):\n return integrate.quad(f(x), 0, self.h_internal)[0]\n\n self.BdCO2 = np.array([fq(0), fq(1)])\n self.damper_old = control['damper']\n\n self.CO2 = self.AdCO2 * self.CO2 + np.dot(self.BdCO2,\n np.transpose([self.room_params['CO2_fresh'], self.n_people]))\n if self.disturbance:\n self.CO2 += np.random.choice([0, 2000], p=[0.9998, 0.0002])\n\n def _update_energy(self, control, flow, AHU_temp):\n P_AHU = flow * 1005 * 1.205 * control['damper'] * (AHU_temp - self.T_out) / 100\n heat_valve_map = max( min( \\\n 100 * (control['heatValve'] - self.room_params['valve_closed_pos']) / (self.room_params['valve_open_pos'] - self.room_params['valve_closed_pos']) \\\n ,100 ), 0)\n cool_valve_map = max( min( \\\n 100 * (control['coolValve'] - self.room_params['valve_closed_pos']) / (self.room_params['valve_open_pos'] - self.room_params['valve_closed_pos']) \\\n ,100 ), 0) \n self.energy_heat = (heat_valve_map / 100 * self.room_params['maxHeatPower'] + max(0, P_AHU)) * self.h\n self.energy_cool = (cool_valve_map / 100 * self.room_params['maxCoolPower'] + -1 * min(0,P_AHU)) * self.h\n self.energy_elec = control['fanSpeed'] / 100 * self.room_params['maxFlow'] * 3000 * self.h\n\n def reset(self):\n print('in reset')\n self.counter = 0\n self.valid_actions1 = [1] * 21\n self.valid_actions2 = []\n\n for action in self.valid_actions1:\n self.valid_actions2.append([1] * 6)\n\n self.valid_actions3 = []\n \n for i in range(21):\n tmp = [] \n for j in range(6):\n tmp.append([1] * 4)\n self.valid_actions3.append(tmp)\n self.valid_actions = [self.valid_actions1, self.valid_actions2, self.valid_actions3]\n return self.state()\n\n def step(self, actions):\n print('in step')\n \n #######\n ####### Machine Teaching\n valid_actions1 = [1] * 21\n valid_actions2 = []\n\n for action in valid_actions1:\n valid_actions2.append([1] * 6)\n\n valid_actions3 = []\n \n for i in range(21):\n tmp = [] \n for j in range(6):\n tmp.append([1] * 4)\n valid_actions3.append(tmp)\n\n if self.valid_actions[0][actions[0]] == 0:\n raise Exception(\"Invalid action was selected! Valid actions: {}, \"\n \"action taken: {}\".format(self.valid_actions, actions))\n else:\n valid_actions1[actions[0]] = 0\n if self.valid_actions[1][actions[0]][actions[1]] == 0:\n raise Exception(\"Invalid action was selected! Valid actions: {}, \"\n \"action taken: {}\".format(self.valid_actions, actions))\n else:\n valid_actions2[0][actions[1]] = 0\n valid_actions2[1][actions[1]] = 0\n if self.valid_actions[2][actions[0]][actions[1]][actions[2]] == 0:\n raise Exception(\"Invalid action was selected! 
Valid actions: {}, \"\n \"action taken: {}\".format(self.valid_actions, actions))\n else:\n valid_actions3[0][0][actions[2]] = 0\n valid_actions3[0][1][actions[2]] = 0\n valid_actions3[0][2][actions[2]] = 0\n valid_actions3[1][0][actions[2]] = 0\n valid_actions3[1][1][actions[2]] = 0\n valid_actions3[1][2][actions[2]] = 0\n\n self.valid_actions = [valid_actions1, valid_actions2, valid_actions3]\n self.counter += 1\n\n return self.state(), 0, self.finish(), {'action_mask': self.valid_actions}\n\n def get_state(self):\n observable_state = {}\n observable_state['iteration'] = self.k\n observable_state['time'] = self.time\n observable_state['days'] = self.days\n observable_state['hours'] = self.hours\n observable_state['minutes'] = self.minutes\n observable_state['grace_left'] = self.grace_left\n observable_state['T_in_meas'] = self.T_in_meas\n observable_state['occupancy'] = self.occupancy\n observable_state['CO2'] = self.CO2\n observable_state['T_set_cool'] = self.T_set_cool\n observable_state['T_set_heat'] = self.T_set_heat\n observable_state['CO2_limit'] = self.room_params['CO2_limit']\n ## Hist\n observable_state['T_set_cool_hist'] = self.T_set_cool_hist\n observable_state['T_set_heat_hist'] = self.T_set_heat_hist\n observable_state['error_T_hist'] = self.error_T_hist\n observable_state['delta_co2_hist'] = self.delta_co2_hist\n # Control Hist\n observable_state['heat_valve_hist'] = self.heat_valve_hist\n observable_state['cool_valve_hist'] = self.cool_valve_hist\n observable_state['damper_hist'] = self.damper_hist\n observable_state['fan_speed_hist'] = self.fan_speed_hist\n observable_state['occupancy_hist'] = self.occupancy_hist\n\n hidden_state = {}\n hidden_state['T_in'] = self.T_in\n hidden_state['T_out'] = self.T_out\n hidden_state['energy_heat'] = self.energy_heat\n hidden_state['energy_cool'] = self.energy_cool\n hidden_state['energy_elec'] = self.energy_elec\n hidden_state['n_people'] = self.n_people\n hidden_state['T_wall'] = self.T_wall\n hidden_state['int_load'] = self.int_load\n hidden_state['damper_old'] = self.damper_old\n hidden_state['error_T_real'] = self.error_T_real\n\n return observable_state, hidden_state\n\n def render(self, mode='human'):\n pass\n\n def finish(self):\n return self.counter == 250\n\n def state(self):\n tmp = np.reshape(np.array([*range(100)]), self.observation_shape)\n obs = tmp / 100\n return obs\n"
] |
[
[
"numpy.divmod",
"numpy.array",
"numpy.transpose",
"numpy.random.choice"
]
] |
DominicBroadbentCompass/bayesian-coresets-optimization
|
[
"3657f2ebfc4f0e6b36f5c651b0651f06d7e3d6b1"
] |
[
"experiments/tests/not_yet_updated/test_base/test_opt.py"
] |
[
"import warnings\n\nimport numpy as np\nfrom bayesiancoresets.base import OptimizationCoreset\n\nwarnings.filterwarnings('ignore',\n category=UserWarning) # tests will generate warnings (due to pathological data design for testing), just ignore them\nnp.seterr(all='raise')\nnp.set_printoptions(linewidth=500)\nnp.random.seed(100)\ntol = 1e-9\n\n\nclass DummyOptimizationCoreset(OptimizationCoreset):\n\n def __init__(self, N):\n super().__init__(N=N)\n\n def _optimize(self):\n return np.zeros(self.N)\n\n def _max_reg_coeff(self):\n return 1.\n\n\ndef test_api():\n for N in [0, 1, 10]:\n coreset = DummyOptimizationCoreset(N)\n try:\n a = coreset.error()\n a = coreset.weights()\n a = coreset._optimize()\n except NotImplementedError as e:\n pass\n except:\n assert False, \"OptimizationCoreset shouldn't implement error, weights, _step\"\n"
] |
[
[
"numpy.seterr",
"numpy.set_printoptions",
"numpy.zeros",
"numpy.random.seed"
]
] |
chriscline/mne-python
|
[
"ced882efa2455b94094f430c62fef95d825d984e"
] |
[
"tutorials/forward/25_automated_coreg.py"
] |
[
"\"\"\"\n=============================================\nUsing an automated approach to coregistration\n=============================================\n\nThis example shows how to use the coregistration functions to perform an\nautomated MEG-MRI coregistration via scripting.\n\n.. warning:: The quality of the coregistration depends heavily upon the\n quality of the head shape points (HSP) collected during subject\n prepration and the quality of your T1-weighted MRI. Use with\n caution and check the coregistration error.\n\"\"\"\n\n# Author: Jon Houck <[email protected]>\n# Guillaume Favelier <[email protected]>\n#\n# License: BSD-3-Clause\n\nimport os.path as op\nimport numpy as np\n\nimport mne\nfrom mne.coreg import Coregistration\nfrom mne.io import read_info\n\n\ndata_path = mne.datasets.sample.data_path()\nsubjects_dir = op.join(data_path, 'subjects')\nsubject = 'sample'\n\nfname_raw = op.join(data_path, 'MEG', subject, subject + '_audvis_raw.fif')\ninfo = read_info(fname_raw)\nplot_kwargs = dict(subject=subject, subjects_dir=subjects_dir,\n surfaces=\"head-dense\", dig=True, eeg=[],\n meg='sensors', show_axes=True,\n coord_frame='meg')\nview_kwargs = dict(azimuth=45, elevation=90, distance=0.6,\n focalpoint=(0., 0., 0.))\n\n# %%\n# Set up the coregistration model\n# -------------------------------\nfiducials = \"estimated\" # get fiducials from fsaverage\ncoreg = Coregistration(info, subject, subjects_dir, fiducials=fiducials)\nfig = mne.viz.plot_alignment(info, trans=coreg.trans, **plot_kwargs)\n\n# %%\n# Initial fit with fiducials\n# --------------------------\n# Do first a coregistration fit using only 3 fiducial points. This allows\n# to find a good initial solution before further optimization using\n# head shape points. This can also be useful to detect outlier head shape\n# points which are too far from the skin surface. One can see for example\n# that on this dataset there is one such point and we will omit it from\n# the subsequent fit.\ncoreg.fit_fiducials(verbose=True)\nfig = mne.viz.plot_alignment(info, trans=coreg.trans, **plot_kwargs)\n\n# %%\n# Refining with ICP\n# -----------------\n# Next we refine the transformation using a few iteration of the\n# Iterative Closest Point (ICP) algorithm. As the initial fiducials\n# are obtained from fsaverage and not from precise manual picking in the\n# GUI we do a fit with reduced weight for the nasion.\ncoreg.fit_icp(n_iterations=6, nasion_weight=2., verbose=True)\nfig = mne.viz.plot_alignment(info, trans=coreg.trans, **plot_kwargs)\n\n# %%\n# Omitting bad points\n# -------------------\n# It is now very clear that we have one point that is an outlier\n# and that should be removed.\ncoreg.omit_head_shape_points(distance=5. / 1000) # distance is in meters\n\n# %%\n# Final coregistration fit\n# ------------------------\n\n# sphinx_gallery_thumbnail_number = 4\ncoreg.fit_icp(n_iterations=20, nasion_weight=10., verbose=True)\nfig = mne.viz.plot_alignment(info, trans=coreg.trans, **plot_kwargs)\nmne.viz.set_3d_view(fig, **view_kwargs)\n\ndists = coreg.compute_dig_mri_distances() * 1e3 # in mm\nprint(\n f\"Distance between HSP and MRI (mean/min/max):\\n{np.mean(dists):.2f} mm \"\n f\"/ {np.min(dists):.2f} mm / {np.max(dists):.2f} mm\"\n)\n\n# %%\n# .. warning::\n# Don't forget to save the resulting ``trans`` matrix!\n#\n# .. code-block:: python\n#\n# mne.write_trans('/path/to/filename-trans.fif', coreg.trans)\n#\n# .. 
note:: The :class:`mne.coreg.Coregistration` class has the ability to\n# compute MRI scale factors using\n# :meth:`~mne.coreg.Coregistration.set_scale_mode` that is useful\n# for creating surrogate MRI subjects, i.e., using a template MRI\n# (such as one from :func:`mne.datasets.fetch_infant_template`)\n# matched to a subject's head digitization. When scaling is desired,\n# a scaled surrogate MRI should be created using\n# :func:`mne.scale_mri`.\n"
] |
[
[
"numpy.max",
"numpy.mean",
"numpy.min"
]
] |
z430/pycochleagram
|
[
"45420d969047781933a91281592f46befcc3f48f"
] |
[
"pycochleagram/tests/test_erbfilter_unity.py"
] |
[
"import os\nimport sys\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nimport numpy as np\n\nimport erbfilter as erb\n\n\ndef test_erb_filts_unity(mode='grid', verbose=0):\n \"\"\"Test that the squard ERB filterbank sums to 1.\n\n This is intended to check the generalization of 1x, 2x and 4x filter\n oversampling to nx.\n\n Args:\n mode ({'grid', 'rand'}): Determine how the sample_factor list will be\n tested. If 'grid' (default), the entire range from [1, 20] will be\n searched. If 'rand', random samples will be taken from a range.\n verbose (int): Controls output verbosity level.\n \"\"\"\n mode = mode.lower()\n if mode == 'grid':\n sample_factor_list = range(1, 20)\n elif mode == 'rand':\n sample_factor_list = np.random.randint(1, 500, 20)\n else:\n raise NotImplementedError()\n\n signal_length_range = (1, 10000, 10)\n sr_range = (1000, 64000, 10)\n pad_factor = None\n low_lim_range = (1, 200, 10)\n hi_lim_range = (201, 60000, 10)\n N_range = (1, 1000, 10)\n ctr = 0\n\n for signal_length in np.random.randint(*signal_length_range):\n for sr in np.random.randint(*sr_range):\n for low_lim in np.random.randint(*low_lim_range):\n for hi_lim in np.random.randint(*hi_lim_range):\n for N in np.random.randint(*N_range):\n for sample_factor in sample_factor_list:\n try:\n if verbose > 0:\n print('N: %s, sample_factor: %s, signal_length: %s, sr: %s, low_lim: %s, hi_lim: %s, pad_factor: %s' %\n (N, sample_factor, signal_length, sr, low_lim, hi_lim, pad_factor))\n filts, hz_cutoffs, freqs = erb.make_erb_cos_filters_nx(signal_length, sr, N,\n low_lim, hi_lim, sample_factor,\n pad_factor=pad_factor, full_filter=False, strict=False)\n # get filters into columns\n filts = filts.T\n\n if verbose > 1:\n print('filts shape: %s, sample_factor: %s' % (filts.shape, sample_factor))\n\n filts_sum = np.sum(filts * filts, axis=0)\n\n if verbose > 1:\n print('filts_sum (min, max): (%s, %s)' % (filts_sum.min(), filts_sum.max()))\n\n is_close_to_one = np.allclose(filts_sum, np.ones_like(filts_sum))\n assert(is_close_to_one)\n\n ctr += 1\n if verbose > 0:\n print('PASSED: test %s' % ctr)\n\n except AssertionError as e:\n import matplotlib.pyplot as plt\n import pdb\n print('\\nFAILED\\n------')\n pdb.set_trace()\n\n\ndef main():\n # test all sample factors from [1, 20]\n test_erb_filts_unity(mode='grid', verbose=1)\n # test 20 sample factors randomly chosen from [1, 500]\n test_erb_filts_unity(mode='rand', verbose=1)\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.ones_like",
"numpy.sum",
"numpy.random.randint"
]
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.