Dataset columns:

    query            string  (lengths 9 to 9.05k)
    document         string  (lengths 10 to 222k)
    metadata         dict
    negatives        list    (always 30 items)
    negative_scores  list    (always 30 items)
    document_score   string  (lengths 4 to 10)
    document_rank    string  (2 distinct values)
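Each row pairs a natural-language query (typically a docstring) with the document that implements it, together with 30 hard-negative documents and their retrieval scores. The sketch below shows one way such rows could be unpacked into training triplets; the dataset identifier passed to `load_dataset` is a hypothetical placeholder, not the real one.

# Minimal sketch; the dataset path below is a placeholder, not an actual identifier.
from datasets import load_dataset

ds = load_dataset("user/code-retrieval-triplets", split="train")

def iter_triplets(rows):
    # Yield (anchor, positive, negative) tuples per the declared triplet objective.
    for row in rows:
        query, positive = row["query"], row["document"]
        for negative in row["negatives"]:
            yield query, positive, negative

for anchor, positive, negative in iter_triplets(ds):
    pass  # feed into a triplet / contrastive loss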
Retrieves the output mask tensor(s) of a layer. Only applicable if the layer has exactly one inbound node, i.e. if it is connected to one incoming layer.
def output_mask(self):
    output = self.output
    if isinstance(output, list):
        return [getattr(x, '_keras_mask', None) for x in output]
    else:
        return getattr(output, '_keras_mask', None)
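For context (not part of the dataset row): mask-producing layers attach a `_keras_mask` attribute to their output tensors, and `output_mask` simply reads it back. A hedged usage sketch, assuming the classic graph-mode Keras functional API:

from keras.layers import Input, Masking, LSTM
from keras.models import Model

inp = Input(shape=(10, 4))
masked = Masking(mask_value=0.0)(inp)   # attaches _keras_mask to its output tensor
out = LSTM(8)(masked)
model = Model(inp, out)

masking_layer = model.layers[1]
print(masking_layer.output_mask)        # the mask tensor, or None if no mask was set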
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_output_mask_at(self, node_index):\n output = self.get_output_at(node_index)\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)", "def get_input_mask_at(self, node_index):\n inputs = self.get_input_at(node_index)\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)", "def input_mask(self):\n inputs = self.input\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)", "def get_input_masks(nx_node, nx_graph):\n input_edges = list(nx_graph.in_edges(nx_node['key']))\n input_masks = [nx_graph.nodes[input_node]['output_mask'] for input_node, _ in input_edges]\n return input_masks", "def get_input_masks(nx_node, nx_graph):\n input_edges = list(nx_graph.in_edges(nx_node[\"key\"]))\n input_masks = [nx_graph.nodes[input_node][\"output_mask\"] for input_node, _ in input_edges]\n return input_masks", "def layer_masks(self, module):\n pass\n # return masks", "def masked_softmax(input_layer,n_nodes, batch_size):\n mask_lower = K.theano.tensor.tril(K.ones((n_nodes, n_nodes)))\n mask_upper = \\\n K.theano.tensor.triu(-100. * K.ones((n_nodes, n_nodes)), 1)\n mask_layer = mask_lower * input_layer + mask_upper\n mask_layer = mask_layer + 0 * K.eye(n_nodes)[0:n_nodes, 0:n_nodes]\n mask_layer = \\\n K.reshape(mask_layer, (batch_size * n_nodes, n_nodes))\n softmax_layer = K.softmax(mask_layer)\n output_layer = K.reshape(softmax_layer, (batch_size, n_nodes, n_nodes))\n return output_layer", "def get_out_seq_lens_nonmask_after_a_layer(self, in_seq_lens_tensor, i):\n out_lengths = in_seq_lens_tensor.clone()\n out_lengths = ((out_lengths.float() - (self.conv_layers_infos[i][1] - 1) - 1) / self.conv_layers_infos[i][-1] + 1).floor().long()\n out_nonmask = (~lengths_to_padding_mask(out_lengths)).float()\n return out_nonmask, out_lengths", "def get_output_for(self, inputs, **kwargs):\n # Retrieve the layer input\n input = inputs[0]\n # Retrieve the mask when it is supplied\n mask = None\n hid_init = None\n cell_init = None\n if self.mask_incoming_index > 0:\n mask = inputs[self.mask_incoming_index]\n if self.hid_init_incoming_index > 0:\n hid_init = inputs[self.hid_init_incoming_index]\n if self.cell_init_incoming_index > 0:\n cell_init = inputs[self.cell_init_incoming_index]\n\n # Treat all dimensions after the second as flattened feature dimensions\n if input.ndim > 3:\n input = T.flatten(input, 3)\n\n # Because scan iterates over the first dimension we dimshuffle to\n # (n_time_steps, n_batch, n_features)\n input = input.dimshuffle(1, 0, 2)\n seq_len, num_batch, _ = input.shape\n\n # Stack input weight matrices into a (num_inputs, 4*num_units)\n # matrix, which speeds up computation\n W_in_stacked = T.concatenate(\n [self.W_in_to_ingate, self.W_in_to_forgetgate,\n self.W_in_to_cell, self.W_in_to_outgate], axis=1)\n\n # Same for hidden weight matrices\n W_hid_stacked = T.concatenate(\n [self.W_hid_to_ingate, self.W_hid_to_forgetgate,\n self.W_hid_to_cell, self.W_hid_to_outgate], axis=1)\n\n # Stack biases into a (4*num_units) vector\n b_stacked = T.concatenate(\n [self.b_ingate, self.b_forgetgate,\n self.b_cell, self.b_outgate], axis=0)\n\n if self.precompute_input:\n # Because the input is given for all time steps, we can\n # precompute_input the inputs dot weight matrices before scanning.\n # W_in_stacked is (n_features, 4*num_units). 
input is then\n # (n_time_steps, n_batch, 4*num_units).\n input = T.dot(input, W_in_stacked) + b_stacked\n\n # At each call to scan, input_n will be (n_time_steps, 4*num_units).\n # We define a slicing function that extract the input to each LSTM gate\n def slice_w(x, n):\n return x[:, n*self.num_units:(n+1)*self.num_units]\n\n # Create single recurrent computation step function\n # input_n is the n'th vector of the input\n def step(input_n, cell_previous, hid_previous, *args):\n if not self.precompute_input:\n input_n = T.dot(input_n, W_in_stacked) + b_stacked\n\n # Calculate gates pre-activations and slice\n gates = input_n + T.dot(hid_previous, W_hid_stacked)\n\n # Clip gradients\n if self.grad_clipping:\n gates = theano.gradient.grad_clip(\n gates, -self.grad_clipping, self.grad_clipping)\n\n # Extract the pre-activation gate values\n ingate = slice_w(gates, 0)\n forgetgate = slice_w(gates, 1)\n cell_input = slice_w(gates, 2)\n outgate = slice_w(gates, 3)\n\n if self.peepholes:\n # Compute peephole connections\n ingate += cell_previous*self.W_cell_to_ingate\n forgetgate += cell_previous*self.W_cell_to_forgetgate\n\n # Apply nonlinearities\n ingate = self.nonlinearity_ingate(ingate)\n forgetgate = self.nonlinearity_forgetgate(forgetgate)\n cell_input = self.nonlinearity_cell(cell_input)\n\n # Compute new cell value\n cell = forgetgate*cell_previous + ingate*cell_input\n\n if self.peepholes:\n outgate += cell*self.W_cell_to_outgate\n outgate = self.nonlinearity_outgate(outgate)\n\n # Compute new hidden unit activation\n hid = outgate*self.nonlinearity(cell)\n return [cell, hid]\n\n def step_masked(input_n, mask_n, cell_previous, hid_previous, *args):\n cell, hid = step(input_n, cell_previous, hid_previous, *args)\n\n # Skip over any input with mask 0 by copying the previous\n # hidden state; proceed normally for any input with mask 1.\n not_mask = 1 - mask_n\n cell = cell*mask_n + cell_previous*not_mask\n hid = hid*mask_n + hid_previous*not_mask\n\n return [cell, hid]\n\n if mask is not None:\n # mask is given as (batch_size, seq_len). 
Because scan iterates\n # over first dimension, we dimshuffle to (seq_len, batch_size) and\n # add a broadcastable dimension\n mask = mask.dimshuffle(1, 0, 'x')\n sequences = [input, mask]\n step_fun = step_masked\n else:\n sequences = input\n step_fun = step\n\n ones = T.ones((num_batch, 1))\n if isinstance(self.cell_init, Layer):\n pass\n elif isinstance(self.cell_init, T.TensorVariable):\n cell_init = self.cell_init\n else:\n # Dot against a 1s vector to repeat to shape (num_batch, num_units)\n cell_init = T.dot(ones, self.cell_init)\n\n if isinstance(self.hid_init, Layer):\n pass\n elif isinstance(self.hid_init, T.TensorVariable):\n hid_init = self.hid_init\n else:\n # Dot against a 1s vector to repeat to shape (num_batch, num_units)\n hid_init = T.dot(ones, self.hid_init)\n\n # The hidden-to-hidden weight matrix is always used in step\n non_seqs = [W_hid_stacked]\n # The \"peephole\" weight matrices are only used when self.peepholes=True\n if self.peepholes:\n non_seqs += [self.W_cell_to_ingate,\n self.W_cell_to_forgetgate,\n self.W_cell_to_outgate]\n\n # When we aren't precomputing the input outside of scan, we need to\n # provide the input weights and biases to the step function\n if not self.precompute_input:\n non_seqs += [W_in_stacked, b_stacked]\n\n if self.unroll_scan:\n # Retrieve the dimensionality of the incoming layer\n input_shape = self.input_shapes[0]\n # Explicitly unroll the recurrence instead of using scan\n cell_out, hid_out = unroll_scan(\n fn=step_fun,\n sequences=sequences,\n outputs_info=[cell_init, hid_init],\n go_backwards=self.backwards,\n non_sequences=non_seqs,\n n_steps=input_shape[1])\n else:\n # Scan op iterates over first dimension of input and repeatedly\n # applies the step function\n cell_out, hid_out = theano.scan(\n fn=step_fun,\n sequences=sequences,\n outputs_info=[cell_init, hid_init],\n go_backwards=self.backwards,\n truncate_gradient=self.gradient_steps,\n non_sequences=non_seqs,\n strict=True)[0]\n\n # When it is requested that we only return the final sequence step,\n # we need to slice it out immediately after scan is applied\n if self.only_return_final:\n hid_out = hid_out[-1]\n cell_out = cell_out[-1]\n else:\n # dimshuffle back to (n_batch, n_time_steps, n_features))\n hid_out = hid_out.dimshuffle(1, 0, 2)\n cell_out = cell_out.dimshuffle(1, 0, 2)\n\n # if scan is backward reverse the output\n if self.backwards:\n hid_out = hid_out[:, ::-1]\n cell_out = cell_out[:, ::-1]\n\n return T.concatenate([cell_out, hid_out], axis=2)", "def compute_mask(self, input, input_mask=None):\n if input_mask is None:\n return [None for i in range(self.n)]\n else:\n raise ValueError(\"Not supporting mask for this layer {}\".format(self.name))", "def compute_mask(self, input, input_mask=None):\n n_inputs = len(input)\n if input_mask is None or all([m is None for m in input_mask]):\n # return [None for _ in range(0, n_inputs - 1, 2)]\n return [None for _ in range(n_inputs * (n_inputs - 1) / 2)]\n else:\n raise ValueError(\"Not supporting mask for this layer {}\".format(self.name))", "def compute_mask(self, input, input_mask=None):\n n_inputs = len(input)\n if input_mask is None or all([m is None for m in input_mask]):\n # return [None for _ in range(0, n_inputs - 1, 2)]\n return [None for _ in range(n_inputs * (n_inputs - 1) / 2)]\n else:\n raise ValueError(\"Not supporting mask for this layer {}\".format(self.name))", "def get_output_for(self, inputs, **kwargs):\n # Retrieve the layer input\n input = inputs[0]\n # Retrieve the mask when it is supplied\n mask 
= None\n hid_init = None\n if self.mask_incoming_index > 0:\n mask = inputs[self.mask_incoming_index]\n if self.hid_init_incoming_index > 0:\n hid_init = inputs[self.hid_init_incoming_index]\n\n # Input should be provided as (n_batch, n_time_steps, n_features)\n # but scan requires the iterable dimension to be first\n # So, we need to dimshuffle to (n_time_steps, n_batch, n_features)\n input = input.dimshuffle(1, 0, *range(2, input.ndim))\n seq_len, num_batch = input.shape[0], input.shape[1]\n\n # When we are not precomputing the input, we also need to pass the\n # input-to-hidden parameters to step\n non_seqs = L.get_all_params(self.input_to_hidden)\n\n # Create single recurrent computation step function\n def step(input_n, hid_previous, *args):\n hid_pre = L.get_output(\n self.input_to_hidden,{self.input_to_hidden_input : input_n,\n self.input_to_hidden_hidden : hid_previous}, **kwargs)\n\n # Clip gradients\n if self.grad_clipping:\n hid_pre = theano.gradient.grad_clip(\n hid_pre, -self.grad_clipping, self.grad_clipping)\n\n return hid_pre\n\n def step_masked(input_n, mask_n, hid_previous, *args):\n # Skip over any input with mask 0 by copying the previous\n # hidden state; proceed normally for any input with mask 1.\n hid = step(input_n, hid_previous, *args)\n hid_out = T.switch(mask_n, hid, hid_previous)\n return [hid_out]\n\n if mask is not None:\n mask = mask.dimshuffle(1, 0, 'x')\n sequences = [input, mask]\n step_fun = step_masked\n else:\n sequences = input\n step_fun = step\n\n if not isinstance(self.hid_init, L.Layer):\n # The code below simply repeats self.hid_init num_batch times in\n # its first dimension. Turns out using a dot product and a\n # dimshuffle is faster than T.repeat.\n dot_dims = (list(range(1, self.hid_init.ndim - 1)) +\n [0, self.hid_init.ndim - 1])\n hid_init = T.dot(T.ones((num_batch, 1)),\n self.hid_init.dimshuffle(dot_dims))\n\n if self.unroll_scan:\n # Retrieve the dimensionality of the incoming layer\n input_shape = self.input_shapes[0]\n # Explicitly unroll the recurrence instead of using scan\n hid_out = unroll_scan(\n fn=step_fun,\n sequences=sequences,\n outputs_info=[hid_init],\n go_backwards=self.backwards,\n non_sequences=non_seqs,\n n_steps=input_shape[1])[0]\n else:\n # Scan op iterates over first dimension of input and repeatedly\n # applies the step function\n hid_out = theano.scan(\n fn=step_fun,\n sequences=sequences,\n go_backwards=self.backwards,\n outputs_info=[hid_init],\n non_sequences=non_seqs,\n truncate_gradient=self.gradient_steps,\n strict=True)[0]\n\n # When it is requested that we only return the final sequence step,\n # we need to slice it out immediately after scan is applied\n if self.only_return_final:\n hid_out = hid_out[-1]\n else:\n # dimshuffle back to (n_batch, n_time_steps, n_features))\n hid_out = hid_out.dimshuffle(1, 0, *range(2, hid_out.ndim))\n\n # if scan is backward reverse the output\n if self.backwards:\n hid_out = hid_out[:, ::-1]\n\n return hid_out", "def compute_mask(self, input, input_mask=None):\n if input_mask is None or all([m is None for m in input_mask]):\n # return [None for _ in range(0, n_inputs - 1, 2)]\n return None\n else:\n raise ValueError(\"Not supporting mask for this layer {}\".format(self.name))", "def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument\n if not self.supports_masking:\n if mask is not None:\n if isinstance(mask, list):\n if any(m is not None for m in mask):\n raise TypeError('Layer ' + self.name + ' does not support masking, '\n 'but was passed an 
input_mask: ' + str(mask))\n else:\n raise TypeError('Layer ' + self.name + ' does not support masking, '\n 'but was passed an input_mask: ' + str(mask))\n # masking not explicitly supported: return None as mask\n return None\n # if masking is explicitly supported, by default\n # carry over the input mask\n return mask", "def _source_mask(self, ilens):\n x_masks = make_non_pad_mask(ilens)\n return x_masks.unsqueeze(-2)", "def compute_mask(self, inputs, mask=None):\n if self.padding != \"same\":\n raise ValueError(\"Padding mode '%s' not yet supported\" % (\n self.padding,))\n return mask", "def call(self, inputs)->Any:\n unpacked_inputs = tf_utils.unpack_inputs(inputs)\n input_tensor = unpacked_inputs[0]\n attention_mask = unpacked_inputs[1]\n output_tensor = input_tensor\n\n all_layer_outputs = []\n for layer in self.layers:\n output_tensor, attention_scores = layer(output_tensor, attention_mask)\n all_layer_outputs.append((output_tensor, attention_scores))\n\n return all_layer_outputs", "def mask(self):\n return list(self._mask_generator())", "def compute_mask(self, inputs, mask=None):\n if mask is None:\n return None\n if not isinstance(mask, list):\n raise ValueError('`mask` should be a list.')\n if not isinstance(inputs, list):\n raise ValueError('`inputs` should be a list.')\n if len(mask) != len(inputs):\n raise ValueError('The lists `inputs` and `mask` '\n 'should have the same length.')\n if mask[0] is not None:\n raise ValueError('Attention mask should be None.')\n if mask[1] is None:\n return None\n return K.any(mask[1], axis=-1)", "def compute_mask(self, inputs, mask=None):\n return None", "def fill_input_masks(nx_node, nx_graph):\n input_edges = sorted(list(nx_graph.in_edges(nx_node['key'])), key=lambda edge: nx_graph.edges[edge]['in_port'])\n input_masks = [nx_graph.nodes[input_node]['output_mask'] for input_node, _ in input_edges]\n\n filled_input_masks = []\n for i, mask in enumerate(input_masks):\n if mask is None:\n mask = torch.ones(nx_graph.edges[input_edges[i]]['activation_shape'][1])\n filled_input_masks.append(mask)\n return input_masks, filled_input_masks", "def lstm_mask_layer(proj, mask):\n\n return proj * mask[:, :, None]", "def get_outputs():\n all_hidden_states = get_states()\n all_attention = tf.map_fn(get_attention, all_hidden_states)\n a_values = tf.nn.softmax(all_attention, axis = 0)\n final_hidden_state = tf.einsum('ijk,ijl->jkl', a_values, \n all_hidden_states)\n output = tf.nn.sigmoid(tf.matmul(final_hidden_state[:,0,:], Wo) + bo, \n name='outputs')\n return output, a_values", "def mask(self) -> list[int]:\n return self._mask", "def attention_mask(model, x):\n config = model.config\n input_mask = model.inputs[\"input_mask\"]\n final_mask = model.builder.customOp(opName=\"AttentionMask\",\n opVersion=1,\n domain=\"ai.graphcore\",\n inputs=[input_mask, x],\n attributes={\"dataType\": model.config.popart_dtype})[0]\n final_mask = model.detach(final_mask)\n return final_mask", "def identity_mask_propagation(nx_node, nx_graph):\n input_masks = get_input_masks(nx_node, nx_graph)\n assert len(input_masks) == 1\n nx_node[\"input_masks\"] = input_masks\n nx_node[\"output_mask\"] = input_masks[0]", "def testMask2D(self):\n\n # This mask, applied on an image filled with 1, should result in an image\n # filled with 8 (since we sum 4 elements per channel and there are 2 input\n # channels).\n mask = np.array([[1, 1, 1],\n [1, 0, 0],\n [0, 0, 0]], dtype=np.float32)\n inputs = tf.constant(1.0, shape=(1, 5, 5, 2))\n conv1 = snt.Conv2D(\n output_channels=1,\n 
kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = np.array([[8] * 3] * 3)\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out)", "def _get_masks(n_input, n_hiddens, switched_dependencies=False):\n masks = []\n units = [n_input] + n_hiddens + [n_input]\n for layer in range(len(units)-1):\n n_in = units[layer]\n n_out = units[layer+1]\n\n mask = np.ones((n_in, n_out)) if not switched_dependencies else np.zeros((n_in, n_out))\n diagonalzeros = True if layer == len(units)-2 else False\n\n assert n_in % n_out == 0 or n_out % n_in == 0\n if n_out >= n_in:\n k = int(n_out / n_in)\n for i in range(n_in):\n mask[i, :i * k] = 0 if not switched_dependencies else 1\n if diagonalzeros and not switched_dependencies:\n mask[i, i * k:(i + 1) * k] = 0\n if not diagonalzeros and switched_dependencies:\n mask[i, i * k:(i + 1) * k] = 1\n else:\n k = int(n_in / n_out)\n for i in range(n_out):\n mask[(i + 1) * k:, i] = 0 if not switched_dependencies else 1\n if diagonalzeros and not switched_dependencies:\n mask[i * k:(i + 1) * k, i] = 0\n if not diagonalzeros and switched_dependencies:\n mask[i * k:(i + 1) * k, i] = 1\n\n masks.append(mask.astype(np.float32))\n\n return masks", "def attention_mask(nd, ns, dtype=tf.float32):\n i = tf.range(nd)[:, None]\n j = tf.range(ns)\n m = i >= j - ns + nd\n out = tf.cast(m, dtype)\n return out" ]
[ "0.7671821", "0.6720139", "0.65809995", "0.65300184", "0.6510303", "0.64391583", "0.6428725", "0.62786055", "0.6275386", "0.62514156", "0.61698884", "0.61698884", "0.61586946", "0.6113361", "0.6086056", "0.5989577", "0.59421456", "0.59397596", "0.5927023", "0.5896852", "0.5878513", "0.5851518", "0.5757808", "0.57351106", "0.57322174", "0.5711177", "0.5699816", "0.5680888", "0.56794566", "0.5673832" ]
0.7369137
1
`Input()` is used to instantiate a Keras tensor. A Keras tensor is a tensor object from the underlying backend (Theano or TensorFlow), which we augment with certain attributes that allow us to build a Keras model just by knowing the inputs and outputs of the model. For instance, if a, b and c are Keras tensors, it becomes possible to build a model directly from them, e.g. `model = Model(inputs=[a, b], outputs=c)`.
def Input(  # pylint: disable=invalid-name
        shape=None,
        batch_size=None,
        name=None,
        dtype=None,
        sparse=False,
        tensor=None,
        **kwargs):
    if 'batch_shape' in kwargs:
        batch_shape = kwargs.pop('batch_shape')
        if shape and batch_shape:
            raise ValueError('Only provide the shape OR '
                             'batch_shape argument to '
                             'Input, not both at the same time.')
        batch_size = batch_shape[0]
        shape = batch_shape[1:]
    if kwargs:
        raise ValueError('Unrecognized keyword arguments:', kwargs.keys())
    if dtype is None:
        dtype = K.floatx()
    if not shape and tensor is None:
        raise ValueError('Please provide to Input either a `shape`'
                         ' or a `tensor` argument. Note that '
                         '`shape` does not include the batch '
                         'dimension.')
    input_layer = InputLayer(
        input_shape=shape,
        batch_size=batch_size,
        name=name,
        dtype=dtype,
        sparse=sparse,
        input_tensor=tensor)
    # Return tensor including `_keras_history`.
    # Note that in this case train_output and test_output are the same pointer.
    outputs = input_layer._inbound_nodes[0].output_tensors
    if len(outputs) == 1:
        return outputs[0]
    else:
        return outputs
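An illustrative usage sketch of the functional API built on `Input()` (not taken from the dataset):

from keras.layers import Input, Dense
from keras.models import Model

x = Input(shape=(32,))                  # batch dimension is implicit
y = Dense(16, activation='softmax')(x)
model = Model(x, y)                     # possible because x carries _keras_history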
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tensor_from_input(self, input_data: Dict[str, Any],\n **kwargs) -> torch.Tensor:\n raise NotImplementedError", "def inputs(self) -> 'Input':\n return self.Input", "def identity_model(input_shape=image_input_shape, weights=None, classes=None,\n input_tensor=None):\n if input_tensor is None:\n x = Input(shape=input_shape)\n else:\n x = Input(tensor=input_tensor)\n return Model(x, x)", "def input_tensor(self):\n if self._direct_feed_dict:\n raise TypeError('This loom has direct_feed_dict set, '\n 'so it has no input tensor')\n return self._loom_input_tensor", "def inputs_init():\n input_user = tf.keras.Input((1, ))\n input_item = tf.keras.Input((1, ))\n input_rating = tf.keras.Input((1, ))\n return input_user, input_item, input_rating", "def build_input(self):\n n_input = tf.placeholder(tf.int32, [None, None], name='n_input')\n t_input = tf.placeholder(tf.int32, [None, None], name='t_input')\n n_target = tf.placeholder(tf.int32, [None, None], name='n_target')\n t_target = tf.placeholder(tf.int32, [None, None], name='t_target')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n return n_input, t_input, n_target, t_target, keep_prob", "def make_input(name, data, dtype=None, **tags):\n from ..utils import make_placeholder_for\n input_var = make_placeholder_for(name, data, dtype=dtype, **tags)\n input_layer = InputLayer(input_var, shape=(None,) + data.shape[1:])\n return input_layer, input_var", "def get_tensor_from_input(input_data: Dict[str, Any]) -> torch.Tensor:\n return input_data['img']", "def model_inputs():\n # TODO: Implement Function\n inputs = tf.placeholder(tf.int32, shape=[None,None], name= \"input\")\n targets = tf.placeholder(tf.int32, shape=[None,None], name= \"targets\")\n lrate = tf.placeholder(tf.float32, name= \"learning_rate\")\n keep_prob = tf.placeholder(tf.float32, name= \"keep_prob\")\n target_seq_lenth = tf.placeholder(tf.int32, shape=[None], name= \"target_sequence_length\")\n max_target_len = tf.reduce_max(target_seq_lenth, name= 'max_target_len')\n source_seq_length = tf.placeholder(tf.int32, shape=[None], name= \"source_sequence_length\")\n return (inputs, targets, lrate, keep_prob, target_seq_lenth, max_target_len, source_seq_length)", "def _demo_inputs(input_shape=(1, 3, 64, 64)):\n feat = np.random.random(input_shape)\n feat = torch.FloatTensor(feat)\n\n return feat", "def input_tensor(interpreter):\n tensor_index = interpreter.get_input_details()[0]['index']\n return interpreter.tensor(tensor_index)()[0]", "def input(self):\r\n\r\n if len(self.inputs) == 1:\r\n return self.inputs[0]\r\n else:\r\n raise Exception(\"Single input requested. 
Node has none or more than one input (%d).\"\r\n % len(self.inputs))", "def as_input(self):\n return Input(self.alias, self.eval())", "def build_model_input(cls, name='input'):\n return cls(name, PortDirection.INPUT, type=PortType.MODEL)", "def get_train_inputs(self, example):\n return example", "def get_input(inputs):\n return input(inputs)", "def get_inputs_(self, batch, **kwargs):\n raise NotImplementedError", "def get_input(self):\n np.random.seed(0)\n\n inputs = np.zeros((16, 4))\n labels = np.random.randint(\n 0, 2, size=(16, 1)).astype(\n np.float32)\n\n for i in range(16):\n j = int(2 * labels[i] + np.random.randint(0, 2))\n inputs[i, j] = 1\n\n random_seed.set_random_seed(0)\n tf_inputs = constant_op.constant(inputs, dtype=dtypes.float32)\n tf_labels = constant_op.constant(labels, dtype=dtypes.float32)\n\n return tf_inputs, tf_labels", "def get_input(self, input_number: int) -> Input:\n return Input(self.api, input_number)", "def __init__(self, input, weight_init=None):\n n_in = input.get_shape()[1].value\n \n self.input = input\n \n # Initiate the weight for the input layer\n r = 4*np.sqrt(3.0/n_in)\n\n if weight_init is None:\n self.w = tf.Variable(tf.random_uniform([n_in,],-r, r), name='w')\n else: \n self.w = tf.Variable(weight_init, name='w')\n\n self.output = self.w * self.input", "def call(self, input_tensor):\n x = self.conv_1(input_tensor)\n x = self.bn_1(x, training=False)\n x = self.relu(x)\n x = self.conv_2(x)\n x = self.bn_2(x, training=False)\n x = self.relu(x)\n x = self.conv_3(x)\n x = self.bn_3(x, training=False)\n x_shortcut = self.shortcut(input_tensor)\n x_shortcut = self.bn_shortcut(x_shortcut, training=False)\n x = keras.layers.add([x, x_shortcut])\n x = self.relu(x)\n return x", "def create_model_inputs(feature_names):\n return layers.Input(shape=(len(feature_names)),name='input')", "def model_inputs():\n inputs_ = tf.placeholder(tf.int32, [None, None], name='inputs')\n labels_ = tf.placeholder(tf.int32, [None, None], name='labels')\n keep_prob_ = tf.placeholder(tf.float32, name='keep_prob')\n \n return inputs_, labels_, keep_prob_", "def build_cnn_input(config):\n print('Building input to cnn layer')\n input_layer = tf.keras.layers.Input(\n shape=(config['sequence_length'],), name='input_sequence')\n embedding_layer = tf.keras.layers.Embedding(\n input_dim=len(config['vocab']), output_dim=config['emb_size'],\n input_length=config['sequence_length'],\n name='embedding_layer')(input_layer)\n\n return input_layer, embedding_layer", "def identity(inputs: torch.Tensor):\n return inputs", "def identity(self, input_tensor, name):\n return tf.identity(input_tensor, name=name)", "def model(self):\n i = self.keras.Input(self.s)\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def GraphFn(self, inp):\n tensor = inp * 2.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[1])\n tensor = tensor + 3.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[2])\n tensor = tensor * 4.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[3])\n tensor += tensor + 5.0\n return array_ops.identity(tensor, name='output_0')", "def __init__(self, shape, input_var=None):\n\n self.output = layers.InputLayer(shape, input_var=input_var)", "def get_input(data):\n img = data['img']\n inputs = img\n targets = data['category']\n inputs, targets = inputs.cuda(), targets.cuda()\n return inputs, targets" ]
[ "0.6742256", "0.6606137", "0.65783495", "0.649664", "0.63277364", "0.6327408", "0.6239031", "0.6163693", "0.6142171", "0.61120164", "0.60526484", "0.5993728", "0.5975712", "0.59637946", "0.59475744", "0.5926236", "0.59232926", "0.5898539", "0.5879682", "0.587486", "0.5870248", "0.5867614", "0.5850281", "0.5846119", "0.5826765", "0.5819938", "0.5807112", "0.57976353", "0.5787087", "0.5774264" ]
0.6835913
0
Returns the `updates` from all layers that are stateful. This is useful for separating training updates and state updates, e.g. when we need to update a layer's internal state during prediction.
def state_updates(self):
    state_updates = []
    for layer in self.layers:
        if getattr(layer, 'stateful', False):
            if hasattr(layer, 'updates'):
                state_updates += layer.updates
    return state_updates
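For context, a hedged sketch of where these state updates come from: a stateful RNN registers the ops that carry its hidden state across batches, and graph-mode Keras runs `state_updates` alongside prediction.

from keras.layers import Input, LSTM
from keras.models import Model

inp = Input(batch_shape=(1, 5, 3))      # stateful layers need a fixed batch size
out = LSTM(4, stateful=True)(inp)
model = Model(inp, out)

print(model.state_updates)              # ops that advance the LSTM's h/c state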
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updates(self):\r\n return list(self.state_updates)", "def updates(self):\n if context.in_eager_mode():\n return []\n\n if not self.trainable and not self.stateful:\n return []\n\n updates = []\n for layer in self.layers:\n updates += layer.updates\n\n # `updates` might contain irrelevant updates, so it needs to be filtered\n # with respect to inputs the model has been called on.\n relevant_inputs = self.inputs or []\n for i in range(1, len(self._inbound_nodes)):\n inputs = self.get_input_at(i)\n if isinstance(inputs, list):\n relevant_inputs += inputs\n else:\n relevant_inputs.append(inputs)\n reachable = tf_layers_util.get_reachable_from_inputs(relevant_inputs,\n updates)\n relevant_conditional_updates = [x for x in updates if x in reachable]\n unconditional_updates = [\n x for x in updates if x._unconditional_update] # pylint: disable=protected-access\n # A layer could be used multiple times in a nested structure,\n # so the updates list must be de-duped.\n return list(set(\n relevant_conditional_updates + unconditional_updates + self._updates))", "def get_updates(self):\n\n\t\tda_cost = self.get_cost()\n\n\t\tweight_gradient = theano.tensor.grad(da_cost, self.weights)\n\t\tbias_gradient = theano.tensor.grad(da_cost, self.bias)\n\t\treverse_bias_gradient = theano.tensor.grad(da_cost, self.reverse_bias)\n\n\t\tlr_cost = self.get_lr_cost()\n\t\tlr_weight_gradient = theano.tensor.grad(lr_cost, self.label_weights)\n\t\tlr_bias_gradient = theano.tensor.grad(lr_cost, self.label_bias)\n\n\t\tupdates = [\n\t\t\t(self.weights, self.weights - self.learning_rate*weight_gradient),\n\t\t\t(self.bias, self.bias - self.learning_rate*bias_gradient),\n\t\t\t(self.reverse_bias, self.reverse_bias -\n\t\t\t\tself.learning_rate*reverse_bias_gradient),\n\t\t\t(self.label_weights, self.label_weights -\n\t\t\t\tself.learning_rate*lr_weight_gradient),\n\t\t\t(self.label_bias, self.label_bias -\n\t\t\t\tself.learning_rate*lr_bias_gradient)]\n\n\t\treturn updates", "def __getstate__(self):\n return (self.layers, self.best_loss)", "def get_updates(self, loss, params):\n # check asserts \n assert isinstance(params, list), '\"params\" should be a list type.'\n\n # do\n include_tags = print_tags_in_params(params, False) # include all tags to include all parameters\n unblocked_params = filter_params_by_tags(params, include_tags, self.exclude_tags)\n grads = self.compute_gradients(loss, unblocked_params)\n updates = self.gradients_to_updates(unblocked_params, grads)\n return updates", "def update_layers(self):\n\n # Para cada layer atualiza utilizando o gradiente descendente e o learning rate\n for layer in self.layers:\n layer.update_layer(self.learning_rate)", "def all_states(self) -> Tuple[State, ...]:\n return self.influence_graph.all_states()", "def get_state(self):\n return list(_modeller.mod_state_optimizer_state_get(self._modpt))", "def _dirty_states(self) -> Iterable[InstanceState[Any]]:\n return self.identity_map._dirty_states()", "def updates(self):\n return self._get_page('updates').json()", "def losses(self):\n losses = []\n for layer in self.layers:\n losses += layer.losses\n if context.in_eager_mode():\n return losses\n\n relevant_inputs = self.inputs or []\n for i in range(1, len(self._inbound_nodes)):\n inputs = self.get_input_at(i)\n if isinstance(inputs, list):\n relevant_inputs += inputs\n else:\n relevant_inputs.append(inputs)\n reachable = tf_layers_util.get_reachable_from_inputs(relevant_inputs,\n losses)\n relevant_conditional_losses = [x for x in losses if x in reachable]\n 
unconditional_losses = [\n x for x in losses if x._unconditional_loss] # pylint: disable=protected-access\n return list(set(\n relevant_conditional_losses + unconditional_losses + self._losses))", "def get_all_latched(self):\n return self.__latched_states", "def get_state(self):\n return {\n \"epoch\": self.epoch,\n \"weights\": self.model.get_weights(),\n \"optimizer_weights\": self.model.optimizer.get_weights()\n }", "def full_output_state(self):\n state = self.circuit.global_input_state\n for layer in range(self.circuit.element_layers):\n #TODO: a way to update the state one layer at a time\n #instead of one element at a time might be slightly faster\n for element in self.circuit.elements[layer]:\n state = self.evolve_element(state, element)\n return state", "def refreshStates(self):\n # Update the comboboxes\n self.updateLayersComboboxes()\n # Update the edit mode buttons\n self.updateEditState_pairsLayer()\n self.updateEditState_toBendLayer()\n # Update the transformation type\n self.updateTransformationType()", "def layers(self):\r\n return self._flc.layers", "def _get_layers(self) :\n \n return self._layers", "def _modify_updates(self, updates):\n wxf = self.wxf\n wyf = self.wyf\n wxf_updated = updates[wxf]\n wyf_updated = updates[wyf]\n nwxf = (wxf_updated.std(0) + SMALL)[numpy.newaxis, :]\n nwyf = (wyf_updated.std(0) + SMALL)[numpy.newaxis, :]\n meannxf = nwxf.mean()\n meannyf = nwyf.mean()\n # Center filters\n centered_wxf = wxf_updated - wxf_updated.mean(0)\n centered_wyf = wyf_updated - wyf_updated.mean(0)\n # Fix standard deviation\n wxf_updated = centered_wxf * (meannxf / nwxf)\n wyf_updated = centered_wyf * (meannyf / nwyf)\n updates[wxf] = wxf_updated\n updates[wyf] = wyf_updated", "def all_states(self):\n return self._states", "def apply_updates(self, updates):\n raise NotImplementedError()", "def get_states():\n # Getting all hidden state through time\n all_hidden_states = tf.scan(GRU, processed_input, \n initializer=initial_hidden, name='states')\n return all_hidden_states", "def get_all_states(self):\n return self._states", "def UpdateLayers(self):\n pass", "def state_dict(self):\n return {\n 'XY_net': self.XY_net.state_dict(),\n 'XY_optimizer_minee': self.XY_optimizer_minee.state_dict(),\n 'X_net': self.X_net.state_dict(),\n 'X_optimizer_minee': self.X_optimizer_minee.state_dict(),\n 'Y_net': self.Y_net.state_dict(),\n 'Y_optimizer_minee': self.Y_optimizer_minee.state_dict(),\n 'X': self.X,\n 'Y': self.Y,\n 'lr': self.lr,\n 'batch_size': self.batch_size,\n 'ref_batch_factor': self.ref_batch_factor\n }", "def Get_States(self):\n\n # Getting all hidden state throuh time\n all_hidden_states = tf.scan(self.LSTM,\n self.processed_input,\n initializer=self.initial_hidden,\n name='states')\n all_hidden_states=all_hidden_states[:,0,:,:]\n \n return all_hidden_states", "def get_updates(self, *args, **kwargs):\n\n updates_data = api.get_updates(\n *args,\n api_key=self.__creds.api_key_v2, \n **kwargs)\n return [en.Update(creds=self.__creds, **update_data) for update_data in updates_data]", "def project_updates(self):\n return self._tower.project_updates.filter({'project': self.id})", "def compute_updates(self, xs, gs, state=None):\n raise NotImplementedError()", "def getstate(self):\n return [elem.getstate() for elem in self]", "def layers(self):\n return self['layers']" ]
[ "0.7581514", "0.75490224", "0.63087124", "0.6013626", "0.5863231", "0.58500093", "0.5648043", "0.55675226", "0.55363774", "0.5518336", "0.55056685", "0.5496777", "0.544374", "0.5442008", "0.54091793", "0.5371852", "0.5368774", "0.53402925", "0.53316826", "0.53155684", "0.529942", "0.52721417", "0.52436805", "0.52223444", "0.521767", "0.5212224", "0.52085394", "0.5196086", "0.5187029", "0.51615906" ]
0.8816369
0
Retrieves a layer based on either its name (unique) or index. Indices are based on order of horizontal graph traversal (bottom-up).
def get_layer(self, name=None, index=None):
    # TODO(fchollet): We could build a dictionary based on layer names
    # since they are constant, but we have not done that yet.
    if index is not None:
        if len(self.layers) <= index:
            raise ValueError('Was asked to retrieve layer at index ' + str(index) +
                             ' but model only has ' + str(len(self.layers)) +
                             ' layers.')
        else:
            return self.layers[index]
    else:
        if not name:
            raise ValueError('Provide either a layer name or layer index.')
        for layer in self.layers:
            if layer.name == name:
                return layer
        raise ValueError('No such layer: ' + name)
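Usage is straightforward (illustrative sketch; the layer name 'dense_1' is a hypothetical example):

# Assuming `model` is any built Keras model:
dense = model.get_layer('dense_1')      # lookup by (unique) name
first = model.get_layer(index=0)        # lookup by horizontal graph-traversal index
weights = dense.get_weights()           # e.g. inspect or reuse the retrieved layer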
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_layer(model, layer_name=None, layer_idx=None):\n\n _validate_args(layer_name, layer_idx, layer=None)\n if layer_idx is not None:\n return model.layers[layer_idx]\n\n layer = [layer for layer in model.layers if layer_name in layer.name]\n if len(layer) > 1:\n print(warn_str + \"multiple matching layer names found; \"\n + \"picking earliest\")\n elif len(layer) == 0:\n raise Exception(\"no layers found w/ names matching \"\n + \"substring: '%s'\" % layer_name)\n return layer[0]", "def layer_by_name(net, name):\n for l in net.named_modules():\n if l[0] == name:\n return l[1]", "def get_layer(self, i):\n return self.layers[i]", "def find_layer_by_name(self, layer_name):\n return next(x for x in self.layers if x.name == layer_name)", "def GetLayer(self,layer):\n\t\tif isinstance(layer,str):\n\t\t\tindex=self.LayerNames.index(layer)\n\t\telif isinstance(layer,int):\n\t\t\tindex=layer\n\t\telse:\n\t\t\traise PycomError('Type of layer in GetLayer() is wrong')\n\t\treturn self.acad.ActiveDocument.Layers.Item(index)", "def getLayer(uniq):\n return Layer(Cuebot.getStub('layer').GetLayer(\n job_pb2.LayerGetLayerRequest(id=uniq), timeout=Cuebot.Timeout).layer)", "def __getitem__(cls, layer_name):\n return getattr(cls, layer_name)", "def get_layer(key):\n layer1 = {'gm': u'Global_Projection', 'np': u'North_Polar_Projection', 'radar': u'Sigma0_Data', 'flag': u'flag'}\n return layer1[key]", "def find_layer_from_name(self, name):\n try:\n _first, *others = filter(lambda x: x.Name == name, self._file3dm.Layers)\n if others:\n raise ReferenceError(\n \"There are more than one layers with \" f\"the name '{name}'\"\n )\n return _first\n except ValueError:\n return None", "def __getitem__(self, item):\n if isinstance(item, int):\n return self.layers[item]\n for layer in self.layers:\n if layer.layer_name == item.lower():\n return layer\n raise KeyError('Layer does not exist in packet')", "def layer_from_name(layer_name):\n if layer_name in _layer_name_cache:\n return _layer_name_cache[layer_name]\n layer_names = layer_name.split('.')\n layer_module, module_layer_name = layer_names[:-1], layer_names[-1]\n module_name = '.'.join(layer_module)\n module = import_name(module_name)\n try:\n return getattr(module, module_layer_name)\n except AttributeError:\n # the default error is very uninformative:\n # AttributeError: 'module' object has no attribute 'DemoLayer'\n # it doesn't say *which* module\n raise AttributeError('module %r has no attribute %r'\n % (module_name, module_layer_name))", "def get_layer_by_name(self, layername: str):\n\n if self.node_data is not []:\n if layername in list(self.layer_lookup.keys()):\n return np.array(self.node_data[self.layer_lookup[layername]].compute())\n else:\n print('get_layer_by_name: Unable to find {} in node data'.format(layername))\n return None\n else:\n print('get_layer_by_name: Unable to find node data for quad tree')\n return None", "def get_layer_index(self, layer_name):\n for i, layer in enumerate(self.tmx_data.layers):\n if layer.name == layer_name:\n return i\n return -1", "def locate_own_layer(name, group):\n for child in group.children():\n if isinstance(child, QgsLayerTreeLayer):\n if child.name() == name:\n return child\n return None", "def get_layer(legend_name, iface, geom_type, default_group=None):\n is_new = False\n assert legend_name is not None\n if len(legend_name) == 0:\n logger.debug(\"No layer name provided; locating current layer...\")\n map_layer = iface.layerTreeView().currentLayer()\n if map_layer is None:\n raise ValueError(\"No layer 
was provided and there \"\n \"is no current layer\")\n layer = map_layer.layer()\n logger.debug(\"current layer is %s\", layer.name())\n else:\n logger.debug(\"Requesting to locate layer %s\", legend_name)\n path = list(part for part in legend_name.split('/') if len(part) > 0)\n\n if len(path) > 1:\n logger.debug(\"The path has %d components\", len(path))\n legend_name = path[-1]\n group = get_path(path[:-1])\n map_layer = locate_own_layer(name=legend_name, group=group)\n if map_layer:\n layers = [map_layer]\n else:\n layers = []\n else:\n logger.debug(\"The path has a single name; searching entire tree\")\n layers = all_layers_with_name(legend_name)\n if len(layers) == 0:\n map_layers = QgsProject.instance().mapLayersByName(legend_name)\n layers = [QgsProject.instance().layerTreeRoot().findLayer(ly)\n for ly in map_layers]\n group = None\n logger.debug(\"found %d layers\", len(layers))\n\n if len(layers) == 0:\n logger.debug(\"no layer has been located and default group is %r\",\n default_group)\n if group is None and default_group:\n logger.debug(\"locating default group\")\n group = get_path(default_group)\n\n if isinstance(geom_type, int):\n geom_type = geometry_flat_name(geom_type)\n logger.debug(\"geometry type parsed to %r\", geom_type)\n else:\n logger.debug(\"geometry type is %r\", geom_type)\n\n layer = QgsVectorLayer(geom_type, legend_name, \"memory\")\n map_layer = add_layer_to_legend(layer, group=group)\n is_new = True\n logger.debug(\"layer was created and added to legend\")\n else:\n logger.debug(\"selecting first layer among %d\", len(layers))\n map_layer = layers[0]\n layer = map_layer.layer()\n\n return map_layer, layer, is_new", "def get_layer_index(layer: int, sublayer: Optional[int] = None) -> int:\n assert 0 <= layer <= 20\n if sublayer is None:\n return layer\n assert 0 <= sublayer <= 20\n return (sublayer + 1) * 21 + layer", "def get_feature_layer_index(feature_layer_collection, layer_name):\r\n for index, url in zip(range(len(feature_layer_collection.layers)),\r\n feature_layer_collection.layers):\r\n if layer_name in feature_layer_collection.layers[index].properties.name:\r\n layer_index = index\r\n print('index of feature layer',\r\n feature_layer_collection.layers[layer_index].properties.name, 'with url of',\r\n url, 'is', layer_index)\r\n\r\n return layer_index", "def find_layer(self, svg, layer_name):\n\n for g in svg.xpath('//svg:g', namespaces=NSS):\n if (g.get(inkex.addNS('groupmode', 'inkscape')) == 'layer'\n and (g.get(inkex.addNS('label', 'inkscape'))\n == layer_name)):\n return g", "def get_dense_layer(self, layer_i=0):\n dense_layers = [layer for layer in self.model.layers if layer.name.startswith('dense')]\n return dense_layers[layer_i]", "def get_layer_callback(l_name, model_graph):\n return model_graph.get_tensor_by_name(\"{}:0\".format(l_name))", "def get_layer_number(model, layer_name):\n for i, l in enumerate(model.layers):\n if l.name == layer_name:\n return i\n raise ValueError('No layer with name {} in model {}.'.format(layer_name, model.name))", "def get_layer(self, l):\n\n if l == 0:\n return self.input_layer\n elif 0 < l < self.num_layers() - 1:\n return self.hidden_layers[l - 1]\n elif l == self.num_layers() - 1:\n return self.output_layer\n else:\n return None", "def GetLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_GetLayer(self, *args)", "def FindLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_FindLayer(self, *args)", "def get_drawing(self, name, index):\n try:\n if name not in self._categories:\n # try and get the 
closest matching drawing. If nothing suitable foumd then return a scorpion\n name = self._category_mapping.get(name, 'scorpion')\n if index < 0 or index >= 100 or not isinstance(index, int):\n raise ValueError('index', index, ';index must be integer >= 0 and < 100')\n pickleFile = str(self._path / Path(name).with_suffix('.p'))\n with open(pickleFile,'rb') as f:\n images = pickle.load(f)\n if index < len(images):\n return images[index]\n else:\n print('Drawing {} index {} out of range {}'.format(name, index, len(images)))\n return images[0]\n except ValueError as e:\n self._logger.exception(e)\n raise e", "def find_layer(z, params):\r\n N = len(params['d_list'])\r\n for i in range(N):\r\n if z <= params['layer_bottom_list'][i]:\r\n return i-1\r\n return N-1", "def SelectLayer(self, name):\n layers = self.map.GetListOfLayers(l_type = 'raster')\n idx = None\n for i, layer in enumerate(layers):\n if self.layerName[name] == layer.GetName():\n idx = i\n break\n \n if idx is not None: # should not happen\n layers.append(layers.pop(idx))\n \n choice = self.toolbar.choice\n idx = choice.FindString(name)\n choice.Delete(idx)\n choice.Insert(name, 0)\n choice.SetSelection(0)\n \n #layers.reverse()\n self.map.ReorderLayers(layers)\n self.frame.GetWindow().UpdateMap(render = True, renderVector = False)", "def __getitem__(self, name):\n idx = self.lookup[name]\n return self.stack[idx][1]", "def get_act_layer(name: Union[Type[nn.Module], str] = 'relu'):\n if not name:\n return None\n if not isinstance(name, str):\n # callable, module, etc\n return name\n if not (is_no_jit() or is_exportable() or is_scriptable()):\n if name in _ACT_LAYER_ME:\n return _ACT_LAYER_ME[name]\n if not (is_no_jit() or is_exportable()):\n if name in _ACT_LAYER_JIT:\n return _ACT_LAYER_JIT[name]\n return _ACT_LAYER_DEFAULT[name]", "def find_ResNet_layer(arch, target_layer_name):\n\n hierarchy = target_layer_name.rsplit(\"_\",1)\n \n\n if target_layer_name.rsplit(\"_\",1)[0] == \"layer1\":\n target_layer = arch.layer1\n elif target_layer_name.rsplit(\"_\",1)[0] == \"layer2\":\n target_layer = arch.layer2\n elif target_layer_name.rsplit(\"_\",1)[0] == \"layer3\":\n target_layer = arch.layer3\n elif target_layer_name.rsplit(\"_\",1)[0] == \"layer4\":\n target_layer = arch.layer4\n \n# print(target_layer)\n if len(hierarchy) == 2:\n target_layer = target_layer[int(hierarchy[1])]\n\n return target_layer" ]
[ "0.7451492", "0.6969869", "0.68768555", "0.6770075", "0.65417993", "0.64981186", "0.6417856", "0.64015406", "0.6371185", "0.6366386", "0.63301843", "0.62157345", "0.61484885", "0.6089707", "0.60393846", "0.60238993", "0.59537786", "0.59020376", "0.5884721", "0.57912713", "0.5782681", "0.57708126", "0.5760328", "0.57104886", "0.56629086", "0.56540376", "0.5642118", "0.5626924", "0.5619197", "0.5578089" ]
0.8079192
0
Retrieve the network's updates. Will only include updates that are either unconditional, or conditional on inputs to this model (e.g. will not include updates that were created by layers of this model outside of the model). Effectively, `network.updates` behaves like `layer.updates`.
def updates(self):
    if context.in_eager_mode():
        return []

    if not self.trainable and not self.stateful:
        return []

    updates = []
    for layer in self.layers:
        updates += layer.updates

    # `updates` might contain irrelevant updates, so it needs to be filtered
    # with respect to inputs the model has been called on.
    relevant_inputs = self.inputs or []
    for i in range(1, len(self._inbound_nodes)):
        inputs = self.get_input_at(i)
        if isinstance(inputs, list):
            relevant_inputs += inputs
        else:
            relevant_inputs.append(inputs)
    reachable = tf_layers_util.get_reachable_from_inputs(relevant_inputs,
                                                         updates)
    relevant_conditional_updates = [x for x in updates if x in reachable]
    unconditional_updates = [
        x for x in updates if x._unconditional_update]  # pylint: disable=protected-access
    # A layer could be used multiple times in a nested structure,
    # so the updates list must be de-duped.
    return list(set(
        relevant_conditional_updates + unconditional_updates + self._updates))
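A hedged sketch of where such update ops come from: in graph mode, `BatchNormalization` registers moving-average assignments as layer updates, and `model.updates` collects only those reachable from the model's own inputs.

from keras.layers import Input, Dense, BatchNormalization
from keras.models import Model

inp = Input(shape=(16,))
h = BatchNormalization()(Dense(32)(inp))
model = Model(inp, h)

print(model.updates)    # the BN moving-mean/variance assign ops for this model's inputs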
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def state_updates(self):\n state_updates = []\n for layer in self.layers:\n if getattr(layer, 'stateful', False):\n if hasattr(layer, 'updates'):\n state_updates += layer.updates\n return state_updates", "def get_updates(self):\n\n\t\tda_cost = self.get_cost()\n\n\t\tweight_gradient = theano.tensor.grad(da_cost, self.weights)\n\t\tbias_gradient = theano.tensor.grad(da_cost, self.bias)\n\t\treverse_bias_gradient = theano.tensor.grad(da_cost, self.reverse_bias)\n\n\t\tlr_cost = self.get_lr_cost()\n\t\tlr_weight_gradient = theano.tensor.grad(lr_cost, self.label_weights)\n\t\tlr_bias_gradient = theano.tensor.grad(lr_cost, self.label_bias)\n\n\t\tupdates = [\n\t\t\t(self.weights, self.weights - self.learning_rate*weight_gradient),\n\t\t\t(self.bias, self.bias - self.learning_rate*bias_gradient),\n\t\t\t(self.reverse_bias, self.reverse_bias -\n\t\t\t\tself.learning_rate*reverse_bias_gradient),\n\t\t\t(self.label_weights, self.label_weights -\n\t\t\t\tself.learning_rate*lr_weight_gradient),\n\t\t\t(self.label_bias, self.label_bias -\n\t\t\t\tself.learning_rate*lr_bias_gradient)]\n\n\t\treturn updates", "def updates(self):\n return self._get_page('updates').json()", "def updates(self):\r\n return list(self.state_updates)", "def get_updates(self) -> dict:\n url = self.URL + \"getUpdates\"\n js = self.get_json_from_url(url)\n return js", "def get_updates(self, loss, params):\n # check asserts \n assert isinstance(params, list), '\"params\" should be a list type.'\n\n # do\n include_tags = print_tags_in_params(params, False) # include all tags to include all parameters\n unblocked_params = filter_params_by_tags(params, include_tags, self.exclude_tags)\n grads = self.compute_gradients(loss, unblocked_params)\n updates = self.gradients_to_updates(unblocked_params, grads)\n return updates", "def update_networks(self):\n\t\t# layer 1 update\n\t\tself.W1_tv = tf.assign(self.W1_tv, self.W1_av)\n\t\tself.b1_tv = tf.assign(self.b1_tv, self.b1_av)\n\n\t\t# layer 2 update\n\t\tself.W2_tv = tf.assign(self.W2_tv, self.W2_av)\n\t\tself.b2_tv = tf.assign(self.b2_tv, self.b2_av)\n\n\t\t# layer 3 update\n\t\tself.W3_tv = tf.assign(self.W3_tv, self.W3_av)\n\t\tself.b3_tv = tf.assign(self.b3_tv, self.b3_av)", "def apply_updates(self, updates):\n raise NotImplementedError()", "def GetUpdatesForAll(self, limit = -1, since = -1, offset = -1):\n \n if (limit < 1):\n limit = self.limit\n \n url = self.__BuildGetUrl(\"updates\", \"\", limit, since, offset)\n return self.__GetJson(url, True)", "def get_updates(self, *args, **kwargs):\n\n updates_data = api.get_updates(\n *args,\n api_key=self.__creds.api_key_v2, \n **kwargs)\n return [en.Update(creds=self.__creds, **update_data) for update_data in updates_data]", "def set_updates(self, updates: dict, dry=False):\n self._updates += (UpdateQueryExpression(updates),)\n return self", "def isUpdated(self, updates: Dict[str, Dict[str, str]]) -> bool:\n\n for objnam in self._bodies & updates.keys():\n if {STATUS_ATTR, HEATER_ATTR, HTMODE_ATTR} & updates[objnam].keys():\n return True\n return False", "def _modify_updates(self, updates):\n wxf = self.wxf\n wyf = self.wyf\n wxf_updated = updates[wxf]\n wyf_updated = updates[wyf]\n nwxf = (wxf_updated.std(0) + SMALL)[numpy.newaxis, :]\n nwyf = (wyf_updated.std(0) + SMALL)[numpy.newaxis, :]\n meannxf = nwxf.mean()\n meannyf = nwyf.mean()\n # Center filters\n centered_wxf = wxf_updated - wxf_updated.mean(0)\n centered_wyf = wyf_updated - wyf_updated.mean(0)\n # Fix standard deviation\n wxf_updated = centered_wxf * (meannxf / nwxf)\n 
wyf_updated = centered_wyf * (meannyf / nwyf)\n updates[wxf] = wxf_updated\n updates[wyf] = wyf_updated", "def update_model_weights(model, updates, weights_structure=None,\n force_update=False, trainable=True):\n if weights_structure is None:\n weights_structure = get_model_weights_structure(model)\n\n assert len(updates) == len(weights_structure)\n\n for update, structure in zip(updates, weights_structure):\n layer_trace, weight_name, weight_index = structure\n\n # trace to the possibly nested layer to insert the next weight update\n trace_layer = model\n for trace_location in layer_trace:\n trace_layer = trace_layer.layers[trace_location]\n\n if hasattr(trace_layer, weight_name) or force_update:\n trace_layer_weights = (trace_layer._trainable_weights if trainable\n else trace_layer._non_trainable_weights)\n # set (_non)_trainable_weights used to track layer weights\n n_weights = len(trace_layer_weights)\n if n_weights < weight_index + 1: # create weight list if not set\n new_weights = [None] * (weight_index + 1)\n new_weights[:n_weights] = trace_layer_weights\n trace_layer_weights = new_weights\n trace_layer_weights[weight_index] = update\n # set the weight attribute used to apply the layer\n setattr(trace_layer, weight_name, update)\n else: # didn't find attribute... should not happen!\n raise ValueError(\n \"Could not find weight attribute {} in layer {}.\".format(\n weight_name, trace_layer.name))", "def defineUpdateOperations(self):\n self.updated_value = tf.placeholder(shape=[1, self.network.action_size], dtype=tf.float32)\n self.loss = tf.reduce_sum(tf.square(self.updated_value - self.network.policyLayer))\n self.trainer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)\n\n self.updateModel = self.trainer.minimize(self.loss)", "def update(self, updates: dict, dry=False):\n self._updates += (UpdateQueryExpression(updates),)\n self.set_action(\"update\")\n if dry:\n return self\n\n return self.connection.query(self.to_sql(), self._bindings)", "def get_updates(self, last_activity_date=None):\n endpoint = '/updates'\n if last_activity_date == None:\n return self.get_request(endpoint)\n params = {\n \"last_activity_date\": last_activity_date\n }\n return self.post_request(endpoint, params)", "def GetUpdates(self, limit = -1, since = -1, offset = -1):\n \n if (limit < 1):\n limit = self.limit\n \n url = self.__BuildGetUrl(\"updates\", self.userName, limit, since, offset)\n return self.__GetJson(url, True)", "def getUpdates(self):\n # execute the query\n ret = self._er.execQuery(self)\n\n if ret and ret.get(\"recentActivity\") and ret[\"recentActivity\"].get(\"events\"):\n # return the updated information\n return ret[\"recentActivity\"][\"events\"]\n # or empty\n return {}", "def update_layers(self):\n\n # Para cada layer atualiza utilizando o gradiente descendente e o learning rate\n for layer in self.layers:\n layer.update_layer(self.learning_rate)", "def weight_update_nesterov(self, network):\n # Before updating, take step back with current velocity\n for l, layer in enumerate(network.layers):\n layer.b -= self.beta * self.vel_b[l]\n layer.q -= self.beta * self.vel_q[l]\n layer.rx_inp -= self.beta * self.vel_rx_inp[l]\n layer.ry_inp -= self.beta * self.vel_ry_inp[l]\n layer.rx_pos_out -= self.beta * self.vel_rx_pos_out[l]\n layer.ry_pos_out -= self.beta * self.vel_ry_pos_out[l]\n layer.rx_neg_out -= self.beta * self.vel_rx_neg_out[l]\n layer.ry_neg_out -= self.beta * self.vel_ry_neg_out[l]\n\n # Now update\n for l, layer in enumerate(network.layers):\n 
self.vel_b[l] = -self.alpha * self.dc_db[l] + self.beta * self.vel_b[l]\n self.vel_q[l] = -self.alpha * self.dc_dq[l] + self.beta * self.vel_q[l]\n self.vel_rx_inp[l] = -self.alpha * self.dc_drx_inp[l] + self.beta * self.vel_rx_inp[l]\n self.vel_ry_inp[l] = -self.alpha * self.dc_dry_inp[l] + self.beta * self.vel_ry_inp[l]\n self.vel_rx_pos_out[l] = -self.alpha * self.dc_drx_pos_out[l] + self.beta * self.vel_rx_pos_out[l]\n self.vel_ry_pos_out[l] = -self.alpha * self.dc_dry_pos_out[l] + self.beta * self.vel_ry_pos_out[l]\n self.vel_rx_neg_out[l] = -self.alpha * self.dc_drx_neg_out[l] + self.beta * self.vel_rx_neg_out[l]\n self.vel_ry_neg_out[l] = -self.alpha * self.dc_dry_neg_out[l] + self.beta * self.vel_ry_neg_out[l]\n\n layer.b += self.vel_b[l]\n layer.q += self.vel_q[l]\n layer.rx_inp += self.vel_rx_inp[l]\n layer.ry_inp += self.vel_ry_inp[l]\n layer.rx_pos_out += self.vel_rx_pos_out[l]\n layer.ry_pos_out += self.vel_ry_pos_out[l]\n layer.rx_neg_out += self.vel_rx_neg_out[l]\n layer.ry_neg_out += self.vel_ry_neg_out[l]", "def update(self, weights, grads):\n\n learn_rate = self.learn_rate\n\n # Scale the learning rate by the iteration number\n t = self.iters + 1\n learn_rate_t = learn_rate * (np.sqrt(1 - self.beta2**t) /\n (1 - self.beta1**t))\n\n # Store the momentum and velocities for each node\n if self.ms is None:\n self.ms = [np.zeros(w.shape) for w in weights]\n if self.vs is None:\n self.vs = [np.zeros(w.shape) for w in weights]\n ms, vs = self.ms, self.vs\n\n # Make sure everything has the right length\n assert len(weights) == len(grads)\n assert len(weights) == len(ms)\n assert len(weights) == len(vs)\n\n # Now, for each weight stack, update momentum, velocity, weights\n new_ms = []\n new_vs = []\n new_weights = []\n for w, g, m, v in zip(weights, grads, ms, vs):\n # Momentum update\n m_t = (self.beta1 * m) + (1.0 - self.beta1) * g\n\n # Velocity update\n v_t = (self.beta2 * v) + (1.0 - self.beta2) * g**2\n\n # Update the weights\n w_t = w - learn_rate_t * m_t / (np.sqrt(v_t) + 1e-8)\n\n new_ms.append(m_t)\n new_vs.append(v_t)\n new_weights.append(w_t)\n\n self.ms = new_ms\n self.vs = new_vs\n return new_weights", "def get_updates(self):\n if update_queue:\n return update_queue.pop()", "def update(self, ex):\n if not self.optimizer:\n raise RuntimeError('No optimizer set.')\n\n # Train mode\n self.network.train()\n\n if self.use_cuda:\n for key in ex:\n #if isinstance(ex[key], torch.Tensor):\n try:\n ex[key] = ex[key].cuda(non_blocking=True)\n except:\n pass\n\n # Run forward\n net_loss = self.network(ex)\n\n loss = net_loss[\"total_loss\"]\n\n loss.backward()\n\n clip_grad_norm_(self.network.parameters(), self.args.grad_clipping)\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n self.updates += 1\n return {\n 'loss': loss,\n \"loc_loss\": net_loss[\"loc_loss\"],\n \"fix_loss\": net_loss[\"target_loss\"],\n }", "def get_cost_updates(self):\n\n y = self.get_hidden_values()\n z = self.get_reconstructed_input(y)\n\n L = T.sum((self.x-z)**2, axis=1)\n\n cost = T.mean(L)\n\n return cost", "def get_num_updates(self):\n return self._num_updates", "def get_updates():\n global PREVIOUS_NEWEST_STR, UPDATED, WIKIDATA_WB_API, WIKIDATA_WB_PARAMS\n r = requests.get(url=WIKIDATA_WB_API, params=WIKIDATA_WB_PARAMS)\n root = etree.fromstring(r.text)\n seen = 0\n updates = []\n oldest_str = None\n newest_str = None\n for entry in root.iterchildren('{http://www.w3.org/2005/Atom}entry'):\n # print(etree.tostring(entry))\n q = entry.find('{http://www.w3.org/2005/Atom}title').text\n updated_str 
= entry.find('{http://www.w3.org/2005/Atom}updated').text\n if newest_str is None or updated_str > newest_str:\n newest_str = updated_str\n if oldest_str is None or updated_str < oldest_str:\n oldest_str = updated_str\n updated = dateutil.parser.parse(updated_str)\n if not re.match(r'''Q\\d+$''', q):\n # This is not an updated entity, ignore\n pass\n elif q in UPDATED and UPDATED[q] >= updated:\n # print(\"See %s update already\" % (q))\n seen += 1\n else:\n updates.append(q)\n # print(\"Got %s (updated at %s)\" % (q, updated))\n UPDATED[q] = updated\n print(\"%s: Got %d updates (ignored %d already seen)\" % (datetime.now(), len(updates), seen))\n if oldest_str > PREVIOUS_NEWEST_STR:\n print(\"WARNING: Gap between feed dates from %s to %s\" % (PREVIOUS_NEWEST_STR, oldest_str))\n PREVIOUS_NEWEST_STR = newest_str\n return updates", "def get_updates(offset=None):\r\n url = URL + \"getUpdates\"\r\n if offset:\r\n url += \"?offset={}\".format(offset)\r\n js = get_json_from_url(url)\r\n return js", "def UpdateInput(self, request, context):\n self.check_weights()\n\n # get values from message\n batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(request)\n print(\"Get inputs id: {0}, matrix shape: {1}, labels shape: {2}\".format(\n batch_id, outputs_of_lower.shape, labels.shape))\n\n weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \\\n + self.biases.transpose()\n # saving inputs during training, because for weights updating\n if is_train:\n inputs = {'matrix': outputs_of_lower,\n 'labels': labels}\n self.lower_layer_outputs[batch_id] = inputs\n self.weighted_sum_inputs[batch_id] = weighted_sum\n\n activations = self.nonlin(weighted_sum) # apply element wise\n\n # update weights immediately with SG, if enabled SG\n if self.enable_sg and is_train:\n print(\"update weights based on SG delta\")\n sg_delta = self.SG(activations, labels)\n # TODO use sg_delta to compute the gradients by sg_delta * self.nonline_prime(z)\n self.update_weights(self.lr, sg_delta, outputs_of_lower)\n self.sg_deltas[batch_id] = sg_delta\n\n # forward layer outputs\n self.forward_to_upper(batch_id, activations, labels, is_train)\n print(\"batch id: {0}, activations shape {1}\".format(\n batch_id, activations.shape))\n\n # return received\n return nn_pb.PlainResponse(message=\"Inputs received by layer {}\".format(\n self.layer_name))", "def updates(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/updates'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json" ]
[ "0.67009324", "0.6332913", "0.6213321", "0.6131573", "0.59924716", "0.5910225", "0.58402723", "0.5834833", "0.580465", "0.5777845", "0.5642365", "0.56351185", "0.5573368", "0.55714744", "0.5546164", "0.5536157", "0.5521792", "0.5472434", "0.5462437", "0.5448215", "0.5409123", "0.5384905", "0.53689927", "0.5366509", "0.5345781", "0.53413755", "0.5339056", "0.53335065", "0.53331614", "0.5299375" ]
0.74391586
0
Retrieves the network's losses. Will only include losses that are either unconditional or conditional on inputs to this model (e.g. it will not include losses that depend on tensors that aren't inputs to this model).
def losses(self):
    losses = []
    for layer in self.layers:
      losses += layer.losses
    if context.in_eager_mode():
      return losses

    relevant_inputs = self.inputs or []
    for i in range(1, len(self._inbound_nodes)):
      inputs = self.get_input_at(i)
      if isinstance(inputs, list):
        relevant_inputs += inputs
      else:
        relevant_inputs.append(inputs)
    reachable = tf_layers_util.get_reachable_from_inputs(relevant_inputs,
                                                         losses)
    relevant_conditional_losses = [x for x in losses if x in reachable]
    unconditional_losses = [
        x for x in losses if x._unconditional_loss]  # pylint: disable=protected-access
    return list(set(relevant_conditional_losses + unconditional_losses +
                    self._losses))
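A minimal usage sketch of the property above, assuming the public tf.keras API; the layer sizes, regularizer settings, and the expected loss count are illustrative:

import tensorflow as tf

# kernel_regularizer contributes an unconditional loss (depends only on weights);
# activity_regularizer contributes a loss conditional on the layer's inputs.
inputs = tf.keras.Input(shape=(4,))
x = tf.keras.layers.Dense(
    8,
    kernel_regularizer=tf.keras.regularizers.l2(1e-4),
    activity_regularizer=tf.keras.regularizers.l1(1e-5))(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)

# `model.losses` gathers both kinds: conditional losses reachable from the
# model's own inputs plus all unconditional losses.
print(len(model.losses))  # expected: 2, one per regularizer (may vary by TF version)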
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def losses(self):\n # compute all kinds of losses \n\n # 1. Logits losses for classification \n\n # 2. regression loss for bbox \n\n return classification_loss, bbox_reg_loss", "def get_losses(self):\n if self.loss is not None:\n return [self.loss]\n else:\n return []", "def get_loss(self, outputs, targets, masks):\n losses = dict()\n for idx in range(len(targets)):\n if 'loss' not in losses:\n losses['loss'] = self.loss(outputs[idx], targets[idx], masks[idx])\n else:\n losses['loss'] += self.loss(outputs[idx], targets[idx], masks[idx])\n return losses", "def get_loss(self, outputs, targets, masks, joints):\n losses = dict()\n heatmaps_losses, push_losses, pull_losses = self.loss(outputs, targets, masks, joints)\n for idx in range(len(targets)):\n if heatmaps_losses[idx] is not None:\n heatmaps_loss = heatmaps_losses[idx].mean(dim=0)\n if 'heatmap_loss' not in losses:\n losses['heatmap_loss'] = heatmaps_loss\n else:\n losses['heatmap_loss'] += heatmaps_loss\n if push_losses[idx] is not None:\n push_loss = push_losses[idx].mean(dim=0)\n if 'push_loss' not in losses:\n losses['push_loss'] = push_loss\n else:\n losses['push_loss'] += push_loss\n if pull_losses[idx] is not None:\n pull_loss = pull_losses[idx].mean(dim=0)\n if 'pull_loss' not in losses:\n losses['pull_loss'] = pull_loss\n else:\n losses['pull_loss'] += pull_loss\n return losses", "def losses(self):\n for name in self._nodes:\n if isinstance(self._nodes[name], Loss):\n yield name", "def losses(self):\n pass", "def get_loss(self, output, targets, masks, joints):\n losses = dict()\n targets = [target for _targets in targets for target in _targets]\n masks = [mask for _masks in masks for mask in _masks]\n joints = [joint for _joints in joints for joint in _joints]\n heatmaps_losses, push_losses, pull_losses = self.loss(output, targets, masks, joints)\n for idx in range(len(targets)):\n if heatmaps_losses[idx] is not None:\n heatmaps_loss = heatmaps_losses[idx].mean(dim=0)\n if 'heatmap_loss' not in losses:\n losses['heatmap_loss'] = heatmaps_loss\n else:\n losses['heatmap_loss'] += heatmaps_loss\n if push_losses[idx] is not None:\n push_loss = push_losses[idx].mean(dim=0)\n if 'push_loss' not in losses:\n losses['push_loss'] = push_loss\n else:\n losses['push_loss'] += push_loss\n if pull_losses[idx] is not None:\n pull_loss = pull_losses[idx].mean(dim=0)\n if 'pull_loss' not in losses:\n losses['pull_loss'] = pull_loss\n else:\n losses['pull_loss'] += pull_loss\n return losses", "def get_loss_names(self):\n losses = [tns.name[:-2].replace('loss_', '').split('/')[-1] for tns in tf.get_collection('losses')]\n return \"Losses: {}\".format(' '.join(losses))", "def get_tower_losses(self, tower, device):\n # Note: Network editor have to maintain each loss with 'loss' and 'vars' if it's a list.\n if isinstance(tower.loss, list):\n return tower.loss\n else:\n tower_vars = []\n trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n if self.replica:\n tower_vars = [var for var in trainable_vars if(var.name.startswith('tower_%d' % device))]\n else:\n tower_vars = trainable_vars\n\n return [{'loss': tower.loss, 'vars': tower_vars}]", "def build_losses(self):\n self.batch_losses = tf.squared_difference(self.predicted_rv, self.label)\n self.total_loss = tf.reduce_mean(self.batch_losses)", "def build_losses(self):\n self.batch_losses = tf.squared_difference(self.predicted_rv, self.label)\n self.total_loss = tf.reduce_mean(self.batch_losses)", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = 
r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def loss_names(self):\n return ['loss']", "def get_loss(self, inputs, targets, dags):\n if not isinstance(dags, list):\n dags = [dags]\n\n loss = 0\n for dag in dags:\n output = self.shared(inputs, dag)\n sample_loss = (self.model_loss(output, targets) /\n self.args.shared_num_sample)\n loss += sample_loss\n\n loss =loss/len(dags)\n\n return loss", "def compute_loss(self, inputs):\r\n outputs = self.net.compute_outputs(inputs)\r\n loss_grad = self.net.compute_loss_grad(outputs - inputs)\r\n loss = np.sum((inputs - outputs) ** 2, axis=0).mean() / 2.0\r\n return loss, loss_grad", "def get_loss(\n self,\n inputs,\n outputs,\n annotations,\n cand_net,\n add_controller_regularization=True,\n add_evaluator_regularization=True,\n ):\n return sum(self._criterion(inputs, outputs, annotations, cand_net).values())", "def compute_losses(self, predictions, targets):\n smpl_weight = targets['target_smpl_weight']\n\n losses = {}\n if self.loss_beta is not None:\n losses['loss_beta'] = self.loss_beta(\n predictions['pred_shape'] * smpl_weight,\n targets['target_beta'] * smpl_weight)\n if self.loss_theta is not None:\n pred_pose = rotmat_to_quat(predictions['pred_pose']).reshape(\n -1, 96)\n losses['loss_theta'] = self.loss_theta(\n pred_pose * smpl_weight * targets['target_theta_weight'],\n targets['target_theta'] * smpl_weight *\n targets['target_theta_weight'])\n if self.loss_twist is not None:\n losses['loss_twist'] = self.loss_twist(\n predictions['pred_phi'] * targets['target_twist_weight'],\n targets['target_twist'] * targets['target_twist_weight'])\n if self.loss_uvd is not None:\n pred_uvd = predictions['pred_uvd_jts']\n target_uvd = targets['target_uvd_29'][:, :pred_uvd.shape[1]]\n target_uvd_weight = targets['target_weight_29'][:, :pred_uvd.\n shape[1]]\n losses['loss_uvd'] = self.loss_uvd(\n 64 * predictions['pred_uvd_jts'],\n 64 * target_uvd,\n target_uvd_weight,\n avg_factor=target_uvd_weight.sum())\n\n return losses", "def get_current_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number\n return errors_ret", "def get_current_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number\n return errors_ret", "def get_current_validation_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name+'_val'] = float(getattr(self, 'loss_' + name + '_val')) # float(...) 
works for both scalar tensor and float number\n return errors_ret", "def get_loss(self, outputs, heatmaps, masks, offsets, offset_weights):\n losses = dict()\n for idx in range(len(outputs)):\n pred_heatmap, pred_offset = outputs[idx]\n heatmap_weight = masks[idx].view(masks[idx].size(0), masks[idx].size(1), -1)\n losses['loss_hms'] = losses.get('loss_hms', 0) + self.loss(pred_heatmap, heatmaps[idx], heatmap_weight)\n losses['loss_ofs'] = losses.get('loss_ofs', 0) + self.offset_loss(pred_offset, offsets[idx], offset_weights[idx])\n return losses", "def get_loss(self, inputs, outputs, add_summary=True):\n cfg = self.cfg()\n torch.autograd.set_detect_anomaly(True)\n # g_loss = tf.zeros(dtype=tf.float32, shape=[])\n g_loss = self.add_proj_loss(inputs, outputs, cfg.proj_weight, add_summary)\n r_loss = self.regularization_loss(cfg)\n# print(g_loss, r_loss)\n g_loss += r_loss\n # if cfg.proj_weight:\n # g_loss += self.add_proj_loss(inputs, outputs, cfg.proj_weight, add_summary)\n\n # if cfg.drc_weight:\n # g_loss += add_drc_loss(cfg, inputs, outputs, cfg.drc_weight, add_summary)\n #\n # if cfg.pc_rgb:\n # g_loss += add_proj_rgb_loss(cfg, inputs, outputs, cfg.proj_rgb_weight, add_summary, self._sigma_rel)\n #\n # if cfg.proj_depth_weight:\n # g_loss += add_proj_depth_loss(cfg, inputs, outputs, cfg.proj_depth_weight, self._sigma_rel, add_summary)\n #\n # if add_summary:\n # tf.contrib.summary.scalar(\"losses/total_task_loss\", g_loss)\n\n return g_loss", "def get_loss_funcs():\n\n def _eucl_loss(x, y):\n return K.sum(K.square(x - y)) / batch_size / 2\n\n losses = {}\n losses[\"weight_stage1_L1\"] = _eucl_loss\n losses[\"weight_stage1_L2\"] = _eucl_loss\n losses[\"weight_stage2_L1\"] = _eucl_loss\n losses[\"weight_stage2_L2\"] = _eucl_loss\n losses[\"weight_stage3_L1\"] = _eucl_loss\n losses[\"weight_stage3_L2\"] = _eucl_loss\n losses[\"weight_stage4_L1\"] = _eucl_loss\n losses[\"weight_stage4_L2\"] = _eucl_loss\n losses[\"weight_stage5_L1\"] = _eucl_loss\n losses[\"weight_stage5_L2\"] = _eucl_loss\n losses[\"weight_stage6_L1\"] = _eucl_loss\n losses[\"weight_stage6_L2\"] = _eucl_loss\n\n return losses", "def _get_losses(self):\n # Fast-path already loaded\n if self.__losses is not None:\n return self.__losses\n # Initialize the dictionary\n self.__losses = dict()\n # Simply populate this dictionary\n for name in dir(torch.nn.modules.loss):\n if len(name) < 5 or name[0] == \"_\" or name[-4:] != \"Loss\": # Heuristically ignore non-loss members\n continue\n builder = getattr(torch.nn.modules.loss, name)\n if isinstance(builder, type): # Still an heuristic\n self.__losses[name[:-4].lower()] = self._make_drop_params(builder)\n # Add/replace the l1 and l2 losses\n self.__losses[\"l1\"] = self._l1loss_builder\n self.__losses[\"l2\"] = self._l2loss_builder\n # Return the dictionary\n return self.__losses", "def get_loss(self, logits: mx.sym.Symbol, labels: mx.sym.Symbol) -> List[mx.sym.Symbol]:\n if self.loss_config.normalization_type == C.LOSS_NORM_VALID:\n normalization = \"valid\"\n elif self.loss_config.normalization_type == C.LOSS_NORM_BATCH:\n normalization = \"null\"\n else:\n raise ValueError(\"Unknown loss normalization type: %s\" % self.loss_config.normalization_type)\n\n return [mx.sym.SoftmaxOutput(data=logits,\n label=labels,\n ignore_label=C.PAD_ID,\n use_ignore=True,\n normalization=normalization,\n smooth_alpha=self.loss_config.label_smoothing,\n name=C.SOFTMAX_NAME)]", "def get_loss(self, y_target, pred=None):\n last_model = self.models[-1]\n if not isinstance(last_model, 
ModelWithLoss):\n raise TypeError(f\"The last model in the pipeline has to e an instance of 'ceml.model.ModelWithLoss' but not of {type(last_model)}\")\n\n return last_model.get_loss(y_target, pred)", "def losses(self, logits, localisations,\n gclasses, glocalisations, gscores,\n match_threshold=0.5,\n negative_ratio=3.,\n alpha=1.,\n label_smoothing=0.,\n scope='ssd_losses'):\n return losses.ssd_losses(logits, localisations,\n gclasses, glocalisations, gscores,\n match_threshold=match_threshold,\n negative_ratio=negative_ratio,\n alpha=alpha,\n label_smoothing=label_smoothing,\n scope=scope)", "def get_losses(self, actions, rewards, masks):\n raise NotImplementedError", "def _parse_losses(self, losses):\n log_vars = OrderedDict()\n for loss_name, loss_value in losses.items():\n if isinstance(loss_value, torch.Tensor):\n log_vars[loss_name] = loss_value.mean()\n elif isinstance(loss_value, list):\n log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)\n elif isinstance(loss_value, dict):\n for name, value in loss_value.items():\n log_vars[name] = value\n else:\n raise TypeError(\n f'{loss_name} is not a tensor or list of tensors')\n\n loss = sum(_value for _key, _value in log_vars.items()\n if 'loss' in _key)\n\n log_vars['loss'] = loss\n for loss_name, loss_value in log_vars.items():\n # reduce loss when distributed training\n if dist.is_available() and dist.is_initialized():\n loss_value = loss_value.data.clone()\n dist.all_reduce(loss_value.div_(dist.get_world_size()))\n log_vars[loss_name] = loss_value.item()\n\n return loss, log_vars", "def training_losses(self):\r\n if self._training_losses is None:\r\n # Builds the per-task metrics and losses.\r\n # This the total summed training loss of tasks in the joint training.\r\n self._training_losses = dict(\r\n total_loss=tf.keras.metrics.Mean(\"training_loss\", dtype=tf.float32))\r\n for name in self.multi_task.tasks:\r\n self._training_losses[name] = tf.keras.metrics.Mean(\r\n \"training_loss\", dtype=tf.float32)\r\n return self._training_losses" ]
[ "0.7554572", "0.7521082", "0.6706245", "0.66327393", "0.6568412", "0.6431607", "0.6358024", "0.63523227", "0.63476425", "0.62977433", "0.62977433", "0.62793964", "0.6258504", "0.62469023", "0.6212453", "0.6212191", "0.6188569", "0.617553", "0.617553", "0.616311", "0.6154914", "0.6114184", "0.6081258", "0.60695785", "0.60608387", "0.60568184", "0.60399", "0.6022252", "0.60136586", "0.6008217" ]
0.80897933
0
Gets the network's input specs.
def input_spec(self):
    # If not a graph network, can't assume anything.
    if not self._is_graph_network:
      return None

    specs = []
    for layer in self._input_layers:
      if layer.input_spec is None:
        specs.append(None)
      else:
        if not isinstance(layer.input_spec, list):
          raise TypeError('Layer ' + layer.name +
                          ' has an input_spec attribute that '
                          'is not a list. We expect a list. '
                          'Found input_spec = ' + str(layer.input_spec))
        specs += layer.input_spec
    if len(specs) == 1:
      return specs[0]
    return specs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_input_specs(cls):\n specs = InputData.parameterInputFactory('Dispatcher', ordered=False, baseNode=None)\n # TODO specific for pyomo dispatcher\n return specs", "def get_input_spec(self):\r\n return self.input_spec", "def get_input_specs(cls):\n input_specs = InputData.parameterInputFactory('Component', ordered=False, baseNode=None,\n descr=r\"\"\"defines a component as an element of the grid system. Components are defined by the action they\n perform such as \\xmlNode{produces} or \\xmlNode{consumes}; see details below.\"\"\")\n input_specs.addParam('name', param_type=InputTypes.StringType, required=True,\n descr=r\"\"\"identifier for the component. This identifier will be used to generate variables\n and relate signals to this component throughout the HERON analysis.\"\"\")\n # production\n ## this unit may be able to make stuff, possibly from other stuff\n input_specs.addSub(Producer.get_input_specs())\n # storage\n ## this unit may be able to store stuff\n input_specs.addSub(Storage.get_input_specs())\n # demands\n ## this unit may have a certain demand that must be met\n input_specs.addSub(Demand.get_input_specs())\n # this unit probably has some economics\n input_specs = CashFlowUser.get_input_specs(input_specs)\n return input_specs", "def get_input_descriptor_names(self):\n\n return ['input']", "def read_input(self, specs):\n print('DEBUGG specs:', specs)", "def get_inputs(self):\n return self.inputs", "def getInputSpecification(cls):\n inputSpecification = super(Metropolis, cls).getInputSpecification()\n return inputSpecification", "def get_inputs(self):\n return self.attributes[\"inputs\"]", "def get_inputs(self):\r\n raise NotImplementedError", "def get_inputs(self):\r\n raise NotImplementedError", "def get_input_specs(cls):\n specs = super(Storage, cls).get_input_specs()\n specs.addSub(ValuedParam.get_input_specs('rate'))\n specs.addSub(ValuedParam.get_input_specs('initial_stored'))\n return specs", "def get_model_specs(self):\n raise NotImplementedError()", "def get_input_names():\n names = [device.name for device in get_devices() if device.is_input]\n return list(sorted(names))", "def _get_inputs(self):\n return [InputDesc(tf.float32, (None, IMAGE_SIZE), 'input_sensor_1'),\n InputDesc(tf.float32, (None, IMAGE_SIZE), 'input_sensor_2'),\n InputDesc(tf.int32, (None,), 'label')]", "def get_input_descriptions(self):\n raise NotImplementedError", "def input_definitions(self) -> models.QuerySet:\n return self.input_specification.input_definitions", "def getInputSpecification(cls):\n spec = super().getInputSpecification()\n # TODO Entities should use factories to populate their allowable inputs\n # -> Entities themselves don't have inputs (I think)\n return spec", "def inputs(self) -> List[Union[ColSpec, TensorSpec]]:\n return self._inputs", "def _get_inputs(self):\n return self.__inputs", "def _get_inputs(self):\n return self.__inputs", "def _get_inputs(self):\n return self.__inputs", "def input_tensorspec(self):\n return self._tensorspec", "def get_input_specs(cls):\n specs = super(Demand, cls).get_input_specs()\n specs.addSub(ValuedParam.get_input_specs('penalty'))\n return specs", "def getInputSpecification(cls):\n inputSpecification = super(NDInverseWeight, cls).getInputSpecification()\n\n\n DataFilenameParameterInput = InputData.parameterInputFactory(\"dataFilename\", contentType=InputTypes.StringType)\n DataFilenameParameterInput.addParam(\"type\", InputTypes.StringType, True)\n inputSpecification.addSub(DataFilenameParameterInput)\n\n 
inputSpecification.addSub(InputData.parameterInputFactory(\"p\", contentType=InputTypes.FloatType))\n\n return inputSpecification", "def inputs(self) -> List[str]:\n return self._model.inputs", "def inputs(self):\n return self.inputs", "def inputs(self):\n return self._inputs", "def getInputSpecification(cls):\n inputSpecification = super(Metric, cls).getInputSpecification()\n featuresInput = InputData.parameterInputFactory(\"Features\", contentType=InputTypes.StringListType)\n featuresInput.addParam(\"type\", InputTypes.StringType)\n inputSpecification.addSub(featuresInput)\n targetsInput = InputData.parameterInputFactory(\"Targets\", contentType=InputTypes.StringListType)\n targetsInput.addParam(\"type\", InputTypes.StringType)\n inputSpecification.addSub(targetsInput)\n multiOutputInput = InputData.parameterInputFactory(\"multiOutput\", contentType=InputTypes.StringType)\n inputSpecification.addSub(multiOutputInput)\n multiOutput = InputTypes.makeEnumType('MultiOutput', 'MultiOutputType', ['mean','max','min','raw_values'])\n multiOutputInput = InputData.parameterInputFactory(\"multiOutput\", contentType=multiOutput)\n inputSpecification.addSub(multiOutputInput)\n weightInput = InputData.parameterInputFactory(\"weight\", contentType=InputTypes.FloatListType)\n inputSpecification.addSub(weightInput)\n pivotParameterInput = InputData.parameterInputFactory(\"pivotParameter\", contentType=InputTypes.StringType)\n inputSpecification.addSub(pivotParameterInput)\n metricInput = InputData.parameterInputFactory(\"Metric\", contentType=InputTypes.StringType)\n metricInput.addParam(\"class\", InputTypes.StringType, True)\n metricInput.addParam(\"type\", InputTypes.StringType, True)\n inputSpecification.addSub(metricInput)\n\n return inputSpecification", "def get_input_metadata(self):\n return []", "def get_input_specs(cls):\n if cls.tag == 'produces':\n desc = r\"\"\"indicates that this component produces one or more resources by consuming other resources.\"\"\"\n resource_desc = r\"\"\"the resource produced by this component's activity.\"\"\"\n elif cls.tag == 'stores':\n desc = r\"\"\"indicates that this component stores one resource, potentially absorbing or providing that resource.\"\"\"\n resource_desc = r\"\"\"the resource stored by this component.\"\"\"\n elif cls.tag == \"demands\":\n desc = r\"\"\"indicates that this component exclusively consumes a resource.\"\"\"\n resource_desc = r\"\"\"the resource consumed by this component.\"\"\"\n specs = InputData.parameterInputFactory(cls.tag, ordered=False, descr=desc)\n specs.addParam('resource', param_type=InputTypes.StringListType, required=True,\n descr=resource_desc)\n dispatch_opts = InputTypes.makeEnumType('dispatch_opts', 'dispatch_opts', ['fixed', 'independent', 'dependent'])\n specs.addParam('dispatch', param_type=dispatch_opts,\n descr=r\"\"\"describes the way this component should be dispatched, or its flexibility.\n \\texttt{fixed} indicates the component always fully dispatched at its maximum level.\n \\texttt{independent} indicates the component is fully dispatchable by the dispatch optimization algorithm.\n \\texttt{dependent} indicates that while this component is not directly controllable by the dispatch\n algorithm, it can however be flexibly dispatched in response to other units changing dispatch level.\n For example, when attempting to increase profitability, the \\texttt{fixed} components are not adjustable,\n but the \\texttt{independent} components can be adjusted to attempt to improve the economic metric.\n In response to 
the \\texttt{independent} component adjustment, the \\texttt{dependent} components\n may respond to balance the resource usage from the changing behavior of other components.\"\"\")\n\n cap = ValuedParam.get_input_specs('capacity')\n cap.descr = r\"\"\"provides the maximum value at which this component can act, in units of the indicated resource. \"\"\"\n #cap.removeSub('ARMA')\n #cap.removeSub('Function')\n #cap.removeSub('variable')\n cap.addParam('resource', param_type=InputTypes.StringType,\n descr=r\"\"\"indicates the resource that defines the capacity of this component's operation. For example,\n if a component consumes steam and electricity to produce hydrogen, the capacity of the component\n can be defined by the maximum steam consumable, maximum electricity consumable, or maximum\n hydrogen producable. Any choice should be nominally equivalent, but determines the units\n of the value of this node.\"\"\")\n specs.addSub(cap)\n\n minn = ValuedParam.get_input_specs('minimum')\n minn.descr = r\"\"\"provides the minimum value at which this component can act, in units of the indicated resource. \"\"\"\n minn.addParam('resource', param_type=InputTypes.StringType,\n descr=r\"\"\"indicates the resource that defines the minimum activity level for this component,\n as with the component's capacity.\"\"\")\n specs.addSub(minn)\n return specs" ]
[ "0.72373915", "0.7223589", "0.65798616", "0.61967623", "0.61767274", "0.6171686", "0.6164346", "0.615488", "0.61117357", "0.61117357", "0.606532", "0.60281813", "0.6026815", "0.5994371", "0.5980121", "0.59641206", "0.59398377", "0.5933163", "0.59111977", "0.59111977", "0.59111977", "0.5826733", "0.5810195", "0.5802906", "0.58015156", "0.57839835", "0.5691784", "0.5687823", "0.5683376", "0.5668533" ]
0.7786096
0
Loads all layer weights from an HDF5 save file. If `by_name` is False (default), weights are loaded based on the network's topology, meaning the architecture should be the same as when the weights were saved. Note that layers that don't have weights are not taken into account in the topological ordering, so adding or removing layers is fine as long as they don't have weights. If `by_name` is True, weights are loaded into layers only if they share the same name. This is useful for fine-tuning or transfer-learning models where some of the layers have changed.
def load_weights(self, filepath, by_name=False):
    if h5py is None:
      raise ImportError('`load_weights` requires h5py.')
    with h5py.File(filepath, 'r') as f:
      if 'layer_names' not in f.attrs and 'model_weights' in f:
        f = f['model_weights']
      if by_name:
        load_weights_from_hdf5_group_by_name(f, self.layers)
      else:
        load_weights_from_hdf5_group(f, self.layers)
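A minimal sketch of the `by_name` distinction described in the query, assuming the tf.keras `save_weights`/`load_weights` API; the model layouts, layer names, and file name are illustrative:

import tensorflow as tf

# Source model whose weights are saved to an HDF5 file (extension selects the format).
src = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', name='feature', input_shape=(8,)),
    tf.keras.layers.Dense(4, name='head'),
])
src.save_weights('src_weights.h5')

# Target model for fine-tuning: it shares only the 'feature' layer name, so with
# by_name=True that layer is restored while 'new_head' keeps its fresh
# initialization. by_name=False would instead require the same topology.
tgt = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', name='feature', input_shape=(8,)),
    tf.keras.layers.Dense(2, name='new_head'),
])
tgt.load_weights('src_weights.h5', by_name=True)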
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_weights(self, filepath, by_name=False, exclude=None):\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(filepath, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\") else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n print(f\"Exclude layers:\\n\\t{[l for l in exclude]}\")\n\n if by_name:\n saving.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n saving.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()\n\n # Update the log directory\n self.set_log_dir(filepath)", "def load_weights(self, filepath, by_name=False, exclude=None):\n import h5py\n from keras.engine import saving\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(filepath, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n saving.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n saving.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()", "def load_weights(self, filepath, by_name=False, exclude=None):\n import h5py\n from keras.engine import saving\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(filepath, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. 
Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n saving.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n saving.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()\n\n # Update the log directory\n self.set_log_dir(filepath)", "def load_weights_from_hdf5_group_by_name(f, layers):\n if 'keras_version' in f.attrs:\n original_keras_version = f.attrs['keras_version'].decode('utf8')\n else:\n original_keras_version = '1'\n if 'backend' in f.attrs:\n original_backend = f.attrs['backend'].decode('utf8')\n else:\n original_backend = None\n\n # New file format.\n layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]\n\n # Reverse index of layer name to list of layers with name.\n index = {}\n for layer in layers:\n if layer.name:\n index.setdefault(layer.name, []).append(layer)\n\n # We batch weight value assignments in a single backend call\n # which provides a speedup in TensorFlow.\n weight_value_tuples = []\n for k, name in enumerate(layer_names):\n g = f[name]\n weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]\n weight_values = [g[weight_name] for weight_name in weight_names]\n\n for layer in index.get(name, []):\n symbolic_weights = layer.weights\n weight_values = preprocess_weights_for_loading(\n layer, weight_values, original_keras_version, original_backend)\n if len(weight_values) != len(symbolic_weights):\n raise ValueError('Layer #' + str(k) + ' (named \"' + layer.name +\n '\") expects ' + str(len(symbolic_weights)) +\n ' weight(s), but the saved weights' + ' have ' +\n str(len(weight_values)) + ' element(s).')\n # Set values.\n for i in range(len(weight_values)):\n weight_value_tuples.append((symbolic_weights[i], weight_values[i]))\n K.batch_set_value(weight_value_tuples)", "def _load_layer_weights(self, layer, name, h5file): \n group = h5file[name]\n length = group['length'][0]\n weights = [group[\"{}\".format(idx)] for idx in range(length)]\n layer.set_weights(weights)", "def _load_local_weights(self, h5file):\n for name, layer in self._layers_to_save.items():\n self._load_layer_weights(layer, name, h5file)", "def load_weights(self,\n filepath=str,\n by_name: bool=False,\n skip_mismatch: bool=False,\n options: Optional[\"SaveOptions\"]=None) -> None:\n params = dict(\n filepath=filepath,\n by_name=by_name,\n skip_mismatch=skip_mismatch,\n options=options\n )\n if is_local_path(filepath):\n ray.get([worker.load_weights.remote(**params)\n for worker in self.remote_workers])\n else:\n ray.get([worker.load_remote_weights.remote(**params)\n for worker in self.remote_workers])", "def load_weights_from_hdf5_group(f, layers):\n if 'keras_version' in f.attrs:\n original_keras_version = f.attrs['keras_version'].decode('utf8')\n else:\n original_keras_version = '1'\n if 'backend' in f.attrs:\n original_backend = f.attrs['backend'].decode('utf8')\n else:\n original_backend = None\n\n filtered_layers = []\n for layer in layers:\n weights = layer.weights\n if weights:\n filtered_layers.append(layer)\n\n layer_names = [n.decode('utf8') for n in f.attrs['layer_names']]\n filtered_layer_names = []\n for name in layer_names:\n g = f[name]\n weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]\n if weight_names:\n filtered_layer_names.append(name)\n 
layer_names = filtered_layer_names\n if len(layer_names) != len(filtered_layers):\n raise ValueError('You are trying to load a weight file '\n 'containing ' + str(len(layer_names)) +\n ' layers into a model with ' + str(len(filtered_layers)) +\n ' layers.')\n\n # We batch weight value assignments in a single backend call\n # which provides a speedup in TensorFlow.\n weight_value_tuples = []\n for k, name in enumerate(layer_names):\n g = f[name]\n weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]\n weight_values = [g[weight_name] for weight_name in weight_names]\n layer = filtered_layers[k]\n symbolic_weights = layer.weights\n weight_values = preprocess_weights_for_loading(\n layer, weight_values, original_keras_version, original_backend)\n if len(weight_values) != len(symbolic_weights):\n raise ValueError('Layer #' + str(k) + ' (named \"' + layer.name +\n '\" in the current model) was found to '\n 'correspond to layer ' + name + ' in the save file. '\n 'However the new layer ' + layer.name + ' expects ' +\n str(len(symbolic_weights)) +\n ' weights, but the saved weights have ' +\n str(len(weight_values)) + ' elements.')\n weight_value_tuples += zip(symbolic_weights, weight_values)\n K.batch_set_value(weight_value_tuples)", "def _save_layer_weights(self, layer, name, h5file):\n # How to save binary data in h5py: http://docs.h5py.org/en/latest/strings.html\n # How to serialize objects: https://docs.python.org/3.3/library/pickle.html\n weights = layer.get_weights()\n h5file.create_dataset(\"{}/length\".format(name), data=np.array([len(weights)]))\n for idx, array in enumerate(weights):\n h5file.create_dataset(\"{}/{}\".format(name, idx), data=array)", "def load_all_weights(model, filepath, include_optimizer=True):\n if h5py is None:\n raise ImportError('`load_all_weights` requires h5py.')\n\n with h5py.File(filepath, mode='r') as f:\n # set weights\n saving.load_weights_from_hdf5_group(f['model_weights'], model.layers)\n # Set optimizer weights.\n if include_optimizer and 'optimizer_weights' in f and hasattr(model, 'optimizer') and model.optimizer:\n optimizer_weights_group = f['optimizer_weights']\n optimizer_weight_names = [n.decode('utf8') for n in\n optimizer_weights_group.attrs['weight_names']]\n optimizer_weight_values = [optimizer_weights_group[n] for n in\n optimizer_weight_names]\n model.optimizer.set_weights(optimizer_weight_values)", "def _save_local_weights(self, h5file):\n for name, layer in self._layers_to_save.items():\n self._save_layer_weights(layer, name, h5file)", "def load_weights_new(self, filepath,\n skip_mismatch=False, reshape=False):\n\n with h5py.File(filepath, mode='r') as f:\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n #Nueva funcion desarrollada\n weights_proc.load_weights_from_hdf5_group_new(f, self.layers, reshape=reshape)\n if hasattr(f, 'close'):\n f.close()\n elif hasattr(f.file, 'close'):\n f.file.close()", "def save_all_weights(model, filepath, include_optimizer=True):\n if h5py is None:\n raise ImportError('`save_all_weights` requires h5py.')\n\n with h5py.File(filepath, 'w') as f:\n model_weights_group = f.create_group('model_weights')\n model_layers = model.layers\n saving.save_weights_to_hdf5_group(model_weights_group, model_layers)\n\n if include_optimizer and hasattr(model, 'optimizer') and model.optimizer:\n if isinstance(model.optimizer, optimizers.TFOptimizer):\n warnings.warn(\n 'TensorFlow optimizers do not '\n 'make it possible to access '\n 'optimizer attributes or optimizer state '\n 
'after instantiation. '\n 'As a result, we cannot save the optimizer '\n 'as part of the model save file.'\n 'You will have to compile your model again after loading it. '\n 'Prefer using a Keras optimizer instead '\n '(see keras.io/optimizers).')\n else:\n # Save optimizer weights.\n symbolic_weights = getattr(model.optimizer, 'weights')\n if symbolic_weights:\n optimizer_weights_group = f.create_group('optimizer_weights')\n weight_values = K.batch_get_value(symbolic_weights)\n weight_names = []\n for i, (w, val) in enumerate(zip(symbolic_weights, weight_values)):\n # Default values of symbolic_weights is /variable for theano\n if K.backend() == 'theano':\n if hasattr(w, 'name') and w.name != \"/variable\":\n name = str(w.name)\n else:\n name = 'param_' + str(i)\n else:\n if hasattr(w, 'name') and w.name:\n name = str(w.name)\n else:\n name = 'param_' + str(i)\n weight_names.append(name.encode('utf8'))\n optimizer_weights_group.attrs['weight_names'] = weight_names\n for name, val in zip(weight_names, weight_values):\n param_dset = optimizer_weights_group.create_dataset(\n name,\n val.shape,\n dtype=val.dtype)\n if not val.shape:\n # scalar\n param_dset[()] = val\n else:\n param_dset[:] = val", "def load_weight(model):\n file = h5py.File(WEIGHT_SAVE, 'r')\n weight = []\n for i in range(len(file.keys())):\n weight.append(file['weight' + str(i)][:])\n model.set_weights(weight)", "def load_weights(model: torch.nn.Module, state_dict_path, layer_names: List = None, freeze=False):\n if torch.cuda.is_available():\n device = 'cuda'\n else:\n device = 'cpu'\n\n state_dict = torch.load(state_dict_path, map_location=torch.device(device))\n\n if layer_names is not None:\n state_dict = extract_layers_from_state_dict(state_dict, layer_names=layer_names)\n\n model.load_state_dict(state_dict, strict=False if layer_names is not None else True)\n logger.info(\"Loaded initial model weights for layer(s) {} from '{}'.\".format(layer_names, state_dict_path))\n\n if freeze:\n layers = []\n for layer_name in layer_names:\n if type(layer_name) == tuple:\n layers.append(dict(model.named_children())[layer_name[1]])\n else:\n layers.append(dict(model.named_children())[layer_name])\n\n for layer in layers:\n for param in layer.parameters():\n param.requires_grad = False\n logger.info(\"Freezed layer(s) {}.\".format([ln[0] if type(ln) == tuple else ln for ln in layer_names]))\n\n return model", "def load_weights(base_name, model, ep, opt):\n # Paths to encoder and decoder files\n model_name = os.path.join(base_name, \"i3d_ep\"+str(ep)+\"_\"+opt+\".pt\")\n if os.path.isfile(model_name):\n model.load_state_dict(torch.load(model_name))\n print(\"Loading I3D weights... 
: {}\".format(model_name))\n return model", "def load_weights(self, file_path, format=None, in_order=True, skip=False):\n _load_weights(self, file_path, format, in_order, skip)", "def load_weights(self, the_path):\n self.model.load_state_dict(torch.load(the_path))", "def load_weights(cls, model, path_to_weights_file, load_parts=True, verbose=False):\n path_to_weights_file = Path(path_to_weights_file)\n\n if not path_to_weights_file.is_absolute():\n path_to_weights_file = MODELS_DIR/path_to_weights_file\n\n if verbose:\n print('Load weights from {}.'.format(path_to_weights_file))\n\n device = torch.device(CUDA_DEVICE_NAME if torch.cuda.is_available() else 'cpu')\n model.to(device)\n\n if load_parts:\n model_dict = model.state_dict()\n\n # try to load those part of an existing model that match the architecture\n # Reference: https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/2\n pretrained_dict = torch.load(path_to_weights_file, map_location=device)\n\n no_correspondence = [key for key, value in pretrained_dict.items()\n if key not in model_dict or model_dict[key].shape!=value.shape]\n\n if len(no_correspondence)>0:\n print('Cannot load layers:')\n for key in no_correspondence:\n print(' * '+key)\n\n pretrained_dict = {key: value for key, value in pretrained_dict.items()\n if key in model_dict and model_dict[key].shape==value.shape}\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)\n else:\n model_dict = torch.load(path_to_weights_file, map_location=device)\n model.load_state_dict(model_dict)\n\n return model", "def weights(self, model, name=None):\n fgobj = self\n name_ = name\n if name_ == None:\n name_ = self.new_model_name()\n else: # todo: add a regex to validate a consequtive string\n name_ = \"%s_%s\" % (self.train.id, name_)\n\n def f(record):\n epoch = list(recorddf(record).epoch)[0]\n name_epoch = \"%s.e%s\" % (name_, epoch)\n path = self.weightdir / (\"%s\" % (name_epoch if name_epoch[-4:] == \".npy\" else \"%s.npy\" % (name_epoch)))\n if fgobj.verbose: print(\"[Model Save]:%s\" % (path))\n torch.save(model.state_dict(), path)\n return fgobj.save_weights(path, modelname=name_epoch, framewk=\"pytorch\")\n\n return f", "def load_weights(self, path: str):\n self.load_state_dict(torch.load(path))", "def load_weights(self, path: str):\n self.load_state_dict(torch.load(path))", "def load(self, filename):\n hebbian_weights = open(filename, \"r\").read().split('\\n')\n for i in xrange(self.hidden):\n weights = hebbian_weights[i].split('\\t')\n self.vis_layer[i].set_weights(weights)\n for i in xrange(self.layers):\n for j in xrange(self.hidden):\n weights = hebbian_weights[((i+1)*self.hidden)+j].split('\\t')\n self.hidden_layers[i][j].set_weights(weights)\n weights = hebbian_weights[-2].split('\\t')\n self.output_neuron.set_weights(weights)", "def load_pretrained_weights(model, model_name, load_fc=True, advprop=False):\n # AutoAugment or Advprop (different preprocessing)\n url_map_ = url_map_advprop if advprop else url_map\n state_dict = model_zoo.load_url(url_map_[model_name], map_location=torch.device('cpu'))\n # state_dict = torch.load('../../weights/backbone_efficientnetb0.pth')\n if load_fc:\n ret = model.load_state_dict(state_dict, strict=False)\n print(ret)\n else:\n state_dict.pop('_fc.weight')\n state_dict.pop('_fc.bias')\n res = model.load_state_dict(state_dict, strict=False)\n assert set(res.missing_keys) == {'_fc.weight', '_fc.bias'}, 'issue loading pretrained weights'\n print('Loaded pretrained weights for {}'.format(model_name))", 
"def load_model_weights(self):\n raise NotImplementedError", "def load_networks(self, which_epoch):\n for name in self.model_names:\n if isinstance(name, str):\n filename = '%s_net_%s.pth' % (which_epoch, name)\n path = os.path.join(self.save_dir, filename)\n net = getattr(self, 'net_' + name)\n try:\n state_dict = torch.load(path)\n state_dict = {name.replace('module.', '', 1) : param for name, param in state_dict.items()}\n # net.load_state_dict(torch.load(path))\n net.load_state_dict(state_dict)\n except:\n pretrained_dict = torch.load(path)\n model_dict = net.state_dict()\n try:\n pretrained_dict = {k:v for k,v in pretrained_dict.items() if k in model_dict}\n net.load_state_dict(pretrained_dict)\n print('Pretrained network %s has excessive layers; Only loading layers that are used' % name)\n except:\n print('Pretrained network %s has fewer layers; The following are not initialized:' % name)\n not_initialized = set()\n for k, v in pretrained_dict.items():\n if v.size() == model_dict[k].size():\n model_dict[k] = v\n\n for k, v in model_dict.items():\n if k not in pretrained_dict or v.size() != pretrained_dict[k].size():\n not_initialized.add(k.split('.')[0])\n print(sorted(not_initialized))\n net.load_state_dict(model_dict)\n if len(self.gpu_ids) > 0 and torch.cuda.is_available():\n net.cuda()\n if not self.isTrain:\n net.eval()", "def save_weights(self, filepath, overwrite=True):\n if h5py is None:\n raise ImportError('`save_weights` requires h5py.')\n # If file exists and should not be overwritten:\n if not overwrite and os.path.isfile(filepath):\n proceed = ask_to_proceed_with_overwrite(filepath)\n if not proceed:\n return\n with h5py.File(filepath, 'w') as f:\n save_weights_to_hdf5_group(f, self.layers)", "def load_networks(self, epoch):\n for name in self.network_names:\n if isinstance(name, str):\n load_filename = '{0}_net_{1}.pth'.format(epoch, name)\n load_path = os.path.join(self.save_dir, load_filename)\n net = getattr(self, 'net')\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n print('loading the model from {0}'.format(load_path))\n state_dict = torch.load(load_path, map_location=self.device)\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n\n net.load_state_dict(state_dict)", "def load_weights(self, weights_path):\n\n # Open the weights file\n fp = open(weights_path, \"rb\")\n header = np.fromfile(fp, dtype=np.int32, count=5) # First five are header values\n\n # Needed to write header when saving weights\n self.header_info = header\n\n self.seen = header[3]\n weights = np.fromfile(fp, dtype=np.float32) # The rest are weights\n fp.close()\n\n ptr = 0\n for i, (module_def, module) in enumerate(zip(self.module_defs, self.module_list)):\n if module_def[\"type\"] == \"convolutional\":\n conv_layer = module[0]\n if module_def[\"batch_normalize\"]:\n # Load BN bias, weights, running mean and running variance\n bn_layer = module[1]\n num_b = bn_layer.bias.numel() # Number of biases\n # Bias\n bn_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.bias)\n bn_layer.bias.data.copy_(bn_b)\n ptr += num_b\n # Weight\n bn_w = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.weight)\n bn_layer.weight.data.copy_(bn_w)\n ptr += num_b\n # Running Mean\n bn_rm = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_mean)\n bn_layer.running_mean.data.copy_(bn_rm)\n ptr += num_b\n # Running Var\n bn_rv = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(bn_layer.running_var)\n bn_layer.running_var.data.copy_(bn_rv)\n 
ptr += num_b\n else:\n # Load conv. bias\n num_b = conv_layer.bias.numel()\n conv_b = torch.from_numpy(weights[ptr : ptr + num_b]).view_as(conv_layer.bias)\n conv_layer.bias.data.copy_(conv_b)\n ptr += num_b\n # Load conv. weights\n num_w = conv_layer.weight.numel()\n conv_w = torch.from_numpy(weights[ptr : ptr + num_w]).view_as(conv_layer.weight)\n conv_layer.weight.data.copy_(conv_w)\n ptr += num_w", "def load_weights(self, filename):\n checkpoint = torch.load(filename)\n if not checkpoint['input_size'] == self.state_size:\n print(f\"Error when loading weights from checkpoint {filename}: input size {checkpoint['input_size']} doesn't match state size of agent {self.state_size}\")\n return None\n if not checkpoint['output_size'] == self.action_size:\n print(f\"Error when loading weights from checkpoint {filename}: output size {checkpoint['output_size']} doesn't match action space size of agent {self.action_size}\")\n return None\n my_actor_hidden_layers = [each.out_features for each in self.actor_local.hidden_layers if each._get_name()!='BatchNorm1d']\n if not checkpoint['actor_hidden_layers'] == my_actor_hidden_layers:\n print(f\"Error when loading weights from checkpoint {filename}: actor hidden layers {checkpoint['actor_hidden_layers']} don't match agent's actor hidden layers {my_actor_hidden_layers}\")\n return None\n my_critic_hidden_layers = [each.out_features for each in self.critic_local.hidden_layers if each._get_name()!='BatchNorm1d']\n if not checkpoint['critic_hidden_layers'] == my_critic_hidden_layers:\n print(f\"Error when loading weights from checkpoint {filename}: critic hidden layers {checkpoint['critic_hidden_layers']} don't match agent's critic hidden layers {my_critic_hidden_layers}\")\n return None\n self.actor_local.load_state_dict(checkpoint['actor_state_dict'])\n self.critic_local.load_state_dict(checkpoint['critic_state_dict'])" ]
[ "0.77666646", "0.7640353", "0.7537666", "0.67017555", "0.65264434", "0.64799273", "0.63887185", "0.6366901", "0.6157574", "0.5986104", "0.5866132", "0.5698096", "0.5360071", "0.5323279", "0.5293548", "0.5216804", "0.50810814", "0.506642", "0.49889043", "0.49872407", "0.49843055", "0.49843055", "0.4968593", "0.4949812", "0.49477434", "0.49431533", "0.49329665", "0.49317786", "0.4918129", "0.49130365" ]
0.8069089
0
Returns a JSON string containing the network configuration. To load a network from a JSON save file, use `keras.models.model_from_json(json_string, custom_objects={})`.
def to_json(self, **kwargs):
    if not self._is_graph_network:
      raise NotImplementedError

    def get_json_type(obj):
      # If obj is any numpy type
      if type(obj).__module__ == np.__name__:
        return obj.item()

      # If obj is a python 'type'
      if type(obj).__name__ == type.__name__:
        return obj.__name__

      raise TypeError('Not JSON Serializable:', obj)

    model_config = self._updated_config()
    return json.dumps(model_config, default=get_json_type, **kwargs)
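A minimal round-trip sketch for the configuration-only serialization described above, assuming the public tf.keras API; the architecture is illustrative:

import tensorflow as tf

inputs = tf.keras.Input(shape=(4,))
outputs = tf.keras.layers.Dense(2, name='logits')(inputs)
model = tf.keras.Model(inputs, outputs)

# Architecture only: no weights and no optimizer state are included.
json_string = model.to_json()

# Rebuild a freshly initialized model with the same topology.
rebuilt = tf.keras.models.model_from_json(json_string, custom_objects={})
rebuilt.summary()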
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_config(network, filename):\n with open(filename, \"wt\") as my_file:\n my_file.write(network.to_json())\n return None", "def load_network(file_name):\n with open(file_name) as file:\n data = json.load(file)\n\n cost_fn = getattr(sys.modules[__name__], data[\"cost_func\"])\n act_fn = getattr(sys.modules[__name__], data[\"act_func\"])\n metric = getattr(sys.modules[__name__], data[\"metric\"])\n\n network = Network([1, 1], act_func=act_fn, cost_func=cost_fn, metric=metric)\n network.layers_num = data[\"layers_num\"]\n network.weights = [np.array(w) for w in data[\"weights\"]]\n network.biases = [np.array(b) for b in data[\"biases\"]]\n\n return network", "def build_network(config):\n network_cfg = config['network']\n\n network_name = network_cfg['name']\n\n network_params = list(inspect.signature(eval(network_name).__init__).parameters)[1:]\n\n args = [f'{param}={network_cfg[param]}' for param in network_params if network_cfg.get(param)]\n\n try:\n model = eval('{}({})'.format(network_name, ', '.join(args)))\n except:\n raise ValueError('Can\\'t load network.')\n\n return model.to(device='cuda')", "def get_network_config():\n config_path = f\"{os.path.dirname(os.path.realpath(__file__))}/../configs/benchmark-{os.environ['HMY_PROFILE']}.json\"\n assert os.path.isfile(config_path), f\"`{config_path}` does not exist!\"\n with open(config_path, 'r') as f:\n return json.load(f)", "def load_network_config(config_path):\n return load_json_file(config_path)", "def load_net_from_file(filename):\n\n print(\"Loading neural net from {}\".format(filename))\n with open(filename, \"r\") as fd:\n net = json.load(fd)\n\n print(\"net = {}\".format(pprint.pformat(net)))\n return net", "def build_model(path_to_network_model, path_to_weights):\n\n # with tf.device('/gpu:0'):\n json_file = open(path_to_network_model, 'r')\n model_json = json_file.read()\n json_file.close()\n # custom_objects={\"backend\": K, \"tf\": tf}\n model = model_from_json(model_json, custom_objects={\"tf\": tf})\n model.compile(\n loss='logcosh',\n optimizer='adam')\n model.load_weights(path_to_weights)\n return model", "def network_dict(self):\n\n data = {}\n data['network'] = {\n 'weights': self._conv_weights + self._lin_weights,\n 'dims': self.net_dims,\n 'activation': 'relu',\n 'accuracy': self.accuracy,\n 'kernel_size': self.kernel_size\n }\n\n return data", "def load(filename):\n f = open(filename, \"r\")\n data = json.load(f)\n f.close()\n cost = getattr(sys.modules[__name__], data[\"cost\"])\n net = Network(data[\"sizes\"], cost=cost)\n net.weights = [np.array(w) for w in data[\"weights\"]]\n net.biases = [np.array(b) for b in data[\"biases\"]]\n return net", "def dump_net_to_file(net_def, filename):\n\n print(\"DUMP TO FILE:\\n{}\\n\".format(net_def))\n\n with open(filename, \"w\") as fd:\n line = json.dumps(net_def)\n fd.write(\"{}\\n\".format(line))\n\n print(\"Wrote neural net to {}\".format(filename))", "def get_network(network: str, config):\n using_spatial = False # If true input is fed as patches.\n using_attention = False\n patch_return_size = 1\n\n if network == 'cohen':\n model = CohenMLP(seq_len=config.seq_len)\n elif network == 'oksuz_rnn':\n model = OksuzRNN(config.gru, input_size=config.rnn_input_size, hidden_size=config.rnn_hidden_size,\n seq_len=config.seq_len, num_layers=config.rnn_num_layers,\n bidirectional=config.rnn_bidirectional)\n elif network == 'hoppe':\n spatial_pooling = None if config.spatial_pooling.lower() == 'none' else config.spatial_pooling.lower()\n using_spatial = True if 
spatial_pooling is not None else False\n model = Hoppe(config.gru, input_size=config.rnn_input_size, hidden_size=config.rnn_hidden_size,\n seq_len=config.seq_len, num_layers=config.rnn_num_layers,\n bidirectional=config.rnn_bidirectional, spatial_pooling=spatial_pooling,\n patch_size=config.patch_size)\n elif network == 'rnn_attention':\n using_attention = True\n model = RNNAttention(input_size=config.rnn_input_size, hidden_size=config.rnn_hidden_size,\n batch_size=config.batch_size, seq_len=config.seq_len,\n num_layers=config.rnn_num_layers, bidirectional=config.rnn_bidirectional)\n elif network == 'song':\n using_attention=True\n model = Song(seq_len=config.seq_len)\n elif network == 'soyak':\n using_spatial = True\n patch_return_size = config.patch_size - 2\n model = Soyak(patch_size=config.patch_size, seq_len=config.seq_len)\n elif network == 'patch_size':\n using_spatial = True\n model = PatchSizeTest(seq_len=config.seq_len, patch_size=config.patch_size)\n elif network == 'balsiger':\n using_spatial = True\n model = Balsiger(seq_len=config.seq_len, patch_size=config.patch_size)\n elif network == 'rca_unet':\n using_spatial = True\n patch_return_size = config.patch_size\n using_attention = config.rcab_attention\n model = RCAUNet(seq_len=config.seq_len, patch_size=config.patch_size,\n temporal_features=config.num_temporal_features, attention=config.rcab_attention)\n elif network == 'r2plus1d':\n using_spatial = True\n using_attention = True if config.non_local_level > 0 else False\n model = R2Plus1D(patch_size=config.patch_size, seq_len=config.seq_len, factorise=config.factorise,\n dimensionality_reduction_level=config.dimensionality_reduction_level,\n non_local_level=config.non_local_level)\n elif network == 'r1d':\n model = R1D(seq_len=config.seq_len)\n else:\n import sys # Should not be able to reach here as we provide a choice.\n print(\"Invalid network. 
Exiting...\")\n sys.exit(1)\n\n return model, using_spatial, using_attention, patch_return_size", "def save_network(self, file_name):\n data = {\n \"layers_num\": self.layers_num,\n \"weights\": [w.tolist() for w in self.weights],\n \"biases\": [b.tolist() for b in self.biases],\n \"act_func\": str(self.act_func.__name__),\n \"cost_func\": str(self.cost_func.__name__),\n \"metric\": str(self.metric.__name__)\n }\n\n with open(file_name, 'w') as file:\n json.dump(data, file)", "def _load_neural_network(self, json_file, weights_file):\n assert isinstance(json_file, str),\\\n \"json_file not entered as a string.\"\n assert isinstance(weights_file, str),\\\n \"weights file not entered as a string.\"\n\n json_model = open(file_path(json_file), 'r')\n loaded_model_json = json_model.read()\n json_model.close()\n loaded_model = model_from_json(loaded_model_json)\n loaded_model.load_weights(file_path(weights_file))\n loaded_model.compile(loss='mean_squared_error', optimizer='Adadelta')\n self.model = loaded_model\n return", "def load_config(filename):\n with open(filename, \"r\") as my_file:\n my_file = my_file.read()\n return K.models.model_from_json(my_file)", "def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")", "def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")", "def save_configuration(model, NN_type, num_of_cells, num_of_CUEs = None, num_of_D2Ds = None):\n\n # Insert debugging assertions\n assert type(model) is keras.engine.sequential.Sequential, \"The 'model' must be sequential model.\"\n assert type(NN_type) is str, \"The 'NN_type' must be string.\"\n assert num_of_cells in constants.cell_range, f\"The 'num_of_cells' must be element in {constants.cell_range}.\"\n assert num_of_CUEs in constants.CUE_range or num_of_CUEs is None, f\"The 'num_of_CUEs' must be element in {constants.CUE_range}.\"\n assert num_of_D2Ds in constants.D2D_range or num_of_D2Ds is None, f\"The 'num_of_D2Ds' must be element in {constants.D2D_range}.\"\n\n # Get the path to the file to save the configuration to\n model_dir = pathlib.Path.cwd().joinpath('model')\n cell_dir = f'{num_of_cells}-cell'\n model_dir = model_dir.joinpath(cell_dir)\n\n if num_of_CUEs and num_of_D2Ds:\n file_name = f'configuration_Cell_{num_of_cells}_CUE_{num_of_CUEs}_D2D_{num_of_D2Ds}_{NN_type}.json'\n else:\n file_name = f'configuration_Cell_{num_of_cells}_{NN_type}.json'\n\n file_path = str(model_dir.joinpath(file_name))\n\n # Save the configuration in JSON format\n configuration = model.to_json()\n\n with open(file_path, 'w') as json_file:\n json_file.write(configuration)", "def convert_h5_to_json(model_h5_file, model_json_file):\n\n model = tf.keras.models.load_model(model_h5_file)\n json_dict = {}\n\n for l in model.layers:\n json_dict[l.name] = {\n 'input_shape': l.input_shape[1:],\n 'output_shape': l.output_shape[1:],\n 'num_neurons': l.output_shape[-1]\n }\n\n if 'conv' in l.name:\n all_weights = l.weights[0]\n neuron_weights = []\n\n # Iterate through neurons in that layer\n for n in range(all_weights.shape[3]):\n cur_neuron_dict = {}\n cur_neuron_dict['bias'] = l.bias.numpy()[n].item()\n\n # Get the current weights\n cur_weights = all_weights[:, :, :, n].numpy().astype(float)\n\n # Reshape the weights from (height, width, input_c) to\n # (input_c, height, width)\n cur_weights = cur_weights.transpose((2, 0, 1)).tolist()\n cur_neuron_dict['weights'] = cur_weights\n\n neuron_weights.append(cur_neuron_dict)\n\n 
json_dict[l.name]['weights'] = neuron_weights\n\n elif 'output' in l.name:\n all_weights = l.weights[0]\n neuron_weights = []\n\n # Iterate through neurons in that layer\n for n in range(all_weights.shape[1]):\n cur_neuron_dict = {}\n cur_neuron_dict['bias'] = l.bias.numpy()[n].item()\n\n # Get the current weights\n cur_weights = all_weights[:, n].numpy().astype(float).tolist()\n cur_neuron_dict['weights'] = cur_weights\n\n neuron_weights.append(cur_neuron_dict)\n\n json_dict[l.name]['weights'] = neuron_weights\n\n dump(json_dict, open(model_json_file, 'w'), indent=2)", "def build_graph_network(config, is_training=False):\n if not isinstance(config, graph_network_pb2.GraphNetwork):\n raise ValueError('Config has to be an instance of GraphNetwork proto.')\n\n network_oneof = config.WhichOneof('graph_network_oneof')\n if not network_oneof in _MODELS:\n raise ValueError('Invalid model %s!' % network_oneof)\n\n return _MODELS[network_oneof](getattr(config, network_oneof),\n is_training=is_training)", "def load_model():\n with open(JSON_PATH, 'r') as json_file:\n loaded_model_json = json_file.read()\n json_model = model_from_json(loaded_model_json)\n json_model.load_weights(WEIGHT_PATH)\n\n return json_model", "def load_model(model_path, model_name, net=None):\n config_file, model_file = _get_config_file(model_path, model_name), _get_model_file(model_path, model_name)\n assert os.path.isfile(\n config_file\n ), f'Could not find the config file \"{config_file}\". Are you sure this is the correct path and you have your model config stored here?'\n assert os.path.isfile(\n model_file\n ), f'Could not find the model file \"{model_file}\". Are you sure this is the correct path and you have your model stored here?'\n with open(config_file) as f:\n config_dict = json.load(f)\n if net is None:\n act_fn_name = config_dict[\"act_fn\"].pop(\"name\").lower()\n act_fn = act_fn_by_name[act_fn_name](**config_dict.pop(\"act_fn\"))\n net = BaseNetwork(act_fn=act_fn, **config_dict)\n net.load_state_dict(torch.load(model_file, map_location=device))\n return net", "def network_configuration(self) -> pulumi.Output['outputs.ServiceNetworkConfiguration']:\n return pulumi.get(self, \"network_configuration\")", "def make_job_config_json(self, job_description):\n bench_name = job_description[0]\n bench_type = job_description[1]\n bench_preference = job_description[2]\n config_file = self._node_mgr_path / f'{bench_name}_{bench_type}_{bench_preference}' / 'job.json'\n\n # FIXME: hard coded\n # Dict[str, Dict[str, Any]]\n output = dict()\n config = dict()\n config[\"name\"] = bench_name\n config[\"type\"] = bench_type\n config[\"num_of_threads\"] = 2\n if self._node_type == NodeType.IntegratedGPU:\n config[\"binding_cores\"] = \"0,3-5\"\n elif self._node_type == NodeType.CPU:\n config[\"binding_cores\"] = \"0-3\"\n config[\"numa_nodes\"] = \"0\"\n config[\"cpu_freq\"] = 2.1\n config[\"cpu_percent\"] = 100\n if self._node_type == NodeType.IntegratedGPU:\n config[\"gpu_freq\"] = 1300500000\n\n output[\"workloads\"] = config\n\n with config_file.open('w') as fp:\n fp.seek(0)\n json.dump(output, fp, indent=4)\n return config_file", "def translate(model_path: str, output_path: str, network_name: t.Optional[str]) -> None:\n\n ctx = parse(pathlib.Path(model_path).read_text(\"utf-8\"))\n\n if network_name is None:\n if len(ctx.networks) != 1:\n network_names = {network.name for network in ctx.networks}\n print(\n f\"Please specify a network name. 
Valid network names are {network_names}.\"\n )\n return\n (network,) = ctx.networks\n else:\n network = ctx.get_network_by_name(network_name)\n\n translation = translator.translate_network(network)\n\n pathlib.Path(output_path).write_text(translation.json_network, \"utf-8\")", "def load_model_dict_GNN(model_dict, is_cuda=False):\n model = GNN(\n model_type=model_dict[\"model_type\"],\n num_features=model_dict[\"num_features\"],\n num_classes=model_dict[\"num_classes\"],\n normalize=model_dict[\"normalize\"],\n reparam_mode=model_dict[\"reparam_mode\"],\n prior_mode=model_dict[\"prior_mode\"],\n struct_dropout_mode=model_dict[\"struct_dropout_mode\"],\n dropout=model_dict[\"dropout\"],\n latent_size=model_dict[\"latent_size\"],\n sample_size=model_dict[\"sample_size\"],\n num_layers=model_dict[\"num_layers\"],\n with_relu=model_dict[\"with_relu\"],\n val_use_mean=model_dict[\"val_use_mean\"],\n reparam_all_layers=model_dict[\"reparam_all_layers\"],\n is_cuda=is_cuda,\n )\n if \"state_dict\" in model_dict:\n model.load_state_dict(model_dict[\"state_dict\"])\n return model", "def ReadNet(model_def):\n with open(model_def) as f:\n net = cp.NetParameter()\n pb.text_format.Parse(f.read(), net)\n return net", "def read_config(self):\n return json.load(open(self.config_dir + \"/\" + self.graph_type.lower() + \"_config.json\"))", "def load(filename):\n f = open(filename, \"r\")\n data = json.load(f)\n f.close()\n net = MFoMNetwork(data[\"sizes\"])\n net.weights = [np.array(w) for w in data[\"weights\"]]\n net.biases = [np.array(b) for b in data[\"biases\"]]\n return net", "def load(config_file: typing.TextIO) -> \"TrainingConfig\":\n return TrainingConfig.from_json(config_file.read())", "def load_model():\n with open(MODEL_SAVE_JSON, 'r') as fp:\n json_string = fp.read()\n model = model_from_json(json_string)\n return model" ]
[ "0.6274934", "0.6163578", "0.6156433", "0.6108377", "0.6100293", "0.6095509", "0.59524125", "0.593517", "0.59179795", "0.5894862", "0.5893687", "0.5891758", "0.58499247", "0.5719799", "0.5684551", "0.5684551", "0.5666706", "0.5662442", "0.5588125", "0.5571106", "0.5545788", "0.54962105", "0.5444417", "0.54022485", "0.53892785", "0.5385259", "0.53827053", "0.5380341", "0.5353887", "0.5349204" ]
0.6523721
0
Returns the list of input tensors necessary to compute `tensor`. Output will always be a list of tensors (potentially with 1 element).
def get_source_inputs(tensor, layer=None, node_index=None): if not hasattr(tensor, '_keras_history'): return tensor if layer is None or node_index: layer, node_index, _ = tensor._keras_history if not layer._inbound_nodes: return [tensor] else: node = layer._inbound_nodes[node_index] if not node.inbound_layers: # Reached an Input layer, stop recursion. return node.input_tensors else: source_tensors = [] for i in range(len(node.inbound_layers)): x = node.input_tensors[i] layer = node.inbound_layers[i] node_index = node.node_indices[i] previous_sources = get_source_inputs(x, layer, node_index) # Avoid input redundancy. for x in previous_sources: if x not in source_tensors: source_tensors.append(x) return source_tensors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tensor_list(self) -> List[\"NmTensor\"]:\n # Get the right output dictionary.\n d = self._manual_outputs if len(self._manual_outputs) > 0 else self._default_outputs\n\n output_tensor_list = []\n # Get tensors by acessing the producer-ports.\n for k, v in d.items():\n producer_step = v.producer_step_module_port.step_number\n producer_port_name = v.producer_step_module_port.port_name\n # Find the right output tensor.\n tensor = self._tensors_ref[producer_step][producer_port_name]\n # Add it to the list.\n output_tensor_list.append(tensor)\n # Return the result.\n return output_tensor_list", "def collect_tensors(\n container: Union[torch.Tensor, Dict, List, Tuple,\n Set]) -> List[torch.Tensor]:\n\n def _collect(x, out, out_ids) -> None:\n if torch.is_tensor(x):\n if id(x) not in out_ids:\n out_ids.add(id(x))\n out.append(x)\n elif isinstance(x, PackedSequence):\n _collect(x, out, out_ids)\n elif isinstance(x, dict) or isinstance(x, OrderedDict):\n for value in x.values():\n _collect(value, out, out_ids)\n elif isinstance(x, list) or isinstance(x, tuple) or isinstance(x, set):\n for value in x:\n _collect(value, out, out_ids)\n\n tensors = []\n _collect(container, tensors, set())\n return tensors", "def gather_from_all(tensor: torch.Tensor) -> torch.Tensor:\n if tensor.ndim == 0:\n # 0 dim tensors cannot be gathered. so unsqueeze\n tensor = tensor.unsqueeze(0)\n\n if is_distributed_training_run():\n tensor, orig_device = convert_to_distributed_tensor(tensor)\n gathered_tensors = GatherLayer.apply(tensor)\n gathered_tensors = [\n convert_to_normal_tensor(_tensor, orig_device)\n for _tensor in gathered_tensors\n ]\n else:\n gathered_tensors = [tensor]\n gathered_tensor = torch.cat(gathered_tensors, 0)\n return gathered_tensor", "def all_gather_create_tensor_list(tensor: torch.Tensor, ngpus_per_node: int) -> List[torch.Tensor]:\n # tensor_list -> Output list. It should contain correctly-sized tensors to be used \n # for output of the collective.\n tensor_list = [ torch.zeros_like(tensor) for _ in range(ngpus_per_node) ]\n # Gathers tensors from the whole group in a list. 
\n # The variable `tensor` will not be affected by this operation.\n dist.all_gather(tensor_list=tensor_list, tensor=tensor)\n return tensor_list", "def apply_to_tensor(input_, func):\n if isinstance(input_, torch.nn.Module):\n return [apply_to_tensor(c, func) for c in input_.children()]\n elif isinstance(input_, torch.nn.Parameter):\n return func(input_.data)\n elif isinstance(input_, Tensor):\n return func(input_)\n elif isinstance(input_, string_classes):\n return input_\n elif isinstance(input_, collections.Mapping):\n return {k: apply_to_tensor(sample, func) for k, sample in input_.items()}\n elif isinstance(input_, collections.Iterable):\n return [apply_to_tensor(sample, func) for sample in input_]\n elif input_ is None:\n return input_\n else:\n return input_", "def dims_list(tensor: tf.Tensor) -> List[Union[int, tf.Tensor]]:\n static = tensor.shape.as_list()\n if None not in static:\n return static\n dynamic = tf.unstack(tf.shape(tensor))\n return [(d if s is None else s) for s, d in zip(static, dynamic)]", "def tensors(self):\n return [x[0] for x in self.__normalizeData__(self.__tensors__)]", "def _process_tensor_fetches(self, tensor_fetches):\n # If none or empty list.\n if tensor_fetches is None:\n raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '\n 'None.')\n if not isinstance(tensor_fetches, (list, tuple)):\n tensor_fetches = [tensor_fetches]\n elif not tensor_fetches:\n raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '\n 'empty list.')\n fetches = []\n for fetch in tensor_fetches:\n if isinstance(fetch, tensor_lib.Tensor):\n fetches.append(fetch)\n else:\n raise RuntimeError('Given tensor_fetch:%s is not a tensor.' % fetch)\n return fetches", "def flatten(x_tensor):\n # TODO: Implement Function\n return tf.contrib.layers.flatten(x_tensor)", "def get_tensors(loaded_graph):\n \n # TODO: Implement Function\n input_tensor = loaded_graph.get_tensor_by_name('input: 0')\n initial_state_tensor = loaded_graph.get_tensor_by_name('initial_state: 0')\n final_state_tensor = loaded_graph.get_tensor_by_name('final_state: 0')\n probs_tensor = loaded_graph.get_tensor_by_name('probs: 0')\n \n\n return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor", "def get_tensors(object_):\n if torch.is_tensor(object_):\n return [object_]\n elif isinstance(object_, (str, float, int)):\n return []\n\n tensors = set()\n\n if isinstance(object_, collections.Mapping):\n for value in object_.values():\n tensors.update(get_tensors(value))\n elif isinstance(object_, collections.Iterable):\n for value in object_:\n tensors.update(get_tensors(value))\n else:\n members = [\n value for key, value in inspect.getmembers(object_)\n if not isinstance(value, (collections.Callable, type(None)))\n ]\n tensors.update(get_tensors(members))\n\n return tensors", "def get_tensors(loaded_graph):\n with loaded_graph.as_default():\n input_tensor = loaded_graph.get_tensor_by_name(\"input:0\")\n initial_state_tensor = loaded_graph.get_tensor_by_name(\"initial_state:0\")\n final_state_tensor = loaded_graph.get_tensor_by_name(\"final_state:0\")\n probs_tensor = loaded_graph.get_tensor_by_name(\"probs:0\")\n\n return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor", "def concat(self, tensors):\n # check the arguments and try the fast path: only one tensor\n tensors = list(tensors)\n if not tensors:\n return []\n length = len(tensors[0])\n if length == 0:\n raise ValueError('`tensors` must be list of non-empty Tensor '\n 'lists.')\n for t in 
tensors[1:]:\n if len(t) != length:\n raise ValueError('`tensors` must be list of Tensor lists of '\n 'the same length.')\n if length == 1:\n return [t[0] for t in tensors]\n\n # do the slow path: concat all tensors\n with tf.device(self.main_device), tf.name_scope('average_tensors'):\n return [tf.concat(t, axis=0) for t in tensors]", "def cast_to_tensor_class(self, inputs: List[Any]) -> List[BackendEagerTensor]:\n values = []\n for i, a in enumerate(inputs):\n try:\n values.append(self.tensor_class(a))\n except TypeError as e:\n raise TypeError(\n f\"Unable to convert input {i}, with type {type(a)}.\") from e\n return values", "def get_tensors(loaded_graph):\n g = loaded_graph\n InputTensor = g.get_tensor_by_name(\"input:0\")\n InitialStateTensor = g.get_tensor_by_name(\"initial_state:0\")\n FinalStateTensor = g.get_tensor_by_name(\"final_state:0\") \n ProbsTensor = g.get_tensor_by_name(\"probs:0\")\n\n return InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor", "def get_tensors(self, loaded_graph):\n return loaded_graph.get_tensor_by_name(\"input:0\"),\\\n loaded_graph.get_tensor_by_name(\"initial_state:0\"),\\\n loaded_graph.get_tensor_by_name(\"final_state:0\"),\\\n loaded_graph.get_tensor_by_name(\"probs:0\"),\\\n loaded_graph.get_tensor_by_name(\"keep_prob:0\")", "def t2n(*tensors):\n data_logger.info('t2n({}:{}{})'\n .format(type(tensors).__name__, len(tensors),\n tuple(type(t).__name__ for t in tensors)))\n if len(tensors) == 1:\n return list(map(np.array, (tensors[0],)))[0]\n return list(map(np.array, tensors))", "def FetchTensor(tensor):\n return FetchTensorCC(_stringify_tensor(tensor))", "def get_tensors(loaded_graph):\n # todo 需要编程:\n input_tensor = loaded_graph.get_tensor_by_name('input:0')\n initial_state_tensor = loaded_graph.get_tensor_by_name(\"initial_state:0\")\n final_state_tensor = loaded_graph.get_tensor_by_name(\"final_state:0\")\n probs_tensor = loaded_graph.get_tensor_by_name(\"probs:0\")\n return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor", "def FetchTensor(tensor):\n return _C.FetchTensor(_stringify_tensor(tensor))", "def flatten(x_tensor):\n # TODO: Implement Function\n b, w, h, d = x_tensor.get_shape().as_list()\n img_size = w * h * d\n return tf.reshape(x_tensor, [-1, img_size])", "def get_shape(input_tensor):\n return input_tensor.get_shape().as_list()", "def cat(tensors, dim=0):\n assert isinstance(tensors, (list, tuple))\n if len(tensors) == 1:\n return tensors[0]\n return torch.cat(tensors, dim)", "def cat(tensors, dim=0):\n assert isinstance(tensors, (list, tuple))\n if len(tensors) == 1:\n return tensors[0]\n return torch.cat(tensors, dim)", "def get_shape(tensor):\n return tensor.get_shape().as_list()", "def dataset_to_tensors(dataset):\r\n tensor_num = len(dataset[0])\r\n tensors = [torch.LongTensor([sample[i] for sample in dataset]) for i in range(tensor_num)]\r\n return tensors", "def _to_tensor(cls, tensor):\n if isinstance(tensor, Tensor):\n return tensor\n return Tensor(data=tensor)", "def input_tensor(interpreter):\n tensor_index = interpreter.get_input_details()[0]['index']\n return interpreter.tensor(tensor_index)()[0]", "def tensor2tensor(tensor, *, device=None):\r\n assert gg.TF_ENABLED, 'Currently tensorflow backend is not enabled.'\r\n if pytorch.is_tensor(tensor):\r\n m = tensoras(tensor)\r\n device = gf.device(device, backend=\"tensorflow\")\r\n return astensor(m, device=device, backend=\"tensorflow\")\r\n elif tensorflow.is_tensor(tensor):\r\n m = tensoras(tensor)\r\n device = gf.device(device, 
backend=\"torch\")\r\n return astensor(m, device=device, backend=\"torch\")\r\n else:\r\n raise ValueError(\r\n f\"The input must be a TensorFlow or PyTorch Tensor, buf got {type(tensor).__name__}\"\r\n )", "def stack_torch_tensors(input_tensors):\n\n unrolled = [input_tensors[k].reshape(-1, 1) for k in range(len(input_tensors))]\n\n return torch.cat(unrolled)" ]
[ "0.67549485", "0.6570103", "0.6545245", "0.64817333", "0.6465132", "0.6455797", "0.63641167", "0.6112972", "0.6110776", "0.60301006", "0.6023867", "0.6018743", "0.6014307", "0.5977601", "0.5966194", "0.59441453", "0.5907508", "0.5889415", "0.5858512", "0.5828894", "0.5811696", "0.5795782", "0.5759257", "0.5759257", "0.5747929", "0.5739468", "0.57370013", "0.57309216", "0.57308644", "0.5701715" ]
0.67650205
0
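
A minimal usage sketch for the `get_source_inputs` helper in the record above, assuming a Keras 2.x-era install whose functional-API internals (`_keras_history`, `_inbound_nodes`) match that implementation and that the function is importable or already defined in scope; on a simple two-layer chain it should return the lone Input tensor as a one-element list.

from keras.layers import Input, Dense

inp = Input(shape=(8,), name="feats")
hidden = Dense(4, activation="relu")(inp)
out = Dense(1, name="score")(hidden)

# Walk the layer graph backwards from the output tensor to its source inputs.
sources = get_source_inputs(out)
assert sources == [inp]  # one-element list containing the original Input tensor
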
Converts layer weights from Keras 1 format to Keras 2.
def preprocess_weights_for_loading(layer, weights, original_keras_version=None, original_backend=None): if layer.__class__.__name__ == 'Bidirectional': num_weights_per_layer = len(weights) // 2 forward_weights = preprocess_weights_for_loading( layer.forward_layer, weights[:num_weights_per_layer], original_keras_version, original_backend) backward_weights = preprocess_weights_for_loading( layer.backward_layer, weights[num_weights_per_layer:], original_keras_version, original_backend) weights = forward_weights + backward_weights if original_keras_version == '1': if layer.__class__.__name__ == 'TimeDistributed': weights = preprocess_weights_for_loading( layer.layer, weights, original_keras_version, original_backend) if layer.__class__.__name__ == 'Conv1D': shape = weights[0].shape # Handle Keras 1.1 format if shape[:2] != (layer.kernel_size[0], 1) or shape[3] != layer.filters: # Legacy shape: # (filters, input_dim, filter_length, 1) assert shape[0] == layer.filters and shape[2:] == (layer.kernel_size[0], 1) weights[0] = np.transpose(weights[0], (2, 3, 1, 0)) weights[0] = weights[0][:, 0, :, :] if layer.__class__.__name__ == 'Conv2D': if layer.data_format == 'channels_first': # old: (filters, stack_size, kernel_rows, kernel_cols) # new: (kernel_rows, kernel_cols, stack_size, filters) weights[0] = np.transpose(weights[0], (2, 3, 1, 0)) if layer.__class__.__name__ == 'Conv2DTranspose': if layer.data_format == 'channels_last': # old: (kernel_rows, kernel_cols, stack_size, filters) # new: (kernel_rows, kernel_cols, filters, stack_size) weights[0] = np.transpose(weights[0], (0, 1, 3, 2)) if layer.data_format == 'channels_first': # old: (filters, stack_size, kernel_rows, kernel_cols) # new: (kernel_rows, kernel_cols, filters, stack_size) weights[0] = np.transpose(weights[0], (2, 3, 0, 1)) if layer.__class__.__name__ == 'Conv3D': if layer.data_format == 'channels_first': # old: (filters, stack_size, ...) 
# new: (..., stack_size, filters) weights[0] = np.transpose(weights[0], (2, 3, 4, 1, 0)) if layer.__class__.__name__ == 'GRU': if len(weights) == 9: kernel = np.concatenate([weights[0], weights[3], weights[6]], axis=-1) recurrent_kernel = np.concatenate( [weights[1], weights[4], weights[7]], axis=-1) bias = np.concatenate([weights[2], weights[5], weights[8]], axis=-1) weights = [kernel, recurrent_kernel, bias] if layer.__class__.__name__ == 'LSTM': if len(weights) == 12: # old: i, c, f, o # new: i, f, c, o kernel = np.concatenate( [weights[0], weights[6], weights[3], weights[9]], axis=-1) recurrent_kernel = np.concatenate( [weights[1], weights[7], weights[4], weights[10]], axis=-1) bias = np.concatenate( [weights[2], weights[8], weights[5], weights[11]], axis=-1) weights = [kernel, recurrent_kernel, bias] if layer.__class__.__name__ == 'ConvLSTM2D': if len(weights) == 12: kernel = np.concatenate( [weights[0], weights[6], weights[3], weights[9]], axis=-1) recurrent_kernel = np.concatenate( [weights[1], weights[7], weights[4], weights[10]], axis=-1) bias = np.concatenate( [weights[2], weights[8], weights[5], weights[11]], axis=-1) if layer.data_format == 'channels_first': # old: (filters, stack_size, kernel_rows, kernel_cols) # new: (kernel_rows, kernel_cols, stack_size, filters) kernel = np.transpose(kernel, (2, 3, 1, 0)) recurrent_kernel = np.transpose(recurrent_kernel, (2, 3, 1, 0)) weights = [kernel, recurrent_kernel, bias] if layer.__class__.__name__ in ['Model', 'Sequential']: new_weights = [] # trainable weights for sublayer in layer.layers: num_weights = len(sublayer.trainable_weights) if num_weights > 0: new_weights.extend( preprocess_weights_for_loading( layer=sublayer, weights=weights[:num_weights], original_keras_version=original_keras_version, original_backend=original_backend)) weights = weights[num_weights:] # non-trainable weights for sublayer in layer.layers: num_weights = len([ l for l in sublayer.weights if l not in sublayer.trainable_weights ]) if num_weights > 0: new_weights.extend( preprocess_weights_for_loading( layer=sublayer, weights=weights[:num_weights], original_keras_version=original_keras_version, original_backend=original_backend)) weights = weights[num_weights:] weights = new_weights conv_layers = ['Conv1D', 'Conv2D', 'Conv3D', 'Conv2DTranspose', 'ConvLSTM2D'] if layer.__class__.__name__ in conv_layers: if original_backend == 'theano': weights[0] = conv_utils.convert_kernel(weights[0]) if layer.__class__.__name__ == 'ConvLSTM2D': weights[1] = conv_utils.convert_kernel(weights[1]) if K.int_shape(layer.weights[0]) != weights[0].shape: weights[0] = np.transpose(weights[0], (3, 2, 0, 1)) if layer.__class__.__name__ == 'ConvLSTM2D': weights[1] = np.transpose(weights[1], (3, 2, 0, 1)) # Convert the weights of CuDNNLSTM so that they could be loaded into LSTM if layer.__class__.__name__ == 'LSTM' and len(weights) == 3: # Determine if loading a CuDNNLSTM layer from the number of bias weights: # CuDNNLSTM has (units * 8) weights; while LSTM has (units * 4) # if there's no bias weight in the file, skip this conversion units = weights[1].shape[0] bias = weights[2] if len(bias) == units * 8: # reshape the kernels kernels = np.split(weights[0], 4, axis=1) kernels = [ kernel.reshape(-1).reshape(kernel.shape, order='F') for kernel in kernels ] weights[0] = np.concatenate(kernels, axis=1) # transpose the recurrent kernels recurrent_kernels = np.split(weights[1], 4, axis=1) recurrent_kernels = [kernel.T for kernel in recurrent_kernels] weights[1] = 
np.concatenate(recurrent_kernels, axis=1) # split the bias into half and merge weights[2] = bias[:units * 4] + bias[units * 4:] return weights
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_weights2(net):\n\tfor m in net.modules():\n\t\tif isinstance(m, nn.Conv2d):\n\t\t\tnn.init.xavier_uniform_(m.weight)\n\t\t\tif m.bias is not None:\n\t\t\t\tnn.init.constant_(m.bias, 0)\n\t\t\t\n\t\telif isinstance(m, nn.BatchNorm2d):\n\t\t\tnn.init.constant_(m.weight, 1)\n\t\t\tnn.init.constant_(m.bias, 0)\n\t\t\n\t\telif isinstance(m, nn.Linear):\n\t\t\tnn.init.xavier_uniform_(m.weight)\n\n\t\t\tif m.bias is not None:\n\t\t\t\tnn.init.constant_(m.bias, 0)\n\n\treturn net", "def get_weights_and_biases_from_Keras(self):\n\t\t\n\t\tilayer_id = 21\n\n\t\tmodel = VGG16()\n\t\tprint(type(model))\n\t\twatcher = ww.WeightWatcher(model=model, log_level=logging.WARNING)\n\t\t\n\t\tdetails = watcher.describe(layers=[21])\n\t\tprint(details)\n\t\t\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\t\n\t\tparams = ww.DEFAULT_PARAMS.copy()\n\t\tparams[ww.ADD_BIASES] = True\n\t\t\n\t\tweights = watcher.get_Weights(layer=ilayer_id, params=params)\n\t\tself.assertEqual(len(weights),1)\n\t\t\n\t\tW = weights[0]\n\t\tself.assertEqual(np.max(W.shape),N)\n\t\tself.assertEqual(np.min(W.shape),M)\n\n\t\tpass", "def get_conv_1_1_weights(vgg_weights_path):\r\n\ttemp_mod = Sequential()\r\n\ttemp_mod.add(ZeroPadding2D((1,1),input_shape=(224, 224, 3)))\r\n\ttemp_mod.add(Convolution2D(64, (3, 3), activation='relu', name='conv1_1'))\r\n\ttemp_mod.load_weights(vgg_weights_path, by_name=True)\r\n\tconv1_1_weigths = temp_mod.get_layer('conv1_1').get_weights()\r\n\treturn conv1_1_weigths", "def keras_model():\n\n model = Sequential([\n Conv2D(8, (2, 2), input_shape=(16, 16, 3,)),\n BatchNormalization(momentum=.3, epsilon=.65),\n AvgPool2D(),\n MaxPool2D(),\n BatchNormalization(momentum=.4, epsilon=.25),\n Conv2D(4, (2, 2), activation=tf.nn.tanh, kernel_regularizer=tf.keras.regularizers.l2(0.5)),\n Flatten(),\n Dense(2, activation='softmax', name=\"keras_model\")])\n return model", "def get_conv1_weights(squeezenet_weights_path):\r\n\ttemp_mod = Sequential()\r\n\ttemp_mod.add(Convolution2D(64, (3, 3), strides=(2, 2), padding='valid', name='conv1',input_shape=(227, 227, 3)))\r\n\ttemp_mod.load_weights(squeezenet_weights_path, by_name=True)\r\n\tconv1_weights = temp_mod.get_layer('conv1').get_weights()\r\n\treturn conv1_weights", "def gensim_to_keras(model):\n layer = model.wv.get_keras_embedding()\n return (layer)", "def SRCNNv2(input_shape, depth_multiplier=1, multi_output=False):\n inputs = Input(input_shape, name=\"inputs\")\n conv1 = Convolution2D(filters=64, kernel_size=9, padding=\"same\", name=\"conv1\", activation=\"relu\")(inputs)\n conv2 = Convolution2D(filters=64, kernel_size=7, padding=\"same\", name=\"conv2\", activation=\"relu\")(conv1)\n #conv3 = Convolution2D(filters=64, kernel_size=3, padding=\"same\", name=\"conv3\", activation=\"relu\")(conv2)\n\n mapping = Convolution2D(filters=32, kernel_size=5, padding=\"same\", name=\"mapping\", activation=\"relu\")(conv2)\n #mapping2 = Convolution2D(filters=16, kernel_size=7, padding=\"same\", name=\"mapping2\", activation=\"relu\")(mapping)\n \n \n if multi_output:\n out = Convolution2D(filters=2, kernel_size=5, padding=\"same\", name=\"output\", activation=\"sigmoid\")(mapping)\n else:\n out = Convolution2D(filters=1, kernel_size=5, padding=\"same\", name=\"output\", activation=\"sigmoid\")(mapping)\n return Model(inputs, out)", "def add_conv_type1(model, depth, input_shape=None):\n if input_shape is not None:\n model.add(Convolution2D(depth, 5, 5, subsample=(2, 2), \\\n input_shape=input_shape))\n else:\n 
model.add(Convolution2D(depth, 5, 5, subsample=(2, 2), \\\n activation='relu', W_regularizer=l2(0.05)))", "def get_model_2(parameters):\n # Parameters\n BANDS = parameters['num_features']\n CLASSES = parameters['num_classes']\n \n # Sequential model\n model = keras.models.Sequential()\n \n # Add convolution (1)\n # -------------------\n # Input_shape (batch, rows, cols, channels) = (-, 9, BANDS, 1)\n # Output_shape (batch, rows, cols, channels) = (-, BANDS - 15, 1, 32)\n model.add(keras.layers.Conv2D(filters=32,\n kernel_size=(9, 16),\n padding='same',\n data_format=\"channels_last\",\n activation='tanh',\n input_shape=(9, BANDS,1)))\n \n # Add convolution (2)\n # -------------------\n # Input_shape (batch, rows, cols, channels) = (-, BANDS - 15, 1, 32)\n # Output_shape (batch, rows, cols, channels) = (-, BANDS - 30, 1, 32)\n model.add(keras.layers.Conv2D(filters=32,\n kernel_size=(1, 16),\n padding='same',\n data_format=\"channels_last\",\n activation='tanh'))\n \n # Add convolution (3)\n # -------------------\n # Input_shape (batch, rows, cols, channels) = (-, BANDS - 30, 1, 32)\n # Output_shape (batch, rows, cols, channels) = (-, BANDS - 45, 1, 32)\n model.add(keras.layers.Conv2D(filters=32,\n kernel_size=(1, 16),\n padding='same',\n data_format=\"channels_last\",\n activation='tanh'))\n \n # Flatten before dense layer\n model.add(keras.layers.Flatten())\n \n # Add fully connected (4)\n # -----------------------\n # Intput_shape (batch, rows, cols, channels) = (-, (BANDS - 45) x 1 x 32)\n # Output_shape (batch, dim) = (-, 800)\n model.add(keras.layers.Dense(units=800,\n activation='tanh'))\n \n # Add fully connected (5)\n # -----------------------\n # Intput_shape (batch, dim) = (-, 800)\n # Output_shape (batch, dim) = (-, 800)\n model.add(keras.layers.Dense(units=800,\n activation='softmax'))\n \n # Add fully connected to reduce to number of categories\n # -----------------------------------------------------\n # Intput_shape (batch, dim) = (-, 800)\n # Output_shape (batch, dim) = (-, CLASSES)\n model.add(keras.layers.Dense(units=CLASSES,\n activation='softmax'))\n \n # Compile model\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n \n # Print the model summary to output file\n # To print to stdout: model.summary()\n with open(OUTPUT_FILE, 'a') as f:\n # Pass the file handle in as a lambda function to make it callable\n model.summary(print_fn=lambda x: f.write(x + '\\n'))\n \n # Return the model\n return model", "def add_conv_type2(model, depth):\n model.add(Convolution2D(depth, 3, 3, subsample=(1, 1)))", "def convert_layers(model):\n\n import logging\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n\n for name, module in model._modules.items():\n if len(list(module.children())) > 0:\n model._modules[name] = convert_layers(model=module)\n try:\n module_str = str(module)\n module_new = eval(module_str)\n try:\n module_new.weight = module.weight\n module_new.bias = module.bias\n except:\n pass\n model._modules[name] = module_new\n logger.info(\"Quantizing \" + str(name) + \" \" + str(module))\n except:\n pass\n return model", "def build_model():\n model_weights = np.load(WEIGHTS_PATH, encoding='latin1').item()\n model = Sequential()\n model.add(InputLayer(batch_input_shape=(1, None, 1)))\n\n filter_parameters = [\n {'name': 'conv1', 'num_filters': 16, 'padding': 32,\n 'kernel_size': 64, 'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv2', 'num_filters': 32, 'padding': 16,\n 'kernel_size': 32, 
'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv3', 'num_filters': 64, 'padding': 8,\n 'kernel_size': 16, 'conv_strides': 2},\n\n {'name': 'conv4', 'num_filters': 128, 'padding': 4,\n 'kernel_size': 8, 'conv_strides': 2},\n\n {'name': 'conv5', 'num_filters': 256, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2,\n 'pool_size': 4, 'pool_strides': 4},\n\n {'name': 'conv6', 'num_filters': 512, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv7', 'num_filters': 1024, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv8_2', 'num_filters': 401, 'padding': 0,\n 'kernel_size': 8, 'conv_strides': 2},\n ]\n\n for x in filter_parameters:\n model.add(ZeroPadding1D(padding=x['padding']))\n model.add(Conv1D(x['num_filters'],\n kernel_size=x['kernel_size'],\n strides=x['conv_strides'],\n padding='valid'))\n weights = model_weights[x['name']]['weights'].reshape(model.layers[-1].get_weights()[0].shape)\n biases = model_weights[x['name']]['biases']\n\n model.layers[-1].set_weights([weights, biases])\n\n if 'conv8' not in x['name']:\n gamma = model_weights[x['name']]['gamma']\n beta = model_weights[x['name']]['beta']\n mean = model_weights[x['name']]['mean']\n var = model_weights[x['name']]['var']\n\n model.add(BatchNormalization())\n model.layers[-1].set_weights([gamma, beta, mean, var])\n model.add(Activation('relu'))\n if 'pool_size' in x:\n model.add(MaxPooling1D(pool_size=x['pool_size'],\n strides=x['pool_strides'],\n padding='valid'))\n\n #\n return Model(inputs=model.input, outputs=model.get_layer('activation_7').output)", "def init_weights(layer):\r\n layer_name = layer.__class__.__name__\r\n if layer_name.find(\"Conv\") != -1:\r\n layer.weight.data.normal_(0.0, 0.02)\r\n elif layer_name.find(\"BatchNorm\") != -1:\r\n layer.weight.data.normal_(1.0, 0.02)\r\n layer.bias.data.fill_(0)", "def patchify2d(w_in, w_out, k, *, bias=True):\n return nn.Conv2d(w_in, w_out, k, stride=k, padding=0, bias=bias)", "def medium2_model(optimizer='rmsprop', init='glorot_uniform', dropout=0.25):\n model = Sequential()\n model.add(Dropout(dropout, input_shape=(12,)))\n model.add(Dense(32, input_dim=12, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(4)))\n model.add(Dropout(dropout))\n model.add(Dense(32, kernel_initializer=init, activation='relu', kernel_constraint=maxnorm(4)))\n model.add(Dropout(dropout))\n model.add(Dense(1, kernel_initializer=init))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model", "def demoModel(dim, num_classes):\n import numpy as np\n from keras.models import Sequential, Model\n from keras.layers import Input\n from keras.layers import Conv2D, ZeroPadding2D, MaxPooling2D, Conv2DTranspose, Cropping2D\n from keras.layers import concatenate, UpSampling2D, Reshape\n import keras.backend as K\n\n # Build model\n input_image = Input(shape=(dim, dim, 3))\n\n conv = Conv2D(24, (3, 3), activation='relu', padding='same')(input_image)\n\n pool = MaxPooling2D((2, 2), strides=(2, 2), name=\"pool\")(conv)\n\n conv1x1 = Conv2D(24, (1, 1), padding='same', activation='relu')(pool)\n\n up = UpSampling2D(size=(2,2))(conv1x1)\n up_conv = Conv2D(24, 2, activation = 'relu', padding = 'same')(up)\n merge = concatenate([conv,up_conv], axis = 3)\n\n conv = Conv2D(12, 3, activation = 'relu', padding = 'same')(merge)\n\n activation = Conv2D(num_classes, (1, 1), activation = \"softmax\")(conv)\n\n # need to reshape for training\n output = Reshape((dim*dim, 3))(activation)\n\n model 
= Model(inputs=[input_image], outputs=output)\n\n model.summary()\n\n return model", "def SRCNN(input_shape, depth_multiplier=1, multi_output=False):\n inputs = Input(input_shape, name=\"inputs\")\n conv1 = Convolution2D(filters=64*depth_multiplier, kernel_size=9, padding=\"same\", name=\"conv1\", activation=\"relu\")(inputs)\n #conv1 = BatchNormalization(name='bn_conv1')(conv1)\n \n mapping = Convolution2D(filters=32*depth_multiplier, kernel_size=1, padding=\"same\", name=\"mapping\", activation=\"relu\")(conv1)\n #mapping = BatchNormalization(name='bn_mapping')(mapping)\n \n if multi_output:\n out = Convolution2D(filters=2, kernel_size=5, padding=\"same\", name=\"output\", activation=\"sigmoid\")(mapping)\n else:\n out = Convolution2D(filters=1, kernel_size=5, padding=\"same\", name=\"output\", activation=\"sigmoid\")(mapping)\n return Model(inputs, out)", "def init_weights(layer):\n layer_name = layer.__class__.__name__\n if layer_name.find(\"Conv\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(0.0, 0.02)\n elif layer_name.find(\"BatchNorm\") != -1 and hasattr(layer, 'weight'):\n layer.weight.data.normal_(1.0, 0.02)\n layer.bias.data.fill_(0)", "def get_model_1(parameters):\n # Parameters\n n1 = parameters['n1']\n k1 = parameters['k1']\n n2 = parameters['n2']\n n3 = parameters['n3']\n k2 = parameters['k2']\n n4 = parameters['n4']\n n5 = parameters['n5']\n NUM_FILTERS_C1 = 20\n \n # Sequential model\n model = keras.models.Sequential()\n \n # Add C1 layer\n # ------------\n # Input_shape (batch, rows, cols, channels) = (-, n1, 1, 1)\n # Output_shape (batch, rows, cols, channels) = (-, n2, 1, NUM_FILTERS_C1)\n model.add(keras.layers.Conv2D(filters=NUM_FILTERS_C1,\n kernel_size=(k1, 1),\n padding='valid',\n data_format=\"channels_last\",\n activation='tanh',\n kernel_initializer='random_uniform',\n bias_initializer='random_uniform',\n input_shape=(n1,1,1)))\n \n # Add M2 layer\n # ------------\n # Input_shape (batch, rows, cols, channels) = (-, n2, 1, NUM_FILTERS_C1)\n # Output_shape (batch, rows, cols, channels) = (-, n3, 1, NUM_FILTERS_C1)\n model.add(keras.layers.MaxPooling2D(pool_size=(k2, 1),\n padding='same',\n data_format=\"channels_last\"))\n \n # Flatten before dense layer\n model.add(keras.layers.Flatten())\n \n # Add F3 layer\n # ------------\n # Intput_shape (batch, rows, cols, channels) = (-, n3 x 1 x NUM_FILTERS_C1)\n # Output_shape (batch, dim) = (-, n4)\n model.add(keras.layers.Dense(units=n4,\n activation='tanh',\n kernel_initializer='random_uniform',\n bias_initializer='random_uniform'))\n \n # Add F4 layer\n # ------------\n # Intput_shape (batch, dim) = (1, n4)\n # Output_shape (batch, dim) = (1, n5)\n model.add(keras.layers.Dense(units=n5,\n activation='softmax',\n kernel_initializer='random_uniform',\n bias_initializer='random_uniform'))\n \n # Compile model\n model.compile(optimizer='sgd',\n loss='mean_squared_error',\n metrics=['accuracy'])\n \n # Print the model summary to output file\n # To print to stdout: model.summary()\n with open(OUTPUT_FILE, 'a') as f:\n # Pass the file handle in as a lambda function to make it callable\n model.summary(print_fn=lambda x: f.write(x + '\\n'))\n \n # Return the model\n return model", "def _get_conv_weight(model):\n layer_name = 'conv2d_1'\n weights = model.get_layer(layer_name).get_weights()\n # weights[0] --- kernel weights\n # weights[1] --- kernel biases\n weights = np.asarray(weights[0])\n mean_weights = np.mean(weights, axis=0)\n mean_weights = np.reshape(mean_weights, (3,3,1,16))\n return mean_weights", 
"def mobile_net_v2(inputs):\n\n layer = _conv_block(inputs, 32, (3, 3), strides=(1, 1))\n\n for i in [16, 24, 32, 64, 96, 160, 320]:\n layer = _inverted_residual_block(layer, i, (3, 3), t=1, strides=1, n=1)\n\n layer = _conv_block(layer, 300, (1, 1), strides=(1, 1))\n layer = GlobalAveragePooling2D()(layer)\n layer = Reshape((1, 1, 300))(layer)\n layer = Dropout(0.3, name='Dropout')(layer)\n\n return layer", "def set_model_with_layer_config(self, layer_config={}):\n available_type = [\n \"float16\", \"float32\", \"float64\", \"bfloat16\", \"int8\", \"int16\", \"int32\"\n ]\n specific_layers = {}\n for layer, layer_datatype in layer_config.items():\n if layer_datatype not in available_type:\n logger.error('Unknown layer {} data type: {}, expecte one of supported ' \\\n 'data type {}.'.format(layer, layer_datatype, available_type))\n else:\n specific_layers[layer] = layer_datatype\n self.optimize_model()\n\n model_config = self._optimized_model.get_config()\n for l in model_config[\"layers\"]:\n # the dtype is not int\n if l[\"config\"]['name'] in layer_config.keys() and layer_config[\n l[\"config\"]['name']] not in [\"int8\", \"int16\", \"int32\"]:\n l[\"config\"][\"dtype\"] = layer_config[l[\"config\"]['name']]\n converted_model = keras.Model.from_config(\n model_config, custom_objects=self.custom_objects)\n\n for new_layer, old_layer in zip(converted_model.layers,\n self._optimized_model.layers):\n old_layer_weights = old_layer.get_weights()\n if old_layer.name in layer_config.keys():\n change_type_weights = [\n w.astype(new_layer.dtype) for w in old_layer_weights\n ]\n new_layer.set_weights(change_type_weights)\n logger.info(\"Convert model layer {} data type to {}\".format(\n new_layer.name, new_layer.dtype))\n else:\n new_layer.set_weights(old_layer_weights)\n self._optimized_model = converted_model\n return specific_layers", "def transition_layer(X, nb_filters, compression):\n output = K.layers.BatchNormalization()(X)\n output = K.layers.Activation('relu')(output)\n output = K.layers.Conv2D(int(nb_filters * compression), 1,\n kernel_initializer='he_normal')(output)\n\n # transition layer\n X = K.layers.AvgPool2D(2)(output)\n # number of filters within the output\n nb_filters = int(nb_filters * compression)\n return X, nb_filters", "def initialize_weights(self):\n for layer in self._cnn_layers:\n weights_initializer.WeightsInitializer.initialize_layer_or_model(layer)", "def make_neural_net_two_layer():\n i0 = Input('i0', -1.0) # this input is immutable\n i1 = Input('i1', 0.0)\n i2 = Input('i2', 0.0)\n seed_random()\n wt1 = random_weight()\n wt2 = random_weight()\n wt3 = random_weight()\n wt4 = random_weight()\n wt5 = random_weight()\n wt6 = random_weight()\n\t\n w1A = Weight('w1A', wt1)\n w2A = Weight('w2A', wt2)\n w1B = Weight('w1B', wt3)\n w2B = Weight('w2B', wt4)\n wA = Weight('wA', -1)\n wB = Weight('wB', -1)\n wAC = Weight('wAC', wt5)\n wBC = Weight('wBC', wt6)\n wC = Weight('wC', -1)\n\n # Inputs must be in the same order as their associated weights\n A = Neuron('A', [i1,i2,i0], [w1A,w2A,wA])\n B = Neuron('B', [i1,i2,i0], [w1B,w2B,wB])\n C = Neuron('C', [A,B,i0], [wAC,wBC,wC])\n P = PerformanceElem(C, 0.0)\n\n net = Network(P,[A, B, C])\n return net", "def init_two_layer_model(input_size, hidden_size, output_size):\n # initialize a model\n model = {}\n model['W1'] = 0.00001 * np.random.randn(input_size, hidden_size)\n model['b1'] = np.zeros(hidden_size)\n model['W2'] = 0.00001 * np.random.randn(hidden_size, output_size)\n model['b2'] = np.zeros(output_size)\n return model", "def 
ConvertWeights(net_orig, net_new, suffix='_fold', eps=1e-5):\n for layer_name in net_orig.params.keys():\n if layer_name[:2] == 'bn':\n scale_layer_name = layer_name.replace('bn', 'scale')\n \n mu = net_orig.params[layer_name][0].data\n var = net_orig.params[layer_name][1].data\n \n # The standard Caffe implementation uses this, whilst some others do not\n if len(net_orig.params[layer_name]) == 3:\n mov_ave_factor = net_orig.params[layer_name][2].data[0]\n mu = mu * (1 / mov_ave_factor)\n var = var * (1 / mov_ave_factor)\n \n sigma = np.sqrt(var + eps) \n gamma = net_orig.params[scale_layer_name][0].data\n beta = net_orig.params[scale_layer_name][1].data\n \n gamma_new = gamma / sigma\n beta_new = beta - gamma * mu / sigma\n\n new_scale_layer_name = scale_layer_name + suffix\n net_new.params[new_scale_layer_name][0].data[...] = gamma_new\n net_new.params[new_scale_layer_name][1].data[...] = beta_new", "def shallow_CNN(num_bands = None, k_1 = None, k_2 = None, k_3 = None):\n active = 'relu'\n active2 = 'tanh'\n active3 = 'linear'\n inp = Input(shape=(None, None, num_bands))\n# bn = BatchNormalization()(inp)\n l1 = Conv2D(64, kernel_size=k_1, activation= active, padding='same', kernel_initializer='he_normal' )(inp)\n l2 = Conv2D(48, kernel_size=k_2, activation=active, padding='same', kernel_initializer='he_normal')(l1)\n l3 = Conv2D(32, kernel_size=k_3, activation=active, padding='same', kernel_initializer='he_normal')(l2)\n l4 = Conv2D(1, kernel_size=k_3, activation=active2, padding='same', kernel_initializer='he_normal',name=\"details\")(l3)\n# l4= Conv2D(1, kernel_size=k_3, activation=active2, padding='same', kernel_initializer='he_normal')(l3)\n# inp2 = Input(shape=(None, None, 1))\n inp1 = Input(shape=(None, None, 1))\n out = Add(name=\"band\")([l4, inp1])\n out1 = Conv2D(1, kernel_size=k_3, activation=active3, padding='same', kernel_initializer='he_normal',name=\"struct\")(out)\n out2 = Conv2D(1, kernel_size=k_3, activation=active3, padding='same', kernel_initializer='he_normal',name=\"TV\")(out)\n model = Model([inp, inp1], [out, out1, out2], name='shallow_CNN')\n \n# out= Conv2D(1, kernel_size=k_3, activation='relu', padding='same', kernel_initializer='he_normal',name=\"nothing\")(out1)\n# model = Model(inp, l4, name='shallow_CNN')\n return model", "def l2(weights, name=None):\n with ops.op_scope([weights], name, 'l2_regularizer') as scope:\n my_scale = ops.convert_to_tensor(scale,\n dtype=weights.dtype.base_dtype,\n name='scale')\n return standard_ops.mul(my_scale, nn.l2_loss(weights), name=scope)", "def layer2(x, weight_shape, bias_shape):\n \n weight_init = tf.random_normal_initializer(stddev=(2.0/weight_shape[0])**0.5)\n W = tf.get_variable(\"W\", weight_shape, initializer=weight_init)\n \n bias_init = tf.constant_initializer(value=0)\n b = tf.get_variable(\"b\", bias_shape, initializer=bias_init)\n \n return tf.nn.relu(tf.matmul(x, W) + b)" ]
[ "0.6203628", "0.61930245", "0.612963", "0.5974079", "0.5968323", "0.5933419", "0.59218013", "0.577767", "0.5749905", "0.5710342", "0.56769973", "0.5662022", "0.5621589", "0.56125194", "0.5564876", "0.5558931", "0.5555018", "0.5540978", "0.55320054", "0.5530641", "0.55190396", "0.5511444", "0.5502627", "0.55025655", "0.55006945", "0.54265046", "0.5413906", "0.5385004", "0.5384576", "0.538453" ]
0.6847917
0
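
A small NumPy sketch of the Conv2D branch in the record above: Keras 1 stored `channels_first` convolution kernels as (filters, stack_size, rows, cols) while Keras 2 expects (rows, cols, stack_size, filters), so the conversion reduces to the single transpose used in the function; the shape values below are arbitrary illustrative choices.

import numpy as np

filters, stack_size, rows, cols = 8, 3, 5, 5
k1_kernel = np.random.randn(filters, stack_size, rows, cols)  # Keras 1 layout

# Same permutation as `weights[0] = np.transpose(weights[0], (2, 3, 1, 0))` above.
k2_kernel = np.transpose(k1_kernel, (2, 3, 1, 0))
assert k2_kernel.shape == (rows, cols, stack_size, filters)   # Keras 2 layout
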
Decorator that handles tuple/TensorShape conversion. Used in `compute_output_shape` and `build`.
def shape_type_conversion(fn): def wrapper(instance, input_shape): if input_shape is not None: if isinstance(input_shape, list): input_shape = [ tuple(tensor_shape.TensorShape(x).as_list()) for x in input_shape] else: input_shape = tuple(tensor_shape.TensorShape(input_shape).as_list()) output_shape = fn(instance, input_shape) if output_shape is not None: if isinstance(output_shape, list): return [tensor_shape.TensorShape(x) for x in output_shape] return tensor_shape.TensorShape(output_shape) return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def infer_shape_make_tuple(track, *args):\n sh = [await x['shape'] for x in args]\n return TupleShape(sh)", "def wrap(func, *args, unsqueeze=False):\n\n # Convert input types where applicable\n args = list(args)\n for i, arg in enumerate(args):\n if type(arg) == np.ndarray:\n args[i] = torch.from_numpy(arg)\n if unsqueeze:\n args[i] = args[i].unsqueeze(0)\n\n result = func(*args)\n\n # Convert output types where applicable\n if isinstance(result, tuple):\n result = list(result)\n for i, res in enumerate(result):\n if type(res) == torch.Tensor:\n if unsqueeze:\n res = res.squeeze(0)\n result[i] = res.numpy()\n return tuple(result)\n elif type(result) == torch.Tensor:\n if unsqueeze:\n result = result.squeeze(0)\n return result.numpy()\n else:\n return result", "def infer_shape(inputs, mod=None):\n out_type = infer_type(inputs, mod=mod)\n checked_type = out_type.checked_type\n if hasattr(checked_type, 'shape'):\n # Regular operator that outputs tensors\n return get_const_tuple(out_type.checked_type.shape)\n # The return type is not a tensor, for example List\n return checked_type", "def shape_from_args(self):\n return tuple()", "def convert_input_to_tuple(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n data = args[0].api.payload\n try:\n kwargs['tupled_output'] = json.loads(data,\n object_hook=_json_object_hook)\n return fn(*args, **kwargs)\n except Exception:\n data = json.dumps(data)\n kwargs['tupled_output'] = json.loads(data,\n object_hook=_json_object_hook)\n return fn(*args, **kwargs)\n\n return wrapper", "def shape_from_args(self) -> Tuple[int, ...]:\n return tuple()", "def flatten_args(shapes):\n def flatten_args_dec(func):\n\n @wraps(func)\n def new_func(array1d, *args, **kwargs):\n args = tuple(unflatten(array1d, shapes)) + args\n return func(*args, **kwargs)\n\n return new_func\n\n return flatten_args_dec", "def copy_shape_func(attrs, inputs, _):\n input = inputs[0]\n if len(input.shape) == 0:\n return [_copy_shape_func_scalar(input)]\n return [_copy_shape_func_tensor(input)]", "def tuple_cast(inputs, dst_type):\n return iter_cast(inputs, dst_type, return_type=tuple)", "def normalize_shape(shape: Union[int, Tuple[int, ...], None]) -> Tuple[int, ...]:\n\n if shape is None:\n raise TypeError(\"shape is None\")\n\n # handle 1D convenience form\n if isinstance(shape, numbers.Integral):\n shape = (int(shape),)\n\n # normalize\n shape = cast(Tuple[int, ...], shape)\n shape = tuple(int(s) for s in shape)\n return shape", "async def infer_shape_tuple_setitem(track, seq, idx, value):\n seq_sh = await seq['shape']\n idx_v = await idx['value']\n value_sh = await value['shape']\n new_sh = list(seq_sh.shape)\n new_sh[idx_v] = value_sh\n return TupleShape(new_sh)", "def _tensor_shape_tensor_conversion_function(s,\n dtype=None,\n name=None,\n as_ref=False):\n _ = as_ref\n if not s.is_fully_defined():\n raise ValueError(\n f\"Cannot convert a partially known TensorShape {s} to a Tensor.\")\n s_list = s.as_list()\n int64_value = 0\n for dim in s_list:\n if dim >= 2**31:\n int64_value = dim\n break\n\n if dtype is not None:\n if dtype not in (dtypes.int32, dtypes.int64):\n raise TypeError(f\"Cannot convert TensorShape {s} to dtype {dtype}. \"\n \"Allowed dtypes are tf.int32 and tf.int64.\")\n if dtype == dtypes.int32 and int64_value:\n raise ValueError(f\"Cannot convert TensorShape {s} to dtype int32; \"\n f\"a dimension is too large. 
Consider using tf.int64.\")\n else:\n dtype = dtypes.int64 if int64_value else dtypes.int32\n if name is None:\n name = \"shape_as_tensor\"\n return constant(s_list, dtype=dtype, name=name)", "def simple_using_output_shape_tuple():\n examples = [\n benchmark.Example(\n inputs=[],\n output=tf.zeros((2, 3, 4, 5)),\n ),\n ]\n constants = []\n description = 'Construct a 4D zeros tensor'\n target_program = 'tf.zeros((2, 3, 4, 5))'\n source = 'handwritten task'\n return benchmark.Benchmark(examples=examples,\n constants=constants,\n description=description,\n target_program=target_program,\n source=source,\n name='simple_using_output_shape_tuple')", "def __call__(self, batch: iter) -> torch.Tensor or list(torch.Tensor):\n if torch.is_tensor(batch[0]):\n return torch.cat(tuple(t.unsqueeze(0) for t in batch), 0).view(-1, self.batch_size, *batch[0].size())\n elif isinstance(batch[0], int):\n return torch.LongTensor(batch).view(-1, self.batch_size)\n elif isinstance(batch[0], collections.Iterable):\n # if each batch element is not a tensor, then it should be a tuple\n # of tensors; in that case we collate each element in the tuple\n transposed = zip(*batch)\n return tuple(self.__call__(samples) for samples in transposed)\n\n raise TypeError((\"batch must contain tensors, numbers, or lists; found {}\"\n .format(type(batch[0]))))", "def _covert_list_tensor_to_tuple_tensor(list_of_tensor):\n if isinstance(list_of_tensor, list):\n tuple_of_tensor = ()\n for tensor in list_of_tensor:\n tuple_of_tensor += (tensor,)\n return tuple_of_tensor\n return list_of_tensor", "def preserve_shape(func):\n @wraps(func)\n def wrapped_function(img, *args, **kwargs):\n shape = img.shape\n result = func(img, *args, **kwargs)\n result = result.reshape(shape)\n return result\n\n return wrapped_function", "def _normalize_shape(shape):\n\n if isinstance(shape, (np.integer, int)):\n if shape < 1:\n raise ValueError(\"shape value must be greater than 0: %d\"\n % shape)\n shape = (shape,) # N is a shorthand for (N,)\n try:\n shape = tuple(shape)\n except TypeError:\n raise TypeError(\"shape must be an integer or sequence: %r\"\n % (shape,))\n\n # XXX Get from HDF5 library if possible.\n # HDF5 does not support ranks greater than 32\n if len(shape) > 32:\n raise ValueError(\n f\"shapes with rank > 32 are not supported: {shape!r}\")\n\n return tuple(SizeType(s) for s in shape)", "def _standardize_input(self, value):\n tuple_maker = lambda x: ((x,) \n if (any(isinstance(x, t) for t in (str, bytes, bytearray)) \n or not isinstance(x, collections.Iterable))\n else (x if hasattr(x, \"__len__\") else tuple(x)))\n \n if isinstance(value, Dta):\n value = value._varvals\n else: # force input into 2d structure\n if (any(isinstance(value, t) for t in (str,bytes,bytearray))\n or not isinstance(value, collections.Iterable)):\n value = ((value,),)\n else:\n value = tuple(tuple_maker(v) for v in value)\n \n return value", "def _ShapeTensor(shape):\n if isinstance(shape, (tuple, list)) and not shape:\n dtype = dtypes.int32\n else:\n dtype = None\n return ops.convert_to_tensor(shape, dtype=dtype, name=\"shape\")", "def _get_shape(value):\n if isinstance(value.type, ArrayT):\n assert value.__class__ is Array, \"Don't know how to convert %s into tuple\" % value\n elts = value.elts \n \n elt_types = get_types(elts)\n assert all(isinstance(t, IntT) for t in elt_types), \\\n \"Shape elements must be integers, not %s\" % elt_types\n return Tuple(elts = elts, type = make_tuple_type(elt_types))\n elif isinstance(value.type, TupleT):\n assert 
all(isinstance(t, ScalarT) for t in value.type.elt_types), \\\n \"Shape tuple %s : %s has non-scalar elements\" % (value, value.type)\n return value\n elif isinstance(value.type, ScalarT):\n assert isinstance(value.type, IntT), \\\n \"Can't make shape tuple from non-integer scalar %s : %s\" % (value, value.type)\n return make_tuple((value,))\n assert False, \"Can't make shape tuple from value %s : %s\" % (value, value.type)", "def convert_shape(node, **kwargs):\n return create_basic_op_node('Shape', node, kwargs)", "def cast_from_tensor_class(self, results: List[BackendEagerTensor]\n ) -> Union[Any, Tuple[Any]]:\n if isinstance(results, (tuple, list)):\n if len(results) == 1:\n return results[0].value\n return tuple(r.value for r in results)\n return results.value", "def _to_tuple(values: Union[int, Iterable[int]]) -> Tuple[int, ...]:\n try:\n return tuple(values)\n except TypeError:\n return (values,)", "def hinted_tuple_hook(obj):\n if '__tuple__' in obj:\n return tuple(obj['items'])\n return obj", "async def infer_shape_scalar(track, *args):\n return NOSHAPE", "def tuple(x):\n pass", "def handle_input(data, device=None):\n if type(data) == tuple:\n x, y = data\n x = torch.tensor(x, dtype=torch.float)\n y = torch.tensor(y, dtype=torch.float)\n if not device is None:\n x = x.to(device)\n y = y.to(device)\n return x, y\n if type(data) == np.ndarray:\n x = torch.tensor(data, dtype=torch.float)\n if not device is None:\n x = x.to(device)\n return x\n else:\n return data", "def promote_shapes(*args):\n if len(args) < 2:\n return args\n else:\n shapes = [jnp.shape(arg) for arg in args]\n batch_shape = lax.broadcast_shapes(*shapes)\n num_dims = len(batch_shape)\n return [\n jnp.reshape(arg, (1,) * (num_dims - len(s)) + s)\n if len(s) < num_dims\n else arg\n for arg, s in zip(args, shapes)\n ]", "def get_input_tensor_shape(args_list):\n tensor_list = []\n for arg in args_list:\n if isinstance(arg, Tensor):\n tmp_shape = arg.shape\n tmp_type = arg.dtype\n tensor_list.append(PythonTensor(np.ones(tmp_shape), dtype=tmp_type))\n else:\n tensor_list.append(arg)\n\n return tuple(tensor_list)", "async def infer_shape_shape(track, ary):\n shp = await ary['shape']\n return TupleShape((NOSHAPE,) * len(shp))" ]
[ "0.64385897", "0.61577266", "0.61547065", "0.6123404", "0.6027381", "0.6014724", "0.5868795", "0.5852126", "0.5830542", "0.57883346", "0.5783651", "0.56933624", "0.56317014", "0.55889386", "0.55715406", "0.55495816", "0.553933", "0.5517297", "0.5499581", "0.54156256", "0.54018635", "0.53536046", "0.53235793", "0.5283915", "0.5279062", "0.5264118", "0.52619135", "0.525535", "0.5238331", "0.52382535" ]
0.7243811
0
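
A sketch of the `shape_type_conversion` decorator in the record above applied to a toy `compute_output_shape`, assuming TensorFlow is installed and the decorator (with its `tensor_shape` import) is in scope: the wrapper hands the method a plain tuple and converts the returned tuple back into a TensorShape.

import tensorflow as tf

class TwiceAsWide:
    @shape_type_conversion
    def compute_output_shape(self, input_shape):
        # Inside the wrapper, input_shape arrives as a plain tuple, e.g. (None, 4).
        return (input_shape[0], input_shape[1] * 2)

out_shape = TwiceAsWide().compute_output_shape(tf.TensorShape([None, 4]))
assert out_shape.as_list() == [None, 8]  # converted back to a TensorShape
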
Builds a map of the graph of layers. This recursively updates the map `layer_indices`, the list `nodes_in_decreasing_depth` and the set `network_nodes`.
def build_map(tensor, finished_nodes, nodes_in_progress, layer, node_index, tensor_index): node = layer._inbound_nodes[node_index] # pylint: disable=protected-access # Prevent cycles. if node in nodes_in_progress: raise ValueError('The tensor ' + str(tensor) + ' at layer "' + layer.name + '" is part of a cycle.') # Don't repeat work for shared subgraphs if node in finished_nodes: return node_key = _make_node_key(layer.name, node_index) # Update network_nodes. network_nodes.add(node_key) # Store the traversal order for layer sorting. if layer not in layer_indices: layer_indices[layer] = len(layer_indices) nodes_in_progress.add(node) # Propagate to all previous tensors connected to this node. for i in range(len(node.inbound_layers)): x = node.input_tensors[i] layer = node.inbound_layers[i] node_index = node.node_indices[i] tensor_index = node.tensor_indices[i] build_map(x, finished_nodes, nodes_in_progress, layer, node_index, tensor_index) finished_nodes.add(node) nodes_in_progress.remove(node) nodes_in_decreasing_depth.append(node)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _map_graph_network(inputs, outputs):\n # Network_nodes: set of nodes included in the graph of layers\n # (not all nodes included in the layers are relevant to the current graph).\n network_nodes = set() # ids of all nodes relevant to the Network\n nodes_depths = {} # dict {node: depth value}\n layers_depths = {} # dict {layer: depth value}\n layer_indices = {} # dict {layer: index in traversal}\n nodes_in_decreasing_depth = []\n\n def build_map(tensor,\n finished_nodes,\n nodes_in_progress,\n layer,\n node_index,\n tensor_index):\n \"\"\"Builds a map of the graph of layers.\n\n This recursively updates the map `layer_indices`,\n the list `nodes_in_decreasing_depth` and the set `network_nodes`.\n\n Arguments:\n tensor: Some tensor in a graph.\n finished_nodes: Set of nodes whose subgraphs have been traversed\n completely. Useful to prevent duplicated work.\n nodes_in_progress: Set of nodes that are currently active on the\n recursion stack. Useful to detect cycles.\n layer: Layer from which `tensor` comes from. If not provided,\n will be obtained from `tensor._keras_history`.\n node_index: Node index from which `tensor` comes from.\n tensor_index: Tensor_index from which `tensor` comes from.\n\n Raises:\n ValueError: if a cycle is detected.\n \"\"\"\n node = layer._inbound_nodes[node_index] # pylint: disable=protected-access\n\n # Prevent cycles.\n if node in nodes_in_progress:\n raise ValueError('The tensor ' + str(tensor) + ' at layer \"' +\n layer.name + '\" is part of a cycle.')\n\n # Don't repeat work for shared subgraphs\n if node in finished_nodes:\n return\n\n node_key = _make_node_key(layer.name, node_index)\n # Update network_nodes.\n network_nodes.add(node_key)\n\n # Store the traversal order for layer sorting.\n if layer not in layer_indices:\n layer_indices[layer] = len(layer_indices)\n\n nodes_in_progress.add(node)\n\n # Propagate to all previous tensors connected to this node.\n for i in range(len(node.inbound_layers)):\n x = node.input_tensors[i]\n layer = node.inbound_layers[i]\n node_index = node.node_indices[i]\n tensor_index = node.tensor_indices[i]\n build_map(x, finished_nodes, nodes_in_progress, layer,\n node_index, tensor_index)\n\n finished_nodes.add(node)\n nodes_in_progress.remove(node)\n nodes_in_decreasing_depth.append(node)\n\n finished_nodes = set()\n nodes_in_progress = set()\n for x in outputs:\n layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access\n build_map(x, finished_nodes, nodes_in_progress,\n layer=layer,\n node_index=node_index,\n tensor_index=tensor_index)\n\n for node in reversed(nodes_in_decreasing_depth):\n # If the depth is not set, the node has no outbound nodes (depth 0).\n depth = nodes_depths.setdefault(node, 0)\n\n # Update the depth of the corresponding layer\n previous_depth = layers_depths.get(node.outbound_layer, 0)\n # If we've seen this layer before at a higher depth,\n # we should use that depth instead of the node depth.\n # This is necessary for shared layers that have inputs at different\n # depth levels in the graph.\n depth = max(depth, previous_depth)\n layers_depths[node.outbound_layer] = depth\n nodes_depths[node] = depth\n\n # Update the depth of inbound nodes.\n # The \"depth\" of a node is the max of the depths\n # of all layers it is connected to.\n for i in range(len(node.inbound_layers)):\n inbound_layer = node.inbound_layers[i]\n node_index = node.node_indices[i]\n inbound_node = inbound_layer._inbound_nodes[node_index] # pylint: disable=protected-access\n previous_depth = 
nodes_depths.get(inbound_node, 0)\n nodes_depths[inbound_node] = max(depth + 1, previous_depth)\n\n # Build a dict {depth: list of nodes with this depth}\n nodes_by_depth = {}\n for node, depth in nodes_depths.items():\n if depth not in nodes_by_depth:\n nodes_by_depth[depth] = []\n nodes_by_depth[depth].append(node)\n\n # Build a dict {depth: list of layers with this depth}\n layers_by_depth = {}\n for layer, depth in layers_depths.items():\n if depth not in layers_by_depth:\n layers_by_depth[depth] = []\n layers_by_depth[depth].append(layer)\n\n # Get sorted list of layer depths.\n depth_keys = list(layers_by_depth.keys())\n depth_keys.sort(reverse=True)\n\n # Set self.layers and self._layers_by_depth.\n layers = []\n for depth in depth_keys:\n layers_for_depth = layers_by_depth[depth]\n # Network.layers needs to have a deterministic order:\n # here we order them by traversal order.\n layers_for_depth.sort(key=lambda x: layer_indices[x])\n layers.extend(layers_for_depth)\n\n # Get sorted list of node depths.\n depth_keys = list(nodes_by_depth.keys())\n depth_keys.sort(reverse=True)\n\n # Check that all tensors required are computable.\n # computable_tensors: all tensors in the graph\n # that can be computed from the inputs provided.\n computable_tensors = []\n for x in inputs:\n computable_tensors.append(x)\n\n layers_with_complete_input = [] # To provide a better error msg.\n for depth in depth_keys:\n for node in nodes_by_depth[depth]:\n layer = node.outbound_layer\n if layer:\n for x in node.input_tensors:\n if x not in computable_tensors:\n raise ValueError('Graph disconnected: '\n 'cannot obtain value for tensor ' + str(x) +\n ' at layer \"' + layer.name + '\". '\n 'The following previous layers '\n 'were accessed without issue: ' +\n str(layers_with_complete_input))\n for x in node.output_tensors:\n computable_tensors.append(x)\n layers_with_complete_input.append(layer.name)\n\n # Ensure name unicity, which will be crucial for serialization\n # (since serialized nodes refer to layers by their name).\n all_names = [layer.name for layer in layers]\n for name in all_names:\n if all_names.count(name) != 1:\n raise ValueError('The name \"' + name + '\" is used ' +\n str(all_names.count(name)) + ' times in the model. 
'\n 'All layer names should be unique.')\n return network_nodes, nodes_by_depth, layers, layers_by_depth", "def aggregate_maps(self, G=[], node_tags={}):\n\n self.map = nx.DiGraph() \n \n # loop over graphs in list of graphs\n for g in G:\n # add nodes to graph\n self.map.add_nodes_from(g.nodes())\n # loop over edges \n for edge in g.edges(data=True):\n # if edge is already present, add weight to edge\n if self.map.has_edge(edge[0],edge[1]):\n self.map[edge[0]][edge[1]]['weight'] += edge[2]['weight']\n # if edge is not already present, add it with weight=1\n else:\n self.map.add_edge(edge[0],edge[1], weight=1)\n \n # relabel nodes according to mapping provided by 'node_tags'\n nx.set_node_attributes(self.map, name = 'node_label', values = node_tags) \n nx.relabel_nodes(self.map, mapping=node_tags, copy=False)\n\n # assign a random color to each node\n colour_list = np.random.choice( list(colors.get_named_colors_mapping().values()), len(self.map) )\n colour_dict = dict( zip(self.map.nodes, colour_list) )\n nx.set_node_attributes(self.map, name = 'node_color', values = colour_dict)\n\n # save node attributes to CoMap object\n self.node_labels = nx.get_node_attributes(self.map, name = 'node_label')\n self.node_colors = nx.get_node_attributes(self.map, name = 'node_color')\n \n return", "def _form_computation_graph(self, idx):\n _list, _set = list, set\n if type(idx) is int:\n node_layers = [np.array([idx], dtype=np.int64)]\n elif type(idx) is list:\n node_layers = [np.array(idx, dtype=np.int64)]\n\n for _ in range(self.n_layers):\n prev = node_layers[-1]\n arr = [node for node in prev]\n arr.extend([e[0] for node in arr for e in self.nbrs_t[node]])\n arr = np.array(_list(_set(arr)), dtype=np.int64)\n node_layers.append(arr)\n node_layers.reverse()\n\n mappings = [{j: i for (i, j) in enumerate(arr)} for arr in node_layers]\n\n return node_layers, mappings", "def create_basic_cyclic_adjacency_map():\n sample_adj_map = {\n \"A\": [\"B\", \"C\"],\n \"C\": [\"D\"],\n \"D\": [\"E\"],\n \"E\": [\"C\"]\n }\n graph = generate_graph(sample_adj_map, node_start_name=\"A\")\n return graph", "def generate_network_graph(network):\n num_nodes = len(network)\n network_graph = {}\n\n for y_node in range(num_nodes):\n neighbors = {}\n for x_node in range(num_nodes):\n if network[y_node][x_node] is not 0:\n neighbors[x_node] = network[y_node][x_node]\n network_graph[y_node] = neighbors\n\n return network_graph", "def _build_depth_graph(self):\n self.depth_net = DepthNetwork(self.cfg.STRUCTURE, is_training=False)\n images = self.images_placeholder[tf.newaxis]\n poses = self.poses_placeholder[tf.newaxis]\n intrinsics = self.intrinsics_placeholder[tf.newaxis]\n\n # fix the input shape\n images = tf.reshape(images, [1, 5, 192, 1088, 3])\n Ts = VideoSE3Transformation(matrix=poses)\n\n depths = self.depth_net.forward(Ts, images, intrinsics)\n self.outputs['depths'] = depths", "def build_node_graph(self):\n G = pgv.AGraph(strict=False, directed=True)\n temp_dict = defaultdict(int) #key - from_to_ip, val - counter\n\n for i, ip in enumerate(self.node_graph_dict.keys()):\n G.add_node(ip, shape='rect', label='%d' % i)\n logger.info(\"All nodes added\")\n\n for ip, droplist in self.node_graph_dict.iteritems():\n for gnid, dropids in droplist:\n for did in dropids:\n tip = self.gnid_ip_dict[self.oid_gnid_dict[did]]\n k = '{0}_{1}'.format(ip, tip)\n temp_dict[k] += 1\n\n for k, v in temp_dict.iteritems():\n ks = k.split('_')\n G.add_edge(ks[0], ks[1], weight=v)\n\n return G", "def build_graph():\n file = 
open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n node_dict[id] = Node(data[id][\"name\"], data[id][\"product\"], data[id][\"production_volume\"])\n for id in data:\n current_node = node_dict[id]\n for costumer_id in data[id][\"costumers\"]:\n current_node.costumers.append(node_dict[str(costumer_id)])\n current_node.out_edge_capacity_drop[node_dict[str(costumer_id)].name] = 0\n for supplier_id in data[id][\"suppliers\"]:\n current_node.suppliers.append(node_dict[str(supplier_id)])\n current_node.in_edge_capacity_drop[node_dict[str(supplier_id)].name] = 0\n return node_dict", "def create_basic_adjacency_map_3():\n sample_adj_map = {\n \"A\": [\"B\", \"C\"],\n \"C\": [\"D\", \"E\"],\n \"D\": [\"X\", \"Y\"],\n \"E\": [\"X\"],\n \"X\": [\"Z\"],\n \"Y\": [\"Z\"]\n }\n graph = generate_graph(sample_adj_map, node_start_name=\"A\")\n return graph", "def _build_graph_general(self): \n\n #Find a canonical coloring scheme\n #Each node has a color that is determined by the non-mapped aspects\n nodecolors=set()\n for nl in self.net.iter_node_layers():\n nodecolors.add(self._slice_node_layer_not_allowed(nl))\n nodecolors_sorted=sorted(list(nodecolors))\n del nodecolors\n self._assert_full_order(nodecolors_sorted)\n self.colormap=dict( ((color,colorid) for colorid,color in enumerate(nodecolors_sorted) ))\n\n #each aux node has a color that is determined by the aspect\n self.auxcolormap=dict( ((auxcolor, auxcolorid+len(self.colormap)) for auxcolorid,auxcolor in enumerate(sorted(self.asp)) ) )\n\n\n #Add the underlying network\n #node-layers:\n for nl in self.net.iter_node_layers():\n nlid=self._get_node_id(nl)\n color=self._slice_node_layer_not_allowed(nl)\n colorid=self.colormap[color]\n self.add_node(nlid,colorid)\n\n #edges between node-layers:\n for nl1 in self.net.iter_node_layers():\n for nl2 in self.net[nl1]:\n nl1id=self._get_node_id(nl1)\n nl2id=self._get_node_id(nl2)\n self.add_link(nl1id,nl2id)\n\n\n #Add the auxiliary nodes and edges\n #add the aux nodes\n for a in self.asp:\n for elayer in self.net.slices[a]:\n auxid=self._get_auxnode_id( (a,elayer) )\n auxcolorid=self.auxcolormap[a]\n self.add_node(auxid,auxcolorid)\n \n #add the aux edges\n for nl in self.net.iter_node_layers():\n for a in self.asp:\n nlid=self._get_node_id(nl)\n auxid=self._get_auxnode_id( (a,nl[a]) )\n self.add_link(nlid,auxid)", "def create_basic_adjacency_map_2():\n sample_adj_map = {\n \"A\": [\"B\", \"C\"],\n \"C\": [\"D\", \"E\"],\n \"D\": [\"X\"],\n \"E\": [\"X\"]\n }\n graph = generate_graph(sample_adj_map, node_start_name=\"A\")\n return graph", "def getGraph(self):\n for layer in self.layers:\n assert isinstance(layer, Dense), \"Can't compute graph\"\n\n neurons = [self.layers[0].inputDim]\n for layer in self.layers:\n neurons.append(layer.outputDim)\n\n # create a dictionary which saves nodes in the given layers\n nodes = {}\n for i in range(len(self.layers)+1):\n start = sum(neurons[:i])\n nodes[i] = range(start,start+neurons[i])\n\n # create a directed Graph\n graph = nx.DiGraph()\n\n # create edges between consecutive layers\n for l in range(len(self.layers)):\n for x in nodes[l]:\n for y in nodes[l+1]:\n graph.add_edge(x,y)\n\n # compute positions of nodes\n maxNodes = max(neurons)\n for layer in range(len(self.layers)+1):\n layerNodes = neurons[layer]\n for i, node in enumerate(nodes[layer]):\n height = i + 0.5 * (maxNodes - layerNodes)\n # save coordinates of node in graph\n graph.nodes[node]['pos'] = (\n layer,\n height\n )\n\n pos = 
nx.get_node_attributes(graph,'pos')\n\n #color the nodes\n colorMap = []\n for node in graph.nodes():\n if node in nodes[0]:\n colorMap.append('red')\n elif node in nodes[len(self.layers)]:\n colorMap.append('green')\n else:\n colorMap.append('blue')\n\n return (graph, pos, colorMap)", "def create_basic_adjacency_map_1():\n sample_adj_map = {\n \"A\": [\"B\", \"C\"],\n \"C\": [\"D\", \"E\"],\n \"D\": [\"X\"]\n }\n graph = generate_graph(sample_adj_map, node_start_name=\"A\")\n return graph", "def _build_reprojection_graph(self):\n EPS = 1e-8\n depths = self.depths_placeholder[tf.newaxis]\n poses = self.poses_placeholder[tf.newaxis]\n intrinsics = self.intrinsics_placeholder[tf.newaxis]\n\n batch, num, ht, wd = tf.unstack(tf.shape(depths), num=4)\n Ts = VideoSE3Transformation(matrix=poses)\n intrinsics = intrinsics_vec_to_matrix(intrinsics)\n\n ii, jj = tf.meshgrid(tf.range(0, num), tf.range(num, num+1))\n ii = tf.reshape(ii, [-1])\n jj = tf.reshape(jj, [-1])\n\n Tij = Ts.gather(jj) * Ts.gather(ii).inv()\n X0 = projective_ops.backproject(depths, intrinsics)\n X1 = Tij(X0)\n\n coords = projective_ops.project(X1, intrinsics)\n depths = X1[..., 2]\n\n indicies = tf.cast(coords[..., ::-1] + .5, tf.int32)\n indicies = tf.reshape(indicies, [-1, 2])\n depths = tf.reshape(depths, [-1])\n\n depth = tf.scatter_nd(indicies, depths, [ht, wd])\n count = tf.scatter_nd(indicies, tf.ones_like(depths), [ht, wd])\n\n depth = depth / (count + EPS)\n self.outputs['depth_reprojection'] = depth", "def calc_depths(self):\n graph = self._graph\n depth_cache = dict()\n\n for n in graph.topological_iter():\n preds = graph.predecessors(n)\n if not preds:\n depth_cache[n.op.key] = 0\n else:\n depth_cache[n.op.key] = 1 + max(depth_cache[ni.op.key] for ni in preds)\n return depth_cache", "def infomap_communities(G):\n name_map = {}\n name_map_inverted = {}\n for n in G.nodes():\n id_ = hash(n) % 100000\n name_map_inverted[id_] = n\n name_map[n] = id_\n \n infomapSimple = infomap.Infomap(\"--two-level\")\n network = infomapSimple.network()\n \n for n1, n2, data in G.edges(data=True):\n network.addLink(name_map[n1], name_map[n2], data['weight'] if 'weight' in data else 1)\n\n infomapSimple.run()\n\n return dict(\n (name_map_inverted[node.physicalId], node.moduleIndex())\n for node in infomapSimple.iterTree()\n if node.isLeaf()\n )", "def _ComputeLayerMapping(self):\n raw_manifest = self._v2_image.manifest()\n manifest = json.loads(raw_manifest)\n\n v2_ancestry = [fs_layer['blobSum'] for fs_layer in manifest['fsLayers']]\n v1_jsons = [v1_layer['v1Compatibility'] for v1_layer in manifest['history']]\n\n def ExtractId(v1_json):\n v1_metadata = json.loads(v1_json)\n return v1_metadata['id']\n\n # Iterate once using the maps to deduplicate.\n self._v1_to_v2 = {}\n self._v1_json = {}\n self._v1_ancestry = []\n for (v1_json, v2_digest) in zip(v1_jsons, v2_ancestry):\n v1_id = ExtractId(v1_json)\n if v1_id in self._v1_to_v2:\n assert self._v1_to_v2[v1_id] == v2_digest\n assert self._v1_json[v1_id] == v1_json\n continue\n self._v1_to_v2[v1_id] = v2_digest\n self._v1_json[v1_id] = v1_json\n self._v1_ancestry.append(v1_id)", "def get_layers(delphin_dict: dict) -> dict:\n\n x_list = convert_discretization_to_list(delphin_dict)\n\n index = 0\n layers_dict = dict()\n for assignment in delphin_dict['DelphinProject']['Assignments']['Assignment']:\n if assignment['@type'] == 'Material':\n layer = dict()\n layer['material'] = assignment['Reference']\n range_ = [int(x)\n for x in assignment['Range'].split(' ')]\n layer['x_width'] = 
sum(x_list[range_[0]:range_[2]+1])\n layer['x_index'] = range_[0], range_[2]\n layers_dict[index] = layer\n index += 1\n\n return layers_dict", "def build_network(self):\n # Position the node centers\n self.set_node_centers()\n\n # Set the nodes\n self.nodes = []\n for i in range(self.n_states):\n node = Node(\n self.node_centers[i],\n self.node_radius,\n self.labels[i]\n )\n self.nodes.append(node)", "def _get_level_map(self):\n \n # get the initial group mapping across sub-domains just based on\n # particle IDs\n groups_map = self._get_gid_map()\n\n sc = self.sc\n\n sqc = SQLContext(sc)\n\n if self.DEBUG: \n print 'spark_fof DEBUG: groups in initial mapping = %d'%groups_map.cache().count()\n\n \n # create the spark GraphFrame with group IDs as nodes and group connections as edges\n v_df = sqc.createDataFrame(groups_map.flatMap(lambda x: x)\n .distinct()\n .map(lambda v: Row(id=int(v))))\n e_df = sqc.createDataFrame(groups_map.map(lambda (s,d): Row(src=int(s), dst=int(d))))\n\n # persist the graph, allowing it to spill to disk if necessary\n g_graph = graphframes.GraphFrame(v_df, e_df).persist(StorageLevel.MEMORY_AND_DISK_SER)\n \n # generate mapping\n def make_mapping(items): \n \"\"\"Helper function to generate mappings to lowest node ID\"\"\"\n compid, nodes = items\n nodes = list(nodes)\n base_node = min(nodes)\n return [(node,base_node) for node in nodes if node != base_node]\n \n nPartitions = sc.defaultParallelism*5\n\n timein = time.time()\n group_mapping = (g_graph.connectedComponents()\n .rdd.map(lambda row: (row.component, row.id))\n .groupByKey(nPartitions)\n .filter(lambda (k,v): len(v.data)>1)\n .flatMap(make_mapping)).cache()\n \n if self.DEBUG:\n print 'spark_fof DEBUG: groups in final mapping = %d'%len(mapping)\n\n print 'spark_fof: domain group mapping build took %f seconds'%(time.time()-timein)\n self.group_mapping = group_mapping\n\n return group_mapping", "def getMoreComplexInLayerGraph(self):\n makeLayers = self.makeLayers\n addNodesToLayer = self.addNodesToLayer\n addNodeToLayer = self.addNodeToLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n addInLayerEdge = self.addInLayerEdge\n setFixedOrderConstraint = self.setFixedOrderConstraint\n\n layers = makeLayers(3)\n leftNodes = addNodesToLayer(4, layers[0])\n middleNodes = addNodesToLayer(3, layers[1])\n rightNode = addNodeToLayer(layers[2])\n setFixedOrderConstraint(middleNodes[0])\n setFixedOrderConstraint(middleNodes[1])\n\n eastWestEdgeFromTo(leftNodes[1], middleNodes[0])\n\n eastWestEdgeFromTo(leftNodes[3], middleNodes[1])\n eastWestEdgeFromTo(leftNodes[2], middleNodes[1])\n addInLayerEdge(middleNodes[0], middleNodes[1], PortSide.WEST)\n eastWestEdgeFromTo(leftNodes[0], middleNodes[0])\n addInLayerEdge(middleNodes[0], middleNodes[2], PortSide.WEST)\n\n addInLayerEdge(middleNodes[0], middleNodes[1], PortSide.EAST)\n eastWestEdgeFromTo(middleNodes[0], rightNode)\n\n return self.graph", "def generate_topology_map(self, max_depth):\n self.topology = self._get_slaves(max_depth)", "def offset_graph():\n pylon_graph = graph.graph()\n base = square(ORIGIN, LENGTH)\n base_ids = pylon_graph.add_nodes(base, \"base\")\n pylon_graph.connect_neighbours(base_ids, LENGTH)\n all_ids = []\n for i in range(LEVELS):\n level = offset(base, LENGTH * i, \"z\")\n level_ids = pylon_graph.add_nodes(level, \"level\" + str(i))\n all_ids.extend(level_ids)\n pylon_graph.connect_neighbours(all_ids, LENGTH)\n return pylon_graph", "def network(self):\n G = nx.MultiDiGraph()\n reaction_hash = []\n product_count = 0\n mapping = {}\n 
reaction_count = 0\n\n for r in self.reactions:\n reaction_count += 1\n\n reaction_dict = r.__dict__\n G.add_edge(reaction_dict.get('left'), hash(r))\n G.add_edge(reaction_dict.get('right'), hash(r))\n G.add_edge(hash(r), reaction_dict.get('left2'))\n G.add_edge(hash(r), reaction_dict.get('right2'))\n\n product_count += 1\n mapping[reaction_dict.get('left')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('right')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('left2')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('right2')] = \"x{}\".format(product_count)\n\n mapping[hash(r)] = \"r{}\".format(reaction_dict.get(\"reaction_n\"))\n reaction_hash.append(hash(r))\n\n return G, mapping", "def build_graph(graph_dict): \n #make networkX graph\n G = nx.Graph()\n G.add_nodes_from(graph_dict.keys())\n for key in graph_dict:\n for i in range(len(graph_dict[key])):\n G.add_edge(key,graph_dict[key][i])\n return G", "def build_neuron_network(nb_features_map: Union[Sequence[int], None] = None,\n size_linear_layers: Union[Sequence[int], None] = None,\n dropout_rate: Union[Tuple[float, float], float] = 0.3,\n conv_kernel_size: Union[Sequence[int], int] = 3,\n conv_stride: int = 1,\n conv_padding: int = 1,\n conv_activation: str = \"relu\",\n conv_architecture: str = \"CPD\",\n pool_kernel_size: int = 2,\n pool_stride: int = 2,\n dense_activation: str = \"relu\",\n pretrained: Union[str, None] = None,\n grayscale: bool = True,\n optimizer: str = \"Adam\",\n weight_decay: float = 0.,\n learning_rate: float = 0.001,\n ) -> Tuple[nn.Module, List, torch.optim.Optimizer]:\n # Initializations\n if pretrained is not None:\n grayscale = False\n if grayscale:\n channels = 1\n else:\n channels = 3\n if nb_features_map is None:\n nb_features_map = [8]\n if size_linear_layers is None:\n size_linear_layers = []\n height = 224\n width = 224\n module = nn.Module()\n shapes = [(\"input\", channels, height, width)]\n layers = {\"extractor\": [], \"regressor\": []}\n if not hasattr(dropout_rate, \"__len__\"):\n dropout_rate = (dropout_rate, 0.)\n next_dropout_rate = dropout_rate[0]\n # If a pretrained model is used:\n if pretrained is None:\n # Input checks\n if hasattr(conv_kernel_size, \"__len__\"):\n if len(conv_kernel_size) != len(nb_features_map):\n raise ValueError(\"The length of nb_features_map shall match the length of conv_kernel_size\")\n else:\n conv_kernel_size = [conv_kernel_size] * len(nb_features_map)\n # Feature extractor\n next_layer_type = itertools.cycle(conv_architecture)\n nb_feature_map = None\n i = 0\n while True:\n layer_type = next(next_layer_type)\n if layer_type == \"C\":\n # Convolutional layer\n try:\n nb_feature_map = nb_features_map[i]\n except IndexError:\n break\n name = \"conv2d-{:02d}\".format(i+1)\n conv = nn.Conv2d(shapes[-1][1], nb_feature_map, conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n layers[\"extractor\"].append((name, conv))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=conv_kernel_size[i], stride=conv_stride,\n padding=conv_padding)\n shapes.append((name, nb_feature_map, h, w))\n i += 1\n # Activation\n if conv_activation == \"relu\":\n activ = nn.ReLU()\n elif conv_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif conv_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(conv_activation, i)\n layers[\"extractor\"].append((name, activ))\n # 
activation does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n elif layer_type == \"P\":\n # Max-pooling\n name = \"maxpool2d-{:02d}\".format(i)\n pool = nn.MaxPool2d(pool_kernel_size, pool_stride)\n layers[\"extractor\"].append((name, pool))\n h, w = output_shape_conv_and_pool_layer(rows=shapes[-1][2], columns=shapes[-1][3],\n kernel=pool_kernel_size, stride=pool_stride)\n shapes.append((name, nb_feature_map, h, w))\n elif layer_type == \"D\":\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"extractor\"].append((name, dropout))\n # Dropout does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n next_dropout_rate += dropout_rate[1]\n elif layer_type == \"B\":\n # Batch normalization\n name = \"batchnorm-{:02d}\".format(i)\n batch = nn.BatchNorm2d(shapes[-1][1])\n layers[\"extractor\"].append((name, batch))\n # Batch norm. does not change the size\n shapes.append((name, shapes[-1][1], shapes[-1][2], shapes[-1][3]))\n # Add a flatten layer\n name = \"flatten\"\n flatten = nn.Flatten(1)\n layers[\"extractor\"].append((name, flatten))\n shapes.append((name, shapes[-1][1] * shapes[-1][2] * shapes[-1][3]))\n # Create extractor module\n extractor = nn.Sequential(OrderedDict(layers[\"extractor\"]))\n module.add_module(\"extractor\", extractor)\n elif pretrained == \"VGG16\":\n pre_trained = models.vgg16(pretrained=True)\n modules = []\n for _name, _module in pre_trained.named_children():\n if _name != 'classifier':\n modules.append((_name, _module))\n modules.append((\"flatten\", nn.Flatten(1)))\n vgg16 = nn.Sequential(OrderedDict(modules))\n # Freeze all parameters in the pre-trained model\n # So we prevent gradients from being calculated, it will save computation time\n for param in vgg16.parameters():\n param.requires_grad = False\n module.add_module('extractor', vgg16)\n shapes.append((pretrained, 25088))\n else:\n raise ValueError(f\"Unknown pre-trained model '{pretrained}'.\")\n # Regressor\n for i, size_linear_layer in enumerate(size_linear_layers):\n # Add a linear layer\n name = \"linear-{:02d}\".format(i + 1)\n linear = nn.Linear(shapes[-1][1], size_linear_layer)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, size_linear_layer))\n # Activation\n if dense_activation == \"relu\":\n activ = nn.ReLU()\n elif dense_activation == \"elu\":\n activ = nn.ELU(alpha=0.1)\n elif dense_activation == \"leaky\":\n activ = nn.LeakyReLU()\n else:\n activ = nn.ReLU()\n name = \"{}-{:02d}\".format(dense_activation, i + 1)\n layers[\"regressor\"].append((name, activ))\n shapes.append((name, shapes[-1][1])) # activation does not change the size\n # Dropout\n if next_dropout_rate > 0.:\n name = \"dropout-{:02d}\".format(i + 1)\n dropout = nn.Dropout(p=next_dropout_rate)\n layers[\"regressor\"].append((name, dropout))\n shapes.append((name, shapes[-1][1])) # Dropout does not change the size of array\n next_dropout_rate += dropout_rate[1]\n # Add the final layer, the output size is fixed to 68 x 2 = 136\n name = \"output\"\n linear = nn.Linear(shapes[-1][1], 136)\n layers[\"regressor\"].append((name, linear))\n shapes.append((name, 136))\n # Create regressor module\n regressor = nn.Sequential(OrderedDict(layers[\"regressor\"]))\n module.add_module(\"regressor\", regressor)\n # Weight initialization\n module.apply(weight_initialization)\n # Optimizer\n if optimizer == \"Adam\":\n optim = 
torch.optim.Adam(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"AdamW\":\n optim = torch.optim.AdamW(module.parameters(), lr=learning_rate, weight_decay=weight_decay)\n elif optimizer == \"SGD\":\n optim = torch.optim.SGD(module.parameters(), lr=learning_rate, weight_decay=weight_decay, momentum=0.9)\n else:\n raise ValueError(f\"Unknown optimizer {optimizer}.\")\n return module, shapes, optim", "def make_complete_graph(num_nodes):\n complete_digraph = {}\n if num_nodes > 0 and type(num_nodes) == int:\n neighbors = set([idx for idx in range(num_nodes)])\n for idx in range(num_nodes):\n complete_digraph[idx] = neighbors.copy() #creates adjacency set\n complete_digraph[idx].remove(idx) # pop out self-loop \n return complete_digraph", "def createLevelMap(self):\n for a in self.hierarchy.iterkeys():\n self.lvl = 0\n self.calcLevel(a)\n if self.lvl > self.levelMap.highestLevel: self.levelMap.highestLevel = self.lvl\n self.levelMap.addLevelData(AgentName=a, Level=self.lvl)", "def map_from_parent_nid(self, layer_id, parent_nids, remap_local=...):\n ...", "def createLevelMap(self):\n\t\tfor a in self.hierarchy.iterkeys():\n\t\t\tself.lvl = 0\n\t\t\tself.calcLevel(a)\n\t\t\tif self.lvl > self.levelMap.highestLevel: self.levelMap.highestLevel = self.lvl\n\t\t\tself.levelMap.addLevelData(AgentName=a, Level=self.lvl)" ]
[ "0.7749785", "0.58500004", "0.58035445", "0.57934177", "0.5785982", "0.57679087", "0.5744957", "0.5577869", "0.5560324", "0.5552233", "0.55086184", "0.5504372", "0.54469365", "0.5445193", "0.54325163", "0.5395003", "0.5385376", "0.53417903", "0.5309939", "0.5306035", "0.5246748", "0.5227875", "0.5218673", "0.52011585", "0.5189576", "0.518347", "0.51827055", "0.5180276", "0.5166586", "0.51566213" ]
0.6577763
1
Function to Track Moisture Level Returns Moisture Value
def track_moisture_level(): try: normal_level_init = 470 low_level_init = 560 global LIMIT_FLAG sensor_read = sensorData.read_moisture() generate_json.define_structure("moisture", sensor_read) if sensor_read > low_level_init: if LIMIT_FLAG != 3: # When it is dry (Moisture Level Low) LIMIT_FLAG = 3 blynk.notify('Moisture Level Low! Irrigation Needed') blynk.email('[email protected]', 'Alert: Moisture Level Low', 'Moisture Level Low! Irrigation Needed') logging_write() elif normal_level_init <= sensor_read <= low_level_init: if LIMIT_FLAG != 2: LIMIT_FLAG = 2 logging_write() else: if LIMIT_FLAG != 1: LIMIT_FLAG = 1 logging_write() return sensor_read except Exception as e: logging_write(e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_moisture(self) -> int:\n return int(self.get_state(self.entity_ids['current_moisture']))", "def moisture(self):\n if self.moisture_sensor is None:\n return None\n else:\n return self.moisture_sensor.percent", "def maCruise(self):\n return .77", "def coverage(self):\r\n return 0, 1", "def mortality(self):\n pass", "def functionality(self):\n self._functionality = 0.12 * self.CAMC + 0.22 * self.NOP + 0.22 * self.CIS + 0.22 * self.DSC + 0.22 * self.NOH\n return round(self._functionality, 5)", "def _get_detection_time_multiplier(self):\n return self.__detection_time_multiplier", "def lapserate_moist_adiabate():\n return 6.5", "async def handle_mo4r(message: types.Message):\n await handle_change_threshold(message, 1.5)", "def FUNC_MOX(t):\n mox1 = 3.5e-3 # [kg/s] oxidizer mass flow rate before slottling\n mox2 = 7.5e-3 # [kg/s] oxidizer mass flow rate after slottling\n if t < 5.0:\n mox = mox1\n elif 5.0<=t and t<14.0:\n mox = mox2\n elif 14.0<=t and t<20.0:\n mox = mox1\n else:\n mox = mox2\n return mox", "def get_capacitor(self):\n cap = 0.5 * self.metric_.logdet()\n return cap", "def get_M(self):\n return 1.0", "def moi(self):\n\n pass", "def test_c(self):\n self.failIf(cgs.speed_of_light/mks.speed_of_light!=100)", "def particulate_matter_2_5(self) -> float | None:\n return round_state(self._get_sensor_value(API_PM25))", "def sensitivity(self):\n return self.recall", "def test_changeIlluminationLevel(self):\n fade_to_black = \"Your environs fade to black due to Ineffable Spooky Magic.\"\n no_change = \"You do it. Swell.\"\n dark_to_light = \"Your environs are suddenly alight.\"\n brighten = \"Your environs seem slightly brighter.\"\n endarken = \"Your environs seem slightly dimmer.\"\n Manipulator.createFor(self.playerWrapper.actor)\n\n self._test(\n \"illuminate 0\",\n [fade_to_black],\n [fade_to_black])\n\n ll = self.store.findUnique(\n objects.LocationLighting,\n objects.LocationLighting.thing == self.location)\n self.assertEquals(ll.candelas, 0)\n\n self._test(\n \"illuminate 0\",\n [no_change])\n self.assertEquals(ll.candelas, 0)\n\n self._test(\n \"illuminate 100\",\n [dark_to_light],\n [dark_to_light])\n self.assertEquals(ll.candelas, 100)\n\n self._test(\n \"illuminate 110\",\n [brighten],\n [brighten])\n self.assertEquals(ll.candelas, 110)\n\n self._test(\n \"illuminate 100\",\n [endarken],\n [endarken])\n self.assertEquals(ll.candelas, 100)\n\n self._test(\n \"illuminate 0\",\n [fade_to_black],\n [fade_to_black])\n self.assertEquals(ll.candelas, 0)", "def getLevel(self):\n return self.level", "def test_update_impact_level(self):\n pass", "def test_status(self):\n measurement = self.measurement(self.metric())\n s_m = ScaleMeasurement(\n previous_scale_measurement=None,\n measurement=measurement,\n status=\"target_met\",\n )\n status = s_m.status()\n self.assertIs(type(status), str)\n self.assertEqual(status, \"target_met\")", "def _gain(self):\n return None", "def update_odometer(self, mileage):\n if mileage > self.odometer:\n self.odometer = mileage\n return self.odometer\n else:\n return \"you can't rolled back an odometer\"", "def test_moist_lapse_scalar():\n temp = moist_lapse(np.array([800.]) * units.mbar, 19.85 * units.degC, 1000. 
* units.mbar)\n assert_almost_equal(temp, 284.64 * units.kelvin, 2)", "def particulate_matter_10(self) -> float | None:\n return round_state(self._get_sensor_value(API_PM10))", "def level(self):\n return self.__pin.pwm", "def get_reward(self): # beware, this should not exceed -1.0 * constant-penalty (reward hacking)\n if self.death:\n return self.death_constant_penalty\n elif self.done: # track_complete\n return 0.0\n\n # environment collision:\n if self.current_collision_time_stamp == self.last_collision_time_stamp:\n col_rew = 0.0\n else:\n # print('DEBUG: collision detected')\n col_rew = self.collision_penatly\n\n # current gate passed:\n if self.objective_status == GateStatus.NOT_CROSSED_NOR_PASSED:\n gate_rew = 0.0\n else:\n if self.objective_status == GateStatus.CROSSED:\n # print(\"DEBUG: \", self.drone_name, \" crossed a gate\")\n self.nb_crossed_gates += 1\n gate_rew = self.gate_crossed_reward\n if self.track_complete:\n # print(\"DEBUG: \", self.drone_name, \" gets end of track bonus\")\n gate_rew += self.end_of_track_bonus\n else: # maybe better to check all gates for pass?\n # print(\"DEBUG: \", self.drone_name, \" missed a gate\")\n gate_rew = self.gate_missed_penalty\n if self.track_complete:\n # print(\"DEBUG: \", self.drone_name, \" completed the track\")\n self.track_complete = False\n self.done = True\n self.objective_status = GateStatus.NOT_CROSSED_NOR_PASSED\n\n # velocity toward objective:\n # print(\"DEBUG: \", self.drone_name, \" distance to gate: \", self.current_distance)\n distance_difference = self.last_distance - self.current_distance\n\n # gate-facing reward:\n rew_gate_facing = self.gate_facing_reward_gain * quaternions_to_gate_yaw_cos(self.current_kinematics.orientation, self.current_objective.gate_pose.orientation)\n # print(\"DEBUG: rew_gate_facing:\", rew_gate_facing)\n\n rew = self.constant_penalty + distance_difference * self.velocity_gain + col_rew + gate_rew + rew_gate_facing\n\n if self.opponent_RewardFunction is None or self.kill: # single agent rewards\n # print(\"--- DEBUG: single agent setting ---\")\n return rew\n else: # multi agent rewards\n # print(\"--- DEBUG: multi agent setting ---\")\n opp_rew = 0.0\n self.opponent_position = self.opponent_RewardFunction.current_position\n distance_to_opponent = self.current_position.distance_to(self.opponent_position)\n # kill = self.opponent_RewardFunction.pending_death\n # death = self.pending_death\n # print(\"DEBUG: \", self.drone_name, \" distance to opponent: \", distance_to_opponent)\n # if (self.opponent_RewardFunction.nb_crossed_gates > self.nb_crossed_gates) or (self.opponent_RewardFunction.nb_crossed_gates == self.nb_crossed_gates and self.current_distance >= self.opponent_RewardFunction.current_distance):\n if (self.opponent_RewardFunction.current_objective_idx > self.current_objective_idx) or (self.opponent_RewardFunction.current_objective_idx == self.current_objective_idx and self.current_distance >= self.opponent_RewardFunction.current_distance):\n # we are behind the opponent, and therefore we must avoid avoid collisions\n # print(\"DEBUG: \", self.drone_name, \" is behind\")\n opp_rew += self.lag_penalty\n if distance_to_opponent <= self.collision_radius:\n # if death:\n # print(\"DEBUG: death of \", self.drone_name)\n self.death = True\n opp_rew += self.death_penalty\n self.done = True\n else:\n # print(\"DEBUG: \", self.drone_name, \" is leading\")\n # we are leading the way\n if distance_to_opponent <= self.collision_radius:\n # if kill:\n # print(\"DEBUG: kill performed by \", 
self.drone_name)\n self.kill = True\n opp_rew += self.kill_reward\n # assert not math.isnan(rew), f\"DEBUG: multiagent rew is {rew}\"\n # assert not math.isnan(opp_rew), f\"DEBUG: multiagent opp_rew is {opp_rew}\"\n return rew + opp_rew", "def test_status(self):\n self.assertEqual('perfect', self.__metric.status())", "def base_unit() -> ureg:\n return ureg.meter", "def measure(self):\n pass", "def getTelemetryValue(self) -> float:\n\t\treturn super().getTelemetryValue()" ]
[ "0.66716444", "0.63705415", "0.57610047", "0.57574034", "0.57160485", "0.5705539", "0.5647979", "0.55928165", "0.5576037", "0.5556875", "0.54964304", "0.5485822", "0.5459541", "0.54503155", "0.54245687", "0.5390041", "0.5365124", "0.536031", "0.53520393", "0.53512824", "0.53495175", "0.53327036", "0.5332071", "0.530764", "0.5287442", "0.52571803", "0.52547354", "0.5224565", "0.52219486", "0.5215851" ]
0.7396042
0
explainerdashboard CLI tool. Used to launch an explainerdashboard from the commandline. \b explainerdashboard run Run explainerdashboard and start browser directly from command line. \b
def explainerdashboard_cli(ctx):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():\r\n pass", "def cli():\n pass" ]
[ "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6720353", "0.6672476", "0.6663817" ]
0.84108174
0
(file open for reading) > list of float Read and return list of grades in gradefile
def read_grades(gradefile): #skip over header line = gradefile.readline() while line != '\n': line = gradefile.readline() #read the grades, accumlating them into a list. grades = [] line = gradefile.readline() while line != '': #We have a string containing info for single student. #Find last space and take everything after space. grade = line[line.rfind(' ') + 1:] grades.append(float(grade)) line = gradefile.readline() return grades
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_grades_list(file_object):\n email_list = []\n grade_list = []\n\n for line in file_object:\n tmp_list = line.split()\n email_list.append(tmp_list[0])\n grade_list.append(tmp_list[1::])\n\n for value_list in grade_list:\n for i, value in enumerate(value_list):\n value_list[i] = float(value)\n\n return email_list, grade_list", "def get_float_list(gene_file, c):\n\tfile = open(gene_file,'r')\n\tList = []\n\tfor line in file:\n\t\tif not re.match(\"#\", line):\n\t\t\tline = line.strip()\n\t\t\tsline = line.split()\n\t\t\tList.append(atof(sline[c]))\n\tfile.close()\n\treturn List", "def solution():\n file = get_source()\n results = []\n for c in range(int(file.readline())):\n grades = sorted([int(v) for v in file.readline().split()][1:])\n average = float(sum(grades))/len(grades)\n first = next((i for i,g in enumerate(grades) if g > average), len(grades))\n people_above_average = len(grades) - first\n results.append(people_above_average * 100.0 / len(grades))\n for r in results:\n print '%.3f%%' % r", "def readAllfromFile(self):\n with open(self._fname, 'r') as f:\n lines = f.readlines()\n readList = []\n for line in lines:\n line = line.strip()\n if len(line) > 1:\n gra = self._readGrafromLine(line)\n readList.append(gra)\n f.close()\n return readList", "def read_floats(filepointer):\n\tdata = read_strings(filepointer)\n\tif not data:\n\t\treturn None\n\ttry:\n\t\tdata = [float(x) for x in data]\n\t\treturn data\n\texcept:\n\t\t# try the next line\n\t\treturn read_floats(filepointer)", "def load_regain_values(filename):\n gain_lines = open(filename,\"r\").readlines()\n gain_lines = [l.split() for l in gain_lines if len(l)>0 and l[0]!='#'] #remove comments and blanks\n tubes,gain_vals = zip(*[(int(l[0]),float(l[1])) for l in gain_lines])\n return Array(gain_vals)", "def get_data(dataf):\n with open(dataf) as f:\n label = []\n e_val = []\n for line in f:\n label.append(float(line.split()[1]))\n e_val.append(-1 * float(line.split()[0]))\n return label, e_val", "def harvest_engrad(engrad):\n try:\n lines = open(engrad).readlines()\n except IOError:\n return []\n num_atoms = int(lines[3].strip())\n energy = lines[7].strip()\n grad = []\n for i in range(12, 13 + num_atoms*3, 3):\n grad.append(list(map(float, lines[i:i + 3])))\n return grad", "def readAngles(fileName: str) -> List[float]:\n outList = list()\n with open(fileName, 'r') as fileIn:\n for line in fileIn:\n val = float(line)\n outList.append(val)\n return outList", "def _read_scores(self,path):\n scores = dict()\n fin = open(path,'r')\n for line in fin:\n k, v = line.split()\n scores[k.strip()] = float(v.strip())\n return scores", "def read_file(filename):\n\n name_list = []\n sales_list = []\n\n try:\n in_file = open(filename, 'r')\n name = in_file.readline().rstrip('\\n')\n while name != '':\n name_list.append(name)\n sales = float(in_file.readline())\n sales_list.append(sales)\n name = in_file.readline().rstrip('\\n')\n\n in_file.close()\n\n return name_list, sales_list\n\n except IOError:\n print('Could not open %s for reading.' 
% filename)\n sys.exit(1) # cannot continue if this happens", "def loadpoly(file):\n out = []\n with open(file) as f:\n while True:\n try:\n density = float(f.readline())\n except ValueError:\n break\n l = []\n try:\n while True:\n line = f.readline().replace(\",\", \" \").split()\n l.append((float(line[0]), float(line[1])))\n except (ValueError, IndexError):\n out.append((l, density))\n if not out:\n raise ValueError(f\"Could not read '{file}'\")\n return out", "def get_g(file_name):\n \n r,g = np.loadtxt(file_name, dtype = 'float', unpack = 'true')\n \n return r,g", "def load_from_file(rfile=\"data_ass6.txt\"):\n datafile = open(rfile, \"r\")\n linenum = 0\n for line in datafile:\n linenum += 1\n if linenum == 1:\n groupdata = line.strip().split()\n group = StudentGroup(groupdata[0])\n group.maxidy = int(groupdata[1])\n else:\n studentdata = line.strip().split()\n student = Student(int(studentdata[0]), studentdata[1])\n student.active = bool(studentdata[2])\n group.studentlist.append(student)\n for i in range(3, len(studentdata), 2):\n student.grades[studentdata[i]] = float(studentdata[i + 1])\n datafile.close()\n print \"Data loaded from \" + rfile\n return group", "def readHaml(fileName: str) -> List[Tuple[str, float]]:\n outList = list()\n with open(fileName, 'r') as fileIn:\n for line in fileIn:\n val = (line.split()[0], float(line.split()[1]))\n outList.append(val)\n return outList", "def read_data(path):\n data_set = []\n y = -1\n with open(path, \"r\") as file:\n for line in file:\n y = y+1\n data_set.append([])\n currentline = line.split(\",\")\n for x in currentline:\n data_set[y].append(float(x.rstrip()))\n return data_set", "def readfile(filename,multiplier=1.0):\n with open(filename,'r') as f:\n lines = f.readlines()\n vec = [multiplier*float(a.strip()) for a in lines]\n return vec", "def read_points():\n\tpoints = []\n\tf = open(r'sample_points.txt')\n\twhile True:\n\t\tnstr = f.readline()\n\t\tif len(nstr) == 0:\n\t\t\tbreak\n\t\tline = nstr.rstrip('\\n').split(', ')\n\t\t# print(line)\n\n\t\tpoints.append((round(float(line[0]),3),round(float(line[1]),3))) \n\n\tprint(points)\n\treturn points", "def fill(fname):\n return [[float(line.split()[-3]), float(line.split()[-1])]\\\n for line in open(fname).readlines()\\\n if FITRESRE1.match(line)]", "def datareader(self, path):\n\n f = open(path, 'r')\n data = f.read()\n data = data.split('\\n')\n data_tmp = []\n for idx in range(len(data)):\n if str(data[idx]).find('@data') >= 0:\n data_tmp = data[idx + 1:]\n break\n res = []\n for record in data_tmp:\n record = record.split(',')\n record = map(float, record)\n res.append(record)\n return res", "def read_list(f, nb_freqs):\n alist = []\n while len(alist) < nb_freqs:\n line = f.readline()\n splitted = line.split()\n well_splitted = True\n for entry in splitted:\n well_splitted = well_splitted and entry.count('.') <= 1\n if well_splitted:\n entries = splitted\n else:\n if line.count('-') > 0:\n # Probably coming from an SDSS spectrum.\n entries = [line[i:i+12] for i in range(0, len(line) - 1, 12)]\n else:\n entries = [line[i:i+8] for i in range(0, len(line) - 1, 8)]\n for entry in entries:\n try:\n alist.append(float(entry))\n except ValueError:\n # If conversion to float fails, put 0 instead.\n alist.append(0)\n return numpy.array(alist)", "def _parse(file_name) -> Tuple[Optional[List[List[float]]], Optional[IOError]]:\n try:\n with open(pkg_resources.resource_filename(__data_pkg__, file_name)) as file_handler:\n next(file_handler)\n return [[float(x) for x in line.split(\" \") if 
len(x) > 0] for line in file_handler], None\n except IOError as err:\n return None, err", "def f_open(loc):\n file = open(loc)\n t, U = [], []\n for l in file:\n data = l.split(\",\") # 3<=>t; 4<=>U\n t.append(float(data[3]))\n U.append(float(data[4]))\n return t, U", "def readFile(self, fname):\r\n self.scores = []\r\n self.fname = fname\r\n try:\r\n with open(fname, 'r') as f:\r\n for line in f:\r\n self.appendScore(line.split(' '))\r\n except:\r\n pass", "def read_line(l):\n return [read_float(l[s]) for s in slices['data']]", "def get_score_summary(fname):\n gradedata = {}\n fhandler = open(fname, 'r')\n rest_data = csv.reader(fhandler)\n for row in rest_data:\n if row[10] not in ['P', '', 'GRADE']:\n gradedata[row[0]] = [row[1], row[10]]\n gradedata.update(gradedata)\n fhandler.close()\n\n gradereview = {}\n for value in gradedata.itervalues():\n if value[0] not in gradereview.iterkeys():\n count1 = 1\n count2 = GRADESCL[value[1]]\n else:\n count1 = gradereview[value[0]][0] + 1\n count2 = gradereview[value[0]][1] + GRADESCL[value[1]]\n gradereview[value[0]] = (count1, count2)\n gradereview.update(gradereview)\n grade = {}\n for key in gradereview.iterkeys():\n count1 = gradereview[key][0]\n count2 = gradereview[key][1]/gradereview[key][0]\n grade[key] = (count1, count2)\n return grade", "def read_results(path):\r\n new_line = []\r\n \r\n with open(path, \"r+\") as f:\r\n \r\n line = f.readlines()\r\n \r\n for i in range(len(line)):\r\n \r\n if line[i][-3:-1] != ' %': \r\n \r\n new_line.append(float(line[i][:-5]))\r\n \r\n else:\r\n \r\n new_line.append(float(line[i][:-3]))\r\n \r\n return new_line", "def read_txt(path):\n mz = []\n i = []\n with open(path) as f:\n for line in f:\n line = line.split()\n mz.append(float(line[0]))\n i.append(float(line[1]))\n return mz, i", "def _read_out(path):\n with open(path, 'r') as f:\n out = f.readlines()\n hits = [float(h) for h in out[0].split(';')]\n misses = [float(m) for m in out[1].split(';')]\n return hits, misses", "def Read_Al_Palik():\n fid = open('/Volumes/Stock/PostDoc_Erlangen/DocMatos/Materials_Data_Base/Al_from_Palik.dat','r')\n L,eps = [],[]\n while 1: \n line = fid.readline()\n if line =='': \n break \n else :\n L.append(float(line[:25]))\n eps.append(float(line[27:54])+1j*float(line[55:-2]))\n return np.array(L),np.array(eps)" ]
[ "0.703265", "0.6650445", "0.65691876", "0.65250295", "0.63102", "0.62889344", "0.62173235", "0.6180634", "0.6176317", "0.6104537", "0.60217613", "0.6019209", "0.5997103", "0.5946862", "0.5933064", "0.5927422", "0.5918964", "0.59073734", "0.5896533", "0.588818", "0.58771574", "0.5872458", "0.5852904", "0.5827478", "0.5826336", "0.5821687", "0.5819614", "0.57741934", "0.5771146", "0.5704688" ]
0.81548846
0
This function is automatically called when the task has completed (successfully or not). You implement finished() to do whatever followup stuff should happen after the task is complete. finished is always called from the main thread, so it's safe to do GUI operations and raise Python exceptions here. result is the return value from self.run.
def finished(self, result): raise NotImplementedError("Subclasses mut override finished()")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task_done(self) -> None:\n pass", "def taskCompleted(self) -> None:\n ...", "def taskCompleted(self) -> None:\n ...", "def finished(self):", "def finished(self):\r\n raise NotImplementedError", "def finished(self):\n raise NotImplementedError()", "def finished(self):\n pass", "def _finished(self) -> None:", "def workerFinished(self, ret):\n self.worker.deleteLater()\n self.thread.quit()\n self.thread.wait()\n self.thread.deleteLater()\n # remove widget from message bar\n self.iface.messageBar().popWidget(self.messageBar)\n if ret is not None:\n # report the result\n #layer, total_area = ret\n self.iface.messageBar().pushMessage('Finished!')\n else:\n # notify the user that something went wrong\n self.iface.messageBar().pushMessage('Job cancelled.', level=QgsMessageBar.WARNING, duration=3)", "def task_completed(self, worker_result):\n self.status = 'completed'\n self.modification_time = current_millis()\n self.result = {'content': worker_result.result,\n 'version': worker_result.version}\n return self", "async def done(self, *args, **kwargs):\n raise NotImplementedError()", "def _return_result(self, done):\n chain_future(done, self._running_future)\n\n self.current_future = done\n self.current_index = self._unfinished.pop(done)", "def done(self):\n raise NotImplementedError()", "def finish(self) -> None:", "def finish(self) -> None:", "def exit(self) -> None:\n\n self.result = self.handle_success('finished-task')", "def done(self, result, noraise=False):\n # [??] Should this be in a critical section?\n\n # Has done() already been called on this task?\n if self.ev_done.is_set():\n # ??\n if isinstance(self.result, Exception) and (not noraise):\n raise self.result\n return self.result\n\n # calculate running time and other finalization\n self.endtime = time.time()\n try:\n self.totaltime = self.endtime - self.starttime\n except AttributeError:\n # task was not initialized properly\n self.totaltime = 0.0\n self.result = result\n\n # Release thread waiters\n self.ev_done.set()\n\n # Perform callbacks for event-style waiters\n self.make_callback('resolved', self.result)\n\n # If the result is an exception, then our final act is to raise\n # it in the caller, unless the caller explicitly supressed that\n if isinstance(result, Exception) and (not noraise):\n raise result\n\n return result", "def onThreadFinished(self):\n\n # reenable the button now thread method is finished\n self.pushButtonCheck.setEnabled(True)\n\n self.debug('Thread Method Finished!')\n\n qApp.restoreOverrideCursor()", "def set_task_finished(self):\n self.busy = False", "def finish(self):", "def finish(self):", "def done(self):", "def done(self):", "def task_done(self):\n if hasattr(self._input, \"task_done\"):\n self._input.task_done()", "def finished(self):\n\t\telog(\"finished\")", "def task_done(self):\n self._queue.task_done()", "def finish(self):\n pass", "def finish(self):\n pass", "def finish(self):\r\n\r\n self._is_finished = True", "def finish_task(self):\n self.report_total_usage()\n if self.retry:\n self.retry = False\n self.curr_retries = 0\n self.state = \"done\"\n self.ready_for_step += 1\n self.RM.release_allocation(self, self.using.nodes)\n self.using.clear()\n self.curr_exec_time = 0\n # log message\n self.fwk.logEvent(self.sim.name, self.name, \"finish_task\", \"finished running\")" ]
[ "0.6918335", "0.6911658", "0.6911658", "0.68672836", "0.6820632", "0.67570865", "0.67491585", "0.6728455", "0.67025614", "0.66183585", "0.6411025", "0.6394211", "0.63900214", "0.63749427", "0.63749427", "0.63746125", "0.6339039", "0.6332644", "0.6322808", "0.6296955", "0.6296955", "0.6294525", "0.6294525", "0.6287467", "0.62731546", "0.62219584", "0.6179118", "0.6179118", "0.61733913", "0.61611867" ]
0.7348429
0
Fetch license artifacts associated with the service model and search licensekeygroupUUID and entitlementpooluuid associated with the given att part number and nominal throughput in a request
def license_optim(request_json): mdc_from_json(request_json) req_id = request_json["requestInfo"]["requestId"] model_name = request_json.get('placementInfo', {}).get('serviceInfo', {}).get('modelInfo', {}).get('modelName') service_name = model_name license_info = [] for demand in request_json.get('placementInfo', {}).get('demandInfo', {}).get('licenseDemands', []): license_info.append( {'serviceResourceId': demand['serviceResourceId'], 'resourceModuleName': demand['resourceModuleName'], 'entitlementPoolList': "NOT SUPPORTED", 'licenseKeyGroupList': "NOT SUPPORTED" }) return license_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_and_download_files(context):\n\n\n input_path = 'input/'\n if os.path.isdir(input_path):\n log.debug('Path already exists: ' + input_path)\n else:\n log.debug('Creating: ' + input_path)\n os.mkdir(input_path)\n\n fw = context.client\n\n if 'classification_measurement' in context.config:\n class_meas = context.config['classification_measurement'].split()\n else:\n class_meas = ['T1']\n\n # session and acquisition include/exclude lists can come from:\n # project info metadata,\n # subject info metadata, and\n # config options\n # The last one wins (how about getting it from an input file also, eh?)\n ses_exclude_list = None\n ses_include_list = None\n acq_exclude_list = None\n acq_include_list = None\n\n fs = 'freesurfer_longitudinal_'\n where = 'Found in project info'\n # check for exclude/include lists of regexs for sessions in project info\n sel = context.gear_dict['project'].info.get(fs + 'session_excludelist')\n if sel:\n ses_exclude_list = sel.split()\n log.info(where+' '+fs+'session_excludelist: \"'+sel+'\"')\n sil = context.gear_dict['project'].info.get(fs + 'session_includelist')\n if sil:\n ses_include_list = sil.split()\n log.info(where+' '+fs+'session_includelist: \"'+sil+'\"')\n # check for exclude/include lists of regexs for acquisitions in project info\n ael = context.gear_dict['project'].info.get(fs + 'acquisition_excludelist')\n if ael:\n acq_exclude_list = ael.split()\n log.info(where+' '+fs+'acquisition_excludelist: \"'+ael+'\"')\n ail = context.gear_dict['project'].info.get(fs + 'acquisition_includelist')\n if ail:\n acq_include_list = ail.split()\n log.info(where+' '+fs+'acquisition_includelist: \"'+ail+'\"')\n\n where = 'Found in subject info'\n # check for exclude/include lists of regexs for sessions in subject info\n sel = context.gear_dict['subject'].info.get(fs + 'session_excludelist')\n if sel:\n ses_exclude_list = sel.split()\n log.info(where+' '+fs+'session_excludelist: \"'+sel+'\"')\n sil = context.gear_dict['subject'].info.get(fs + 'session_includelist')\n if sil:\n ses_include_list = sil.split()\n log.info(where+' '+fs+'session_includelist: \"'+sil+'\"')\n # check for exclude/include lists of regexs for acquisitions in subject info\n ael = context.gear_dict['subject'].info.get(fs + 'acquisition_excludelist')\n if ael:\n acq_exclude_list = ael.split()\n log.info(where+' '+fs+'acquisition_excludelist: \"'+ael+'\"')\n ail = context.gear_dict['subject'].info.get(fs + 'acquisition_includelist')\n if ail:\n acq_include_list = ail.split()\n log.info(where+' '+fs+'acquisition_includelist: \"'+ail+'\"')\n\n where = 'Found in config'\n # set up exclude/include lists of reegexs for sessions in config\n if 'session_excludelist' in context.config:\n ses_exclude_list = context.config['session_excludelist'].split()\n log.info(where+' session_excludelist: \"'+str(ses_exclude_list)+'\"')\n if 'session_includelist' in context.config:\n ses_include_list = context.config['session_includelist'].split()\n log.info(where+' session_includelist: \"'+str(ses_include_list)+'\"')\n\n # set up exclude/include lists of reegexs for acquisitions in config\n if 'acquisition_excludelist' in context.config:\n acq_exclude_list = context.config['acquisition_excludelist'].split()\n log.info(where+' acquisition_excludelist: \"'+str(acq_exclude_list)+'\"')\n if 'acquisition_includelist' in context.config:\n acq_include_list = context.config['acquisition_includelist'].split()\n log.info(where+' acquisition_includelist: \"'+str(acq_include_list)+'\"')\n\n # go through all sessions, 
acquisitions to find files\n for session in context.gear_dict['subject'].sessions():\n\n lemme_out = False\n if ses_exclude_list:\n for regex in ses_exclude_list:\n if re.search(regex, session.label): # if excluded, skip\n log.info('Session \"' + session.label + '\" matches ' + \\\n 'exclusion regex, skipping it')\n lemme_out = True\n continue\n if lemme_out:\n continue\n\n if ses_include_list:\n match = False\n for regex in ses_include_list:\n if not re.search(regex, session.label):\n match = True\n if match:\n continue # if not included (matches any regex), skip\n else:\n log.info('Session \"' + session.label + '\" matches ' \\\n 'an inclusion regex, keeping it')\n\n for acquisition in fw.get_session_acquisitions(session.id):\n\n lemme_out = False\n if acq_exclude_list:\n for regex in acq_exclude_list:\n if re.search(regex, acquisition.label): # if excluded, skip\n log.info('Acquisition \"' + acquisition.label + \\\n '\" matches exclusion regex, skipping it')\n lemme_out = True\n continue\n if lemme_out:\n continue\n\n if acq_include_list:\n match = False\n for regex in acq_include_list:\n if not re.search(regex, acquisition.label):\n match = True\n if match:\n continue # if not included (matches any regex), skip\n else:\n log.info('Acquisition \"' + acquisition.label + '\" ' + \\\n 'matches an inclusion regex, keeping it')\n\n for afile in acquisition.files:\n\n # Scan must be nifti\n if afile.type == 'nifti':\n\n found_one = False\n for cm in class_meas:\n if 'Measurement' in afile.classification:\n if cm in afile.classification['Measurement']:\n found_one = True\n log.info('Found ' + cm + ' file')\n\n if found_one:\n download_it(fw, acquisition, afile.name, input_path)\n context.gear_dict['visits'].append(\n make_file_name_safe(session.label, '_'))\n else:\n log.info('Ignoring ' + afile.name)", "async def limited_req(path, session, semaphore, **kwargs):\n async with semaphore:\n async with session.get('https://iceprod2-api.icecube.wisc.edu'+path, params=kwargs) as response:\n return await response.json()", "def search_for_books(search_criteria, product_url, headers):\n\tprint \"od api in search_for_books \"\n\tlist_of_books = []\n\tlist_book = []\n\tq = search_criteria\n\tlimit = 300\t\t# 25 by default 300 max\n\toffset = 0\t\t# number of titles to skip\n\tformats = \"\"\n\tsort = \"Author:desc\" \t\t# :desc\n\tlastupdatetime = \"\" \n\tseries = \"\" \n\tsearch_parms = \"?q=%s&limit=%s&offset=0&formats=%s&sort=%s\" % (q, limit, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formats, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t sort)\n\tod_url=\"%s%s\" % (product_url, search_parms)\n\n\tprint \"overdrive url = \", od_url, \"\\n\"\n\tod_url = od_url.replace(' ', '%20')\n\tbook_response = requests.get(od_url, headers=headers)\n\n\tprint \"book search response == \", book_response, \"reason = \", book_response.reason, \"\\n\"\n\tif book_response.status_code == 401:\n\t print \"Patron is not authorize to use this library == \", od_url, \"\\n\"\n\telif book_response.status_code > 201:\n\t\tprint \"Get request failed == \", book_response.reason\n\telif book_response.status_code == 200 or book_response.status_code == 201:\n\t\tprint \"Get request to get the a list of books was successful\", \"\\n\"\n\n\t\tbook_response_data = json.loads(book_response.content)\n\t\tprint \"OverDrive book count == \", book_response_data['totalItems'], \"\\n\"\n\n\t\tif book_response_data['totalItems'] > 0:\n\t\t\tproducts = book_response_data['products']\n\t\t\tfor product in products:\n\t\t\t\tbook_data = 
{}\t\n\t\t\t\tbook_data['images'] = product['images']['thumbnail']['href']\n\t\t\t\tbook_data['title'] = product['title']\n\t\t\t\tbook_data['author'] = product['primaryCreator']['name']\n\t\t\t\tbook_data['availableToDownload'] = product['links']['availability']['href']\n\t\t\t\tbook_data['id'] = product['id']\n\t\t\t\tbook_data['metadata'] = product['links']['metadata']['href']\n\t\t\t\tbook_data['origin'] = 'ODCOM'\n\t\t\t\tlist_book = [book_data]\n\t\t\t\tlist_of_books.extend(list_book)\n\t\t\t#end for\n\t\t#end if\n\t#end if\n\n\treturn list_of_books", "def fusion_api_get_licenses(self, uri=None, param='', api=None, headers=None):\n return self.license.get(uri=uri, param=param, api=api, headers=headers)", "def vcac_getvm_detail_svrreq(self, srid):\n \n self.reqid=srid\n try:\n #Get the name of the vm and return JSON formatted response\n \n jfile=os.path.join(\"%s\", \"%s.json\") % (self.data['rundir'], self.reqid )\n print \"\\n\"\n print \"######## [Waiting for customization for SR: %s] ########\" % self.reqid\n print \"\\n\"\n time.sleep(300.0)\n vrapath=BASE_DIR + '/' + 'tools/vracc/bin/'\n cmd=\"cd %s && ./cloudclient.sh vra machines list --requestId %s --format \" \\\n \"JSON --export %s\" % ( vrapath, self.reqid, jfile )\n request = execute_action(cmd)\n except APIError, e:\n print \"Found error## vcac_getvm_detail_svrreq: %s\" % str(e)\n sys.exit(1)\n else:\n logging.debug(\"Verify return value after validation query: %s\" % (request))\n self.gtintval = self.gtintval + 300\n if os.path.exists(jfile) and os.stat(jfile).st_size > 0:\n logging.info(\"After provision data file: %s\" % (jfile))\n try:\n with open(jfile) as data_file:\n reqData = json.load(data_file)\n except APIError, e:\n print \"Loading Json found problem: %s\" % str(e)\n sys.exit(1)\n\n \n if 'name' in reqData[0] and 'status' in reqData[0]:\n logging.debug(\"Value ##### %s\" % reqData[0]['name'])\n for j in range(len(reqData[0]['networks'])):\n logging.info(\"Hostname %s configured \" \\\n \"with Ip address %s\" % \\\n ( reqData[0]['name'], reqData[0]['networks'][j]['address']))\n self.vmstat[self.reqid]['vmname']=reqData[0]['name']\n self.vmstat[self.reqid]['ipaddress']=reqData[0]['networks'][j]['address']\n self.vmstat[self.reqid]['vmid']=reqData[0]['catalogResource']['parentResourceRef']['id']\n print \"\\n\"\n print \"SR Reached IP: %s (HH:MM:SS)\" % \\\n str(datetime.timedelta(seconds=self.gtintval))\n break\n else:\n self.vmstat[self.reqid]['vmname'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n\n else:\n logging.warn(\"- vcac cloudclient json file missing \" \\\n \"or does not contains hostname or Ip \" \\\n \"details i.e empty\")\n self.vmstat[self.reqid]['vmname'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n self.vmstat[self.reqid]['ipaddress'] = \"\"\n #self.update_helpdesk(self.reqdata)\n \n \n logging.debug(\"Before return: %s\" % reqData )\n logging.debug(\"Real Value return: %s\" % self.vmstat )\n return self.vmstat", "def _make_get_request(self,url,object_fh,params=None,return_type=None,extras=None):\n \n if params is None:\n params = {}\n \n if extras is None:\n extras = {}\n \n #Polite Pool Work\n #---------------------------------------\n #Example \n #GroovyBib/1.1 (https://example.org/GroovyBib/; mailto:[email protected]) BasedOnFunkyLib/1.4.\n\n #It is unclear if we need to match this format\n #This is good enough for now\n #Eventually we might allow a user to describe their application\n #version, and url\n ua_str = 
'st_crossref/%s (https://github.com/ScholarTools/crossref_api_python; mailto:%s)' % (VERSION,user_config.email)\n \n headers = {'user-agent': ua_str}\n \n \n #TODO Check params and # of results ...\n \n #TODO: Implement rate limits ...\n \n \n #The params get passed directly\n r = self.session.get(url,params=params,headers=headers) \n \n\n #Update limits\n #--------------------- \n headers = r.headers\n self.rate_limit = headers.get('X-Rate-Limit-Limit',50)\n self.rate_limit_interval = int(headers.get('X-Rate-Limit-Interval','1s')[:-1])\n \n #TODO: Implement ...https://konghq.com/blog/how-to-design-a-scalable-rate-limiting-algorithm/\n \n\n #These are debug only and should not be used for anything else\n #-------------------------------------------------------------\n self.last_url = url\n self.last_response = r \n self.last_params = params \n \n if r.status_code == 404:\n #This typically happens when the DOI is invalid\n #TODO: Make this a named exception\n raise errors.RequestError(r.text)\n \n json_data = r.json()\n if json_data['status'] == 'failed':\n self.last_error = json_data\n raise errors.CrossrefAPIError(json_data['message'])\n \n #Example error \n \"\"\"\n {'status': 'failed', 'message-type': 'validation-failure', \n 'message': [{'value': 'sample', \n 'message': 'This route does not support sample', 'type': 'parameter-not-allowed'}]}\n \"\"\" \n \n #TODO: return_type\n if return_type == 'json' or object_fh is None:\n return json_data\n else:\n return object_fh(json_data,self)", "def licenses(self,filter=None,n_rows=None,n_random=None,\n offset=None,query=None,sort_by=None,order=None,\n facet=None,cursor=None,select=None,return_type=None):\n \n params = self._options_to_dict(filter=filter,n_rows=n_rows,\n n_random=n_random,offset=offset,query=query,\n sort_by=sort_by,order=order,facet=facet,cursor=cursor,\n select=None)\n \n url = self.BASE_URL + 'licenses'\n #return self._make_search_request(url,models.LicenseSearchResult,options,_filter)\n return self._make_get_request(url,models.LicenseSearchResult,params,return_type)", "def collect_inv(spc, num_threads):\n global remaining\n devices = spc.device_management.devices.get(\n filter_={'managedStatus': 'In Sync'},\n paging={'start': 0, 'limit': 200},\n sortby=['name', 'platform'])\n\n print(\"There are %d devices to process\" % len(devices))\n remaining = len(devices)\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=int(num_threads)) as executor:\n for device in devices:\n f = executor.submit(process_device, spc, device)\n f.add_done_callback(finished)\n\n print(\"\\nAll Over!!!\")", "def _handle_custom_award_download(self):\n self.tinyshield_models.extend(\n [\n {\n \"name\": \"agencies\",\n \"key\": \"filters|agencies\",\n \"type\": \"array\",\n \"array_type\": \"object\",\n \"object_keys\": {\n \"type\": {\"type\": \"enum\", \"enum_values\": [\"funding\", \"awarding\"], \"optional\": False},\n \"tier\": {\"type\": \"enum\", \"enum_values\": [\"toptier\", \"subtier\"], \"optional\": False},\n \"toptier_name\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"name\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": False},\n },\n },\n {\"name\": \"agency\", \"key\": \"filters|agency\", \"type\": \"integer\"},\n {\n \"name\": \"date_range\",\n \"key\": \"filters|date_range\",\n \"type\": \"object\",\n \"optional\": False,\n \"object_keys\": {\n \"start_date\": {\"type\": \"date\", \"default\": \"1000-01-01\"},\n \"end_date\": {\"type\": \"date\", \"default\": 
datetime.strftime(datetime.utcnow(), \"%Y-%m-%d\")},\n },\n },\n {\n \"name\": \"date_type\",\n \"key\": \"filters|date_type\",\n \"type\": \"enum\",\n \"enum_values\": [\"action_date\", \"last_modified_date\"],\n \"default\": \"action_date\",\n },\n {\n \"name\": \"place_of_performance_locations\",\n \"key\": \"filters|place_of_performance_locations\",\n \"type\": \"array\",\n \"array_type\": \"object\",\n \"object_keys\": {\n \"country\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": False},\n \"state\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"zip\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"district_original\": {\n \"type\": \"text\",\n \"text_type\": \"search\",\n \"optional\": True,\n \"text_min\": 2,\n \"text_max\": 2,\n },\n \"district_current\": {\n \"type\": \"text\",\n \"text_type\": \"search\",\n \"optional\": True,\n \"text_min\": 2,\n \"text_max\": 2,\n },\n # TODO: To be removed in DEV-9966\n \"district\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"county\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"city\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n },\n },\n {\n \"name\": \"place_of_performance_scope\",\n \"key\": \"filters|place_of_performance_scope\",\n \"type\": \"enum\",\n \"enum_values\": [\"domestic\", \"foreign\"],\n },\n {\n \"name\": \"prime_award_types\",\n \"key\": \"filters|prime_award_types\",\n \"type\": \"array\",\n \"array_type\": \"enum\",\n \"min\": 0,\n \"enum_values\": list(award_type_mapping.keys()),\n },\n {\n \"name\": \"recipient_locations\",\n \"key\": \"filters|recipient_locations\",\n \"type\": \"array\",\n \"array_type\": \"object\",\n \"object_keys\": {\n \"country\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": False},\n \"state\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"zip\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"district_original\": {\n \"type\": \"text\",\n \"text_type\": \"search\",\n \"optional\": True,\n \"text_min\": 2,\n \"text_max\": 2,\n },\n \"district_current\": {\n \"type\": \"text\",\n \"text_type\": \"search\",\n \"optional\": True,\n \"text_min\": 2,\n \"text_max\": 2,\n },\n # TODO: To be removed in DEV-9966\n \"district\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"county\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n \"city\": {\"type\": \"text\", \"text_type\": \"search\", \"optional\": True},\n },\n },\n {\n \"name\": \"recipient_scope\",\n \"key\": \"filters|recipient_scope\",\n \"type\": \"enum\",\n \"enum_values\": (\"domestic\", \"foreign\"),\n },\n {\"name\": \"sub_agency\", \"key\": \"filters|sub_agency\", \"type\": \"text\", \"text_type\": \"search\"},\n {\n \"name\": \"sub_award_types\",\n \"key\": \"filters|sub_award_types\",\n \"type\": \"array\",\n \"array_type\": \"enum\",\n \"min\": 0,\n \"enum_values\": all_subaward_types,\n },\n ]\n )\n\n filter_all_agencies = False\n if str(self._json_request[\"filters\"].get(\"agency\", \"\")).lower() == \"all\":\n filter_all_agencies = True\n self._json_request[\"filters\"].pop(\"agency\")\n\n self._json_request = self.get_validated_request()\n custom_award_filters = self._json_request[\"filters\"]\n final_award_filters = {}\n\n # These filters do not need any normalization\n for key, value in custom_award_filters.items():\n if key in [\n \"recipient_locations\",\n 
\"recipient_scope\",\n \"place_of_performance_locations\",\n \"place_of_performance_scope\",\n ]:\n final_award_filters[key] = value\n\n if get_date_range_length(custom_award_filters[\"date_range\"]) > 366:\n raise InvalidParameterException(\"Invalid Parameter: date_range total days must be within a year\")\n\n final_award_filters[\"time_period\"] = [\n {**custom_award_filters[\"date_range\"], \"date_type\": custom_award_filters[\"date_type\"]}\n ]\n\n if (\n custom_award_filters.get(\"prime_award_types\") is None\n and custom_award_filters.get(\"sub_award_types\") is None\n ):\n raise InvalidParameterException(\n \"Missing one or more required body parameters: prime_award_types or sub_award_types\"\n )\n\n self._json_request[\"download_types\"] = []\n final_award_filters[\"prime_and_sub_award_types\"] = {}\n\n if custom_award_filters.get(\"prime_award_types\"):\n self._json_request[\"download_types\"].append(\"prime_awards\")\n final_award_filters[\"prime_and_sub_award_types\"][\"prime_awards\"] = custom_award_filters[\"prime_award_types\"]\n\n if custom_award_filters.get(\"sub_award_types\"):\n self._json_request[\"download_types\"].append(\"sub_awards\")\n final_award_filters[\"prime_and_sub_award_types\"][\"sub_awards\"] = custom_award_filters[\"sub_award_types\"]\n\n if \"agency\" in custom_award_filters:\n if \"agencies\" not in custom_award_filters:\n final_award_filters[\"agencies\"] = []\n\n if filter_all_agencies:\n toptier_name = \"all\"\n else:\n toptier_name = (\n ToptierAgency.objects.filter(toptier_agency_id=custom_award_filters[\"agency\"])\n .values(\"name\")\n .first()\n )\n if toptier_name is None:\n raise InvalidParameterException(f\"Toptier ID not found: {custom_award_filters['agency']}\")\n toptier_name = toptier_name[\"name\"]\n\n if \"sub_agency\" in custom_award_filters:\n final_award_filters[\"agencies\"].append(\n {\n \"type\": \"awarding\",\n \"tier\": \"subtier\",\n \"name\": custom_award_filters[\"sub_agency\"],\n \"toptier_name\": toptier_name,\n }\n )\n else:\n final_award_filters[\"agencies\"].append({\"type\": \"awarding\", \"tier\": \"toptier\", \"name\": toptier_name})\n\n if \"agencies\" in custom_award_filters:\n final_award_filters[\"agencies\"] = [\n val for val in custom_award_filters[\"agencies\"] if val.get(\"name\", \"\").lower() != \"all\"\n ]\n\n self._json_request[\"filters\"] = final_award_filters", "def qs_license():\r\n paragraph = document.add_paragraph('')\r\n document.add_heading('License', level=1)\r\n lic_metric = ['lef', 'serial', 'name', 'organization', 'product', 'numberOfCores', 'isExpired', 'expiredReason', 'isBlacklisted', 'isInvalid']\r\n qs_lic = get_qlik_sense.get_license()\r\n num_of_metric = len(qs_lic)\r\n table = document.add_table(rows=num_of_metric+1, cols=2)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'details'\r\n\r\n for metric in range(len(qs_lic)):\r\n row = table.rows[metric+1]\r\n row.cells[0].text = str(lic_metric[metric])\r\n row.cells[1].text = str(qs_lic[metric][0])\r\n document.add_page_break()", "def download_earning_reports(self, form='10-Q', year_range=3, force_update=False):\n if self.components.empty:\n self.get_compo_list()\n\n cik_series = self.components['CIK'].astype(str)\n cik_to_ticker = pd.Series(cik_series.index.values, index=cik_series).to_dict()\n\n sec_archive_base = 'https://www.sec.gov/Archives'\n xbrl_idx_base = sec_archive_base + '/edgar/full-index'\n xbrl_pattern = re.compile(r'([0-9]+)\\|(.*)\\|%s\\|(.*)\\|(.*)'%form)\n link_pattern = 
re.compile(r'[-\\.txt]')\n #instance_pattern = re.compile(r'instance=[\\'\\\"]*([\\w\\-]+\\.xml)[\\'\\\"]*') # e.g. <Report instance=\"amtd-20170630.xml\">\n instance_pattern = re.compile(r'>([\\w]+-[0-9]+\\.xml)<') # e.g. <File>bebe-20140104.xml</File>\n year_end = dt.datetime.today().year\n year_start = year_end - year_range\n for year in range(year_start, year_end+1):\n for quarter in ['QTR1', 'QTR2', 'QTR3', 'QTR4']:\n xbrl_idx = '%s/%s/%s/xbrl.idx' %(xbrl_idx_base, year, quarter)\n try:\n r = requests.get(xbrl_idx)\n except requests.exceptions.RequestException as e:\n print('Error: xbrl.idx request exception, link %s' %xbrl_idx)\n print(e)\n continue\n if r.status_code != requests.codes.ok:\n print('Error: requests get failure, url %s, status_code %d' %(xbrl_idx, r.status_code))\n continue\n # Parse each line and extract lines with specified form(e.g.10-Q).\n #\n # Example:\n # CIK|Company Name|Form Type|Date Filed|Filename\n # 1173313|American BriVision (Holding) Corp|10-K/A|2017-09-22|edgar/data/1173313/0001213900-17-009907.txt\n # 1173313|American BriVision (Holding) Corp|10-Q|2017-08-21|edgar/data/1173313/0001213900-17-009012.txt\n # 1173313|American BriVision (Holding) Corp|S-1/A|2017-07-17|edgar/data/1173313/0001213900-17-007661.txt\n # 1173313|American BriVision (Holding) Corp|S-1/A|2017-09-22|edgar/data/1173313/0001213900-17-009909.txt\n # 1173431|TD AMERITRADE HOLDING CORP|10-Q|2017-07-24|edgar/data/1173431/0001173431-17-000108.txt\n # 1173431|TD AMERITRADE HOLDING CORP|8-K|2017-07-18|edgar/data/1173431/0001173431-17-000104.txt\n all_edgar_links = dict() # CIK-to-link dict\n for line in r.text.splitlines():\n m = xbrl_pattern.findall(line)\n if len(m) > 0:\n all_edgar_links[m[0][0]] = m[0][-1]\n # Download links\n for cik in all_edgar_links.keys():\n if cik not in cik_to_ticker.keys():\n #print('Skip CIK ' + cik) # FIXME: TEST ONLY\n continue\n link = all_edgar_links[cik] # e.g. 'edgar/data/1173431/0001173431-17-000108.txt'\n link=link.split('/') # e.g. ['edgar', 'data', '1173431', '0001173431-17-000108.txt']\n link[-1] = link_pattern.sub('', link[-1]) # e.g. '000117343117000108'\n link = '/'.join(link) # e.g. 
'edgar/data/1173431/000117343117000108'\n url = sec_archive_base+'/'+link+'/FilingSummary.xml'\n try:\n r = requests.get(url)\n except requests.exceptions.RequestException as e:\n print('%s: FilingSummary request failure, link %s' %(cik_to_ticker[cik], url))\n print(e)\n continue\n m = instance_pattern.search(r.text)\n if m and len(m.groups()) > 0:\n xbrl_file = m.groups()[0]\n print('%s => %s => %s' %(cik_to_ticker[cik], cik, xbrl_file)) # FIXME: TEST ONLY\n # download file url = sec_archive_base+'/'+link+'/'+xbrl_file\n ticker = Symbol(cik_to_ticker[cik])\n ticker.download_earning(sec_archive_base+'/'+link, xbrl_file, form, force_update=force_update)\n else:\n print('Error: failed to find XBRL file for %s, url %s, status_code %d' %(cik_to_ticker[cik], url, r.status_code))\n continue", "def list_supporting_elements(self, workspace_unique_id=None, subset_unique_id=None, request=None): \n print('list_supporting_elements', request)\n# workspace_object = self._get_workspace_object(unique_id=workspace_unique_id) \n# subset_object = workspace_object.get_subset_object(subset_unique_id)\n \n quality_element_list = ['secchi depth', 'oxygen balance']\n print('request', request)\n return_list = []\n for quality_element in quality_element_list: \n \n# # Check request \n# qe_dict = None\n# if request:\n# for qe in request:\n# if qe['value'] == quality_element:\n# qe_dict = qe\n# break\n \n quality_element_dict = self.dict_quality_element(workspace_unique_id=workspace_unique_id, \n subset_unique_id=subset_unique_id, \n quality_element=quality_element, \n request=request)\n \n return_list.append(quality_element_dict)\n \n return return_list", "def select_provisioning_services_prsa(requirements, offerings, error_tr = 0.15):\n\n # Generate virtual services\n virtual_offerings = generate_virtual_services(offerings, error_tr)\n # Defining arrival specification.\n latency_req = {}\n\n for i in requirements:\n latency_req[i] = requirements[i]['period'] - requirements[i]['request_time']\n\n # Sort by request time\n latency_req_sorted = collections.OrderedDict(sorted(latency_req.items(), key=lambda t: t[1]))\n\n decision = {}\n for key, value in latency_req_sorted.items():\n flag = 0\n decision[key] = {}\n for virtual_offering in virtual_offerings:\n if virtual_offerings[virtual_offering]['accuracy'] < requirements[key]['accuracy'] and virtual_offerings[virtual_offering]['latency'] < requirements[key]['period'] - requirements[key]['request_time']:\n if flag == 0:\n decision[key]['key'] = virtual_offering\n decision[key]['accuracy'] = virtual_offerings[virtual_offering]['accuracy']\n decision[key]['latency'] = virtual_offerings[virtual_offering]['latency']\n decision[key]['elements'] = virtual_offerings[virtual_offering]['elements']\n decision[key]['power_consumption'] = virtual_offerings[virtual_offering]['power_consumption'] \n flag = 1\n elif decision[key]['power_consumption'] > virtual_offerings[virtual_offering]['power_consumption']:\n decision[key]['key'] = virtual_offering\n decision[key]['accuracy'] = virtual_offerings[virtual_offering]['accuracy']\n decision[key]['latency'] = virtual_offerings[virtual_offering]['latency']\n decision[key]['elements'] = virtual_offerings[virtual_offering]['elements']\n decision[key]['power_consumption'] = virtual_offerings[virtual_offering]['power_consumption'] \n \n try:\n len(virtual_offerings[decision[key]['key']])\n except:\n for virtual_offering in virtual_offerings:\n if virtual_offerings[virtual_offering]['latency'] < requirements[key]['period'] - 
requirements[key]['request_time']:\n if flag == 0:\n decision[key]['key'] = virtual_offering\n decision[key]['accuracy'] = virtual_offerings[virtual_offering]['accuracy']\n decision[key]['latency'] = virtual_offerings[virtual_offering]['latency']\n decision[key]['elements'] = virtual_offerings[virtual_offering]['elements']\n decision[key]['power_consumption'] = virtual_offerings[virtual_offering]['power_consumption'] \n flag = 1\n elif decision[key]['accuracy'] > virtual_offerings[virtual_offering]['accuracy']:\n decision[key]['key'] = virtual_offering\n decision[key]['accuracy'] = virtual_offerings[virtual_offering]['accuracy']\n decision[key]['latency'] = virtual_offerings[virtual_offering]['latency']\n decision[key]['elements'] = virtual_offerings[virtual_offering]['elements']\n decision[key]['power_consumption'] = virtual_offerings[virtual_offering]['power_consumption'] \n try:\n len(virtual_offerings[decision[key]['key']])\n except:\n for virtual_offering in virtual_offerings:\n if flag == 0:\n decision[key]['key'] = virtual_offering\n decision[key]['accuracy'] = virtual_offerings[virtual_offering]['accuracy']\n decision[key]['latency'] = virtual_offerings[virtual_offering]['latency']\n decision[key]['elements'] = virtual_offerings[virtual_offering]['elements']\n decision[key]['power_consumption'] = virtual_offerings[virtual_offering]['power_consumption'] \n flag = 1\n elif decision[key]['latency'] > virtual_offerings[virtual_offering]['latency']:\n decision[key]['key'] = virtual_offering\n decision[key]['accuracy'] = virtual_offerings[virtual_offering]['accuracy']\n decision[key]['latency'] = virtual_offerings[virtual_offering]['latency']\n decision[key]['elements'] = virtual_offerings[virtual_offering]['elements']\n decision[key]['power_consumption'] = virtual_offerings[virtual_offering]['power_consumption'] \n\n\n pprint.pprint(decision)\n return decision", "def query_part_info(parts, distributors, currency):\n solved = set()\n # Loop through the parts looking for those sourced by local distributors\n # that won't be found online. 
Place any user-added info for these parts\n # (such as pricing) into the part dictionary.\n for p in parts:\n # Find the manufacturer's part number if it exists.\n pn = p.fields.get('manf#') # Returns None if no manf# field.\n\n # Now look for catalog number, price list and webpage link for this part.\n for dist in distributors:\n cat_num = p.fields.get(dist + ':cat#')\n pricing = p.fields.get(dist + ':pricing')\n link = p.fields.get(dist + ':link')\n avail = p.fields.get(dist + ':avail')\n if cat_num is None and pricing is None and link is None:\n continue\n\n cat_num = cat_num or pn or make_unique_catalog_number(p, dist)\n p.fields[dist + ':cat#'] = cat_num # Store generated cat#.\n # Get the DistData for this distributor\n dd = p.dd.get(dist, DistData())\n dd.part_num = cat_num\n\n if link:\n url_parts = list(urlsplit(link))\n if url_parts[0] == '':\n url_parts[0] = u'http'\n link = urlunsplit(url_parts)\n else:\n # This happens when no part URL is found.\n debug_obsessive('No part URL found for local \\'{}\\' distributor!'.format(dist))\n dd.url = link\n\n price_tiers = {}\n try:\n local_currency = re.findall('[a-zA-Z]{3}', pricing)[0].upper()\n except Exception:\n local_currency = currency\n old_pricing = pricing\n pricing = re.sub('[^0-9.;:]', '', pricing) # Keep only digits, decimals, delimiters.\n for qty_price in pricing.split(';'):\n splitted = qty_price.split(SEPRTR)\n if len(splitted) == 2:\n qty, price = splitted\n if local_currency:\n dd.currency = local_currency\n try:\n price_tiers[int(qty)] = float(price)\n except ValueError:\n warning(W_BADPRICE, 'Malformed pricing number: `{}` at {}'.format(old_pricing, p.refs))\n else:\n warning(W_BADPRICE, 'Malformed pricing entry: `{}` at {}'.format(qty_price, p.refs))\n # dd.moq = min(price_tiers.keys())\n if not price_tiers:\n # This happens when no pricing info is found.\n debug_obsessive('No pricing information found for local \\'{}\\' distributor!'.format(dist))\n dd.price_tiers = price_tiers\n\n # Availability\n if avail is not None:\n dd.qty_avail = avail\n\n # Update the DistData for this distributor\n p.dd[dist] = dd\n # We have data for this distributor. 
Avoid marking normal distributors.\n if dist in dist_local_template.api_distributors:\n solved.add(dist)\n return solved", "def select_provisioning_services_ptsa(requirements, offerings, error_tr = 0.15):\n\n\t# Generate virtual services\n\tvirtual_offerings = generate_virtual_services(offerings, error_tr)\n\n\t# Defining (acc, lat) requirements for the whole bucket.\n\taccuracy_req = {}\n\tlatency_req = {}\n\n\t# Specify variables accuracy and latency-req_time\n\tfor i in requirements:\n\t\taccuracy_req[i] = requirements[i]['accuracy']\n\t\tlatency_req[i] = requirements[i]['period'] - requirements[i]['request_time']\n\n\t# Sort by latency\n\tlatency_req_sorted = collections.OrderedDict(sorted(latency_req.items(), key=lambda t: t[1]))\n\n\t# Find the highest accuracy requirement\n\tacc_req_key = min(accuracy_req, key=accuracy_req.get)\n\t# Find the highest latency requirement\n\tlat_first_key = latency_req_sorted.keys()[0]\n\n\t# Algorithm execution, specification of the final requirements (reduction of requirements)\n\trequirements_final = []\n\twhile True:\n\n\t\tnew_accuracy = {}\n\t\tfor key, value in latency_req_sorted.items():\n\n\t\t\tif acc_req_key != key:\n\t\t\t\tnew_accuracy[key] = accuracy_req[key]\n\t\t\telse:\n\t\t\t\tbreak\n\n\t\trequirements_final.append((acc_req_key, accuracy_req[acc_req_key], latency_req[acc_req_key]))\n\n\t\ttry:\n\t\t\tacc_req_key = min(new_accuracy, key=new_accuracy.get)\n\t\texcept:\n\t\t\tbreak\n\n\t# Specification of unions of possible elementary services to meet the requirements\n\tpossible_services = {}\n\tfor requirement in requirements_final:\n\t\tpossible_services[requirement[0]] = []\n\n\t\tfor key in virtual_offerings:\n\n\t\t\tif virtual_offerings[key]['accuracy'] < requirement[1] and virtual_offerings[key]['latency'] < requirement[2]:\n\n\t\t\t\tpossible_services[requirement[0]].append(virtual_offerings[key]['elements'])\n\n\t\t# In case there are no services available that will meet the requirements - don't \n\t\t# focus on power minimization in that case\n\t\tif len(possible_services[requirement[0]]) == 0:\n\n\t\t\t# If there are multiple services that meet the latency requirement, find the best accuracy\n\t\t\tflag = 0\n\t\t\tfor key in virtual_offerings:\n\t\t\t\tif flag == 0:\n\t\t\t\t\tsmallest_latency = (key, virtual_offerings[key]['latency'])\n\t\t\t\t\tflag = 1\n\t\t\t\telif virtual_offerings[key]['latency'] < smallest_latency[1]:\n\t\t\t\t\tsmallest_latency = (key, virtual_offerings[key]['latency'])\n\n\t\t\tpossible_alternatives = {}\n\t\t\tfor key in virtual_offerings:\n\t\t\t\tif virtual_offerings[key]['latency'] < requirement[2]:\n\t\t\t\t\tpossible_alternatives[key] = virtual_offerings[key]['accuracy']\n\n\t\t\t# If none of the services meet the latency requirement, use the closest one to the latency requirement\n\t\t\tif len(possible_alternatives) != 0:\n\t\t\t\tkey = min(possible_alternatives, key=possible_alternatives.get)\n\t\t\t\tpossible_services[requirement[0]].append(virtual_offerings[key]['elements'])\n\t\t\telse:\n\t\t\t\tpossible_services[requirement[0]].append(virtual_offerings[smallest_latency[0]]['elements'])\n\n\tdecision_tmp = make_decision_min_power(possible_services, offerings)\n\n\tservices = {}\n\tfor i in decision_tmp:\n\t\tservices[i] = {}\n\t\tservices[i]['accuracy'] = offerings[i]['accuracy']\n\t\tservices[i]['latency'] = offerings[i]['latency']\n\t\tservices[i]['power_consumption'] = offerings[i]['power_consumption']\n\t\tservices[i]['elements'] = offerings[i]['elements']\n\n\tvirtual_tmp = 
generate_virtual_services(services, error_tr)\n\n\tpower_min = None\n\tpower_exp = virtual_tmp[0]['power_consumption']\n\tdecision = {}\n\tfor i in requirements:\n\t\tfor j in virtual_tmp:\n\t\t\tif requirements[i]['accuracy'] < virtual_tmp[j]['accuracy'] and requirements[i]['period'] < virtual_tmp[j]['latency'] and power_exp > virtual_tmp[j]['power_consumption']:\n\t\t\t\tpower_min = j\n\t\t\t\tpower_exp = virtual_tmp[j]['power_consumption']\n\t\t\t\n\t\tif power_min is None:\n\t\t\taccuracy = None\n\t\t\tfor j in virtual_tmp:\n\t\t\t\tif requirements[i]['period'] < virtual_tmp[j]['latency']:\n\t\t\t\t\tif accuracy is None or accuracy > virtual_tmp[j]['accuracy']:\n\t\t\t\t\t\taccuracy_index = j\n\t\t\t\t\t\taccuracy = virtual_tmp[j]['accuracy']\n\t\t\t\t\t\n\t\t\tif accuracy is None:\n\t\t\t\tlatency_key = 0\n\t\t\t\tlatency_exp = virtual_tmp[0]['latency']\n\t\t\t\tfor j in virtual_tmp:\n\t\t\t\t\tif latency_exp > virtual_tmp[j]['latency']:\n\t\t\t\t\t\tlatency_key = j\n\n\t\t\t\tdecision[i] = {}\n\t\t\t\tdecision[i]['key'] = i\n\t\t\t\tdecision[i]['accuracy'] = virtual_tmp[latency_key]['accuracy']\n\t\t\t\tdecision[i]['latency'] = virtual_tmp[latency_key]['latency']\n\t\t\t\tdecision[i]['elements'] = virtual_tmp[latency_key]['elements']\n\t\t\t\tdecision[i]['power_consumption'] = virtual_tmp[latency_key]['power_consumption']\n\n\t\t\telse:\n\t\t\t\tdecision[i] = {}\n\t\t\t\tdecision[i]['key'] = i\n\t\t\t\tdecision[i]['accuracy'] = virtual_tmp[accuracy_index]['accuracy']\n\t\t\t\tdecision[i]['latency'] = virtual_tmp[accuracy_index]['latency']\n\t\t\t\tdecision[i]['elements'] = virtual_tmp[accuracy_index]['elements']\n\t\t\t\tdecision[i]['power_consumption'] = virtual_tmp[accuracy_index]['power_consumption']\n\n\t\telse:\n\t\t\tdecision[i] = {}\n\t\t\tdecision[i]['key'] = i\n\t\t\tdecision[i]['accuracy'] = virtual_tmp[power_min]['accuracy']\n\t\t\tdecision[i]['latency'] = virtual_tmp[power_min]['latency']\n\t\t\tdecision[i]['elements'] = virtual_tmp[power_min]['elements']\n\t\t\tdecision[i]['power_consumption'] = virtual_tmp[power_min]['power_consumption']\n\n\treturn decision", "def get_details(self, model_definition_uid):\n ##For CP4D, check if either spce or project ID is set\n self._client._check_if_either_is_set()\n op_name = 'getting model_definition details'\n modeldef_uid = str_type_conv(model_definition_uid)\n ModelDefinition._validate_type(modeldef_uid, u'model_definition_uid', STR_TYPE, False)\n\n url = self._href_definitions.get_model_definition_assets_href() + u'/' + modeldef_uid\n paramvalue = self._client._params()\n if not self._ICP:\n response_get = requests.get(\n url,\n params=self._client._params(),\n headers=self._client._get_headers()\n )\n else:\n response_get = requests.get(\n url,\n params=paramvalue,\n headers=self._client._get_headers(),\n verify=False\n )\n if response_get.status_code == 200:\n get_model_definition_details = self._handle_response(200, op_name, response_get)\n response = self._get_required_element_from_response(get_model_definition_details)\n\n if not self._client.CLOUD_PLATFORM_SPACES and not self._client.ICP_PLATFORM_SPACES:\n return response\n else:\n\n entity = response[u'entity']\n\n try:\n del entity[u'wml_model_definition'][u'ml_version']\n except KeyError:\n pass\n\n final_response = {\n \"metadata\": response[u'metadata'],\n \"entity\": entity\n }\n\n return final_response\n # return self._get_required_element_from_response(get_model_definition_details)\n else:\n return self._handle_response(200, op_name, response_get)", "def 
allocate(self, slice_urn, client_cert, credentials,\n rspec, end_time=None):\n # TODO: Check if sliver_urn is valid for RO\n result = []\n #Default end time = 30 days\n default_end_time = datetime.now() + timedelta(days=30)\n if end_time == None:\n end_time = default_end_time\n if self._verify_users:\n logger.debug(\"allocate: authenticate the user...\")\n client_urn, client_uuid, client_email =\\\n self.auth(client_cert, credentials, slice_urn, (\"createsliver\",))\n logger.info(\"Client urn=%s, uuid=%s, email=%s\" % (\n client_urn, client_uuid, client_email,))\n\n logger.info(\"slice_urn=%s, end_time=%s, rspec=%s\" % (\n slice_urn, end_time, rspec,))\n req_rspec = SERMv3RequestParser(from_string=rspec)\n \n self.__validate_rspec(req_rspec.get_rspec())\n\n se_manifest, se_slivers, se_db_slivers = SERMv3ManifestFormatter(), [], []\n \n links = req_rspec.links()\n nodes = req_rspec.nodes()\n\n # Workaround for \"1:n\" case: Get Vlan pairs from link->felix:vlan param\n sliceVlansPairs = req_rspec.getVlanPairs()\n\n # check if the requested resources (ports, vlans) are available\n reservation_ports = self.SESlices._allocate_ports_in_slice(nodes)\n availability_result = self.SEResources.check_available_resources(reservation_ports['ports'])\n\n # print \"WWWW: \", self.SEResources.get_port_mapping()\n\n if availability_result != False:\n\n # Mark resources as reserved\n self.SEResources.set_resource_reservation(reservation_ports['ports'])\n if end_time != None:\n alarm_time = end_time\n SESchedulerService.get_scheduler().add_job( se_job_release_resources,\n \"date\",\n run_date=alarm_time,\n args=[datetime.now(),\n reservation_ports['ports'],\n slice_urn])\n\n self.SESlices._create_manifest_from_req_n_and_l(se_manifest, nodes,links, sliceVlansPairs)\n logger.debug(\"SE-ManifestFormatter=%s\" % (se_manifest,))\n\n s = self.SESlices._allocate_ports_in_slice(nodes) \n \n self.SESlices.set_link_db(slice_urn, end_time,links, nodes, sliceVlansPairs)\n \n\n links_db, nodes, links = self.SESlices.get_link_db(slice_urn)\n for sliver in links_db[\"geni_sliver_urn\"]:\n result.append( \n { \n \"geni_sliver_urn\": sliver,\n \"geni_expires\": links_db['geni_expires'],\n \"geni_allocation_status\": links_db[\"geni_allocation_status\"],\n \"geni_operational_status\" : links_db[\"geni_operational_status\"]\n }\n )\n\n se_slivers = result\n logger.info(\"allocate successfully completed: %s\", slice_urn)\n logger.debug(\"requested SE-Sliver(%d)=%s\" % (len(se_slivers), se_slivers,))\n return (\"%s\" % se_manifest, se_slivers)\n\n else:\n raise geni_ex.GENIv3GeneralError(\"Allocation Failed. 
Requested resources are not available.\")", "def api_request(update, oformat, stream, params, yr, mntlist, tstep, back):\n # open connection to era5 files db \n conn = db_connect(cfg)\n # create empty list to store cdsapi requests\n rqlist = []\n # list of faster ips to alternate\n ips = cfg['altips']\n i = 0 \n # assign year and list of months\n if type(yr) is list:\n yrs = yr\n else:\n yrs = [yr]\n\n if mntlist == []: \n mntlist = [\"%.2d\" % i for i in range(1,13)]\n # retrieve stream arguments\n dsargs = define_args(stream, tstep)\n era5log.debug(f'Stream attributes: {dsargs}')\n # get variables details from json file\n vardict = read_vars()\n # define params to download\n if update and params == []:\n params = dsargs['params']\n \n # according to ECMWF, best to loop through years and months and do either multiple\n # variables in one request, or at least loop through variables in the innermost loop.\n \n for y in yrs:\n # build Copernicus requests for each month and submit it using cdsapi modified module\n for mn in mntlist:\n # for each output file build request and append to list\n # loop through params and months requested\n for varp in params:\n queue, var, cdsname = define_var(vardict, varp, era5log)\n # if grib code exists but cds name is not defined skip var and print warning\n if not queue:\n continue\n # create list of filenames already existing for this var and yr\n nclist = []\n sql = \"select filename from file where location=?\" \n tup = (f\"{stream}/{var}/{y}\",)\n if tstep == 'mon':\n tup = (f\"{stream}/{var}/monthly\",)\n nclist += query(conn, sql, tup)\n era5log.debug(nclist)\n\n stagedir, destdir, fname, daylist = target(stream, var, y, mn, dsargs, tstep, back)\n # if file already exists in datadir then skip\n if file_exists(fname, nclist):\n era5log.info(f'Skipping {fname} already exists')\n continue\n rdict = build_dict(dsargs, y, mn, cdsname, daylist, oformat, tstep, back)\n rqlist.append((dsargs['dsid'], rdict, os.path.join(stagedir,fname),\n os.path.join(destdir, fname), ips[i % len(ips)])) \n # progress index to alternate between ips\n i+=1\n era5log.info(f'Added request for {fname}')\n if back:\n break\n \n era5log.debug(f'{rqlist}')\n\n # parallel downloads\n if len(rqlist) > 0:\n # set num of threads = number of params, or use default from config\n if len(params) > 1:\n nthreads = len(params)\n else:\n nthreads = cfg['nthreads']\n pool = ThreadPool(nthreads)\n results = pool.imap(do_request, rqlist)\n pool.close()\n pool.join()\n else:\n era5log.info('No files to download!')\n era5log.info('--- Done ---')", "def __init__(self) -> None:\n self.path_config = '/home/equipment/EQ-scripts/equipment.conf'\n self.configParse()\n self.request_devices = \"\"\"With arm_address as (SELECT av.obj_id device_id,\n av.value_raw house_id\n FROM os_usr.dev_attr_values av\n WHERE av.attr_id = 3),\n swithes as (SELECT device_type_id\n FROM os_eqm.device_types\n WHERE device_class IN\n (\n SELECT device_class_id\n FROM os_eqm.device_classes\n WHERE guid IN\n (\n SELECT obj_guid\n FROM os_lib.objects_in_nav_categories\n WHERE nav_cat_id in\n (\n SELECT nav_cat_id\n FROM nav_categories\n WHERE guid = '75C0F3733B084DBDAC604167D298B2F5'\n )\n )\n ))\n SELECT d.device_id,\n na.net_address,\n dt.name,\n trim(os_usr.ertel_utils.get_prop_str(d.device_id,'MAC_ADRES_USTROJSTVA')) \n mac_sw\n FROM os_usr.geo_addresses ga,\n os_eqm.net_addresses na,\n arm_address arm ,\n device_types dt,\n devices d,\n swithes sw\n WHERE arm.house_id = ga.house_id\n and arm.device_id = d.device_id\n and 
na.device_id = d.device_id and na.is_management = '1'\n AND dt.device_type_id = d.device_type\n and dt.device_type_id in sw.device_type_id\n and ga.unified_house_id = '<house_id>'\n \"\"\"\n self.request_adresses = \"\"\"SELECT av.obj_id device_id, av.value_raw house_id\n FROM os_usr.dev_attr_values av \n WHERE av.attr_id = 2 AND av.VALUE_RAW LIKE '%<house>%'\"\"\"", "def getpubmed(terms, dbase, useremail):\r\n \r\n # determine how many articles there are for the search terms\r\n\r\n Entrez.email = useremail\r\n Entrez.tool = toolname\r\n handle = Entrez.egquery(term=terms)\r\n record = Entrez.read(handle)\r\n handle.close()\r\n for row in record[\"eGQueryResult\"]:\r\n if row[\"DbName\"] == dbase:\r\n cnt = int(row[\"Count\"])\r\n time.sleep(0.5) \r\n # Fetch the Pubmed IDs\r\n Entrez.email = useremail\r\n try:\r\n handle = Entrez.esearch(db=dbase, term=terms, retmax=cnt)\r\n record = Entrez.read(handle)\r\n handle.close()\r\n except HTTPException as e:\r\n time.sleep(0.5) \r\n print(\"Network problem: %s\" % e)\r\n print(\"Second (and final) attempt...\")\r\n handle = Entrez.esearch(db=dbase, term=terms, retmax=cnt)\r\n record = Entrez.read(handle)\r\n handle.close()\r\n\r\n idlist = record[\"IdList\"]\r\n cnt=len(idlist)\r\n time.sleep(0.5) \r\n # get the articles\r\n if cnt > 200000:\r\n print(\"Number of abstracts is very large, requires ID list batching to avoid timeout errror\")\r\n \r\n for a in range(0,(cnt//200000)+1):\r\n print('ID List batch ',a+1,'/',(cnt//200000)+1)\r\n if a == 0:\r\n topidlist = idlist\r\n del idlist\r\n if a == (cnt//200000):\r\n subidlist = topidlist[a*200000:]\r\n else:\r\n subidlist = topidlist[a*200000:((a+1)*200000)]\r\n subcnt = len(subidlist)\r\n if subcnt > 10000: # needs edgecase for when subcnt <= 10000\r\n print(\"number of abstracts in ID list batch is greater than 10,000: Batched Retrieval Necessary\")\r\n if a == (cnt//200000):\r\n subbatchrange = range(0,(subcnt//10000)+1)\r\n else:\r\n subbatchrange = range(0,(subcnt//10000))\r\n for b in subbatchrange:\r\n \r\n time.sleep(0.5) \r\n print('Retrieving batch ',(b+1),'/',(subcnt//10000)) \r\n handle = Entrez.efetch(db=dbase, id=subidlist, rettype=\"medline\",\r\n retmode=\"text\",retstart = (b * 10000),retmax = 10000 )\r\n recordsbatch = Medline.parse(handle)\r\n if a == 0 and b == 0:\r\n records = list(recordsbatch)\r\n else:\r\n records.extend(recordsbatch)\r\n \r\n \r\n \r\n elif cnt > 10000:\r\n print(\"number of abstracts is greater than 10,000: Batched Retrieval Necessary\")\r\n for b in range(0,(cnt//10000)+1):\r\n time.sleep(0.5) \r\n print('Retrieving batch ',b,'/',(cnt//10000))\r\n \r\n handle = Entrez.efetch(db=dbase, id=idlist, rettype=\"medline\",\r\n retmode=\"text\",retstart = (b * 10000),retmax = 10000 )\r\n recordsbatch = Medline.parse(handle)\r\n if b == 0:\r\n records = list(recordsbatch)\r\n else: \r\n records.extend(recordsbatch)\r\n else:\r\n handle = Entrez.efetch(db=dbase, id=idlist, rettype=\"medline\",\r\n retmode=\"text\")\r\n records = Medline.parse(handle)\r\n\r\n records = list(records)\r\n\r\n return records, cnt", "def search_entries_license(search_license):\n returns = {}\n\n ledger_client = get_ledger_client()\n entries = ledger_client.get_ledger_entries()\n\n for entry in entries:\n try:\n json_read = json.loads(str(entry.contents))\n if (json_read[\"Licence\"]) == search_license:\n returns.update(json_read)\n\n except:\n # not a valid json\n pass\n\n return returns", "def _remax_apt(self, complete_url, img_path):\n\n # as dicussed earlier, the best format to 
store all the information\n # is by creating dictionaries to store the unstructured information\n # but here are the features that are common across all aparments and \n # I also picked some features I think are important\n soup = self._soup_attempts(complete_url)\n price = self._get_price(soup)\n street, city, state, zipcode = self._get_address(soup)\n sidict = self._get_sideinfo(soup)\n bedrooms = self._access_dict(sidict, 'Bedrooms Total')\n bathrooms = self._access_dict(sidict, 'Bathrooms Total')\n fireplace = self._access_dict(sidict, 'Fireplace')\n living_area = self._access_dict(sidict, 'Living Area')\n property_type = self._access_dict(sidict, 'Property Type')\n year_built = self._parse_year(self._access_dict(sidict, 'Year Built'))\n lot_size = self._access_dict(sidict, 'Lot Size')\n waterfront = self._access_dict(sidict, 'Waterfront')\n ac = self._access_dict(sidict, 'Cooling')\n tax = self._access_dict(sidict, 'Tax Annual Amount')\n tax_year = self._parse_year(self._access_dict(sidict, 'Tax Year'))\n\n try:\n if 'central' in ac.lower():\n ac = 1\n except:\n ac = 0\n\n # package all the features into a list \n unit = [\n street, \n city, \n state, \n zipcode,\n price,\n bedrooms,\n bathrooms,\n fireplace,\n living_area,\n property_type,\n year_built,\n lot_size,\n waterfront,\n ac,\n tax,\n tax_year,\n complete_url,\n ]\n\n img_urls = self._get_img_urls(soup)\n if img_urls:\n self._save_images(img_urls, img_path, f\"{street.title()}, {city.replace('-', ' ').title()}, {state.upper()}\")\n\n return unit", "def getFileInfo(region, ub, queuename, guids, dsname, dsdict, lfns, pinitdir, analysisJob, tokens, DN, sitemover, error, workdir, dbh, DBReleaseIsAvailable, \\\n scope_dict, pfc_name=\"PoolFileCatalog.xml\", filesizeIn=[], checksumIn=[], thisExperiment=None):\n\n fileInfoDic = {} # FORMAT: fileInfoDic[file_nr] = (guid, pfn, size, checksum, filetype, copytool) - note: copytool not necessarily the same for all file (e.g. FAX case)\n replicas_dic = {} # FORMAT: { guid1: [replica1, .. ], .. } where replica1 is of type replica\n surl_filetype_dictionary = {} # FORMAT: { sfn1: filetype1, .. } (sfn = surl, filetype = DISK/TAPE)\n copytool_dictionary = {} # FORMAT: { surl1: copytool1, .. }\n totalFileSize = 0L\n ec = 0\n pilotErrorDiag = \"\"\n\n tolog(\"Preparing to build paths for input files\")\n\n # Get the site information object\n si = getSiteInformation(thisExperiment.getExperiment())\n\n # In case we are staging in files from an object store, we can do a short cut and skip the catalog lookups below\n copytool, dummy = getCopytool(mode=\"get\")\n if \"objectstore\" in copytool:\n tolog(\"Objectstore stage-in: cutting a few corners\")\n\n # Format: fileInfoDic[file_nr] = (guid, gpfn, size, checksum, filetype, copytool)\n # replicas_dic[guid1] = [replica1, ..]\n\n espath = si.getObjectstorePath(\"eventservice\") #getFilePathForObjectStore(filetype=\"eventservice\")\n logpath = si.getObjectstorePath(\"logs\") #getFilePathForObjectStore(filetype=\"logs\")\n\n i = 0\n try:\n for lfn in lfns:\n if \".log.\" in lfn:\n fullpath = os.path.join(logpath, lfns[i])\n else:\n fullpath = os.path.join(espath, lfns[i])\n fileInfoDic[i] = (guids[i], fullpath, filesizeIn[i], checksumIn[i], 'DISK', copytool) # filetype is always DISK on objectstores\n replicas_dic[guids[i]] = [fullpath]\n surl_filetype_dictionary[fullpath] = 'DISK' # filetype is always DISK on objectstores\n i += 1\n except Exception, e:\n tolog(\"!!WARNING!!2233!! 
Failed to create replica and file dictionaries: %s\" % (e))\n ec = -1\n tolog(\"fileInfoDic=%s\" % str(fileInfoDic))\n tolog(\"replicas_dic=%s\" % str(replicas_dic))\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n # If the pilot is running on a Tier 3 site, then neither LFC nor PFC should be used\n if si.isTier3():\n tolog(\"Getting file info on a Tier 3 site\")\n\n # Create file path to local SE (not used for scope based paths)\n path = sitemover.getTier3Path(dsname, DN) # note: dsname will only be correct for lib files, otherwise fix dsdict, currently empty for single lib file input?\n file_nr = -1\n for lfn in lfns:\n file_nr += 1\n\n # Use scope based path if possible\n# #if scope_dict and readpar('useruciopaths').lower() == \"true\":\n# if scope_dict and (\"/rucio\" in readpar('seprodpath') or \"/rucio\" in readpar('sepath')):\n# se_path = sitemover.getRucioPath(file_nr, tokens, scope_dict, lfn, path, analysisJob)\n# else:\n# se_path = os.path.join(path, lfn)\n se_path = os.path.join(path, lfn)\n\n # Get the file info\n ec, pilotErrorDiag, fsize, fchecksum = sitemover.getLocalFileInfo(se_path, csumtype=\"default\")\n if ec != 0:\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n # Fill the dictionaries\n fileInfoDic[file_nr] = (guids[file_nr], se_path, fsize, fchecksum, 'DISK', copytool) # no tape on T3s, so filetype is always DISK\n surl_filetype_dictionary[fullpath] = 'DISK' # filetype is always DISK on T3s\n\n # Check total file sizes to avoid filling up the working dir, add current file size\n try:\n totalFileSize += long(fsize)\n except:\n pass\n else:\n # Get the PFC from the proper source\n ec, pilotErrorDiag, xml_from_PFC, xml_source, replicas_dic, surl_filetype_dictionary, copytool_dictionary = \\\n getPoolFileCatalog(ub, guids, lfns, pinitdir, analysisJob, tokens, workdir, dbh,\\\n DBReleaseIsAvailable, scope_dict, filesizeIn, checksumIn,\\\n sitemover, pfc_name=pfc_name, thisExperiment=thisExperiment)\n\n if ec != 0:\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n tolog(\"Using XML source %s\" % (xml_source))\n if xml_from_PFC == '':\n pilotErrorDiag = \"Failed to get PoolFileCatalog\"\n tolog(\"!!FAILED!!2999!! 
%s\" % (pilotErrorDiag))\n tolog(\"Mover get_data finished (failed)\")\n return error.ERR_NOPFC, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n xmldoc = minidom.parseString(xml_from_PFC) \n fileList = xmldoc.getElementsByTagName(\"File\")\n\n # Extracts the guids from the file list\n guids_filelist = getGuids(fileList)\n fileInfoDictionaryFromDispatcher = getFileInfoDictionaryFromDispatcher(lfns, filesizeIn, checksumIn) \n file_nr = -1\n for thisfile in fileList:\n file_nr += 1\n # Get the SURL and GUID from the XML\n gpfn = str(thisfile.getElementsByTagName(\"pfn\")[0].getAttribute(\"name\"))\n guid = guids_filelist[file_nr]\n\n # Get the filesize and checksum from the primary location (the dispatcher)\n _lfn = getLFN(gpfn, lfns) #os.path.basename(gpfn)\n\n # Remove any __DQ2 substring from the LFN if necessary\n if \"__DQ2\" in _lfn:\n _lfn = stripDQ2FromLFN(_lfn)\n fsize, fchecksum = getFileInfoFromDispatcher(_lfn, fileInfoDictionaryFromDispatcher)\n\n # Get the file info from the metadata [from LFC]\n if not fsize or not fchecksum:\n ec, pilotErrorDiag, fsize, fchecksum = getFileInfoFromMetadata(thisfile, guid, replicas_dic, region, sitemover, error)\n if ec != 0:\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic\n\n # Even though checksum and file size is most likely already known from LFC, more reliable file\n # info is stored in Rucio. Try to get it from there unless the dispatcher has already sent it to the pilot\n if dsdict == {}:\n _dataset = dsname\n else:\n _dataset = getDataset(os.path.basename(gpfn), dsdict)\n _filesize, _checksum = sitemover.getFileInfoFromRucio(scope_dict[_lfn], _dataset, guid)\n if _filesize != \"\" and _checksum != \"\":\n if _filesize != fsize:\n tolog(\"!!WARNING!!1001!! Catalog file size (%s) not the same as Rucio file size (%s) (using Rucio value)\" % (fsize, _filesize))\n if _checksum != fchecksum:\n tolog(\"!!WARNING!!1001!! 
Catalog checksum (%s) not the same as Rucio checksum (%s) (using Rucio value)\" % (fchecksum, _checksum))\n fsize = _filesize\n fchecksum = _checksum\n\n # Get the filetype for this surl\n filetype = getFiletypeFromDictionary(gpfn, surl_filetype_dictionary)\n\n # Extract the copytool for this PFN\n _copytool = extractCopytoolForPFN(gpfn, copytool_dictionary)\n\n # Store in the file info dictionary\n fileInfoDic[file_nr] = (guid, gpfn, fsize, fchecksum, filetype, _copytool)\n\n # Check total file sizes to avoid filling up the working dir, add current file size\n try:\n totalFileSize += long(fsize)\n except:\n pass\n\n return ec, pilotErrorDiag, fileInfoDic, totalFileSize, replicas_dic", "def list(self, limit=None):\n ##For CP4D, check if either spce or project ID is set\n self._client._check_if_either_is_set()\n href = self._href_definitions.get_model_definition_search_asset_href()\n if limit is None:\n data = {\n \"query\": \"*:*\"\n }\n else:\n ModelDefinition._validate_type(limit, u'limit', int, False)\n data = {\n \"query\": \"*:*\",\n \"limit\": limit\n }\n\n if not self._ICP:\n response = requests.post(href, params=self._client._params(), headers=self._client._get_headers(),json=data)\n else:\n response = requests.post(href, params=self._client._params(), headers=self._client._get_headers(),json=data, verify=False)\n self._handle_response(200, u'model_definition assets', response)\n asset_details = self._handle_response(200, u'model_definition assets', response)[\"results\"]\n model_def_values = [\n (m[u'metadata'][u'name'], m[u'metadata'][u'asset_type'], m[u'metadata'][u'asset_id']) for\n m in asset_details]\n\n self._list(model_def_values, [u'NAME', u'ASSET_TYPE', u'GUID'], limit, _DEFAULT_LIST_LENGTH)", "async def test_get_all_assets(client):\n params = [('access_token', 'access_token_example'),\n ('group_id', 56)]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='GET',\n path='/v1/fleet/assets',\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def _retrieve_vol(self, currentspot, itersize):\n \n instance = self.instance\n maskonly = self.maskonly\n resource_server = self.resource_server\n resource_port = self.resource_port\n uuid = self.uuid\n server = self.server\n islabel = self.islabel\n\n def retrieve_vol(partvol):\n part, volume = partvol\n\n # check if there is no mask data\n if maskonly and ((part.mask is None) or (0 not in part.mask)):\n return (part, volume)\n\n # fetch data\n # TODO: only fetch smaller subset if masked\n \n # grab extents and size (but take subset of data already exists)\n offset = part.get_offset()\n reloffset = part.get_reloffset()\n zoff = offset.z + reloffset.z\n yoff = offset.y + reloffset.y\n xoff = offset.x + reloffset.x\n \n volsize = part.get_volsize()\n volz = volsize.z\n voly = volsize.y\n volx = volsize.x\n if volume is not None:\n z,y,x = volume.shape\n volz = z\n voly = y\n volx = x\n\n # perform fetch\n node_service = retrieve_node_service(server, uuid,\n resource_server, resource_port) \n newvol = None\n offset = (zoff, yoff, xoff)\n shape= (volz, voly, volx)\n if resource_server != \"\": # throttling unnecessary with resource server\n if islabel:\n newvol = node_service.get_labels3D(instance.name, shape, offset, throttle=False)\n else:\n newvol = node_service.get_gray3D(instance.name, shape, offset, throttle=False)\n else: # throttle volume fetches if no resource server\n if islabel:\n newvol = 
node_service.get_labels3D(instance.name, shape, offset, throttle=True)\n else:\n newvol = node_service.get_gray3D(instance.name, shape, offset, throttle=True)\n\n # mask return data\n if maskonly:\n # 0 out areas not masked (1)\n newvol[part.mask != 0] = 0 \n\n # 0 out areas that are overwritten by DVID\n volume[part.mask == 0] = 0\n\n # combine\n newvol = newvol + volume\n return (part, newvol)\n\n if self.usespark:\n return self.partitions.map(retrieve_vol)\n else:\n res = []\n for partnum in range(currentspot, currentspot+itersize):\n res.append(retrieve_vol(self.partitions[partnum]))\n return res", "def __search(self, req, resp):\n\n # init\n # request: DmpRequest\n request = self.__init_request(req)\n\n # trigger dmp feature list\n # features: DmpFeatureSet\n features = self.__do_trigger(request)\n\n # predict\n self.__do_predict(request, features)\n\n # display\n # response: DmpResponse\n response = self.__features2response(features)\n\n # to json\n resp.body = json.dumps(response)\n\n return defines.ReturnCode.SUCC", "def _download_selected_event_files(self, repo_metadata, output) -> List[ManifestResource]:\n # Body\n if repo_metadata:\n self._logger.debug(\"OpenFDA FAERs metadata received\")\n fda_output = create_folder(os.path.join(output.prod_dir, \"fda-inputs\"))\n fda = OpenfdaHelper(fda_output, manifest_service=get_manifest_service())\n # Parallel data gathering\n self._logger.debug(f\"Download processes pool -- {mp.cpu_count()}\")\n download_pool_nprocesses = mp.cpu_count() * 2\n download_pool_chunksize = int(\n len(repo_metadata['results']['drug']['event']['partitions']) / download_pool_nprocesses\n )\n with mp.Pool(processes=download_pool_nprocesses) as download_pool:\n try:\n return list(itertools.chain.from_iterable(download_pool.map(fda.do_download_openfda_event_file,\n repo_metadata['results']['drug'][\n 'event']['partitions'],\n chunksize=download_pool_chunksize)))\n except Exception as e:\n self._logger.error(\"Something went wrong: \" + str(e))\n return []", "def get_component_info_for_one_asset(self, asset_id):\n try:\n self.cur.execute(\"select id, latitude, longitude, installation_date, commissioning_date \\\n from assets \\\n where id = %s\", (asset_id, ))\n except:\n print(\"I am unable to get data\")\n\n row = self.cur.fetchone() \n asset_id = row[0]\n latitude = row[1]\n longitude = row[2]\n installation_date = row[3]\n commissioning_date = row[4]\n\n try:\n self.cur.execute(\"select id \\\n from components \\\n where asset_id = %s \\\n and is_deleted = 'f' and component_kind = 0\", (asset_id, ))\n except:\n print(\"I am unable to get data\")\n\n component_id_row = self.cur.fetchone() \n component_id = component_id_row[0] \n\n try:\n self.cur.execute(\"select s.name as street_name \\\n from assets as a, streets as s \\\n where a.id = %s \\\n and a.street_id = s.id\", (asset_id, ))\n except:\n print(\"I am unable to get data\")\n\n street_name_row = self.cur.fetchone() \n street_name = street_name_row[0]\n\n try:\n self.cur.execute(\"select parent_id \\\n from components A,communications_nodes B \\\n where A.id=B.id and asset_id = %s\", (asset_id, ))\n except:\n print(\"I am unable to get data\")\n\n cabinet_id_row = self.cur.fetchone() \n cabinet_id = cabinet_id_row[0]\n\n try:\n self.cur.execute(\"select c.asset_id, lt.type_designation, lt.actual_wattage \\\n from luminaire_types lt, luminaires l, components c \\\n where c.asset_id = %s and c.id = l.id and l.luminaire_type_id = lt.id\", (asset_id, ))\n except:\n print(\"I am unable to get data\")\n\n 
nominal_wattage_row = self.cur.fetchone() \n nominal_wattage = nominal_wattage_row[2]\n\n results = []\n results.append((asset_id, component_id, latitude, longitude, installation_date, commissioning_date, street_name, cabinet_id, nominal_wattage))\n\n return results", "def submit_download(self, submit_download_parameters):\n\n data_scope = None if submit_download_parameters.data_scope is None else ' '.join(\n submit_download_parameters.data_scope)\n download_file_type = submit_download_parameters.file_type\n download_entities = self.service_client.factory.create('ArrayOfDownloadEntity')\n download_entities.DownloadEntity = submit_download_parameters.download_entities\n\n # entities = None if submit_download_parameters.entities is None else ' '.join(\n # submit_download_parameters.entities)\n\n format_version = BULK_FORMAT_VERSION_6\n last_sync_time_in_utc = submit_download_parameters.last_sync_time_in_utc\n\n if submit_download_parameters.campaign_ids is None:\n response = self.service_client.DownloadCampaignsByAccountIds(\n AccountIds={'long': [self._authorization_data.account_id]},\n DataScope=data_scope,\n DownloadFileType=download_file_type,\n DownloadEntities=download_entities,\n FormatVersion=format_version,\n LastSyncTimeInUTC=last_sync_time_in_utc,\n )\n headers = self.service_client.get_response_header()\n else:\n response = self.service_client.DownloadCampaignsByCampaignIds(\n Campaigns={\n 'CampaignScope': [\n {'CampaignId': campaign_id, 'ParentAccountId': self._authorization_data.account_id}\n for campaign_id in submit_download_parameters.campaign_ids\n ]\n },\n DataScope=data_scope,\n DownloadFileType=download_file_type,\n DownloadEntities=download_entities,\n FormatVersion=format_version,\n LastSyncTimeInUTC=last_sync_time_in_utc,\n )\n headers = self.service_client.get_response_header()\n operation = BulkDownloadOperation(\n request_id=response,\n authorization_data=self._authorization_data,\n poll_interval_in_milliseconds=self._poll_interval_in_milliseconds,\n environment=self._environment,\n tracking_id=headers['TrackingId'] if 'TrackingId' in headers else None,\n **self.suds_options\n )\n return operation" ]
[ "0.54286665", "0.5196006", "0.5137875", "0.4929878", "0.47651935", "0.4748385", "0.47454703", "0.4692453", "0.46507284", "0.4643923", "0.4641228", "0.4612098", "0.4580337", "0.45568007", "0.45433447", "0.4523338", "0.4513945", "0.45056123", "0.4497337", "0.44970152", "0.44823134", "0.44820797", "0.44769853", "0.44715378", "0.44689965", "0.44628042", "0.44601238", "0.44513607", "0.4447966", "0.44364703" ]
0.5655947
0
Set a random start point for a new snake and return its coordinates.
def get_new_snake():
    global direction, snake, X_start, Y_start
    X = [x for x in range(40, WINDOWWIDTH - 80, 20)]   # multiplier list 20
    Y = [y for y in range(40, WINDOWHEIGHT - 80, 20)]  # multiplier list 20
    X_start = random.choice(X)  # random multiplier of 20
    Y_start = random.choice(Y)  # random multiplier of 20
    direction = "right"
    snake = [[X_start, Y_start], [X_start - 20, Y_start], [X_start - 40, Y_start]]  # first 3 cells of snake
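A minimal standalone sketch of the same idea, for illustration only: WINDOWWIDTH and WINDOWHEIGHT are assumed constants here, and the globals are replaced by return values so the snippet runs on its own.

import random

# Assumed window constants; in the original game module these come from elsewhere.
WINDOWWIDTH = 640
WINDOWHEIGHT = 480

def new_snake(cell=20):
    """Standalone variant: return (direction, snake) instead of writing globals."""
    # Keep the start point grid-aligned and away from the walls.
    x_start = random.choice(range(2 * cell, WINDOWWIDTH - 4 * cell, cell))
    y_start = random.choice(range(2 * cell, WINDOWHEIGHT - 4 * cell, cell))
    snake = [[x_start, y_start],
             [x_start - cell, y_start],
             [x_start - 2 * cell, y_start]]
    return "right", snake

direction, snake = new_snake()
print(direction, snake)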
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _random_start_position(self):\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)", "def rand_start_pos(self):\n free_list = np.where(self.grid_map == self.empty_value)\n pos_idx = np.random.randint(free_list[0].shape[0])\n self.set_start_pos((free_list[0][pos_idx], free_list[1][pos_idx]))", "def create_food(self, snake):\n while not self.pos:\n x = random.randint(0, 7)\n y = random.randint(0, 7)\n if [x, y] not in snake:\n self.pos = [x, y]", "def choose_starting_points(self, side):\n # Left Side\n if side == 1:\n x = np.random.uniform(self.left_side[\"x_min\"], self.left_side[\"x_max\"])\n y = np.random.uniform(self.left_side[\"y_min\"], self.left_side[\"y_max\"])\n # Bottom\n elif side == 2:\n x = np.random.uniform(self.bottom[\"x_min\"], self.bottom[\"x_max\"])\n y = np.random.uniform(self.bottom[\"y_min\"], self.bottom[\"y_max\"])\n # Right Side\n elif side == 3:\n x = np.random.uniform(self.right_side[\"x_min\"], self.right_side[\"x_max\"])\n y = np.random.uniform(self.right_side[\"y_min\"], self.right_side[\"y_max\"])\n # Top\n elif side == 4:\n x = np.random.uniform(self.top[\"x_min\"], self.top[\"x_max\"])\n y = np.random.uniform(self.top[\"y_min\"], self.top[\"y_max\"])\n else:\n raise ValueError(\"Invalid number for sides!\")\n\n return x, y", "def _init_random_coord(self):\n x_coord = \\\n random.randrange(Screen.SCREEN_MIN_X, Screen.SCREEN_MAX_X)\n y_coord = \\\n random.randrange(Screen.SCREEN_MIN_Y, Screen.SCREEN_MAX_Y)\n self.x_coord = x_coord\n self.y_coord = y_coord", "def set_random_pos(self, which):\n available = [[r, c] for r, row in enumerate(self.maze)\n for c, value in enumerate(row) if value == ' ']\n choice = random.choice(available)\n if which == 'starting':\n self.current_pos = choice\n elif which == 'finishing':\n self.finish_pos = choice", "def getRandomPosition(self):\n x = random.randint(0, self.width - 1)\n y = random.randint(0, self.height - 1)\n return Position(x, y)", "def initpoint(self):\n col = int(random.uniform(0, COLS))\n row = int(random.uniform(0, ROWS))\n return (row, col)", "def random_coordinates():\n return Coordinates(random.randint(0, 14), random.randint(0, 14))", "def getRandomPosition(self):\n posx = random.randrange(0, self.width)\n posy= random.randrange(0, self.height)\n randPos = Position(posx, posy)\n return randPos", "def random_position(self):\n\t\treturn (random.randint(1, self.max_x-2), random.randint(1,self.max_y-2))", "def create_snake(self):\n for position in SNAKE_STARTING_POSITIONS:\n self.add_segment(position)", "def set_start_coords(self, x:int, y:int) -> None:\r\n self.start_x = x\r\n self.start_y = y", "def set_starting_pos(self):\n if self.start and self.is_unoccupied(*self.start):\n self.current_pos = self.start[:]\n else:\n self.set_random_pos('starting')", "def getRandomCoordinates( self, size ):\n if not self.mIsLoaded: self.__loadIndex()\n\n token = random.choice( self.mIndex.keys() ) \n strand = random.choice( (\"+\", \"-\") )\n pos_id, pos_seq, lcontig = self.mIndex[token][:3]\n rpos = random.randint( 0, lcontig )\n if random.choice( (\"True\", \"False\") ):\n start = rpos\n end = min(rpos + size, lcontig)\n else:\n start = max(0, rpos - size)\n end = rpos\n \n return token, strand, start, end", "def _get_random_pos_on_a_side(self):\n pass", "def getRandomPosition(self):\n posX = np.random.uniform(0, self.width)\n posY = np.random.uniform(0, self.height)\n return Position(posX, posY)", "def getRandomPosition(self):\n return Position(random.uniform(0, self.width), 
random.uniform(0, self.height))", "def random_pose(self):\n position = self._start\n while self[position].distance < np.sum(self._rooms.shape) * 2:\n position = np.array(\n [random.randrange(limit) for limit in self._rooms.shape]\n )\n direction = random.choice(self.exits(position))\n return (position, direction)", "def RandomCoordinate(): \r\n return ReturnRounded(np.random.uniform(-10,10))", "def getRandomPosition(self):\n\t\tp = Position(random.randrange(0, self.width), random.randrange(0, self.height))\n\t\treturn p", "def getRandomPosition(self):\n return Position(random.random()*self.w, random.random()*self.h)", "def __new_snake(self):\n self._snake = self.Snake(Direction.RIGHT, Position(4, 4), Position(3, 4), Position(2, 4))", "def Generate_Random( self ):\n print( 'Generating Random coordinates' )\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n self.Data.Stand[s].Tree[t].X = random.uniform( 0, 208.71 )\n self.Data.Stand[s].Tree[t].Y = random.uniform( 0, 208.71 )", "def random_pos(self, ):\n self.pos_item['needle'] = self.shuffle_pos()\n self.pos_item['ether'] = self.shuffle_pos()\n self.pos_item['tube'] = self.shuffle_pos()", "def reset_pos(self):\n self.rect.y = random.randrange(-1000, -10)\n self.rect.x = random.randrange(0, WIDTH)", "def reset_pos(self):\r\n self.rect.x = random.randrange(50, 640)\r\n self.rect.y = random.randrange(-300, -80)", "def exploring_starts(self):\n def random_choice(l): return l[np.random.randint(len(l))]\n return map(random_choice, (self.env.states, self.env.moves))", "def __new_candy(self):\n array_set = list(range(self.__col_row_num[0] * self.__col_row_num[1]))\n for s in self.__snake:\n step_x = s.x() // self.__cell_edge\n step_y = s.y() // self.__cell_edge\n array_set.remove(step_y * self.__col_row_num[0] + step_x)\n pos = random.choice(array_set)\n x_pos = pos % self.__col_row_num[0] * self.__cell_edge\n y_pos = pos // self.__col_row_num[0] * self.__cell_edge\n self.__candy.move(QtCore.QPoint(x_pos, y_pos))", "def generate(self, start_point: int) -> None:\n self.map_points = [start_point for i in range(self.map_length)]\n current_segment = 0\n\n while current_segment <= self.map_length - c.SAFE_EXCESS:\n how_much_segments_to_side = random.randrange(c.MIN_SEGMENTS_TO_SIDE, c.MAX_SEGMENTS_TO_SIDE, 1)\n max_curvature = math.log(self.score.get_score() + 1, c.MAX_CURVATURE_COEFFICIENT)\n if max_curvature > c.MAX_CURVATURE:\n max_curvature = c.MAX_CURVATURE\n curvature = round(random.uniform(-max_curvature, max_curvature), 1)\n\n for j in range(current_segment, current_segment + how_much_segments_to_side):\n if current_segment + how_much_segments_to_side <= self.map_length:\n while not (\n c.MAX_LEFT_DEVIATION_OF_ROAD < self.map_points[\n j] + curvature < c.MAX_RIGHT_DEVIATION_OF_ROAD):\n curvature = round(random.uniform(-max_curvature, max_curvature), 1)\n self.map_points[j + 1] = self.map_points[j] + curvature\n\n current_segment += how_much_segments_to_side" ]
[ "0.7055944", "0.66870147", "0.65640426", "0.6366627", "0.63529134", "0.6328111", "0.6247258", "0.622765", "0.62188524", "0.61395854", "0.61192894", "0.6099123", "0.60775715", "0.6012391", "0.59755707", "0.5974625", "0.5952304", "0.59483", "0.59292364", "0.5928669", "0.59225017", "0.59164745", "0.58862996", "0.5885452", "0.58838665", "0.5857264", "0.58462536", "0.58365095", "0.5834252", "0.5826437" ]
0.68626577
1
Make a list of recent ACIS observations.
def get_recent_obsid():
    #
    #--- extract a list of the last two weeks of acis observations
    #
    stop = time.strftime('%Y:%j:%H:%M:%S', time.gmtime())
    stop = Chandra.Time.DateTime(stop).secs
    start = stop - 86400 * 14
    a_list = make_obsid_list(start, stop)

    return a_list
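A sketch of just the time-window arithmetic used above, with the Chandra.Time conversion replaced by plain Unix seconds so it runs without the Ska environment; make_obsid_list is the project's own arc5gl helper and is not reproduced here.

import time

SECONDS_PER_DAY = 86400

def recent_window(days=14, now=None):
    """Return (start, stop) in seconds covering the last `days` days.

    The original code converts to Chandra mission seconds via
    Chandra.Time.DateTime(...).secs; plain Unix time is used here only
    to keep the sketch self-contained.
    """
    stop = now if now is not None else time.time()
    start = stop - SECONDS_PER_DAY * days
    return start, stop

start, stop = recent_window()
print(int(start), int(stop))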
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_observation(self):\n return []", "def get_observation_list(self):\n return self.observations", "def _get_obs(self):\n\n\n return OrderedDict(\n [\n (\"count\", self.count),\n (\"observation\", self.state.copy()),\n\n ]\n )", "def get_recent_state(self, current_observation):\n # This code is slightly complicated by the fact that subsequent observations might be\n # from different episodes. We ensure that an experience never spans multiple episodes.\n # This is probably not that important in practice but it seems cleaner.\n state = [current_observation]\n idx = len(self.recent_observations) - 1\n for offset in range(0, self.window_length - 1):\n current_idx = idx - offset\n current_terminal = self.recent_terminals[current_idx - 1] if current_idx - 1 >= 0 else False\n if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal):\n # The previously handled observation was terminal, don't add the current one.\n # Otherwise we would leak into a different episode.\n break\n state.insert(0, self.recent_observations[current_idx])\n while len(state) < self.window_length:\n state.insert(0, zeroed_observation(state[0]))\n return state", "def getchangableobslist(self):\n return self.__changableobslist", "def recent_comic_titles():\r\n\treturn [comic.title for comic in Comic.objects.all().order_by('-created_on')[0:10]]", "def getChanges():", "def history():", "def parse_latest_observations(gml: bytes) -> List[Observation]:\n\n merged = _extract_features(gml)\n return [_dict_to_observation(i) for i in merged]", "def _compute_last_observations(self):\n observations = {}\n for ts in self.ts_ids:\n observations[ts] = self.traffic_signals[ts]._compute_observation()\n return observations", "def recently(self):\n items = []\n for item in self.p.entries:\n dt = datetime.fromtimestamp(mktime(item.published_parsed))\n delta = datetime.today() - dt\n\n if delta.days > self.days:\n continue\n items.append(item)\n if 'verbose' in self.args and self.args['verbose']:\n print delta.days, dt\n self.items = items\n return items", "def get_constituents(self, as_of_datetime):\n\n assert isinstance(as_of_datetime, datetime), 'Invalid datetime type.'\n\n # Get current list of constituents\n C = self.current_list[['Symbol', 'current_member']].set_index('Symbol')\n\n # Get list of historical changes to index since the as of date\n these_chgs = self.changes.loc[self.changes[('Date', 'Date')] >= as_of_datetime]\n\n # Count the number of times each ticker was added to index\n adds = these_chgs[[('Date', 'Date'), ('Added', 'Ticker')]].dropna().droplevel(level=0, axis=1)\n adds['action'] = -1\n A = adds.pivot(index='Ticker', columns='Date', values='action').fillna(0).sum(axis=1).to_frame(name='undo_adds')\n\n # Count the number of times each ticker was deleted from index\n deletes = these_chgs[[('Date', 'Date'), ('Removed', 'Ticker')]].dropna().droplevel(level=0, axis=1)\n deletes['action'] = 1\n D = deletes.pivot(index='Ticker', columns='Date', values='action').fillna(0).sum(axis=1).to_frame(name='undo_deletes')\n\n # Merge the current list, adds, and deletes\n as_of_constituents = pd.concat([C, A, D], axis=1).fillna(0)\n as_of_constituents['as_of_member'] = as_of_constituents.sum(axis=1)\n\n return list(as_of_constituents[as_of_constituents['as_of_member'] == 1].index)", "def recent_arima_sensors(now=dt.datetime.now(), timerange=dt.timedelta(days=5)):\n dt_from = now - timerange\n query = (\n db.session.query(ModelRunClass.sensor_id)\n .filter(ModelRunClass.time_created >= dt_from)\n .distinct()\n 
)\n ids = db.session.execute(query).fetchall()\n ids = [i[0] for i in ids]\n return ids", "def get_change_list():\n today = date.today()\n current_month = today.month\n current_year = today.year\n current_month_pred = dict()\n previous_month_pred = dict()\n change = list()\n commodity_list = [\"arhar\", \"bajra\", \"barley\", \"copra\", \"cotton\", \"sesamum\", \"gram\", \"groundnut\",\n \"jowar\", \"maize\", \"masoor\", \"moong\", \"niger\", \"paddy\", \"ragi\", \"rape\", \"jute\",\n \"safflower\", \"soyabean\", \"sugarcane\", \"sunflower\", \"urad\", \"wheat\"]\n if current_month == 1:\n previous_month = 12\n previous_year = current_year - 1\n else:\n previous_month = current_month - 1\n previous_year = current_year\n for crop in commodity_list:\n model_path = \"static/models/\" + crop + \".joblib\"\n model = load(model_path)\n current_month_wpi = model.predict(pd.DataFrame([current_month, current_year]).T)[0]\n current_month_pred[crop] = current_month_wpi\n previous_month_wpi = model.predict(pd.DataFrame([previous_month, previous_year]).T)[0]\n previous_month_pred[crop] = previous_month_wpi\n change.append((((current_month_wpi - previous_month_wpi) * 100 / previous_month_wpi), crop))\n sorted_change = change\n return sorted_change, current_month_pred, previous_month_pred", "def getHistoricalData(dev, MAC):\n # Creating a MongoDB client and switching to the relevant database\n database = parseDatabase('MongoDB')\n mongo_client = pymongo.MongoClient(database['host'], int(database['port']))\n db = mongo_client[dev]\n \n # Creating an array consisting of the dates for which data has to be retrieved\n base = datetime.datetime.today() - datetime.timedelta(days=1)\n date_list = [base - datetime.timedelta(days=x) for x in range(0, 5)]\n \n data = []\n # Loop to get data for the last five days\n for date in date_list:\n date_string = date.strftime(\"%Y-%m-%d\")\n col = db[date_string]\n temp = dict()\n results = col.find({'MAC':MAC})\n for i in results:\n temp = i\n if not temp=={}:\n temp['date'] = date_string\n temp.pop('_id', None)\n to_pop = ['First detection', 'Last detection', 'Number', 'MAC', 'Last updated']\n for item in to_pop:\n temp.pop(item, None)\n data.append(temp)\n\n else:\n temp['date'] = date_string\n temp['Duration'] = 0\n data.append(temp)\n continue\n \n\n return data", "def get_observation_ids(self) -> list:\n return list(self.time_to_obs.keys())", "def acquisitions_from_ifg_dates(ifg_dates):\n acq_dates = []\n for ifg_date in ifg_dates: # loop through the dates for each ifg\n dates = ifg_date.split('_') # split into two YYYYMMDDs\n for date in dates: # loop through each of these\n if date not in acq_dates: # if it't not already in the list...\n acq_dates.append(date) # add to it\n return acq_dates", "def print_list(fo, obsid, date, date2, date3):\n\n hold = []\n#\n#--- sort the list according to date3\n#\n sorted_index = numpy.argsort(date3)\n obsid = [obsid[i] for i in sorted_index]\n date = [date[i] for i in sorted_index]\n date2 = [date2[i] for i in sorted_index]\n for i in range(0, len(obsid)):\n if date[i] != '' and date2[i] != '' and obsid[i] != '':\n line = date[i] + ' ' + date2[i] + ' ' + str(obsid[i]) + '\\n'\n hold.append(line)\n hlen = len(hold)\n#\n#--- reverse the list so that the newest at the top\n#\n for i in range(0, hlen):\n j = hlen - i -1\n fo.write(hold[j])", "def _get_observation(self, observation):", "def getExpired(self, idle=365):\n cutOff = datetime.datetime.now() - datetime.timedelta(days=idle)\n return [x for x in self.xeps if x.status == 
\"Experimental\" and x.date < cutOff]", "def Tobs_past_year(): \n results = pd.DataFrame(session.query(Measurement.date,Measurement.tobs).\\\nfilter(Measurement.date.between(One_yrs_ago,current_time)).all());\n\n dates_of_last_year=list(results.sort_values(by='date')['date'].unique()) \n aa1=results.sort_values(by='date').groupby('date')\n last_year_tobs={dates_of_last_year[i]:list(aa1.get_group(dates_of_last_year[i])['tobs'])\\\n for i in range(len(aa1))}\n print(f\"Route /api/v1.0/tobs/past_year is being visited\")\n return jsonify(last_year_tobs)", "def get_recent_history(session=None): \n from model_old_schema.reference import Reference, RefBad\n\n def f(session):\n min_date = datetime.date.today() - datetime.timedelta(days=10)\n refs = session.query(Reference).filter_by(created_by = session.user).filter(Reference.date_created >= min_date)\n refbads = session.query(RefBad).filter_by(created_by = session.user).filter(Reference.date_created >= min_date)\n \n history = {}\n today = datetime.date.today()\n for i in range(10):\n new_date = today - datetime.timedelta(days=i)\n history[new_date] = HistoryEntry(new_date)\n \n for ref in refs:\n if ref.date_created in history:\n history[ref.date_created].inc_ref_count()\n \n for refbad in refbads:\n if refbad.date_created in history:\n history[refbad.date_created].inc_refbad_count()\n \n return history\n \n return f if session is None else f(session)", "def get_albums_recent_added(session_):\n artists = session_.query(Album).order_by(Album.added_at.desc()).all()\n return artists", "def get_artists_recent_added(session_):\n # artists = session_.query(Artist).order_by(Artist.name.asc()).paginate()\n artists = session_.query(Artist).order_by(Artist.added_at.asc()).all()\n return artists", "def get_historic_data(self):\n\n historic_market_events = []\n\n return historic_market_events", "def agg_history(self):\n cd_list, cr_list = zip(*self._history)\n return pd.concat(cd_list), pd.concat(cr_list)", "def _get_observation(self): \n i = 0\n\n # Gets unique observation lines\n observation_lines = set()\n while i < self.lines_per_observation:\n line = self.lines.pop()\n observation_lines.add(line)\n i += 1\n\n LOGGER.debug('Observation lines: %s', observation_lines)\n observation = self._parse_sensor_observation(observation_lines) \n return observation", "def date_list(self):\n if self._date_list is None or self._file_modified:\n with open(self.data_filepath, 'r', newline='') as reader:\n reader = csv.reader(reader)\n self._date_list = [DatePoint.unfreeze(date[0]) for date in reader]\n self._file_modified = False\n return self._date_list", "def ata_sct_temperature_history(self) -> SmartSsdAtaSctTemperatureHistory:\n return self._ata_sct_temperature_history", "def make_obsid_list(start, stop):\n line = 'operation=browse\\n'\n line = line + 'dataset=flight\\n'\n line = line + 'detector=acis\\n'\n line = line + 'level=1\\n'\n line = line + 'filetype=evt1\\n'\n line = line + 'tstart=' + str(start) + '\\n'\n line = line + 'tstop=' + str(stop) + '\\n'\n line = line + 'go\\n'\n\n out = mcf.run_arc5gl_process(line)\n#\n#--- save obsids\n#\n a_list = []\n if len(out) > 0:\n for ent in out:\n atemp = re.split('acisf', ent)\n btemp = re.split('_', atemp[1])\n obsid = btemp[0]\n#\n#--- make sure that obsid is numeric\n#\n try:\n chk = float(obsid)\n except:\n continue\n\n a_list.append(obsid)\n\n return a_list" ]
[ "0.62627023", "0.59185773", "0.5808576", "0.57445425", "0.55848485", "0.556348", "0.5557747", "0.55287105", "0.5524823", "0.5470817", "0.5451663", "0.54486525", "0.5441415", "0.5439831", "0.5406885", "0.5387726", "0.5383874", "0.5338046", "0.53371894", "0.5328252", "0.5317614", "0.5307985", "0.5276524", "0.5257058", "0.5228936", "0.52238077", "0.5206773", "0.5201209", "0.5193816", "0.5191037" ]
0.7570609
0
Compress older plot files.
def zip_old_plot_file(a1=30, a2=60):
    today = time.strftime('%Y:%j:%H:%M:%S', time.gmtime())
    today = Chandra.Time.DateTime(today).secs
    stop = today - 86400 * a1
    start = today - 86400 * a2
    a_list = make_obsid_list(start, stop)

    for obsid in a_list:
        cdir = plot_dir + 'Ind_Plots/acisf' + str(obsid) + '_plot'
        if os.path.isdir(cdir):
            cmd = 'gzip -f ' + cdir + '/*.png'
            os.system(cmd)
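A hedged sketch of the same per-obsid compression step using the standard-library gzip module instead of shelling out to `gzip -f`; plot_dir and the obsid list are assumed inputs supplied by the surrounding module.

import gzip
import shutil
from pathlib import Path

def gzip_old_plot_dirs(plot_dir, obsids):
    """Gzip every .png under plot_dir/Ind_Plots/acisf<obsid>_plot in pure Python."""
    for obsid in obsids:
        cdir = Path(plot_dir) / "Ind_Plots" / f"acisf{obsid}_plot"
        if not cdir.is_dir():
            continue
        for png in cdir.glob("*.png"):
            # Write <name>.png.gz, then drop the original to mimic gzip's behaviour.
            with open(png, "rb") as src, gzip.open(f"{png}.gz", "wb") as dst:
                shutil.copyfileobj(src, dst)
            png.unlink()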
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_plots(request, save_figs):\n\n def fin():\n if save_figs is not None:\n plt.savefig(f\"{os.path.join(save_figs, request.node.name)}.png\")\n plt.close(\"all\")\n\n request.addfinalizer(fin)", "def save_fig(ax_data, file_name):\n with open(file_name,'wb') as fid:\n pickle.dump(ax_data, fid)", "def hogg_savefig(prefix):\n for fn in [prefix + \".png\"]: # , prefix + \".pdf\"]:\n print \"saving \" + fn\n plt.savefig(fn)\n return None", "def compress():\n run_manage_cmd('compress_assets')", "def plot_to_file(file_prefix=None, file_suffix=None, **kwargs) -> str:\n file_prefix = file_prefix or 'plot'\n file_suffix = file_suffix or '.png'\n path = tempfile.mktemp(prefix='%s-' % file_prefix, suffix=file_suffix)\n plt.savefig(path, **kwargs)\n plt.close() # Else plt.show() happens automatically [sometimes: with plt.* but not with plotnine...]\n return path", "def save(file_name):\n setup()\n plt.savefig(file_name)", "def generate_all_cost_plots(suffix):\n directory_name = \"inputs/\"\n directory = os.fsencode(directory_name)\n outfolder = \"plots/\" + suffix.strip(\".in\") + \"/\"\n try:\n os.makedirs(outfolder)\n except FileExistsError:\n pass\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n if filename.endswith(suffix):\n print(\"Solving : \", filename)\n inputfile = directory_name + filename\n num_clusters, cost = cost_vs_clusters(inputfile)\n outfile = outfolder + filename.strip(\".in\") + \".png\"\n plot_cost_vs_clusters(cost,num_clusters,outfile)", "def do_compare_plots(cat7, cat7s, subdir,label):\n #Check the effect of the 5km/s split\n #dNdX\n cat7.plot_line_density(zmax=5)\n cat7s.plot_line_density(zmax=5, label=label)\n plt.legend(loc=0)\n save_figure(path.join(subdir,\"dndx_\"+label))\n plt.clf()\n\n #Omega_DLA\n cat7.plot_cddf(zmax=4,color=\"blue\")\n cat7s.plot_cddf(zmax=4,color=\"red\",label=label)\n plt.xlim(1e20, 1e23)\n plt.ylim(1e-28, 5e-21)\n plt.legend(loc=0)\n save_figure(path.join(subdir, \"cddf_\"+label))\n plt.clf()\n\n #Omega_DLA\n cat7.plot_omega_dla(zmax=5)\n cat7s.plot_omega_dla(zmax=5, label=label)\n plt.legend(loc=0)\n save_figure(path.join(subdir,\"omega_\"+label))\n plt.clf()", "def save_figs(self):\n if not self._fitted:\n self.fit()\n #self._message(\"Saving plots...\")\n # 1. 
Generate the required PNG plots\n # 1.1 Truncation plots\n for i,s in enumerate(self.samples):\n fig,ax=plt.subplots(1,2,figsize=(8,4))\n cyct=np.arange(self.nvalues)\n cycf=np.arange(self._cutoffidx[i])\n cycd=0.5*(cyct[1:]+cyct[:-1])\n ax[0].plot(cyct,self.samplesdata[:,i],'k.-',linewidth=0.5,label=\"Full series\")\n ax[0].plot(cycf,self.samplesdata[:self._cutoffidx[i],i],'r-',linewidth=1,label=\"Truncated\")\n ax[0].set_xlim([0,self.nvalues-1])\n ax[0].set_ylim([0,self.samplesdata.max()*1.1])\n ax[0].set_xlabel(\"Cycle\")\n ax[0].set_ylabel(\"Fluorescence (a.u.)\")\n ax[0].set_title(\"Detected fluorescence\")\n plt.legend(loc='upper left',frameon=False)\n # First derivative\n ax[1].plot(cycd,self.samplesdatadiff[:,i],'k.-',linewidth=0.5)\n ax[1].axvline(self._cutoffidx[i],color='r')\n ax[1].set_xlim([0,self.nvalues-1])\n ax[1].set_ylim([self.samplesdatadiff.min()*1.1,self.samplesdatadiff.max()*1.1])\n ax[1].set_xlabel(\"Cycle\")\n ax[1].set_ylabel(\"dF/dCycle (a.u.)\")\n ax[1].set_title(\"Fluorescence rate\")\n plt.tight_layout()\n fn=get_valid_fname(self.samples[i])\n figname=\"%s_%s_%s.svg\"%(self.ID,\"01truncation\",fn)\n self.info['samples'][s]['Data truncation for fitting']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close() \n # 1.2 Fitting plots\n for i,s in enumerate(self.samples):\n fig,ax=plt.subplots(1,3,figsize=(12,4))\n cyct=np.arange(self.nvalues)\n cycf=np.arange(self._cutoffidx[i])\n ax[0].plot(cyct,self.samplesdata[:,i],'k:',linewidth=0.5,label=\"Full series\")\n ax[0].plot(cycf,self.samplesdata[:self._cutoffidx[i],i],'r.-',linewidth=0.5,label=\"Truncated\")\n #ax[0].plot(cycf,self.mak3fpre[s],'y-',linewidth=1,label=\"prefit\")\n ax[0].plot(cycf,self.mak3fluorescence[s],'g-',linewidth=1,label=\"MAK3 fit\")\n ax[0].axvline(self._cutoffidx[i],color='k')\n ax[0].set_xlim([0,self.nvalues-1])\n ax[0].set_ylim([0,self.samplesdata.max()*1.1])\n ax[0].set_xlabel(\"Cycle\")\n ax[0].set_ylabel(\"Fluorescence (a.u.)\")\n ax[0].set_title(\"Detected fluorescence\")\n ax[0].legend(loc='upper left',frameon=False)\n # DNA levels\n ax[1].plot(cycf,self.mak3concentration[s],'g-',linewidth=1,label=\"MAK3\")\n ax[1].axvline(self._cutoffidx[i],color='k')\n ax[1].set_xlim([0,self.nvalues-1])\n ax[1].set_ylim([0,self.mak3concentration[s].max()*1.1])\n ax[1].set_xlabel(\"Cycle\")\n ax[1].set_ylabel(\"concentration (a.u.)\")\n ax[1].set_title(\"estimated cDNA levels\")\n # Efficiency\n ax[2].plot(cycf,self.mak3efficiency[s],'b-',linewidth=1,label=\"MAK3\")\n ax[2].axvline(self._cutoffidx[i],color='k')\n ax[2].set_xlim([0,self.nvalues-1])\n ax[2].set_ylim([0,1.1])\n ax[2].set_xlabel(\"Cycle\")\n ax[2].set_ylabel(\"Efficiency\")\n ax[2].set_title(\"Amplification efficiency\") \n plt.tight_layout()\n fn=get_valid_fname(self.samples[i])\n figname=\"%s_%s_%s.svg\"%(self.ID,\"02mak3\",fn)\n self.info['samples'][s]['MAK3 Fitting']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close()\n # 2 Initial concentrations\n figwdth=np.maximum(5,0.4*self.nsamples+1)\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.initialConcentration.values())\n k=list(self.initialConcentration.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00initialConcentration\")\n self.info['figname_initialConcentration']=figname\n 
plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n plt.close()\n # 3 Fitting Error\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.fitting_error.values())\n k=list(self.fitting_error.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n ax.set_ylim([0,1e-2])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00fittingError\")\n self.info['figname_fittingError']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 4 kinetic constant\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.k.values())\n k=list(self.k.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00kineticConstant\")\n self.info['figname_k']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 5 background fluorescence\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.Fb.values())\n k=list(self.Fb.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00bkgFluorescence\")\n self.info['figname_Fb']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))\n # 6 slope\n fig,ax=plt.subplots(1,1,figsize=(figwdth,7))\n v=list(self.slope.values())\n k=list(self.slope.keys())\n ax.bar(0.75+np.arange(self.nsamples),v,facecolor='k',width=0.5)\n ax.set_xticks(1+np.arange(self.nsamples))\n ax.set_xticklabels(k,rotation=90)\n ax.set_xlim([0,self.nsamples+1])\n ax.set_ylim([0,0.025])\n plt.tight_layout()\n figname=\"%s_%s_.svg\"%(self.ID,\"00fluorescenceSlope\")\n self.info['figname_slope']=figname\n plt.savefig('%s/%s'%(self.info['resultsdir'],figname))", "def save_plot(p, file_name, path='../static/images/'):\n p.output_backend = \"svg\"\n export_svgs(p, filename=path + file_name + '.svg')", "def save_plot(directory, name = None):\n global global_figure_count \n if directory[-1]!=\"/\":\n directory = directory + \"/\"\n directory_paths = directory.split(\"/\")\n prefix = directory_paths[0]\n for i in directory_paths[1:]:\n if not os.path.exists(prefix):\n os.mkdir(prefix)\n prefix = prefix + \"/\" + i\n\n if len(plt.get_fignums()) == 1:\n if name is None:\n plt.savefig(directory + 'figure%d.png' % global_figure_count, bbox_inches='tight', dpi = 600)\n global_figure_count+=1\n else:\n plt.savefig(directory + '%s' % name, bbox_inches='tight', dpi = 600)\n else:\n for i in plt.get_fignums():\n plt.figure(i)\n plt.savefig(directory + 'figure%d.png' % global_figure_count, bbox_inches='tight', dpi = 600)\n global_figure_count+=1\n plt.close('all')", "def cb_save(event):\n fig.savefig('sample.univariate_discrete.py.png', dpi=300, format='png', transparent=True)", "def save_plots(plot_dict, filename) :\n outfile = ROOT.TFile(filename, \"RECREATE\")\n\n for key in sorted(plot_dict) :\n if type( plot_dict[key] ) is types.DictType :\n directory = outfile.mkdir( key )\n directory.cd()\n save_plot( plot_dict[key], directory )\n outfile.cd()\n elif plot_dict[key] is not None :\n plot_dict[key].Write()\n \n outfile.Close()", "def test_plot_images(self):\n save_file(self.quart.plot_images)", "def save_plot(self):\r\n\t\t# Generate the 
plot\r\n\t\tself.generate_plot()\r\n\t\t# Create save directory\r\n\t\tdirectory = self.dir + '/%s/' % str(int(self.universe.init_time))\r\n\t\tif not path_exists(directory):\r\n\t\t\tmakedirs(directory)\r\n\t\t# Save image file\r\n\t\tself.fig.savefig(directory+str(self.universe.time))", "def compress_wrapper(args: Namespace) -> None:\n directory_path = os.path.join(DATASETS_DIR, args.directory)\n compress_datasets(directory_path, args.holdout)", "def save_plot(name):\n plt.savefig(name)\n plt.clf()", "def savePlots(self, dir, prefix, type):\n for key in self.plots.keys():\n fname = os.path.join(dir, prefix+'_'+key+'.'+type)\n self.plots[key].savefig(fname, bbox_inches='tight')", "def savePlots(self, dir, prefix, type):\n for key in self.plots.keys():\n fname = os.path.join(dir, prefix+'_'+key+'.'+type)\n self.plots[key].savefig(fname, bbox_inches='tight')", "def compress_files(self):\n archive_file_path = tkinter.filedialog.asksaveasfilename(parent=self,\n defaultextension=\".zip\",\n filetypes=[(\"Zip File\", \"*.zip\")])\n treeview_items = self.files_treeview.get_children()\n if archive_file_path and treeview_items:\n with ZipFile(archive_file_path, \"w\", ZIP_DEFLATED) as archive:\n for row in treeview_items:\n file_path = self.files_treeview.item(row, \"values\")[0]\n file_name = os.path.basename(file_path)\n archive.write(file_path, arcname=file_name)", "def plot_files(plot_file_name, files):\n curve_names, metric_sets, set_of_number_of_embeddings = _read_result_pickle(files)\n\n _plot_curves(plot_file_name, curve_names, metric_sets, set_of_number_of_embeddings)", "def compress_image(filename, s):\r\n image = imread(filename) / 255\r\n size = image.shape\r\n orig_entries = image.size\r\n #colored\r\n if len(size) == 3:\r\n #plot original\r\n orig = plt.subplot(121)\r\n orig.imshow(image)\r\n orig.axis(\"off\")\r\n #red in image\r\n R = image[:,:,0]\r\n #green in image\r\n G = image[:,:,1]\r\n #blue in image\r\n B = image[:,:,2]\r\n #approximate red, green and blue in range\r\n new_R, entries_R = svd_approx(R,s)\r\n new_R = np.clip(new_R,0,1)\r\n new_G, entries_G = svd_approx(G,s)\r\n new_G = np.clip(new_G,0,1)\r\n new_B, entries_B = svd_approx(B,s)\r\n new_B = np.clip(new_B,0,1)\r\n #stack all in one array\r\n new_image = np.dstack((new_R,new_G,new_B))\r\n #plot image\r\n new = plt.subplot(122)\r\n new.imshow(new_image)\r\n new.axis(\"off\")\r\n #title image with saved number of entries\r\n plt.suptitle(str(orig_entries - (entries_R+entries_G+entries_B)) + \" Entries\")\r\n\r\n\r\n #grayscale\r\n else:\r\n #plot original\r\n orig = plt.subplot(121)\r\n orig.imshow(image, cmap=\"gray\")\r\n orig.axis(\"off\")\r\n #approximate the image\r\n new_A, entries = svd_approx(image,s)\r\n #plot it\r\n new = plt.subplot(122)\r\n new.imshow(new_A, cmap=\"gray\")\r\n new.axis(\"off\")\r\n #title image with saved number of entries\r\n plt.suptitle(str(orig_entries - entries) + \" Entries\")\r\n\r\n plt.show()", "def compress_img():\n in_path = 'output/templates/rgb/'\n out_path = 'output/templates/imgs/'\n names = os.listdir(in_path)\n for i, name in enumerate(names):\n img = cv2.imread(in_path + name, 0)\n if any(np.array(img.shape) > 1000):\n img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n cv2.imwrite(out_path + name, img)\n\n return", "def compress_image(filename,k):", "def updateplot(self):\n plotfiles = []\n try:\n self.plotter.reset()\n self.plotter.set_xrange(self.xrangemin.value(), self.xrangemax.value())\n self.plotter.set_yrange(self.yrangemin.value(), 
self.yrangemax.value())\n self.plotter.set_bgirange(self.bgintmin.value(), self.bgintmax.value())\n self.plotter.set_pkrange(self.halphamin.value(), self.halphamax.value())\n for n,pf in enumerate(self.selecteddata):\n tf = os.path.join(self.tempdir, \"tf%d\" % n)\n self.dfparser.writefile(tf, pf)\n plotfiles.append(tf)\n self.plotter.set_plot(plotfiles)\n except datafile.Datafile_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()\n except plotter.Plotter_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()", "def generate_plots(path):\n videos = glob(path + '/*.mkv')\n print(path, len(videos), videos)\n\n if len(videos) == 0:\n return\n else:\n videos = videos[0]\n\n metadata_list = glob(path + '/metadata.txt')\n #print(path, len(metadata_list), metadata_list)\n\n if len(metadata_list) == 0:\n return \n\n P = Preprocessor()\n P.import_video(str(videos))\n P.read_metadata(path)\n P.preprocess()\n Im = P.frames_processed\n if len(Im) == 0:\n print(len(Im))\n return\n\n z_start = P.z_start\n z_end = P.z_end\n\n mean, cov = analyze_image(Im)\n\n window_size = 10\n mean_smoothed = smoothing.mean_moving_average(mean, window_size)\n cov_smoothed = smoothing.cov_moving_average(cov, window_size)\n\n c = CubicFitRotated()\n c.fit(mean=mean_smoothed, cov=cov_smoothed, z_start=z_start, z_end=z_end)\n\n try:\n os.mkdir(path + '/analysis')\n path += '/analysis'\n except OSError:\n pass\n\n\n plots.plot_mean(mean, z_start, z_end).savefig(path + '/beam_center.png')\n plots.plot_beta(cov, z_start, z_end).savefig(path + '/sigma_squared.png')\n\n export.export_mean(mean = mean, filename = path + '/center.csv', z_start = z_start, z_end = z_end)\n export.export_cov(cov = cov, filename = path + '/cov.csv', z_start = z_start, z_end = z_end)\n\n plt.close('all')", "def plot_waveforms(data, name, title, directory_name):\n plt.figure(figsize=(20, 10))\n plt.plot(data)\n plt.title(title)\n plt.savefig('./' + directory_name + '/' + name)\n pass", "def make_all_plots(dirname='plots'):\n for worker_type in ['ordinary', 'normal', 'master', None]:\n name = 'rajpal'\n if worker_type is not None:\n name += '-' + worker_type\n data = Data.from_rajpal_icml15(worker_type=worker_type)\n data.make_plots(name)\n data.make_data('{}.csv'.format(name))\n\n data = Data.from_bragg_hcomp13(positive_only=False)\n data.make_plots(os.path.join(dirname, 'bragg'))\n data.make_data(os.path.join(dirname, 'bragg.csv'))\n\n data = Data.from_bragg_hcomp13(positive_only=True)\n data.make_plots(os.path.join(dirname, 'bragg-pos'))\n data.make_data(os.path.join(dirname, 'bragg-pos.csv'))\n\n data = Data.from_lin_aaai12(workflow='tag')\n data.make_plots(os.path.join(dirname, 'lin-tag'))\n data.make_data(os.path.join(dirname, 'lin-tag.csv'))\n\n data = Data.from_lin_aaai12(workflow='wiki')\n data.make_plots(os.path.join('lin-wiki'))\n data.make_data(os.path.join('lin-wiki.csv'))\n\n make_bragg_teach_plots(dirname=dirname)", "def save_plot(self, ):\n pass", "def combine_plot(qa_out_path,brain_path):\n \n #Get the scan volume of the brain.\n brain_ref = nib.load(brain_path)\n brain_ref_shape = brain_ref.shape[0:3]\n \n plots_list = ['Rotate_Z_axis_000000.png','Rotate_Z_axis_000001.png','Rotate_Z_axis_000002.png',\n 'Rotate_Y_axis_000000.png','Rotate_Y_axis_000001.png','Rotate_Y_axis_000002.png',\n 'Rotate_X_axis_000000.png','Rotate_X_axis_000001.png','Rotate_X_axis_000002.png']\n y_labels = [\"Rotate with Z axis\",\"Rotate with Y axis\",\"Rotate with X axis\"]\n x_labels = 
[\"angle=0\",\"angle=120\",\"angle=240\"]\n \n #Temporary list to store the image nparray:\n im_arr=[] \n \n fig= plt.figure()\n plt.title(f'QA_tractography. Scan volume = {brain_ref_shape} \\n\\n', fontsize=60,fontweight='bold')\n plt.xticks([])\n plt.yticks([])\n plt.axis(\"off\")\n\n j = 0\n for i in range(9):\n #Load in the nine images into a nparray one by one.\n im_arr = np.array(Image.open(qa_out_path + \"/\" + plots_list[i]))\n #Change the background of the image into black:\n im_arr = np.where(im_arr<=0.01, 255, im_arr) \n ax = fig.add_subplot(3,3,i+1)\n ax.imshow(im_arr,interpolation=\"none\",alpha=0.9)\n \n #Set the X labels and Y labels\n if i<3:\n ax.set_title(x_labels[i],fontsize=60,fontweight='bold')\n if i % 3 == 0:\n ax.set_ylabel(y_labels[j],fontsize=60,fontweight='bold')\n j = j + 1\n plt.xticks([])\n plt.yticks([])\n \n fig.set_size_inches(40, 40, forward = True)\n fig.savefig(qa_out_path + \"/\" + 'qa_tractography.png', format='png')\n\n #Delete the Nine images which used to generate the qa_tractography.png \n for plot in plots_list:\n if os.path.exists(qa_out_path + \"/\" + plot):\n os.remove(qa_out_path + \"/\" + plot)\n else:\n print('No such file generated from streamlines window. Please check if the streamline.trk files is generated from the pipeline correctly or not')" ]
[ "0.606111", "0.60254896", "0.5936532", "0.5899535", "0.5893519", "0.58660996", "0.57554936", "0.5748019", "0.57389164", "0.5674885", "0.5673358", "0.56440294", "0.5628109", "0.5620251", "0.56082934", "0.5570723", "0.5566784", "0.5559577", "0.5559577", "0.54967564", "0.54904395", "0.5483825", "0.5476966", "0.547431", "0.547408", "0.54734606", "0.54479665", "0.54097015", "0.54086626", "0.5400718" ]
0.7255857
0
Evaluates the value of the cost function for the given parameters by communicating with the client.
def __call__(self, parameters) -> ValueEstimate:
    # Encode params to json string
    save_circuit_template_params(parameters, "current_optimization_params.json")
    with open("current_optimization_params.json", "r") as f:
        current_params_string = f.read()

    # POST params to proxy
    evaluation_id = self.client.post_argument_values(current_params_string)

    # POST status to EVALUATING
    self.client.post_status("EVALUATING")

    # WAIT for status to be OPTIMIZING
    while self.client.get_status() != "OPTIMIZING":
        time.sleep(1)

    # GET cost function evaluation from proxy
    evaluation_string = self.client.get_evaluation_result(evaluation_id)
    value_estimate = load_value_estimate(io.StringIO(evaluation_string))

    return value_estimate
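To make the handshake above concrete, here is a hypothetical in-memory stand-in for the proxy client; the real client, save_circuit_template_params and load_value_estimate belong to the surrounding optimization framework and are not reproduced, so this stub only illustrates the status/evaluation protocol.

class FakeProxyClient:
    """Illustrative stand-in showing only the status/evaluation handshake."""

    def __init__(self, evaluator):
        self._evaluator = evaluator  # callable: params-json -> result-json
        self._status = "OPTIMIZING"
        self._results = {}

    def post_argument_values(self, params_json):
        evaluation_id = str(len(self._results))
        # A real proxy would hand the params to a separate evaluation service;
        # here we evaluate immediately so get_status() can flip back at once.
        self._results[evaluation_id] = self._evaluator(params_json)
        return evaluation_id

    def post_status(self, status):
        # The optimizer announces EVALUATING; the service flips it back to
        # OPTIMIZING once the result is ready (immediately, in this stub).
        self._status = "OPTIMIZING" if status == "EVALUATING" else status

    def get_status(self):
        return self._status

    def get_evaluation_result(self, evaluation_id):
        return self._results[evaluation_id]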
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval_cost(self, params, **kwargs):\n raise NotImplementedError", "def evaluate_cost(self, msg):\n raise NotImplementedError()", "def EvaluateFunction(self, p_float=..., p_float=..., p_float=...):\n ...", "def _cost_method(self, *args, **kwargs):\n\n cost_val = 0.5 * np.linalg.norm(self.obs_data - self.op(args[0])) ** 2\n\n if 'verbose' in kwargs and kwargs['verbose']:\n print(' - DATA FIDELITY (X):', cost_val)\n\n return cost_val", "def objective_function(params):\n\n\tenergy = 0 # Initialize the energy in 0\n\n\tqc = get_var_form(params) # Obtain a quantum circuit instance from the parameters\n\n\tfor key in pauli_weights.keys(): # Iterate over the pauli string in the Pauli weight\n\n\t\tmc, n_measures = measure_circuit_factory(key) # Obtain the measurement circuit from the Pauli string\n\t\tqc_final = qc.compose(mc) # Combine both circuits\n\n\t\t# Execute the quantum circuit to obtain the probability distribution associated with the current parameters\n\t\tt_qc = transpile(qc_final, backend)\n\t\tq_obj = assemble(t_qc, shots=NUM_SHOTS)\n\t\tcounts = backend.run(q_obj).result().get_counts(qc_final)\n\n\t\tdistribution = get_distribution(counts, n_measures) # Convert the measured counts into a probability vector\n\n\t\t# Weight each probability by the diagonal factor, them sum all of them, and later multiply by the Pauli Weight\n\t\tenergy += np.sum(distribution * generate_diagonal_factors(n_measures)) * pauli_weights[key]\n\n\tenergy_list.append(energy) # Append the new computed energy\n\n\t# Print the iteration of the VQE and the energy\n\tprint('Iteration {}, Energy: {:.4f}'.format(len(energy_list), energy))\n\n\treturn energy", "def calcCostFun(self):\n\n self.start()\n F, K = self.model()\n \n return self.costFunction", "def test_independent_expval(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2)\n params = jax.numpy.array([0.1, 0.2, 0.3])\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.RY(a[2], wires=0)\n qml.expval(qml.PauliZ(1))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n res = qml.interfaces.execute([tape], dev, cache=cache, **execute_kwargs)\n return res[0]\n\n res = jax.jit(jax.grad(cost), static_argnums=1)(params, cache=None)\n assert res.shape == (3,)", "def evaluate(self, representativeness: float, weight: float) -> float:\n pass", "def query_weight(self):\n # open socket connection (TCP/IP)\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n\n # set time out time for connections (seconds)\n s.settimeout(1)\n\n # connect to the terminal\n try:\n s.connect((self.IP_scale, self.PORT_scale))\n except Exception as e:\n print(\"Couldn't connect to the load cell when quering weight\")\n print(f\"Exception: {e}\")\n\n\n # send stable weight or, if timeout (in ms), then send dynamic weight\n request = self._fomat_request(\"SC 420\")\n s.sendall(request)\n\n # keep calling receive until the end of line symbols are received\n response = []\n while True:\n part_response = s.recv(1024).decode()\n response.append(part_response)\n \n if (\"\\r\" in part_response) or (\"\\n\" in part_response):\n break\n\n # format the reponse\n response_str = str(response).strip('[]')\n parsed_response = re.findall(r'\\b\\d+\\b', response_str)\n weight = int(parsed_response[0]) + int(parsed_response[1])/100\n\n\n return weight", "def test_independent_expval(self, execute_kwargs):\n dev = qml.device(\"default.qubit\", wires=2)\n params = jax.numpy.array([0.1, 0.2, 0.3])\n\n def cost(a, 
cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.RY(a[2], wires=0)\n qml.expval(qml.PauliZ(1))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n res = execute([tape], dev, cache=cache, **execute_kwargs)\n return res[0]\n\n res = jax.jit(jax.grad(cost), static_argnums=1)(params, cache=None)\n assert res.shape == (3,)", "def costFun(self, S, x):", "def objective(\n self,\n parameters: object\n ) -> float:\n pass", "def cost(self) -> float:", "def execute(self, parameters, messages):\n execute_tool(arcsdm.calculateweights.Calculate, self, parameters, messages)\n return", "def evaluate_function(self, trajectory):\n objective_values_by_tag = self.evaluate_function_by_objective(trajectory)\n objective_function_values = 0.\n objective_distance_to_goal = 0.\n objective_function_values_init = 0.\n\n reachability_cost = False\n\n # Judge if we are using reachability cost\n for tag, objective_values in objective_values_by_tag:\n if tag == 'reach_avoid_4d':\n reachability_cost = True\n\n # No freezing cost!\n if reachability_cost:\n for tag, objective_values in objective_values_by_tag:\n if tag == 'reach_avoid_4d' or 'avoid_4d':\n objective_function_values += self._reduce_objective_values(trajectory, objective_values)\n else:\n for tag, objective_values in objective_values_by_tag:\n objective_function_values += self._reduce_objective_values(trajectory, objective_values)\n\n # ## Freeze the sum of 2 costs, at the minimum of the sum of cost\n # if reachability_cost:\n # for tag, objective_values in objective_values_by_tag:\n # if tag == 'reach_avoid_4d' or 'avoid_4d':\n # objective_function_values += objective_values\n # if tag == 'distance_to_goal':\n # objective_distance_to_goal += objective_values\n # try:\n # ## Freeze_v1, freeze at the minimum cost of sum: reach_avoid + avoid\n # # objective_function_values = self._freeze_cost_v1(objective_function_values, objective_distance_to_goal)\n #\n # # Freeze v2, freeze at the minimum cost of only reach_avoid\n # objective_function_values = self._freeze_cost_v2(objective_values_by_tag, objective_function_values, objective_distance_to_goal)\n # objective_function_values = self._reduce_objective_values(trajectory, objective_function_values)\n # except ValueError:\n # print(\"cannot freeze in total cost\")\n # objective_function_values = self._reduce_objective_values(trajectory, objective_function_values)\n # else:\n # for tag, objective_values in objective_values_by_tag:\n # objective_function_values += self._reduce_objective_values(trajectory, objective_values)\n\n # ## Freeze the sum of 2 costs, at the minimum of the reach_avoid cost\n # if reachability_cost:\n # for tag, objective_values in objective_values_by_tag:\n # if tag == 'reach_avoid_4d' or 'avoid_4d':\n # objective_function_values = self._freeze_cost_v2(objective_values, objective_distance_to_goal)\n # objective_function_values += self._reduce_objective_values(trajectory, objective_values)\n # else:\n # for tag, objective_values in objective_values_by_tag:\n # objective_function_values += self._reduce_objective_values(trajectory, objective_values)\n\n return objective_function_values", "def compute_value(callback, graph):\n return callback(graph)", "def objective(self, args: Dict[str, Any]) -> float:\n pass", "def cost_function(self, config_samples):\n cost = self.work_tracker(config_samples)\n return cost", "def getCostFunction(self, evalpts, observations, sigma=None, metric=lambda x: sum(x*x)):\n #XXX: better interface for sigma?\n def 
_(params):\n ind = 0\n for F, n, ofilt, icheck in zip(self._forwardFactories, self._inputs, \\\n self._outputFilters, self._inputCheckers):\n # check input #XXX: is this worthwile to do?\n my_params = params[ind:ind+n]\n checkQ = icheck(my_params, evalpts)\n if checkQ is not None:\n # some parameters are out of range... returns \"cost\"\n return checkQ\n\n Gm = F(params[ind:ind+n])\n if ind == 0:\n x = ofilt(Gm(evalpts)) \n else:\n x = x + ofilt(Gm(evalpts)) \n ind = ind+n\n if sigma is None:\n x = x - observations\n else:\n x = (x - observations) / sigma\n #return sum(real((conjugate(x)*x)))\n #return sum(x*x) \n return metric(x)\n return _", "def eval_func(self, game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n \n if game.is_winner(player):\n return float(\"inf\")\n \n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n if game.move_count < ((game.height * game.width)/2):\n return float(self.weights[0] * own_moves - \n self.weights[1] * opp_moves - \n self.weights[2] * __distance_from_center__(game, player))\n else:\n return float(self.weights[3] * own_moves - \n self.weights[4] * opp_moves - \n self.weights[5] * __distance_from_center__(game, player))", "def evaluate(self, *args, **kwargs):\n params = self.process_args(args, kwargs)\n a = params['a']\n b = params['b']\n return a * self.x + b", "def execute(self, parameters, messages):\n execute_tool(arcsdm.acterbergchengci.Calculate, self, parameters, messages)\n return", "def evaluationFunction(problem, gFunc, hFunc, node):\n #g = getattr(searchAgents, gFunc)\n #h = getattr(searchAgents, hFunc)\n h = hFunc\n #return g(node) + h(node)\n return gFunc + h(node, problem)", "def _handle_value_message(self, variable_name, recv_msg):\n self._neighbors_values[variable_name] = recv_msg.value\n # if we have a value for all neighbors, compute the best value for\n # conflict reduction\n if len(self._neighbors_values) == len(self._neighbors):\n if self.logger.isEnabledFor(logging.DEBUG):\n self.logger.debug(\n f\"Received values from all neighbors : {self._neighbors_values}\"\n )\n # Compute the current_cost on the first step (initialization) of\n # the algorithm\n if self.current_cost is None:\n reduced_cs = []\n concerned_vars = set()\n cost = 0\n for c in self.utilities:\n asgt = filter_assignment_dict(self._neighbors_values, c.dimensions)\n reduced_cs.append(c.slice(asgt))\n cost = functools.reduce(\n operator.add, [f(self.current_value) for f in reduced_cs]\n )\n # Cost for variable, if any:\n concerned_vars.update(c.dimensions)\n\n for v in concerned_vars:\n if v.name == self.name:\n cost += v.cost_for_val(self.current_value)\n else:\n cost += v.cost_for_val(self._neighbors_values[v.name])\n\n self.value_selection(self.current_value, cost)\n\n new_values, val_cost = self._compute_best_value()\n self._gain = self.current_cost - val_cost\n if ((self._mode == \"min\") & (self._gain > 0)) or (\n (self._mode == \"max\") & (self._gain < 0)\n ):\n self._new_value = random.choice(new_values)\n else:\n self._new_value = self.current_value\n\n if self.logger.isEnabledFor(logging.INFO):\n self.logger.info(\n f\"Best local value for {self.name}: {self._new_value}\"\n f\" {self._gain} (neighbors: {self._neighbors_values})\"\n )\n\n self._send_gain()\n\n self._wait_for_gains()\n else:\n # Still waiting for other neighbors\n if self.logger.isEnabledFor(logging.DEBUG):\n waited = [n for n in self._neighbors if n not in self._neighbors_values]\n if 
self.logger.isEnabledFor(logging.DEBUG):\n self.logger.debug(\n f\"Waiting for values from other neighbors: {waited}\"\n )", "def cost(self, output, labels, weights):\n raise NotImplementedError('Must be overridden by concrete subclass')", "def __call__(self, x, u, k):\n first_time_through = True\n for cost, arg, weight in zip(self._costs, self._args, self._weights):\n if arg == \"x\":\n cost_input = x\n else:\n cost_input = u[arg]\n\n current_term = weight * cost(cost_input, k)\n if current_term > 1e8:\n print(\"Warning: cost %s is %f\" % (cost._name, current_term))\n print(\"Input is: \", cost_input)\n\n# if cost._name[:4] == \"bike\":\n# print(cost._name, \": \", current_term)\n\n if first_time_through:\n total_cost = current_term\n else:\n total_cost += current_term\n\n first_time_through = False\n\n return total_cost", "def compute(self, node, input_vals):\r\n raise NotImplementedError", "def compute(self, *args):\n return _ITKCostFunctionsPython.itkMultipleValuedVnlCostFunctionAdaptor_compute(self, *args)", "def cost(self,e1,e2):\n pass", "def cost_function(params, count):\n circuit = models.Circuit(nqubits)\n for l in range(layers):\n for q in range(nqubits):\n circuit.add(gates.RY(q, theta=0))\n for q in range(0, nqubits - 1, 2):\n circuit.add(gates.CZ(q, q + 1))\n for q in range(nqubits):\n circuit.add(gates.RY(q, theta=0))\n for q in range(1, nqubits - 2, 2):\n circuit.add(gates.CZ(q, q + 1))\n circuit.add(gates.CZ(0, nqubits - 1))\n for q in range(nqubits):\n circuit.add(gates.RY(q, theta=0))\n\n cost = 0\n circuit.set_parameters(\n params\n ) # this will change all thetas to the appropriate values\n for i in range(len(ising_groundstates)):\n final_state = circuit(np.copy(ising_groundstates[i]))\n cost += np.real(encoder.expectation(final_state.state()))\n\n if count[0] % 50 == 0:\n print(count[0], cost / len(ising_groundstates))\n count[0] += 1\n\n return cost / len(ising_groundstates)" ]
[ "0.68833715", "0.6552716", "0.6414888", "0.6136946", "0.6054446", "0.6020302", "0.5953084", "0.59045607", "0.58985734", "0.5881405", "0.58281946", "0.5827903", "0.5775339", "0.57697356", "0.57682675", "0.5752939", "0.57292604", "0.5696659", "0.56805944", "0.56728137", "0.5629252", "0.5626765", "0.5623027", "0.56166536", "0.5611599", "0.56094843", "0.560757", "0.56047845", "0.56025845", "0.5592754" ]
0.6750966
1
Kills Excel background processes in order to fetch the active worksheet. This is dangerous, so close all other Excel sheets before using this function.
def kill_excel_bg():
    excel_process = [
        process
        for process in psutil.process_iter()
        if process.name() == "EXCEL.EXE"
    ]
    for process in excel_process:
        xl_files = [f.path for f in process.open_files() if ".xl" in f.path]
        print(xl_files)
        if len(xl_files) == 0:
            process.kill()
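A slightly safer, hypothetical variant of the same psutil scan that only reports which background Excel processes it would kill unless explicitly told to kill them; useful for a dry run before invoking the destructive version above.

import psutil

def find_background_excel(kill=False):
    """Return PIDs of Excel processes with no open .xl* files; kill them only if asked."""
    victims = []
    for process in psutil.process_iter():
        try:
            if process.name() != "EXCEL.EXE":
                continue
            xl_files = [f.path for f in process.open_files() if ".xl" in f.path]
            if not xl_files:
                victims.append(process.pid)
                if kill:
                    process.kill()
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            # Processes can disappear or deny inspection between iteration steps.
            continue
    return victims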
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CloseExcelFile(self):\r\n self.ss.close(False) # SaveChanges = False\r\n self.xl.Application.Quit()", "def clean_up():\n if is_excel_running():\n # Prevents Excel from reopening\n # if it has been closed manually or never been opened\n for app in Apps():\n try:\n app.xl.run_VB_macro(\"CleanUp\")\n except (CommandError, AttributeError, aem.aemsend.EventError):\n # Excel files initiated from Python don't have the xlwings VBA module\n pass", "def close_workbook(self):\n self.workbook.close()", "def exit(self):\n if self.ws: # is not null or undefined\n self.ws.close()\n self.ws = None", "def exit(self):\n if self.ws: # is not null or undefined\n self.ws.close()\n self.ws = None", "def terminate(self):\n self._worker.kill()", "def closeExcelSheet(workbook, outputFileName):\n workbook.save(outputFileName)", "def kill(self):\r\n plt.close(self.fig)", "def closeExcelSheet(workbook, outputFileName):\n\tworkbook.save(outputFileName)", "def close(self):\n subprocess.call([\"pkill\", \"controller\"])", "def kill(self):\n\n self.proc.kill()", "def kill(self):\n self._destruct()\n pass", "def kill(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.kill()\n except psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == \"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")", "def kill(self):\n self._process.kill()", "def kill():\n sb.call(\"Taskkill /IM SLDWORKS.exe /F\")", "def terminate(self):\n plt.close('all')", "def cleanup():\n broadcast_proc.terminate()\n subprocess.call('sudo hciconfig hci0 noleadv', shell=True)\n if CELL:\n ser_command('Cell off', cell_ser)\n cell_ser.close()\n grovepi.digitalWrite(LED, 0)", "def gracefully_terminate(self):\n self.running = False", "def annihilate(self):\n self.master.destroy()", "def kill(self):\r\n try:\r\n if self.process:\r\n self.process.kill()\r\n self.process.wait()\r\n except WindowsError:\r\n # kill may not be available under windows environment\r\n pass", "def stop(self):\n self.ngrok.terminate()\n return", "def _finish(self):\n steppable_registry = CompuCellSetup.persistent_globals.steppable_registry\n steppable_registry.finish()\n self.close_frames()", "def kill(self):\n #overridden for documentation purposes\n stackless.tasklet.kill(self)", "def quit_worker(self):\n\n if self.isRunning():\n # TODO: Should find a better way of doing this by setting an external flag\n self.terminate()\n self.wait()\n\n self.pb.close()\n self.parent.setEnabled(True)\n self.parent.statusbar.showMessage(\"\")", "def force_stop(self):\n\n # Stopping thread\n self.quit()\n\n # Killing all running processes\n ProcessManager(self.cf_process).close_all_child()\n ProcessManager(self.server_process).close_all_child()", "def killJobs(self):\n self.worker_pool.close()\n self.status_pool.close()\n self.failure = True\n for job in self.active:\n try:\n job.killJob()\n except AttributeError:\n raise SchedulerError('killJob method is not defined')\n except: # Job already terminated\n pass\n self.job_queue_count = 0", "def __del__(self):\n self._proc.kill()", "def stop(self):\n try:\n self.process.terminate()\n self.process = None\n except AttributeError:\n return", "def on_close(self, ws, code, reason):\n self.wsapp.keep_running = False\n if not self.ws_opened.is_set():\n 
self.ws_opened.set()", "def close(self):\n self.tab.close()" ]
[ "0.65055853", "0.64582294", "0.616265", "0.5976525", "0.5976525", "0.5968671", "0.591073", "0.5901765", "0.59010446", "0.58406603", "0.58245236", "0.5819786", "0.57964534", "0.5766821", "0.5750225", "0.56709623", "0.564577", "0.56204385", "0.5613505", "0.5600134", "0.5597092", "0.5594534", "0.55301255", "0.55186266", "0.549504", "0.54605293", "0.54413027", "0.54353386", "0.5434607", "0.54324543" ]
0.71074814
0
Connects to the currently active Excel workbook and returns a win32com client object. Using this function has known issues.
def connect_to_excel() -> w32:
    xl = xl_app()
    if xl.ActiveWorkbook is None:
        kill_excel_bg()
        xl = xl_app()  # type: w32
    logging.info(f"Connected to excel sheet {xl.ActiveWorkbook.Name}")
    return (
        xl
        if check_jupyter_excel_connection(xl)
        else Exception("Not connected to excel")
    )
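A hypothetical calling pattern for the helper above; note that it returns an Exception object instead of raising it when the connection check fails, so a caller has to test the result type explicitly, as sketched here.

def get_active_workbook_name():
    """Illustrative caller that copes with connect_to_excel's return-an-Exception style."""
    xl = connect_to_excel()
    if isinstance(xl, Exception):
        # connect_to_excel hands back the Exception rather than raising it.
        raise xl
    return xl.ActiveWorkbook.Name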
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def xl_app():\n # get the Excel application object from PyXLL and wrap it\n xl_window = get_active_object()\n xl_app = win32com.client.Dispatch(xl_window).Application\n # it's helpful to make sure the gen_py wrapper has been created\n # as otherwise things like constants and event handlers won't work.\n win32com.client.gencache.EnsureDispatch(xl_app)\n return xl_app", "def connect(self) -> bool:\n\n if self.connected:\n self.workbook.close()\n self.connected = False\n\n if self.settings.excelFileName:\n\n # What OLEDB does is scan the first n rows (default=8) and determines a data type.\n # If you leave out the IMEX=1 then it will return Null for any values that do not\n # match that data type. If you include IMEX=1 and the scan encounters mixed data\n # types then it will return text. If your sheet has a text header then you can help\n # this process by specifying HDR=No and discarding the header.\n # However OLEDB will always scan the first n rows to determine the data type and\n # return results accordingly.\n #\n # The Rows to scan is determined by the value of TypeGuessRows.\n #\n # The older Microsoft.Jet.OLEDB.4.0 driver would allow you to specify TypeGuessRows\n # in the connection string but Microsoft.ACE.OLEDB.12.0 does not.\n # TypeGuessRows is now held in the registry under...\n #\n # Excel 2007: HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Office\\12.0\\Access Connectivity Engine\\Engines\\Excel\\TypeGuessRows\n # Excel 2010: HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Office\\14.0\\Access Connectivity Engine\\Engines\\Excel\\TypeGuessRows\n # Excel 2013: HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Office\\15.0\\Access Connectivity Engine\\Engines\\Excel\\TypeGuessRows\n #\n # 32 Bit applications running on a 64 Bit Machine will find them under the Wow6432Node. E.g...\n #\n #HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node\\Microsoft\\Office\\12.0\\Access Connectivity Engine\\Engines\\Excel\\TypeGuessRows\n\n connection_string = \\\n 'Driver={{Microsoft Excel Driver (*.xls, *.xlsx, *.xlsm, *.xlsb)}};DBQ={};ReadOnly=1;IMEX=1;'.\\\n format(self.settings.excelFileName)\n try:\n self.workbook = pyodbc.connect(connection_string, autocommit=True)\n except pyodbc.Error as err:\n # vs.SetItemText(importDialog, kWidgetID_excelSheetNameLabel, \"Invalid Excel file!\")\n vs.AlertCritical(err.value[1], \"Talk to Carlos\")\n else:\n self.connected = True\n\n return self.connected", "def connect_book(self):\n \n print('Connecting to excel workbook... 
',end=''),\n self.book = xw.Book(self.file_path)\n \n try: self.sheet = self.book.sheets[self.sheet]\n except:\n while True:\n sheet_name = input(\"\"\"\\tSheet name: \"\"\") or 'Sheet1'\n try:\n self.sheet = self.book.sheets[sheet_name]\n break\n except: #forgive me, Guido, for I have sinned\n print('\\tNo matching sheet name, please try again.')\n continue\n \n print('Connected')", "def get_ssclient(config):\n return smartsheet.Smartsheet(config[\"ss_access_token\"])", "def google_sheets_connector():\n print(\"Connecting to Google Sheets\")\n scope = ['https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name('key.json', scope)\n client = gspread.authorize(credentials)\n sheet = client.open('backend').sheet1\n return sheet", "def check_jupyter_excel_connection(xl: w32) -> bool:\n return True if xl.ActiveWorkbook is not None else False", "def google_sheets_connector():\n scope = ['https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name('key.json', scope)\n client = gspread.authorize(credentials)\n sheet = client.open('backend').sheet1\n return sheet", "def _get_COM_object(self):\r\n prefix = \"LightTools API Server | \"\r\n\r\n if self._pid:\r\n if not psutil.pid_exists(self._pid):\r\n msg = \"Couldn't find a LightTools process with PID {}.\"\r\n raise ValueError(msg.format(self._pid))\r\n prefix += str(self._pid)\r\n\r\n start_time = time.time()\r\n connection_attempt_timed_out = (\r\n lambda current_time: current_time - start_time > self._timeout\r\n )\r\n\r\n rot = _comutils.RunningObjectTable()\r\n\r\n while not connection_attempt_timed_out(time.time()):\r\n comobjs = rot.get_objects()\r\n for name in comobjs:\r\n if name.startswith(prefix):\r\n return comobjs[name]\r\n else:\r\n msg = (\r\n \"Couldn't establish a connection to LightTools within {} \"\r\n \"seconds. 
Connection attempt aborted.\"\r\n )\r\n raise error.TimeOutError(msg.format(self._timeout))", "def getConnection( **kwargs ):\r\n port = kwargs.get( 'port', OpenOfficeConnection.port )\r\n host = kwargs.get( 'host', OpenOfficeConnection.host )\r\n if OpenOfficeConnection.desktop is None:\r\n local = uno.getComponentContext()\r\n resolver = local.ServiceManager.createInstanceWithContext( 'com.sun.star.bridge.UnoUrlResolver', local )\r\n context = resolver.resolve( \"uno:socket,host=%s,port=%s;urp;StarOffice.ComponentContext\" % ( host, port ) )\r\n OpenOfficeConnection.desktop = context.ServiceManager.createInstanceWithContext( 'com.sun.star.frame.Desktop', context )\r\n return OpenOfficeConnection.desktop", "def getExcelApp(self):\r\n return self.excelapp", "def connect():\n Rhino = win32com.client.Dispatch(\"Rhino4.Interface\")\n time.sleep(1)\n Rhino.Visible = True\n return Rhino", "def get_curr_connection(self):\n if not self._ws:\n self._ws = self.new_connection()\n return self._ws", "def get_excel(exceldocument):\r\n\r\n sheet = xlrd.open_workbook(exceldocument).sheet_by_index(0)\r\n return sheet", "def open_com_driver( self ):\r\n self.gui.print_info_string( \"Opening comm port\" )\r\n val = self.com_driver.open()\r\n if val:\r\n status = \"Open\"\r\n self.gui.print_info_string( \"Comm port open OK....\" )\r\n self.logger.info( \"open_driver, opened ok\" )\r\n msg = \"Open Comm Port OK\"\r\n self.gui.print_info_string( msg )\r\n\r\n else:\r\n self.gui.print_info_string( \"Comm port open NG\" )\r\n status = \"Open Failed\"\r\n self.logger.info( \"open failed, ignored\" )\r\n msg = \"Open Comm Port NG\"\r\n self.gui.print_info_string( msg )\r\n\r\n self.gui.set_open( status )\r\n\r\n return", "def getClient(self):\r\n client = SpreadsheetsService()\r\n\r\n try:\r\n client.GetWorksheetsFeed(self.spreadsheet_key, visibility='public',\r\n projection='basic')\r\n except gaierror:\r\n client = None\r\n\r\n return client", "def ow_connect():\n ow = boto3.client('opsworks', region_name='us-east-1')\n return ow", "def _get_client(self):\n self.logger.info('Connecting to MySQL running at \"%s\"...',\n self._connection_params['host'])\n\n # https://dev.mysql.com/doc/connector-python\n return connector.connect(**self._connection_params)", "def _open_session(self):\n return self.cluster.connect()", "def _get_connection(self, conf):\n return get_session()", "def _get_client(self):\n try:\n client = boto3_cached_conn(\n 'iam', **self.conn_details)\n\n if not client:\n raise ValueError(f\"boto3_cached_conn returned null IAM client for {self.account_number}\")\n\n return client\n\n except Exception as e:\n self.on_failure.send(self, error=e)\n self.current_app.logger.exception(f\"Failed to obtain boto3 IAM client for account {self.account_number}.\", exc_info=False)\n raise e", "def _get_client_impl(self):\n api_version = self._get_api_version(None)\n if api_version not in self._client_impls:\n self._create_client_impl(api_version)\n return self._client_impls[api_version]", "def connect_to_arduino(self):\n try:\n if self.emulate:\n print('(node {}) Emulating arduino'\\\n .format(time.strftime(TFORMAT)))\n my_emulator = ArduinoSerialEmulator()\n self.emulation_port = my_emulator.report_server()\n my_emulator.start()\n else:\n self.emulation_port=[]\n\n\n print('(node {}) Setting-up arduino communications'\\\n .format(time.strftime(TFORMAT)))\n arduino_COMS= SCM.SerialCommManager(0.01,\n verbose=self.verbose,\n emulatedPort=self.emulation_port,\n arduino_port=self.arduino_port)\n\n if 
arduino_COMS.is_arduino_connected():\n self.is_arduino_connected = True\n print('(node {}) Arduino connected'\\\n .format(time.strftime(TFORMAT)))\n else:\n self.is_arduino_connected = False\n return arduino_COMS\n\n except SerialException:\n print('(node {}) Serial exception ocurred. Try again in a few seconds '\\\n .format(time.strftime(TFORMAT)))\n self.is_arduino_connected = False\n raise\n except ValueError as err:\n raise ArduinoConnectionError", "def openExcelSheet(outputFileName):\n workbook = Workbook()\n worksheet = workbook.add_sheet(\"Sheet 1\")\n return workbook, worksheet", "def get_connection(self):\n if self.conn is None or self.conn.closed != 0:\n self._connect()\n logger.debug(f'The connection object is: {self.conn}.')\n return self.conn", "def connect():\n if not is_notebook():\n print('Python session is not running in a Notebook Kernel')\n return\n\n global _comm\n\n kernel = get_ipython().kernel\n kernel.comm_manager.register_target('tdb', handle_comm_opened)\n # initiate connection to frontend.\n _comm = Comm(target_name='tdb', data={})\n # bind recv handler\n _comm.on_msg(None)", "def get_excel(self, file_name):\n global download_component\n\n download_soup = BeautifulSoup(self.res.text, 'lxml')\n download_component = get_download_component(download_soup)\n\n #Start excel session\n xsess = requests.Session()\n xsess.headers = EXCEL_HEADERS\n \n #prepare excel session\n self.data['SAPEVENTQUEUE'] = \"Button_Press~E002Id~E004\" + \\\n download_component + \"~E003~E002ResponseData~E004delta~E005ClientAction~E004submit~E003~E002~E003\"\n self.res = self.sess.post(self.url, data=self.data)\n\n #parse data from prepared excel session\n fileid, action = get_excel_url(BeautifulSoup(self.res.text,'lxml-xml')) \n \n #replace\n xurl = HOST_URL + action\n xurl = xurl.replace(\"\\\\x2f\",\"/\")\n xurl = xurl.replace(\"\\\\x7e\",\"~\")\n xurl = xurl.replace(\"\\\\x3f\", \"?\")\n xurl = xurl.replace(\"\\\\x2d\",\"-\")\n xurl = xurl.replace(\"\\\\x3d\",\"=\")\n xurl = xurl.replace(\"\\\\x253a\",\":\")\n xurl = xurl.replace(\"\\\\x26\",\"&\")\n xres = xsess.post(xurl)\n \n #write file\n with open(file_name,'wb') as f:\n f.write(xres.content)", "def get_connection(self):\n return self.application.get_connection()", "def _get_spreadsheet(i):\n path = io_mgr.get_parties_spreadsheet(i)\n if not os.path.exists(path):\n raise IOError()\n\n return openpyxl.load_workbook(path, read_only=True)", "def _robotConnect(self, resp):\r\n # Read the response\r\n url = resp['url']\r\n current = resp.get('current', None)\r\n\r\n if current:\r\n print(\"Warning: There is a newer client (version: '{0}') \"\r\n 'available.'.format(current))\r\n\r\n print('Connect to Robot Process on: {0}'.format(url))\r\n\r\n # Make WebSocket connection to Robot Manager\r\n args = urlencode((('userID', self._userID), ('robotID', self._robotID),\r\n ('password', self._password)))\r\n factory = RCERobotFactory('{0}?{1}'.format(url, args), self)\r\n connectWS(factory)", "def _connect(self, slack_bot_token):\n slack_client = SlackClient(slack_bot_token)\n connect_response = slack_client.rtm_connect(\n with_team_state=False, auto_reconnect=True)\n\n if connect_response:\n logger.info(\"RTM connected\")\n return slack_client\n else:\n raise ConnectionError" ]
[ "0.6155974", "0.61225194", "0.5725281", "0.5470277", "0.5454899", "0.5421767", "0.53526795", "0.5294494", "0.5244303", "0.52142185", "0.5169555", "0.5121356", "0.5063958", "0.5020426", "0.49767986", "0.495653", "0.4915732", "0.4904937", "0.48884806", "0.47731617", "0.47678655", "0.47563314", "0.47305635", "0.47146678", "0.47097182", "0.46991426", "0.468957", "0.46768016", "0.4661818", "0.46461207" ]
0.74729425
0
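A brief usage sketch for the entry above (assumptions: a PyXLL-enabled Jupyter session where `connect_to_excel` and its helpers are importable; the workbook name is hypothetical). Note that, as written, the function returns an Exception instance on failure rather than raising it, so callers should check the result:

xl = connect_to_excel()
if isinstance(xl, Exception):
    print("No active workbook:", xl)   # handle the not-connected case
else:
    print(xl.ActiveWorkbook.Name)      # e.g. "Budget.xlsx" (hypothetical)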
Load modules from the pyxll config file. Useful in a Jupyter notebook environment.
def load_modules_from_config(cfg: str):
    pyxll_cfg = ConfigParser()
    pyxll_cfg.read(cfg)
    for path in pyxll_cfg["PYTHON"]["pythonpath"].split("\n"):
        sys.path.append(path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_modules_manually():\n #cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))\n cmd_folder = '../myutils/'\n if cmd_folder not in sys.path:\n sys.path.insert(0, cmd_folder)\n #print sys.path", "def load_modules(bot, config):\n for item in MODULES:\n importlib.import_module(\"cogs.\" + item).setup(bot, config)", "def x_list():\n\t_loadconfig()", "def load_config_file_xtra(config_path: Path) -> Dict[str, Any]:\n def not_allowed(*arg, **kw) -> None:\n raise RuntimeError(\"Operation not allowed\")\n\n code = compile(\n config_path.read_text(),\n config_path.name,\n \"exec\")\n safe_builtins = cast(Dict[str, Any], __builtins__).copy()\n for name in (\"eval\", \"exec\", \"compile\", \"__import__\"):\n safe_builtins[name] = not_allowed\n globals = {\n \"__builtins__\": safe_builtins,\n \"Path\": Path,\n \"platform\": platform,\n \"environ\": os.environ.copy(),\n }\n locals: Dict[str, Any] = {}\n exec(code, globals, locals)\n return locals", "def loadConfigs(self):\n self.onLoadConfig(urlopen(self.inipath))", "def load_config(self):\n pass", "def modules_load(machine_config):\n\t#---modules in LOCAL configuration must be loaded before checking version\n\timport importlib\n\tif 'module_path' in machine_config: module_path = machine_config['module_path']\n\telse:\n\t\tmodule_parent = os.environ.get('MODULESHOME','/usr/share/Modules/default')\n\t\tmodule_path = os.path.join(module_parent,'init','python.py')\n\tincoming = {}\n\tif sys.version_info<(3,0): execfile(module_path,incoming)\n\telse: exec(open(module_path).read(),incoming)\n\t#---note that modules that rely on dynamically-linked C-code must use EnvironmentModules\n\tmodlist = machine_config['modules']\n\tif type(modlist)==str: modlist = modlist.split(',')\n\tfor mod in modlist:\n\t\t#---always unload gromacs to ensure correct version\n\t\tincoming['module']('unload','gromacs')\n\t\tprint('[STATUS] module load %s'%mod)\n\t\tincoming['module']('load',mod)", "def loadModule(*args, allModules: bool=True, load: AnyStr=\"\", scan: bool=True,\n **kwargs)->List[AnyStr]:\n pass", "def load_from_module_path(self, filename: str) -> None:\n # pylint: disable=import-outside-toplevel\n import importlib.util\n spec = importlib.util.spec_from_file_location(\"base_config\", filename)\n module = importlib.util.module_from_spec(spec)\n if spec.loader is not None:\n spec.loader.exec_module(module)\n else:\n raise Exception(\"Could not get module loader from spec\")\n self.load_from_module(module)", "def loadConfigModule(name, options, tags):\n if isinstance(name, str):\n LOG.info('Loading %s', name)\n d = {}\n module = __import__(name[:-3], d, d)\n else:\n module = reload(name)\n onload = module.__dict__.get('onload')\n if callable(onload):\n try:\n onload(options, tags)\n except:\n LOG.fatal('Exception while loading %s', name)\n raise\n return module", "def load_config():\n path = os.environ.get('WORKER_CONFIG')\n if not path:\n path = _get_default_config_path()\n\n mod_name, file_ext = os.path.splitext(os.path.split(path)[-1])\n config = imp.load_source(mod_name, path)\n return config", "def load():\n # get (or create) config path\n p = initialize()\n return load_config(open(p['config']))", "def load_config_file(config_path: Path) -> Dict[str, Any]:\n code = compile(\n config_path.read_text(),\n config_path.name,\n \"exec\")\n locals: Dict[str, Any] = {}\n exec(code, {\"__builtins__\": __builtins__}, locals)\n return locals", "def _load_extensions(path):\n extension_dir = os.environ.get(path, 
path)\n print(f\"looking for extensions in {extension_dir}\")\n if not os.path.isdir(extension_dir):\n print(f\"No such {extension_dir}\")\n return\n\n import sys \n import importlib\n\n sys.path.append(path)\n imports = [ filename \n for filename in os.listdir(path)\n if not filename.startswith('__') \n and not filename.startswith('.') \n ]\n for filename in imports:\n module_name, _ = os.path.splitext(filename)\n module = importlib.import_module(module_name)\n for attribute_name in dir(module):\n if attribute_name.startswith('__'):\n continue\n globals()[attribute_name] = getattr(module, attribute_name)", "def _load_defined_tasks():\n task_path = Path(__file__).parent.resolve() / \"nalu_tasks\"\n py_files = glob.glob(str(task_path / \"[a-z]*.py\"))\n modset = {Path(ff).stem for ff in py_files}\n for pymod in modset:\n importlib.import_module(\".%s\"%pymod, 'exawind.nalu.nalu_tasks')", "def safely_import_config():\n\n while True:\n try:\n from hind import config\n break\n except ImportError:\n print(\"Cannot import config.py. Waiting and retrying...\")\n time.sleep(2)", "def load_modules(self):\n module_dir = os.path.dirname(__file__)\n names = [os.path.splitext(i) for i in os.listdir(module_dir)\n if os.path.isfile(os.path.join(module_dir, i))]\n # FIXME: sort 'plain' to start of list for devel.\n names.sort(key=lambda x: (not x[0].startswith('plain'), x[0]))\n modules = []\n for name in [i[0] for i in names if i[1].lower() == '.py']:\n try:\n modules.append(import_module('leo.plugins.editpane.' + name))\n DBG(f\"Loaded module: {name}\")\n except ImportError as e:\n DBG(\n f\"{e.__class__.__name__}: \"\n f\"Module not loaded (unmet dependencies?): {name}\")\n for module in modules:\n for key in dir(module):\n value = getattr(module, key)\n if hasattr(value, 'lep_type') and value not in self.widget_classes:\n if module not in self.modules:\n self.modules.append(module)\n self.widget_classes.append(value)\n self.widget_for[value.lep_type].append(value)", "def load_conf_modules():\n for modname in _list_module_names():\n mod = importutils.import_module('monasca_api.conf.' + modname)\n required_funcs = ['register_opts', 'list_opts']\n for func in required_funcs:\n if hasattr(mod, func):\n yield mod", "def get_trojan_config():\n global configured\n config_json = get_file_contents(trojan_config)\n conf = json.loads(base64.b64decode(config_json))\n configured = True\n for tsk in conf:\n if tsk['module'] not in sys.modules:\n exec (\"import %s\" % tsk['module'])\n return conf", "def consider_env(self): \n for spec in self._envlist(\"PYLIB\"):\n self.import_module(spec)", "def load_cfg(fname):\n if '/' not in fname:\n fname = __cwd__ + '/' + fname\n try:\n if __version__ >= 3.5:\n import importlib.util\n spec = importlib.util.spec_from_file_location(\"config\", fname)\n cf = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(cf)\n elif __version__ >= 3.3:\n from importlib.machinery import SourceFileLoader\n cf = SourceFileLoader(\"config\", fname).load_module()\n elif __version__ <= 3.0:\n import imp\n cf = imp.load_source('config', fname)\n except Exception:\n print(f'{FAIL}Failed.{_RST_}Cannot find file <{HEADER}{fname}{_RST_}> or the fallback config.py>') # noqa\n print(f'Or invalid line found in file. 
Try using import <{HEADER}{fname[:-3]}{_RST_}> yourself') # noqa\n exit(1)\n return cf", "def setup_config():\n global config\n config = modConfig.Config(cmdline.config)", "def load_sources(self):\n self.pymodule = imp.load_source(self.name, self.path)", "def load_Xlib_display(finder, module):\n finder.IncludeModule(\"Xlib.ext.xtest\")\n finder.IncludeModule(\"Xlib.ext.shape\")\n finder.IncludeModule(\"Xlib.ext.xinerama\")\n finder.IncludeModule(\"Xlib.ext.record\")\n finder.IncludeModule(\"Xlib.ext.composite\")\n finder.IncludeModule(\"Xlib.ext.randr\")", "def setup_confighelper(self):\n self.cfghelper = cfgmodule.MCfgModule()\n self.cfghelper.load_configfiles(self.configname, self.get_pkgdirimp_config())", "def load_module(name):\n return __import__(\"metaswitch.%s\" % name,\n fromlist=[\"ROUTES\"])", "def load_extensions(self):\n extension_module_name = f\"{utils.get_project_name()}.cogs\"\n for extension in CONF.LOADED_EXTENSIONS:\n try:\n self.load_extension(extension_module_name + \".\" + extension)\n LOG.debug(f\"The extension '{extension.split('.')[0]}' has been successfully loaded\")\n except Exception as e:\n message = f\"Failed to load extension '{extension.split('.')[0]}'\"\n LOG.exception(log.get_log_exception_message(message, e))", "def _load_config():\n\tcfg = configparser.ConfigParser()\n\tcfg.read(join(get_current_path(), 'ib.config'))\n\treturn cfg", "def load_reportlab(finder, module):\n finder.IncludeModule(\"reportlab.rl_settings\")", "def load_extensions(self, config):\n loaded_extensions = []\n for extension in self.extensions:\n load_func = getattr(extension, \"load\")\n loaded_extension = load_func(config)\n if loaded_extension:\n loaded_extensions.append(loaded_extension)\n return loaded_extensions" ]
[ "0.6194917", "0.59518373", "0.585345", "0.5834126", "0.5826517", "0.57478696", "0.5743501", "0.57053727", "0.5685117", "0.5639987", "0.56109077", "0.5590987", "0.550079", "0.5492593", "0.54770744", "0.54672354", "0.5466415", "0.54228854", "0.54109836", "0.5384434", "0.5383074", "0.5349694", "0.53315496", "0.5327057", "0.5310783", "0.52988636", "0.52865475", "0.52792674", "0.52701706", "0.5252694" ]
0.70186377
0
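A minimal usage sketch for the entry above (the config path is an assumption; a PyXLL config normally carries a `[PYTHON]` section with a `pythonpath` setting):

import sys
load_modules_from_config(r"C:\pyxll\pyxll.cfg")  # hypothetical path
print(sys.path[-3:])  # newly appended entries from the config's pythonpath setting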
Register a callback for comm_close. Will be called with the `data` of the close message. Call `on_close(None)` to disable an existing callback.
def on_close(self, callback):
    self._close_callback = callback
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_on_channel_close_callback(self):\n logger.info('Adding channel close callback')\n self._channel.add_on_close_callback(self.on_channel_closed)", "def add_on_channel_close_callback(self):\n self.logger.info('Adding channel close callback')\n self._channel.add_on_close_callback(self.on_channel_closed)", "def handle_close(self, msg):\n self.log.debug(\"handle_close[%s](%s)\", self.comm_id, msg)\n if self._close_callback:\n self._close_callback(msg)", "def set_close_callback( callback ):", "def register_on_close(self, action):\n self._action_on_close = action", "def register_closed_callback(self, func):\n self._closed_callback = func", "def set_close_callback(self, callback):\n self._close_callback = stack_context.wrap(callback)", "def async_on_close(self, func: CALLBACK_TYPE) -> None:\n self._on_close.append(func)", "def add_on_connection_close_callback(self):\n logger.info('Adding connection close callback')\n self._connection.add_on_close_callback(self.on_connection_closed)", "def add_on_connection_close_callback(self):\n self.logger.info('Adding connection close callback')\n self._connection.add_on_close_callback(self.on_connection_closed)", "def on_close(self):\n # remove callback so it doesn't get called in the future\n self._notifier.remove_callback(self.process_notification)", "def on_before_close(self, onbefore_close_callback, app=None):\n if app is not None:\n self._app = app\n self._onbefore_close_callback = onbefore_close_callback", "def close(self, data=None, metadata=None, buffers=None):\n if self._closed:\n # only close once\n return\n self._closed = True\n # nothing to send if we have no kernel\n # can be None during interpreter cleanup\n if not self.kernel:\n return\n self._publish_msg(\n 'comm_close', data=data, metadata=metadata, buffers=buffers,\n )\n self.kernel.comm_manager.unregister_comm(self)", "def _register_comm(self, comm):\n def handle_msg(msg):\n \"\"\"Handle a comm_msg message\"\"\"\n if comm._msg_callback:\n comm._msg_callback(msg)\n comm.handle_msg = handle_msg\n super(FrontendComm, self)._register_comm(comm)", "def close(self):\n if self.is_connected:\n def _wrapper_callback(proto):\n proto.send_close()\n return proto\n\n self.factory.on_ready(_wrapper_callback)", "def listen_del(self, f):\n self._coms.register_leaving_callback(f)", "def on_msg(self, callback, remove=False):\n self._msg_callbacks.register_callback(callback, remove=remove)", "def on_data(self, callback, remove=False):\n self._data_handlers.register_callback(callback, remove=remove)", "def onClose(self, event):\n pass", "def onClose(self, event):\n pass", "def onClose(self, event):\n pass", "def onClose (self):\n \n pass", "def on_del(self, callback):\n self._del_callback = callback if callable(callback) else _void", "def on_del(self, callback):\n self._del_callback = callback if callable(callback) else _void", "def sendClose(self, code=None, reason=None):", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\r\n my_msg.set(\"{quit}\")\r\n send()" ]
[ "0.66941077", "0.6671357", "0.65705276", "0.65651107", "0.6093962", "0.6089108", "0.60651124", "0.6064921", "0.6051649", "0.59976", "0.59700936", "0.5883123", "0.58765686", "0.57918346", "0.5536476", "0.5441828", "0.54223126", "0.5419279", "0.5379486", "0.5379486", "0.5379486", "0.53603154", "0.5319054", "0.5319054", "0.52924883", "0.5290534", "0.5290534", "0.5290534", "0.5290534", "0.52739275" ]
0.6788618
0
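A small sketch of how the registration above is typically used (the `comm` object here is a hypothetical instance of the class that defines `on_close`):

def my_close_handler(msg):
    print("comm closed with data:", msg)

comm.on_close(my_close_handler)  # handler receives the comm_close message
comm.on_close(None)              # passing None disables the callback again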
Handle a comm_close message
def handle_close(self, msg):
    self.log.debug("handle_close[%s](%s)", self.comm_id, msg)
    if self._close_callback:
        self._close_callback(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_close(self):\n print(self.addr, \"bye\")\n self.close()", "def msg_close(version = NATIVE_HEADER_VERSION, order=\"<\"):\n return message_no_reply(CLOSE, \"\", \"\", version, order)", "def on_closing(event=None):\r\n my_msg.set(\"{quit}\")\r\n send()", "def sendClose(self, code=None, reason=None):", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def close(self):\n\n self.shm_command.write({'cmd': 'close', 'data': {}})\n time.sleep(0.2)", "def close(self, comm_id=None):\n with self.comm_lock:\n return super(FrontendComm, self).close(comm_id)", "def handleClose(self):\n logging.info(\"%s %s\", self.address, \"closed\")\n self.logbook.clients_disconnected_count += 1", "def handle_close(self):\n self.cmd_channel.debug(\"ActiveDTP.handle_close()\")\n self.close()", "def process_IN_CLOSE_WRITE(self, event):", "def notify_close(handle):\n return _u2i(_pigpio_command(_control, _PI_CMD_NC, handle, 0))", "def close(self):\n self.control_conn.sendall('CLOSE'.encode())", "def close(self, data=None, metadata=None, buffers=None):\n if self._closed:\n # only close once\n return\n self._closed = True\n # nothing to send if we have no kernel\n # can be None during interpreter cleanup\n if not self.kernel:\n return\n self._publish_msg(\n 'comm_close', data=data, metadata=metadata, buffers=buffers,\n )\n self.kernel.comm_manager.unregister_comm(self)", "def close_driver( self ):\r\n self.com_driver.close()\r\n self.gui.set_open( \"Closed\" )\r\n msg = \"Closed Comm Port\"\r\n self.gui.print_info_string( msg )\r\n return", "def handle_close(self):\n self.cmd_channel.debug(\"DTPHandler.handle_close()\")\n tot_bytes = self.get_transmitted_bytes()\n # If we used channel for receiving we assume that transfer is\n # finished when client close connection , if we used channel\n # for sending we have to check that all data has been sent\n # (responding with 226) or not (responding with 426).\n if self.receive:\n self.transfer_finished = True\n if self.transfer_finished:\n self.cmd_channel.respond(\"226 Transfer complete.\")\n self.cmd_channel.log(\"Transfer complete; \"\n \"%d bytes transmitted.\" %tot_bytes)\n else:\n self.cmd_channel.respond(\"426 Connection closed; transfer aborted.\")\n self.cmd_channel.log(\"Transfer aborted; \"\n \"%d bytes transmitted.\" %tot_bytes)\n self.close()", "def handle_close(self):\n self.cmd_channel.debug(\"PassiveDTP.handle_close()\")\n self.close()", "def _on_close(self):\n self.shell_obj.closed()", "def _close(self):\n log.Debug('dpbx.close():')", "def handle_close(self):\n self.active = False\n self.close()", "def handle_pipe_closed(self, channel: Channel, session: FBDPSession, msg: FBDPMessage, # pylint: disable=W0613\n exc: Exception=None) -> None:\n # FDBP converts exceptions raised in our event handler to CLOSE messages, so\n # here is the central place to handle errors in data pipe processing.\n # Note problem in service execution outcome\n if exc is not None:\n self.outcome = Outcome.ERROR\n self.details = exc\n #\n if self.stop_on_close:\n self.stop.set()", "def handle_close(self):\r\n if self.log_fh:\r\n self.log_fh.write(\r\n \"Server closed connection at %s. 
Shutting down.\\n\" %\r\n time())\r\n self.close()", "def _close(self):\n\n #to kill children loop over native children list\n #DO NOT create a custom process list\n #since terminated processes don't get correctly garbage collected\n for lw in multiprocessing.active_children():\n if type(lw) == loop_worker.LoopWorker:\n lw.kill()\n\n logging.info(\"Closing Agent\")\n closemsg = {'process': {\n 'name': multiprocessing.current_process().name, \n 'status': 'closed'}}\n \n self.msg_queue.put(dict(to=settings.IOTTLY_XMPP_SERVER_USER,msg='/json ' + json.dumps(closemsg)))\n self.msg_queue.put(None)\n if self.broker_process:\n self.broker_process.join()\n logging.info(\"Agent closed\")", "def on_cmd_close(self, session, _cmd_list):\n self.reply_text(session, \"closing this (%s) session\" % str(self))\n session.close()\n return False", "def send_close_request(self):\n\tdata = struct.pack(\"!4I\", 0b1101, 0b1001, self.epoch_no, self.handle_no)\n\tself.client_socket.sendto(data, self.address)\n\tself.client_socket.close()\t\n return", "def close(self, code=3000, message='Go away!'):\n if self.state != CLOSED:\n # Notify handler\n if self.handler is not None:\n self.handler.send_pack(proto.disconnect(code, message))\n\n super(Session, self).close(code, message)", "def close(self):\r\n self._sendLock.acquire()\r\n try:\r\n self._queue.put(\"CLOSE\")\r\n self._eventQueue.put((time.time(), \"CLOSE\"))\r\n self._closed = 1\r\n self._s.close()\r\n self._thread.join()\r\n self._eventThread.join()\r\n finally:\r\n self._sendLock.release()", "def bfm_close ( cid=0\n , rigor=False\n , verbose=False ):\n global _cosim\n _bfm_close = WrapFunction( _cosim\n , 'bfm_close'\n , ctypes.c_int\n ,[ctypes.c_int])\n return _bfm_close( cid )" ]
[ "0.6898485", "0.68468136", "0.6777746", "0.6761843", "0.6737819", "0.6737819", "0.6737819", "0.6737819", "0.66932285", "0.66553104", "0.66369873", "0.66330224", "0.6602895", "0.65806466", "0.65038866", "0.64970094", "0.6462982", "0.64401937", "0.6372569", "0.6240193", "0.6228597", "0.6224669", "0.61901915", "0.6176963", "0.6174015", "0.6166589", "0.612406", "0.6120498", "0.6062384", "0.60591865" ]
0.7536824
0
Check whether an element exists in a list
def exist(self, list, a):
    i = 0
    for elem in list:
        if (elem == a):
            i = i + 1
    if (i > 0):
        return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_list_exists(this_list=[]):\n if isinstance(this_list, list) and len(this_list) > 0:\n return True\n else:\n return False", "def _listContains(self, l, entry):\n for i in range(0, len(l)):\n if l[i] == entry:\n return True\n return False", "def contains(list, e):\r\n for elem in list:\r\n if elem == e:\r\n return True\r\n return False", "def __contains__(self, elem):\n return elem in list(self)", "def is_element_of(node, lst):\n for k in lst:\n if node is k:\n return True\n return False", "def checkIfInList(_addr, _list):\n for item in _list:\n if (_addr.this == item.this):\n return True\n \n return False", "def __contains__(self, item: T) -> bool:\n for list_item in self:\n if list_item == item:\n return True\n\n return False", "def list_should_contain_value(self,list_,value,msg=None):\r\n\r\n default =\"%s contains value '%s'\" %(seq2str(list_),value)\r\n _verify_condition(vlaue not in list_,default,msg)", "def has_tag(lst, tag):\n if not isinstance(lst, list):\n lst = [lst]\n for l in lst:\n if l.tag == tag:\n return True\n else:\n return False", "def sublist_in(lst, sublst):\n for i in sublst:\n if i not in lst:\n return False\n return True", "def __contains__(self, item):\n cur_node = self.head\n while cur_node is not None:\n if item in cur_node.data_list:\n return True\n else:\n cur_node = cur_node.next_node\n\n return False", "def exista(self, item):\n if item not in self._items:\n return False\n for x in self._items:\n if x == item:\n return True", "def onlist(listtocheck, item):\n\n # Return the result\n return item in listtocheck", "def is_in_list(item, list_, kind):\n if item not in list_:\n raise KeyError(f'Specify {kind} from {list_}: got {item}')\n return True", "def Has(cls, word_list):\n entity = WordList.get_by_id(word_list)\n if entity:\n return True\n return False", "def id_in_list(obj_list, sb_object):\n if __debug__:\n print(\"Checking if sb_object in list...\")\n for sb_objects in obj_list:\n if sb_object.ID == sb_objects.ID:\n if __debug__:\n print(\"Object in list.\")\n return True\n if __debug__:\n print(\"Object not in list\")\n return False", "def contains(self, element):\n pass", "def has_duplicates(L):\r\n unique = []\r\n for e in L:\r\n if e in unique:\r\n return True\r\n unique.append(e)\r\n return False", "def check_for_list(check):", "def exists(self):\n\n return self.ids[-1] is not None", "def check_if_duplicates_list(list_of_elems):\n set_of_elems = set()\n for elem in list_of_elems:\n if elem in set_of_elems:\n return True\n else:\n set_of_elems.add(elem)\n return False", "def find(ss, list_seq):\n\tfor item in list_seq:\n\t\tif item in ss:\n\t\t\treturn True\n\treturn False", "def index_already_there(index, given_list):\n\n # check if ID already exists\n already_there = False\n if len(given_list)>0:\n for item in given_list:\n if isinstance(item, AutoBaseObject):\n if item.ID == index:\n already_there = True\n break\n else:\n print(\"Issue with list: item is not AutoBaseObject\")\n print(\" index=\\n\",index)\n sys.exit()\n return already_there", "def contains(self, element) -> bool:\n\n return self.__find_node(element) is not None", "def list_check(lst):\n for item in lst:\n if type(item) != list:\n return False\n return True", "def __contains__(self, item):\n # type(Any) -> bool\n return list.__contains__(self, self.ref(item))", "def IsEveryNodeInTheList(self, list_to_check):\n for node in self.nodes:\n if node.index not in list_to_check:\n return False\n return True", "def check(indivs, geno_list):\r\n\tfor i in 
xrange(0,len(indivs)):\r\n\t\tif indivs[i] not in geno_list:\r\n\t\t\t# print \"this is not in: \"+ indivs[i]\r\n\t\t\treturn False\r\n\treturn True", "def is_entry_in_list(entry, a_list):\n for item in a_list:\n if entry['description'] == item['description']:\n return True\n return False", "def index_is_in_list(the_list, index):\n return bool(0 <= index < len(the_list))" ]
[ "0.73109335", "0.71970487", "0.7061737", "0.7013981", "0.67763895", "0.66172284", "0.65882987", "0.6577764", "0.6576582", "0.65707767", "0.6550222", "0.65278035", "0.6522197", "0.65178317", "0.6487218", "0.6449266", "0.64304125", "0.63753456", "0.6375052", "0.63368744", "0.63216764", "0.63171345", "0.63162154", "0.6293845", "0.62910026", "0.62643784", "0.625847", "0.62458956", "0.6243092", "0.62176573" ]
0.7795414
0
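A quick demonstration of the `exist` method above; attaching it to a throwaway class is purely for illustration, and its behaviour matches Python's built-in membership test (`a in list`):

class Demo:
    exist = exist  # reuse the function above as a method (illustration only)

d = Demo()
print(d.exist([1, 2, 3], 2))  # True
print(d.exist([1, 2, 3], 9))  # False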
Helper function to split a comma-delimited string of filter items
def split_cmdline_filter_items(string):
    filter_items = string.split(',')
    return filter_items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def comma_filter(value):\n return '{:,}'.format(value)", "def comma_code(items):\n item_len = len(items)\n \n if item_len == 0:\n return ''\n elif item_len == 1:\n return items[0]\n\n return ', '.join(items[:-1]) + ', and ' + items[-1]", "def sanitize_metrics_filter(metric_filter):\n if metric_filter is None:\n return []\n return metric_filter.replace(\" \", \"\").strip(\",\").split(\",\")", "def clean(item: list) -> list:\n item = [x.replace(\"'\", \"\")\n .replace('\"', '')\n .replace('[', '')\n .replace(']', '')\n .split(',') for x in item]\n\n return item", "def test_get_items_from_string() -> None:\n assert [\"i\", \"p\"] == common_util.get_items_from_string(\"i, ,p\")\n assert [\"i\", \"p\"] == common_util.get_items_from_string(\"i- -p\", separator=\"-\")\n assert [\"i\", \" \", \" p\"] == common_util.get_items_from_string(\"i, , p\", remove_blanks=False)\n assert [\"i\", \"p\"] == common_util.get_items_from_string(\"i, , p\")\n assert [] == common_util.get_items_from_string(\"\")", "def stringInputToList(x):\n return list(filter(None, [y.strip() for y in x.split(',')]))", "def listify(item, do_strip=False):\n if not item:\n return []\n elif isinstance(item, list):\n return item\n elif isinstance(item, string_types) and item.count(','):\n if do_strip:\n return [token.strip() for token in item.split(',')]\n else:\n return item.split(',')\n else:\n return [item]", "def split_by_comma(s):\n return s.strip().split(\",\")", "def spliceout(seq, s):\n if isinstance(s, str):\n predicate = (lambda x: s == x)\n else:\n predicate = s\n return \", \".join([elt.strip() for elt in seq.split(\",\") if not predicate(elt.strip())])", "def type_filter(self, items, types=None):", "def pure_list(comma_list):\n pure_items = []\n for comma_item in comma_list:\n for item in comma_item.split(','):\n pure_items.append(item)\n return pure_items", "def separate_comma(s):\n return s.split(',')", "def _clean_string(self, string):\n if string is None:\n return []\n str_list = string.strip().split(\",\")\n return [each.strip() for each in str_list]", "def itemFilterType(*args, text: Union[AnyStr, bool]=\"\", type: bool=True, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def uniqueCheckString(aList):\r\n check = ','\r\n for v in aList:\r\n if f',{v},' in check:\r\n return True\r\n check = f'{check}{v},'\r\n return False", "def split_name_values(param_items):\n return_list = list()\n for single_item in param_items:\n temp_list = [single_item[1]]\n temp_list.extend(clear_useless_end(single_item[2]).split(\",\"))\n return_list.append(temp_list)\n\n return return_list", "def clean_name(name):\n return \",\".join(name.split(\",\")[:1])", "def clean_name(name):\n return \",\".join(name.split(\",\")[:1])", "def valid_value(self, value):\n for val in value.split(','):\n valid = super(MultiSelectField, self).valid_value(val)\n if not valid:\n return False\n return True", "def list_process(field, item_list:List[str]):\n # if isinstance(item_list, list):\n if len(item_list) == 0:\n return {\n\n }\n saved_list = []\n\n for i in item_list:\n saved_list.append(f\"{i}\")\n return {\n field: \",\".join(saved_list)\n }", "def split_choices(choices_string):\n return [x.strip() for x in choices_string.split(\",\") if x.strip()]", "def process_list_arg(arg):\n if isinstance(arg, list):\n return arg\n elif isinstance(arg, basestring):\n args = []\n for part in arg.split(\",\"):\n args.append(part.strip())\n return args", "def _parse_filter_list(filter: Optional[List]) -> Any:\n ret = None\n if 
filter:\n ret = set(filter)\n return ret", "def str2set(string_field, separator=','):\n\n def wrapper(self):\n \"\"\"Decorator wrapper method.\n \"\"\"\n cleaned_data = self.cleaned_data\n\n string_data = cleaned_data.get(string_field)\n\n list_data = []\n for string in string_data.split(separator):\n string_strip = string.strip()\n if string_strip and string_strip not in list_data:\n list_data.append(string_strip)\n\n return list_data\n\n return wrapper", "def parse_filter_unlabelled(task_output):\n task_list = []\n for item in task_output:\n item = item.split(\",\")\n if len(item) == 3:\n if check_status(item):\n task_list.append(item)\n\n return task_list", "def comma_separated_validator(**kwargs):\n for name, param in kwargs.items():\n if param is not None:\n try:\n param.split(',')\n except AttributeError:\n raise PyYouTubeException(ErrorMessage(\n status_code=ErrorCode.INVALID_PARAMS,\n message=f'Parameter {name} must be str or comma-separated list str'\n ))", "def clean_tag(elmt_with_commas, max_lenght):\r\n elmt_list = elmt_with_commas.split(\",\")\r\n elmt_list = [e.strip() for e in elmt_list if len(e) < max_lenght]\r\n return elmt_list", "def commaSplitter(str):\n # leave this here as a reminder of what I should do to make the argument parsing more robust\n\n # if sqrt != int(sqrt):\n # msg = \"%r is not a perfect square\" % string\n # raise argparse.ArgumentTypeError(msg)\n # return value\n return str.split(',')", "def filt(seq, lst):\n regex = \"(\" + \")|(\".join(seq) + \")\"\n regex = re.compile(regex)\n slst = list(filter(regex.search, lst))\n return slst\n\n\n # still need a checkUsername function ", "def stringfilter(items, queries, attribute=None):\n result = []\n if attribute is not None:\n key_path = attribute.split('.')\n else:\n key_path = None\n\n for item in items:\n if key_path is not None:\n string = _get_nested_value(item, key_path)\n if not isinstance(string, str):\n raise errors.AnsibleFilterError(\n f\"stringfilter: value under '{attribute}' in '{pformat(item)}' is not string: {pformat(string)}\"\n )\n else:\n if not isinstance(item, str):\n raise errors.AnsibleFilterError(f\"stringfilter: list item is not string: {pformat(item)}\")\n string = item\n\n for query in queries:\n if isinstance(query, str):\n if query == string:\n result.append(item)\n break\n elif isinstance(query, dict) and query.get('regex'):\n if re.search(query['regex'], string):\n result.append(item)\n break\n else:\n raise errors.AnsibleFilterError(\n f\"stringfilter: unrecognized query: {pformat(query)}\"\n )\n return result" ]
[ "0.6523944", "0.6259372", "0.61320263", "0.6083365", "0.6035963", "0.59400517", "0.58690673", "0.5866547", "0.5862891", "0.5861083", "0.5809238", "0.57314897", "0.5680077", "0.56491226", "0.56409234", "0.5610342", "0.560232", "0.560232", "0.5599939", "0.557739", "0.5575543", "0.55729735", "0.55665547", "0.55262196", "0.5514646", "0.55118996", "0.5483438", "0.5472759", "0.54589313", "0.5455292" ]
0.71210736
0
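Example output for the helper above, as it would be used for an argparse `type=` converter (the input string is hypothetical):

print(split_cmdline_filter_items("flour,brown sugar,salt"))
# ['flour', 'brown sugar', 'salt']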
Method to parse command-line arguments for serving size and filter items
def parse_arguments():
    global parser
    parser = argparse.ArgumentParser(
        description='Certainly this isn\'t how Food Network does it',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent('''
            Recipe List must appear as follows. **
            =======
            recipe_name
            serving_size
            ingredient 0
            ingredient 1
            ingredient 2
            ...
            ...
            ...
            ingredient n
            '''))
    parser.add_argument('input_file',
                        help="An input text file to read in recipes from. "
                             "Must adhere to a certain structure.**")
    parser.add_argument('out_file',
                        help="File to write json recipe data to.")
    parser.add_argument('-s', '--serving-size', type=str,
                        help='The number of servings you\'d like to make.',
                        dest='serving_size', default=4)
    parser.add_argument('-f', '--filter-items', type=split_cmdline_filter_items,
                        dest='filter_items',
                        help='A comma delimited string of ingredients to filter recipes by. '
                             'Multi-word ingredients must be quoted.')
    global args
    args = parser.parse_args()
    global serving_size_override
    serving_size_override = args.serving_size
    global filter_ingredients
    filter_ingredients = args.filter_items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cmd_size(args):", "def parse_args():\n\tglobal id_width\n\n\t# Parsing the args\n\targs = parser.parse_args()\n\n\t# Retrieving the args\n\tid_width = args.id_width", "def parse_args():\n\n parser = argparse.ArgumentParser(description=\"Benchmark Thing WoT server\")\n parser = utils.extend_server_arg_parser(parser)\n\n return parser.parse_args()", "def parse_args():\n\n parser = argparse.ArgumentParser(description='Disk metric sender')\n parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')\n parser.add_argument('--debug', action='store_true', default=None, help='Debug?')\n\n return parser.parse_args()", "def parse_args():\r\n desc = \"Check for the longest running requests in bookie\"\r\n parser = argparse.ArgumentParser(description=desc)\r\n\r\n parser.add_argument('-l', '--log', dest='log',\r\n action='store',\r\n default=None,\r\n required=True,\r\n help=\"log file we're reading requests from\")\r\n\r\n parser.add_argument('-n', '--number', dest='count',\r\n action='store',\r\n default=10,\r\n type=int,\r\n required=False,\r\n help=\"how many urls do we wish to see, default 10\")\r\n\r\n\r\n args = parser.parse_args()\r\n return args", "def parse_sizespec(self):\n\tline_splitted = self.line.split()\n\n\tfor argument_unit in line_splitted:\n\t # words[0] is the identifier of the argument\n\t # and words[1] is the argument value.\n\t words = self.split_argument_unit(argument_unit)\n\t if words[0] == 'N':\n\t\tself.node_number = int(words[1])\n\t elif words[0] == 'L':\n\t\tself.link_number = int(words[1])\n\t # if we do not recognize the argument an exception is raised\n\t else:\n\t raise ArgumentNotFoundError(found = words[0])", "def parse_args():\n parser = argparse.ArgumentParser(\n description='Farm')\n parser.add_argument(\n '-s', '--source',\n help='Harvest source (AMQP host such as amqp://guest:guest@localhost:5672)',\n required=True)\n parser.add_argument(\n '-q', '--queue',\n help='Queue name to harvest from',\n required=True)\n parser.add_argument(\n '-a', '--add',\n help='Harvester instance (file)',\n required=True,\n type=argparse.FileType('rb'))\n return vars(parser.parse_args())", "def parse_arguments(self):\n \n for arg in sys.argv[1:]:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n raise ProcessorError(\"Illegal argument '%s'\" % arg)\n self.update_data(key, value)", "def parse_args():\n\tparser = argparse.ArgumentParser(description='Show video statistics.')\n\tparser.add_argument('--sort', metavar='FIELD', choices=['views', 'likes', 'dislikes'],\n\t default='views',\n\t help='sort by the specified field. 
Options are views, likes and dislikes.')\n\tparser.add_argument('--max', metavar='MAX', type=int, help='show the top MAX entries only.')\n\tparser.add_argument('--csv', action='store_true', default=False,\n\t help='output the data in CSV format.')\n\tparser.add_argument('--table', action='store_true', default=False,\n\t help='output the data in an ascii table.')\n\tparser.add_argument('--workers', type=int, default=8,\n\t help='number of workers to use, 8 by default.')\n\treturn parser.parse_args()", "def parse_command_line():\r\n\r\n parser = argparse.ArgumentParser(description='User args')\r\n parser.add_argument(\"--action\", choices=['train', 'predict', 'demo', 'test'], required=True, help=\"Choose action.\")\r\n parser.add_argument(\"--model\", choices=['vgg', 'unet', 'fpn'], required=True, help=\"Choose model.\")\r\n parser.add_argument(\"--dataset\", choices=['full', 'small'], required=True, help=\"Choose dataset.\")\r\n\r\n return parser.parse_args()", "def parse_command_line():\n parser = argparse.ArgumentParser(description='Parses ID\\'s from the DDI compendium search results, and then downloads the html and puts them into a sqlite database.')\n parser.add_argument('-f', '--file', dest='file',\n action='store',\n help='Filenname to be read')\n arg_manager = parser.parse_args()\n return arg_manager", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"img\", type=argparse.FileType(\"r\"),\n help=\"The image file to test\")\n\n encoders = [\"sse2\", \"sse4.1\", \"avx2\"]\n parser.add_argument(\"--encoder\", dest=\"encoder\", default=\"avx2\",\n choices=encoders, help=\"select encoder variant\")\n\n testquant = [str(x) for x in range (0, 101, 10)]\n testqual = [\"-fastest\", \"-fast\", \"-medium\", \"-thorough\", \"-exhaustive\"]\n qualities = testqual + testquant\n parser.add_argument(\"--test-quality\", dest=\"quality\", default=\"medium\",\n choices=qualities, help=\"select compression quality\")\n\n parser.add_argument(\"--no-startup\", dest=\"noStartup\", default=False,\n action=\"store_true\", help=\"Exclude init\")\n\n args = parser.parse_args()\n\n return args", "def parse_command_line(argv):\n import argparse\n parser = argparse.ArgumentParser(\n description=\"\"\"\\\nShow information about a list of scales in Neuroglancer \"info\" JSON file format\n\"\"\")\n parser.add_argument(\"info_file\", nargs=\"?\", default=\"./info\",\n help=\"JSON file containing the information\")\n args = parser.parse_args(argv[1:])\n return args", "def parse_arguments(args):", "def parse_commandline():\n parser = optparse.OptionParser()\n\n parser.add_option(\"-w\", \"--wavelength\", default=600, type=int)\n parser.add_option(\"-f\", \"--filter\", default=1, type=int)\n parser.add_option(\"-g\", \"--grating\", default=3, type=int)\n parser.add_option(\"--doMonoWavelength\", action=\"store_true\", default=False)\n parser.add_option(\"--doMonoFilter\", action=\"store_true\", default=False)\n parser.add_option(\"--doMonoGrating\", action=\"store_true\", default=False)\n parser.add_option(\"--doGetMono\", action=\"store_true\", default=False)\n parser.add_option(\"-v\", \"--verbose\", action=\"store_true\", default=False)\n\n opts, args = parser.parse_args()\n\n return opts", "def parse_args():\n\n parser = argparse.ArgumentParser(\n description=('Print a summarized status of all non-optimal components '\n 'of all detected MegaRAID controllers'))\n parser.add_argument(\n '-c', dest='compress', action='store_true',\n help='Compress with zlib the summary to 
overcome NRPE output limits.')\n parser.add_argument(\n '-a', dest='all', action='store_true',\n help='Include all components in the summary.')\n\n return parser.parse_args()", "def parseArgs():\n\t\n\tparser = argparse.ArgumentParser(\n\t\tprog='device_sniffer',\n\t\tdescription='spot nearby devices using a wireless sniffer.',\n\t\tusage='%(prog)s <mode> [options]')\n\t\n\tparser.add_argument('--interface','-i', help=\"interface to scan on.\", dest=\"inf\", type=str)\n\tparser.add_argument('--verbse','-v', help=\"show extra output.\", dest=\"verbosity\", action=\"count\")\n\tparser.add_argument('types', help=\"type of devices to scan for.\", choices=['connected', 'scanning','both'], default='both', nargs='?')\n\targs = parser.parse_args()\n\treturn args", "def parse_command_line():\n parser = argparse.ArgumentParser(\"Falcon Quick Scan\")\n parser.add_argument(\"-f\", \"--config\",\n dest=\"config_file\",\n help=\"Path to the configuration file\",\n required=False\n )\n parser.add_argument(\"-l\", \"--log-level\",\n dest=\"log_level\",\n help=\"Default log level (DEBUG, WARN, INFO, ERROR)\",\n required=False\n )\n parser.add_argument(\"-d\", \"--check-delay\",\n dest=\"check_delay\",\n help=\"Delay between checks for scan results\",\n required=False\n )\n parser.add_argument(\"-p\", \"--pattern\",\n dest=\"pattern\",\n help=\"Target file patterns to scan (defaults to *.*)\",\n required=False\n )\n parser.add_argument(\"-r\", \"--region\",\n dest=\"region\",\n help=\"Region the target bucket resides in\",\n required=False\n )\n parser.add_argument(\"-t\", \"--target\",\n dest=\"target\",\n help=\"Target folder or bucket to scan. Bucket must have 's3://' prefix.\",\n required=True\n )\n\n return parser.parse_args()", "def handleCmdLine(self):\n description = \"Nagios monitoring script to check for open ports\\n\"\n usage = (\"%prog <options>\\n\")\n parser = OptionParser(usage=usage, description=description)\n\n parser.add_option(\"-c\", \"--config\",\n type=\"string\",\n help=\"path to open ports configuration file\")\n parser.add_option(\"-l\", \"--list\",\n type=\"string\",\n help=\"supply list of allowed ports seperated by comma.\")\n\n (self.options, args) = parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(description='Scraper')\n parser.add_argument('--prefix', help='Prefix for saving files', default=\"\")\n parser.add_argument('--path', help='Dir path', default=\"\")\n parser.add_argument('--urls_path', help='Url path', default=False)\n parser.add_argument('--url', help='Url', default=False)\n parser.add_argument('--disney', dest='disney', action='store_true', help=\"Choose all disney movies\")\n parser.add_argument('--ngram', help='Max ngram', default=2)\n\n args = parser.parse_args()\n return args", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n help_str = \\\n 'The collection folder to sort files into. ' \\\n 'If the folder does not exist, it will be created along with the ' \\\n 'necessary contents.'\n parser.add_argument('-c', '--collection', help=help_str)\n\n help_str = \\\n 'The source folder to import files from. 
Has to exist and ' \\\n 'has to be a folder.'\n parser.add_argument('-s', '--source', help=help_str, required=False)\n\n help_str = \\\n 'View the gallery in random order auto skpping after the' \\\n 'given amount of seconds'\n parser.add_argument('-v', '--view', help=help_str, required=False)\n\n return parser.parse_args()", "def parse_commandline():\n\n\tparser = optparse.OptionParser(usage = \"usage: %prog [options]\",\n\t\t\tdescription = \"This program takes a single 2D image and spits a number of images. Input and output images \"\n\t\t\t\"should be TGA.\")\n\n\tparser.add_option(\"-i\", \"--input\", dest = \"inp\", type = \"string\", help = \"specify the image to split.\")\n\n\tparser.add_option(\"-o\", \"--output\", dest = \"out\", type = \"string\", help = \"the directory of the output images.\")\n\n\tparser.add_option(\"-s\", \"--size\", dest = \"size\", type = \"string\", metavar=\"WxH\", help = \"size of the splits.\")\n\n\t# Add the default value on each option when printing help\n\tfor option in parser.option_list:\n\t\tif option.default != (\"NO\", \"DEFAULT\"):\n\t\t\toption.help += (\" \" if option.help else \"\") + \"[default: %default]\"\n\n\t(options, args) = parser.parse_args()\n\n\tif not options.inp or not options.out or not options.size:\n\t\tparser.error(\"argument is missing\")\n\n\t# Parse the --size\n\tsize = [0, 0]\n\ttry:\n\t\tsize_strs = options.size.split(\"x\")\n\t\tsize[0] = int(size_strs[0])\n\t\tsize[1] = int(size_strs[1])\n\texcept:\n\t\tparser.error(\"incorrect --size: %s\" % sys.exc_info()[0])\n\n\tconfig = Config()\n\tconfig.in_file = options.inp\n\tconfig.out_dir = options.out\n\tconfig.size = size\n\n\treturn config", "def _parse_command_line_arguments():\n parser = ArgumentParser(\n description=(\n 'Command-line tool to generate a list of unique from a TS file from FermiFAST'\n ),\n )\n parser.add_argument(\n 'ts-file',\n type=str,\n help=(\n 'A file containing the TS sky map'\n ),\n )\n parser.add_argument('--skiprows',\n type=int,\n help='number of rows to skip at the top (default 0)',\n required=False)\n parser.set_defaults(skiprows=0)\n arguments = vars(parser.parse_args())\n return arguments", "def parse_args():\n parser = MyParser(description='Data processing and analytics library \\\n for OpenStack Browbeat perf data')\n\n parser.add_argument('-s', '--summary', dest=\"days\", type=int, default=-1,\n help='-s N summary of last N days of results')\n\n parser.add_argument('--summary-uuid', dest=\"summary_uuid\", type=str,\n default=None,\n help='--summary-uuid UUID summary of a specific uuid')\n\n parser.add_argument('--short-summary', dest=\"short_days\", type=int,\n default=-1,\n help='--short-summary N gives \\\n summary of last N days of results but uses cockroach \\\n db so only provides with basic summary')\n\n parser.add_argument('--upload-timesummary', dest=\"timeseries_uuid\",\n type=str, default=None,\n help='--upload-timesummary UUID \\\n uploads the features computed from data obtained from\\\n graphite. ')\n\n parser.add_argument('--upload-logsummary', dest=\"loggin_uuid\",\n type=str, default=None,\n help='--upload-logsummary UUID \\\n uploads the log summary to crdb \\\n currently just summarizes over entire timeperiod. 
')\n\n parser.add_argument('-u', '--update-db', dest='update', type=bool,\n default=False,\n help='-u True pushes data to cockroach db')\n\n parser.add_argument('--update-clf', dest=\"clf_days\", type=int,\n default=-1,\n help='--update-clf 60 will update all classifiers \\\n listed in config file under classifier_lists \\\n using data from last 60 days')\n\n parser.add_argument('--test-clf', dest=\"test_days\", type=int,\n default=-1,\n help='--test-clf 60 will train all classifiers \\\n listed in config file under classifier_lists \\\n using data from last 60 days and then test it \\\n and display metrics')\n\n parser.add_argument('-v', '--osp-version', dest='version', type=str,\n default=None,\n help='-v 11-tripleo only returns hits for that \\\n OpenStack version, \\\n only supported by summary right now')\n\n parser.add_argument('-c', '--config', dest='config', type=str,\n default=pkg_resources.resource_filename('bml',\n \"config.yml\"),\n help='-c <config file path> use custom config file')\n\n args = parser.parse_args()\n return args", "def argParse():\n p = ap.ArgumentParser()\n p.add_argument('field',\n help='Name of field')\n p.add_argument('telescope',\n help='Name of telescope',\n choices=['io', 'callisto', 'europa',\n 'ganymede', 'artemis', 'saintex',\n 'nites', 'rcos20'])\n p.add_argument('filt',\n help='Name of filter')\n return p.parse_args()", "def _find_verify_arguments(filters):\n if (\"minsize\" in filters and \"maxsize\" in filters and\n filters[\"maxsize\"] < filters[\"minsize\"]):\n exit_with_error(\"Maximum size cannot be less than minimum size.\")\n if (\"size\" in filters and \"maxsize\" in filters and\n filters[\"maxsize\"] < filters[\"size\"]):\n exit_with_error(\"Maximum size cannot be less than (exact) size.\")\n if (\"size\" in filters and \"minsize\" in filters and\n filters[\"minsize\"] > filters[\"size\"]):\n exit_with_error(\"Minimum size cannot be more than (exact) size.\")", "def get_arguments():\n parser = argparse.ArgumentParser(description=\"Argument Parser\")\n parser.add_argument(\"--batch-size\", type=int, default=BATCH_SIZE,\n help=\"Number of images sent to the network in one step.\")\n print('parsing')\n return parser.parse_args()", "def _parse_arguments():\n import argparse\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument(\n 'list_of_files', type=str,\n help='Input ASCII file with a list of files to be downloaded')\n\n return parser.parse_args()", "def _parse_create_args(self, args):\r\n size = args['--size']\r\n location = args['--datacenter']\r\n return int(size), str(location)", "def parse_args() -> argparse.Namespace:\n def delimited_list(delimiter: str, items: Optional[int] = None):\n def _parser(arg: str):\n args = arg.split(delimiter)\n\n if items and len(args) != items:\n raise argparse.ArgumentTypeError(\n 'Argument must be a '\n f'{delimiter}-delimited list with {items} items: \"{arg}\"')\n\n return args\n\n return _parser\n\n parser = argparse.ArgumentParser(\n 'Generate a size report card for binaries')\n parser.add_argument('--bloaty-config',\n type=delimited_list(';'),\n required=True,\n help='Data source configuration for Bloaty')\n parser.add_argument('--full',\n action='store_true',\n help='Display full bloat breakdown by symbol')\n parser.add_argument('--labels',\n type=delimited_list(';'),\n default='',\n help='Labels for output binaries')\n parser.add_argument('--out-dir',\n type=str,\n required=True,\n help='Directory in which to write output files')\n parser.add_argument('--target',\n 
type=str,\n required=True,\n help='Build target name')\n parser.add_argument('--title',\n type=str,\n default='pw_bloat',\n help='Report title')\n parser.add_argument('--source-filter',\n type=str,\n help='Bloaty data source filter')\n parser.add_argument('diff_targets',\n type=delimited_list(';', 2),\n nargs='+',\n metavar='DIFF_TARGET',\n help='Binary;base pairs to process')\n\n return parser.parse_args()" ]
[ "0.6223562", "0.6163945", "0.60260504", "0.5878589", "0.5855476", "0.58422273", "0.5816425", "0.5810505", "0.58065313", "0.5800577", "0.5779477", "0.5772803", "0.5751752", "0.5737089", "0.5699368", "0.5673736", "0.56566215", "0.56529987", "0.56464386", "0.5641782", "0.5639594", "0.56285346", "0.5618689", "0.5614174", "0.5611509", "0.56044096", "0.5603698", "0.5595417", "0.5590582", "0.5588165" ]
0.66262203
0
Function to parse a txt file of recipes and construct a 2D list tokenizing each recipe's metadata and ingredient content
def construct_list_of_recipes(): sub_list = [] with open(args.input_file, 'r') as f: lines = [line if line == '\n' else line.rstrip('\n') for line in f] recipes_list = [] for element in lines: if element == '\n': recipes_list.append(sub_list) sub_list = [] else: sub_list.append(element) recipes_list.append(sub_list) return recipes_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_foods(foods_txt):\n foods = []\n for line in foods_txt:\n ingredients_txt, allergens_txt = line.split(\" (contains \")\n ingredients = ingredients_txt.split()\n allergens = allergens_txt[:-1].split(\", \")\n\n foods.append((ingredients, allergens))\n\n return foods", "def read_data(input_file):\n\n def process_line(labels, words):\n l = ' '.join([label for label in labels if len(label) > 0])\n w = ' '.join([word for word in words if len(word) > 0])\n lines.append((l, w))\n words = []\n labels = []\n return words, labels, lines\n\n rf = open(input_file, 'r')\n lines = [];\n words = [];\n labels = []\n for line in rf:\n word = line.strip().split(' ')[0]\n label = line.strip().split(' ')[-1]\n # here we dont do \"DOCSTART\" check\n\n if len(line.strip()) == 0: # and words[-1] == '.'\n words, labels, lines = process_line(labels, words)\n words.append(word)\n labels.append(label)\n rf.close()\n return lines", "def read_input(fname):\n f_data = open(fname).read().strip().replace(utils.TAB, utils.SPACE)\n X = []\n sentences = f_data.split(utils.CARRIGE_RETURN)\n for sen in sentences:\n words = sen.split(utils.NEWLINE)\n X.append(words)\n return [X, list(itertools.chain(*X))]", "def read_file(filename):\n contents, labels = [], []\n with open_file(filename) as f:\n for line in f:\n try:\n label,content = line.strip().split('\\t')\n contents.append(list(content))\n labels.append(label)\n except:\n pass\n return contents,labels", "def get_data_split(file_name, referents=False):\n with open(file_name, 'r') as file:\n data = file.readlines()\n data = [line.strip() for line in data if line != '1-----\\n' and line != '1-----']\n scene_mapping = utt_to_scene('gold_lexicon.txt')\n all_utterances = [e.split(\" \")[1:] for e in data[::2]]\n short_utterances = []\n long_utterances = []\n short_scenes = []\n long_scenes = []\n for utt in all_utterances:\n if len(utt) >= 5:\n long_utterances.append(utt)\n scene = [scene_mapping[word] for word in utt]\n if referents:\n long_scenes.append(scene)\n else:\n scene = list(itertools.chain.from_iterable(scene))\n long_scenes.append(scene)\n elif len(utt) <= 3:\n short_utterances.append(utt)\n scene = [scene_mapping[word] for word in utt]\n if referents:\n short_scenes.append(scene)\n else:\n scene = list(itertools.chain.from_iterable(scene))\n short_scenes.append(scene)\n return short_utterances, long_utterances, short_scenes, long_scenes", "def read_data(cls, input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n lines = []\n for line in f:\n line = line.strip()\n if line.startswith('-DOCSTART-'):\n continue\n else:\n word_labels = line.split('-seq-')\n assert len(word_labels) == 2\n\n words = word_labels[0]\n labels = word_labels[1]\n lines.append([words, labels])\n\n return lines", "def parse_file(self, infile, chardict, labeldict):\n examples = []\n fin = io.open(infile, 'r')\n # idx is for the index of the row in the \n # original file before shuffling and randomization\n idx = 0\n for line in fin: \n entity, label = map(clean, line.rstrip().split('\\t')[:2])\n # print entity\n ent = map(lambda c:chardict[c], list(entity))\n lab = map(lambda l:labeldict[l] if l in labeldict else 0, label.split(','))\n examples.append((idx, ent, lab))\n idx += 1\n fin.close()\n print \"num_rows:\", len(examples), \" index\", idx\n return examples", "def read_conll_file(file_name):\n data = []\n current_words = []\n current_tags = []\n\n for line in codecs.open(file_name, encoding='utf-8'):\n line = line.strip()\n \n if line:\n if line[0] == '#':\n continue # skip 
comments\n tok = line.split('\\t')\n if '-' in tok[0] or '.' in tok[0]:\n continue # skip special tokenized words\n word = tok[1]\n tag = tok[3]\n \n current_words.append(word)\n current_tags.append(tag)\n else:\n if current_words: # skip empty lines\n data.append((current_words, current_tags))\n current_words = []\n current_tags = []\n\n # check for last one\n if current_tags != [] and not raw:\n data.append((current_words, current_tags))\n return data", "def __read_data__(self):\n with open(self.file, 'r') as data:\n sentence = []\n tags = []\n for line in data:\n terms = line.rstrip().split(WHITESPACE)\n for term in terms:\n word_tag = tuple(term.split(TAGCHAR))\n word = word_tag[0]\n tag = word_tag[1]\n self.word_tag_dict[word_tag] += 1\n self.tag_dict[tag] += 1\n self.__add_to_word_dict__(word, tag)\n if self.isNumberWord(word):\n self.numbers += 1\n if word[0].isupper() and len(sentence) > 0:\n self.cap_no_start += 1\n sentence.append(word)\n tags.append(tag)\n if tag == ENDOFSENTENCE:\n self.sentences.append(tuple(sentence))\n self.tags.append(tuple(tags))\n sentence = []\n tags = []", "def _parse_ingredients(recipe):\n ingredients = []\n group_counter = 1\n counter = 0\n\n filtered_dict = {k: v for k, v in recipe.items() if \"ingredient\" in k}\n ingredient = {}\n\n for key, value in filtered_dict.items():\n if not value:\n continue\n\n elif key == f\"ingredient{group_counter}\":\n ingredient[\"name\"] = value\n\n elif key == f\"ingredientQuantity{group_counter}\":\n ingredient[\"quantity\"] = value\n\n elif key == f\"ingredientMeasurement{group_counter}\":\n ingredient[\"measurement\"] = value\n\n counter += 1\n if counter % 3 == 0:\n ingredients.append(ingredient)\n ingredient = {}\n group_counter += 1\n\n return ingredients", "def parse_file(self, file_path) -> list:\n data = []\n with open(file_path, 'rb') as f:\n lines = pickle.load(f)\n for line in lines:\n input, output = line\n if input.strip() == \"\" or output.strip() == \"\":\n continue\n input_len = len(input.split())\n output_len = len(output.split())\n if input_len > 50 or output_len > 50:\n continue\n data_item = Text2TextDataItem(input_text=input, output_text=output, tokenizer=self.tokenizer,\n share_vocab=self.share_vocab)\n data.append(data_item)\n return data", "def convert_input_to_list():\n\n f = open('pizza_source.txt', 'r')\n file_to_list = f.read().split('\\n')\n\n return file_to_list", "def process_data(file_object: TextIO) -> list:\n text_list = [line.replace('\\n', '').split(' ') for line in file_object]\n return text_list", "def _read(self, file_path: str) -> Iterator[Instance]:\n with open(file_path) as f:\n for line in f:\n pairs = line.split()\n words, tags = zip(*(pair.split(\"###\") for pair in pairs))\n yield self.text_to_instance([Token(word) for word in words], tags)", "def read_examples(input_file):\n examples = []\n unique_id = 0\n with tf.gfile.GFile(input_file, \"r\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n \n line = line.strip()\n text_a = None\n text_b = None\n m = re.match(r\"^(.*) \\|\\|\\| (.*)$\", line)\n \n if m is None:\n text_a = line\n else:\n text_a = m.group(1)\n text_b = m.group(2)\n examples.append(InputExample(unique_id=unique_id,\n text_a=text_a, \n text_b=text_b))\n unique_id += 1\n return examples", "def read_processed_data_from_file(file, encoding='latin1'):\n\n with open(file, encoding=encoding) as f:\n raw = f.read()\n\n lines = raw.split('\\n')\n labeled_texts = []\n n = len(lines) - 1\n for i, line in 
enumerate(lines):\n print(f'\\rLoading review {i} of {n}', end='')\n if line == '':\n continue\n tagged_words = re.findall(r'(.+?\\\\.+?) ', line)\n label = re.findall(r'#(\\d+.\\d)#', line)[0]\n labeled_texts.append((tagged_words, label))\n print()\n return labeled_texts", "def lemmatise(path, model_spec) -> Dict[str, List[Sentence[Token]]]:\n\n # handle install\n # lets check if we need to install or not\n if check(model_spec) is not True:\n for model in download(model_spec):\n download(model_spec)\n\n # get tagger\n with shutup():\n tagger = get_tagger(model_spec, batch_size=256, device=\"cpu\", model_path=None)\n\n # import iterator and processor\n iterator, processor = getattr(get_imports(get_model(model_spec)), \"get_iterator_and_processor\")(max_tokens=256)\n\n # Get files content\n files = glob.glob(path + '/*.txt')\n content = defaultdict(list)\n for f in files:\n wit = os.path.splitext(os.path.split(f)[-1])[0]\n tok_id_diff = 0\n with open(f, 'r') as doc:\n for tok_id, token in enumerate(tagger.iter_tag_token(\n data=doc.read(),\n iterator=iterator,\n processor=processor,\n empty_token_on_sent_break=True\n )):\n if not content[wit]:\n content[wit].append([])\n # token_dict = {\"form\": t[0], \"id\": \"w_\" + str(tokenId), \"order_id\": str(tokenId)}\n if token is None:\n tok_id_diff -= 1\n content[wit].append([])\n else:\n content[wit][-1].append({\n **token,\n \"id\": f\"w_{tok_id + tok_id_diff}\",\n \"order_id\": str(tok_id + tok_id_diff)\n })\n\n return content", "def _read_data(cls, input_file): # 这里是对文件的处理\r\n with open(input_file, encoding='utf-8') as f:\r\n lines = []\r\n\r\n for line in f:\r\n line = json.loads(line)\r\n words = ' '.join(list(line['natural']))\r\n labels = ' '.join(line['tag_seq'])\r\n poss = line['pos_seq']\r\n dps = line['dp_seq']\r\n head = line['head_seq']\r\n lines.append([labels, words, poss, dps, head])\r\n\r\n return lines", "def get_recipes(recipe_list_path):\n autopkgr_path = os.path.expanduser(\n \"~/Library/Application Support/AutoPkgr/recipe_list.txt\")\n recipe_list_path = recipe_list_path if recipe_list_path else autopkgr_path\n if not os.path.exists(recipe_list_path):\n sys.exit(\"recipe_list file %s does not exist!\" % recipe_list_path)\n with open(recipe_list_path) as recipe_list:\n recipes = [recipe.strip() for recipe in recipe_list]\n return recipes", "def parse_file(input_file):\n \n all_lines = input_file.split('\\n')\n all_info_list = []\n for line in all_lines:\n line = line.split('\\t')\n info_per_row_list = []\n for value in line:\n my_string = \"\"\n value = value.strip('\\'\"')\n if len(value) == 0:\n value = \"NA\"\n my_string += value\n info_per_row_list += [my_string]\n all_info_list += [info_per_row_list]\n return all_info_list", "def import_data(in_file):\n\n print '\\n\\tImport data'\n sentence = []\n concept = []\n sentences = []\n concepts = []\n for line in open(in_file, 'r'):\n if line != '\\n':\n sentence += [ line.split()[0] ]\n concept += [ line.split()[1] ]\n else:\n sentences += [ sentence ]\n concepts += [ concept ]\n sentence = [ ]\n concept = [ ]\n pos = []\n lemma = []\n poss = []\n lemmas = []\n for line in open(in_file.replace('.data', '.feats.txt'), 'r'):\n if line != '\\n':\n pos += [ line.split()[ 1 ] ]\n lemma += [ line.split()[ 2 ] ]\n else:\n poss += [ pos ]\n lemmas += [ lemma ]\n pos = [ ]\n lemma = [ ]\n print '\\t--done'\n return sentences, poss, lemmas, concepts", "def read_cached_ingredients_words(file='../data/full-recipes-dataset/pre-processed-full-recipes-dataset-v2.json',\n 
file_words='../data/words-teste.txt'):\n print 'Reading ingredients of all recipes'\n config = myutils.load_json(file)\n\n ingr_word_list = []\n if not os.path.exists(file_words):\n with open(file_words, 'w') as f:\n \n for recipe in config.keys():\n for ingredient in config[recipe][\"ingredients\"]:\n # ingredient = ingredients_utils.clean_html(ingredient)\n ingredient = ingredients_utils.clean(ingredient)\n ingredient = ingredients_utils.clean_recipes_terms(ingredient)\n\n word_list = ingredient.split()\n for w in word_list:\n if len(w) == 1: # Removing words with just one letter.\n continue\n f.write(w + '\\n')\n\n print 'Saving words...'\n with open(file_words, 'r') as f:\n ingr_word_list = [line.rstrip('\\n') for line in f]\n\n return ingr_word_list", "def __prepareDataSet(fileName):\n\n labels = []\n utterances = []\n\n with open(fileName) as f:\n lines = f.readlines()\n\n for line in lines:\n try:\n act = line[:line.index(\" \")]\n utterance = line[line.index(\" \"):line.index(\"\\n\")]\n\n try:\n labels.append(act.strip())\n utterances.append(utterance.strip())\n\n except KeyError:\n pass\n\n except ValueError:\n pass\n\n return labels, utterances", "def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels", "def read_file(filename):\n\n sentences = open(filename).read().strip().split(\"\\n\\n\") #separate tweets\n ret = []\n for sent in sentences:\n lines = sent.split(\"\\n\") #each word in the tweet\n pairs = [L.split(\"\\t\") for L in lines] #Funniest O\n tokens = [tok for tok,tag in pairs]\n tags = [tag for tok,tag in pairs]\n ret.append( (tokens,tags) )\n return ret", "def prepare_text_data(descriptions):\n text_data = []\n for line in descriptions:\n tokens = prepare_text_for_lda(line)\n text_data.append(tokens)\n return text_data", "def read_data(filename, eos='#'):\n ### Exercise 6.1\n\n with open(filename) as f:\n utterances = []\n labels = []\n\n for line in f:\n # Get utterance output and length\n utter = line\n utter = utter.replace(\" \", \"\").replace(\"\\n\", \"\") + \"#\"\n utterances.append(utter)\n # Make empty sequence\n sequence = np.zeros(len(utter), dtype=int)\n sequence[0], sequence[len(utter) - 1] = 1, 1\n # Find indexes of beginning of words\n prev_char = \"\"\n count = 0\n new_word_indexs = []\n for char in line:\n if char == \" \":\n prev_char = char\n continue\n if prev_char == \" \":\n prev_char = char\n new_word_indexs.append(count)\n count += 1\n else:\n prev_char = char\n count += 1\n for index in new_word_indexs:\n sequence[index] = 1\n labels.append(sequence)\n\n return (utterances, labels)", "def split_terms(filename):\n\n try:\n with open(filename) as go_term_file:\n go_term_info = go_term_file.read()\n split_go_term_list = re.split(r\"\\[Term\\]\", go_term_info)\n return split_go_term_list\n\n except FileNotFoundError:\n return []", "def get_data(file_name, referents=False):\n with open(file_name, 'r') as file:\n data = file.readlines()\n data = [line.strip() for line in data if line != '1-----\\n' 
and line != '1-----']\n scene_mapping = utt_to_scene('gold_lexicon.txt')\n all_utterances = [e.split(\" \")[1:] for e in data[::2]]\n# print(all_utterances)\n scenes = []\n for utt in all_utterances:\n scene = [scene_mapping[word] for word in utt]\n if referents:\n scenes.append(scene)\n else:\n scene = list(itertools.chain.from_iterable(scene))\n scenes.append(scene)\n return all_utterances, scenes", "def separate_into_passports(file_contents: str) -> List[str]:\n result = file_contents.split(\"\\n\\n\")\n return result" ]
[ "0.6772814", "0.6052774", "0.59686327", "0.5949775", "0.589042", "0.58483", "0.5812816", "0.5793555", "0.57722795", "0.5772054", "0.57039815", "0.56501746", "0.5648882", "0.5641128", "0.5631791", "0.562814", "0.5627455", "0.56138176", "0.56124926", "0.55846786", "0.55565566", "0.55514866", "0.55379117", "0.553574", "0.5534757", "0.5526769", "0.5524145", "0.55209875", "0.5515858", "0.549856" ]
0.7190229
0
Function to construct a new output_dict excluding recipes with filter_ingredients
def filter_output_dict(output_dict): global filter_ingredients if filter_ingredients: filtered_dict = {k: v for k, v in output_dict.iteritems() if all(filter_item in v['ingredients'] for filter_item in filter_ingredients)} return filtered_dict else: return output_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter():\n ingredient = request.args.get(\"ingredient\")\n if ingredient == None: # no ingredient parameter was included in the request\n return Response(\n \"{\\\"error\\\":\\\"ingredient parameter is required\\\"}\",\n status=400,\n mimetype=\"application/json\")\n\n recipes = [\n recipe.to_json_dict()\n for recipe in recipebook.recipes\n if recipe.has_ingredient(ingredient)]\n\n return Response(\n json.dumps(recipes),\n mimetype=\"application/json\")", "def construct_output_dict():\n list_of_recipes = construct_list_of_recipes()\n output_dict = {}\n for recipe_list in list_of_recipes:\n recipe_instance = construct_recipe_object(recipe_list)\n recipe_dict = recipe_instance.construct_json_rep_obj()\n for k, v in recipe_dict.iteritems():\n output_dict[k] = v\n output_dict = filter_output_dict(output_dict)\n return {'recipes': output_dict}", "def _strip_excess_data(recipe):\n for key in list(recipe.keys()):\n if key == \"ingredients\" or key == \"steps\":\n continue\n elif \"ingredient\" in key or \"step\" in key:\n del recipe[key]\n\n return recipe", "def _parse_ingredients(recipe):\n ingredients = []\n group_counter = 1\n counter = 0\n\n filtered_dict = {k: v for k, v in recipe.items() if \"ingredient\" in k}\n ingredient = {}\n\n for key, value in filtered_dict.items():\n if not value:\n continue\n\n elif key == f\"ingredient{group_counter}\":\n ingredient[\"name\"] = value\n\n elif key == f\"ingredientQuantity{group_counter}\":\n ingredient[\"quantity\"] = value\n\n elif key == f\"ingredientMeasurement{group_counter}\":\n ingredient[\"measurement\"] = value\n\n counter += 1\n if counter % 3 == 0:\n ingredients.append(ingredient)\n ingredient = {}\n group_counter += 1\n\n return ingredients", "def filter(self, filter_dict):\n pass", "def filter_non_ingredient(ingredient_list):\n stop_words = set(stopwords.words('english'))\n \n filtered_list = []\n add_list = 0 #a dummy variable to add a text to filtered list\n for phrases in set(ingredient_list): #run through only one item in set (removes duplicates)\n\n for word in phrases:\n if word in stop_words:\n phrases.replace(word,'')\n\n #if one of the word in a phrase is ingredient, counts in to list\n for word in word_tokenize(phrases): #phrases can be phrase (run through phrases)\n \n is_ingredient = is_it_ingredient(word) #returns true if a word is ingridient\n \n if is_ingredient == True:\n add_list = 1\n else:\n add_list = 0\n\n ##if one of the word in a phrase is ingredient, counts in to list\n if add_list == 1 :\n\n filtered_list.append(phrases.capitalize())\n add_list = 0 \n\n return filtered_list", "def __filter(self, obj):\n filtered_keys = ['file_path', \"Data\", \"raw_block_data\", \"Reserved1\", \"raw\"]\n if isinstance(obj, list):\n return dict([t for t in obj if t[0] not in filtered_keys])\n elif isinstance(obj, dict):\n return {k: self.__filter(v) for k, v in obj.items()\n if k not in filtered_keys}\n else:\n return dict(obj)", "def recipe(self, recipe):\n import hxl.filters\n return hxl.filters.from_recipe(self, recipe)", "def _unwrap_filter(self, filter_dict, interface_dict=None):\n if not interface_dict:\n interface_dict = dict((new, old) for old, new in self._interface)\n\n translated = {}\n for k, v in filter_dict.items():\n tran_k = interface_dict[k]\n if tran_k != None:\n translated[tran_k] = v\n else:\n if v != self._missing:\n raise _FilterValueError('Missing column can only be '\n 'filtered to missing value.')\n return translated", "def make_ingredients_mapping(ingredients, funcs = 
[LessLikelyWordRemover()]):\n def chained_func(string):\n \"\"\"function calling pipeline\"\"\"\n for func in funcs:\n string = func(string)\n return string\n \n #create mapping\n mapping = {}\n \n for ing in ingredients:\n if ing.strip():#prevent empty string\n new_ing = chained_func(ing) \n if ing != new_ing:#ensures the ingredient we recorded needs to be changed\n mapping[ing] = new_ing\n \n return mapping", "def filter(self, filter_dict):\n self.result = [x for x in self.result if all(str(x[y]) == z or (hasattr(x[y], \"__iter__\") and (z in str(x[y]) or any(z in str(d.values) for d in x[y] if isinstance(d, dict)))) for y,z in filter_dict.items())] \n\n return self", "def filter_features(self):\n return {key: {k: v for k, v in value.items() if k in {NAME, TYPE, ACTIVE}} for key, value in self.to_dict().items()}", "def _filter_results(self, result):\n out_result = {}\n for change_type in result:\n temp_dict = {}\n for key in result[change_type]:\n log.debug(\"change_type = %s\", change_type)\n if self.ignore_added and (change_type == \"+++\"):\n continue\n log.debug(\"result[change_type] = %s, key = %s\",\n unicode(result[change_type]), key)\n log.debug(\"self._is_incex_key = %s\",\n self._is_incex_key(\n key,\n result[change_type][key]))\n if not self._is_incex_key(key, result[change_type][key]):\n temp_dict[key] = result[change_type][key]\n if len(temp_dict) > 0:\n out_result[change_type] = temp_dict\n\n return out_result", "def filter_dictionary_by_missing_residues(raw_data):\n new_data = {\n \"seq\": [],\n \"ang\": [],\n \"ids\": [],\n \"evo\": [],\n \"msk\": [],\n \"crd\": [],\n \"sec\": [],\n \"res\": [],\n \"ums\": [],\n \"mod\": []\n }\n train = raw_data[\"train\"]\n n_filtered_entries = 0\n total_entires = 0.\n for seq, ang, crd, msk, evo, _id, res, sec, ums, mod in zip(\n train['seq'], train['ang'], train['crd'], train['msk'], train['evo'],\n train['ids'], train['res'], train['sec'], train['ums'], train['mod']):\n total_entires += 1\n if \"-\" in msk:\n n_filtered_entries += 1\n continue\n else:\n new_data[\"seq\"].append(seq)\n new_data[\"ang\"].append(ang)\n new_data[\"ids\"].append(_id)\n new_data[\"evo\"].append(evo)\n new_data[\"msk\"].append(msk)\n new_data[\"crd\"].append(crd)\n new_data[\"sec\"].append(sec)\n new_data[\"res\"].append(res)\n new_data[\"ums\"].append(ums)\n new_data[\"mod\"].append(mod)\n\n if n_filtered_entries:\n print(f\"{n_filtered_entries} ({n_filtered_entries/total_entires:.1%})\"\n \" training set entries were excluded based on missing residues.\")\n raw_data[\"train\"] = new_data\n return raw_data", "def _cleanse_dict(original):\n return {k: v for k, v in original.items() if \"_pass\" not in k}", "def _cleanse_dict(original):\n return dict((k, v) for k, v in original.items() if \"_pass\" not in k)", "def trim_data(flattened_beer_json, trim_level):\n\n attributes_of_interest = {\n 0: ['style_category_name', 'style_ibuMin', 'style_ibuMax', 'style_abvMin', 'style_abvMax'],\n 1: ['abv', 'ibu'],\n 2: ['style_fgMax'],\n 3: ['style_fgMax', 'style_ogMin'],\n 4: ['style_fgMax', 'style_fgMin', 'style_ogMin'], # don't use style_ogMax, no beer has the attribute!\n 5: ['servingTemperature', 'glass_name', 'available_name', 'abv', 'ibu'],\n 6: ['ingredients'],\n 7: ['ingredients', 'abv', 'ibu']\n }\n\n trimmed = {'data': [], 'labels': []}\n\n # Get all ingredients\n possible_ingredients = get_all_ingredients()\n # Get top 50 highest occurring ingredients\n possible_ingredients = dict(sorted(possible_ingredients.items(), key=operator.itemgetter(1), 
reverse=True)[:50])\n\n for index, beer in enumerate(flattened_beer_json['data']):\n # if all attributes of interest are present\n if all(attribute in beer for attribute in attributes_of_interest[trim_level]):\n for attribute in list(beer):\n if attribute == 'ingredients':\n for ing in possible_ingredients.keys():\n if ing in get_beer_ingredients(beer):\n beer[ing] = True\n else:\n beer[ing] = False\n del(beer[attribute])\n elif attribute not in attributes_of_interest[trim_level]:\n del (beer[attribute])\n trimmed['data'].append(beer)\n trimmed['labels'].append(flattened_beer_json['labels'][index])\n\n return trimmed", "def metadata_filter_as_dict(metadata_config):\n\n if metadata_config is None:\n return {}\n\n if metadata_config is True:\n return {'additional': 'all'}\n\n if metadata_config is False:\n return {'excluded': 'all'}\n\n if isinstance(metadata_config, dict):\n assert set(metadata_config) <= set(['additional', 'excluded'])\n return metadata_config\n\n metadata_keys = metadata_config.split(',')\n\n metadata_config = {}\n\n for key in metadata_keys:\n key = key.strip()\n if key.startswith('-'):\n metadata_config.setdefault('excluded', []).append(key[1:].strip())\n elif key.startswith('+'):\n metadata_config.setdefault('additional', []).append(key[1:].strip())\n else:\n metadata_config.setdefault('additional', []).append(key)\n\n for section in metadata_config:\n if 'all' in metadata_config[section]:\n metadata_config[section] = 'all'\n else:\n metadata_config[section] = [k for k in metadata_config[section] if k]\n\n return metadata_config", "def get_beer_ingredients(beer):\n beer_ingredients = []\n for ing in beer['ingredients']:\n for item in beer['ingredients'][ing]:\n if 'name' in item:\n if item['name'] not in beer_ingredients:\n beer_ingredients.append(item['name'])\n\n return beer_ingredients", "def dict_cleanup(self, data):\n for data_key, data_value in list(data.items()):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.required.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] != filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n del data[data_key]\n break\n if filter_value not in data_value[filter_key]:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] not in filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n del data[data_key]\n break\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if not found_match:\n del data[data_key]\n break\n else:\n self.logger.warning(msg=\"Dict_Cleanup: None of the cases matched. 
Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"Dict_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n for data_key, data_value in list(data.items()):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.excluded.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] == filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n if filter_value in data_value[filter_key]:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] in filter_value:\n del data[data_key]\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if found_match:\n del data[data_key]\n break\n else:\n self.logger.warning(msg=\"Dict_Cleanup: None of the cases matched. Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"Dict_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n return data", "def filter_mapping_file_from_mapping_f(\r\n mapping_f, sample_ids_to_keep, negate=False):\r\n mapping_data, header, comments = parse_mapping_file(mapping_f)\r\n filtered_mapping_data = []\r\n sample_ids_to_keep = {}.fromkeys(sample_ids_to_keep)\r\n\r\n for mapping_datum in mapping_data:\r\n hit = mapping_datum[0] in sample_ids_to_keep\r\n if hit and not negate:\r\n filtered_mapping_data.append(mapping_datum)\r\n elif not hit and negate:\r\n filtered_mapping_data.append(mapping_datum)\r\n else:\r\n pass\r\n return format_mapping_file(header, filtered_mapping_data)", "def invert(ref_filters):\n inv_filters = {}\n for key, value in ref_filters.items():\n try:\n # From {\"cats_ok\": {\"url_key\": \"pets_cat\", \"value\": 1, \"attr\": \"cats are ok - purrr\"},}\n # To {\"cats are ok - purrr\": {\"cats_ok\": \"true\"},}\n inv_filters[value[\"attr\"]] = {key: \"true\"}\n except KeyError:\n # For filters with multiple values.\n # From {'auto_bodytype': ['bus', 'convertible', ... ],}\n # To {'bus': 'auto_bodytype', 'convertible': 'auto_bodytype', ... 
,}\n if isinstance(value, dict):\n inv_filters.update({child_value: key for child_value in value})\n\n return inv_filters", "def test_excludeIngredientQuery(self) -> None:\n ingredient0 = 'multimedia'\n ingredient1 = 'provision'\n result = self.entries.exclude(Q(ingredients__icontains=ingredient0) | Q(ingredients__icontains=ingredient1))\n self.assertEqual(988, len(result))\n\n queries = (Q(ingredients__icontains=ingredient0), Q(ingredients__icontains=ingredient1))\n result = self.entries.exclude(functools.reduce(operator.or_, queries))\n self.assertEqual(988, len(result))", "def test_filter_recipe_by_ingredients(self):\n recipe1 = sample_recipe(user=self.user, title='chicken curry')\n recipe2 = sample_recipe(user=self.user, title='mutton curry')\n recipe3 = sample_recipe(user=self.user, title='milk dish')\n ing1 = sample_ingredient(user=self.user,name='chicken')\n ing2 = sample_ingredient(user=self.user,name='mutton')\n recipe1.ingredient.add(ing1)\n recipe2.ingredient.add(ing2)\n\n res = self.client.get(RECIPE_URL,{'ingredient':f'{ing1.id},{ing2.id}'})\n\n serializer1 = RecipeSerializer(recipe1)\n serializer2 = RecipeSerializer(recipe2)\n serializer3 = RecipeSerializer(recipe3)\n\n self.assertIn(serializer1.data,res.data)\n self.assertIn(serializer2.data,res.data)\n self.assertNotIn(serializer3.data,res.data)", "def filter_values(self):\n dfilter = self.args.datafilter\n self.logger.info(u'Filtering values with:{f}'.format(f=dfilter))\n data = self.outputdata\n newdata = {}\n for key, value in data.items():\n self.logger.info(u'\\nProcessing Key:{k}, value:{v}'.format(k=key,\n v=value))\n returned_data = dict_value_filter(key, value, dfilter, self.logger)\n if bool(returned_data):\n newdata[key] = returned_data\n self.logger.info(u'Data after filter:{d}'.format(d=newdata))\n\n self.outputdata = newdata", "def build_feature_filter(self):\n if self.features == [\"*\"]:\n random_iso = list(self.data.keys())[0]\n self.features = set()\n for lang_features in self.data.values():\n self.features |= set(lang_features.keys())\n self.features = list(self.features)\n if self.exclusions:\n self.features = [f for f in self.features if f not in self.exclusions]\n self.feature_filter = set(self.features)", "def slot_mappings(self):\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n\n return {\"ingredient\": self.from_entity(entity=\"ingredient\",\n not_intent=\"greet\")}", "def filter_output(output):\n print(\"Filtering for new\")\n new_only = get_changes(output)\n print(\"Applying Hamming Weight function\")\n balanced_hamming = apply_func(new_only, hamming_weight)\n # filter lines where multiple values change\n print(\"Filtering out pops\")\n balanced_hamming = list(\n filter(lambda x: 'pop' not in x['mnemonic'], balanced_hamming))\n return balanced_hamming", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def ignored_values(self):\r\n return dict()" ]
[ "0.63625383", "0.6360492", "0.61961323", "0.6113551", "0.5905042", "0.57337976", "0.56699854", "0.5645597", "0.5505097", "0.5494827", "0.5426863", "0.5423085", "0.54064524", "0.5354577", "0.5350412", "0.5335406", "0.5281635", "0.52562", "0.5244354", "0.52313745", "0.5203894", "0.5193223", "0.519241", "0.5188238", "0.51857895", "0.51854604", "0.5184498", "0.51826453", "0.51746935", "0.51476854" ]
0.79188234
0
Iterate over the recipes and wrap them into an output dict
def construct_output_dict(): list_of_recipes = construct_list_of_recipes() output_dict = {} for recipe_list in list_of_recipes: recipe_instance = construct_recipe_object(recipe_list) recipe_dict = recipe_instance.construct_json_rep_obj() for k, v in recipe_dict.iteritems(): output_dict[k] = v output_dict = filter_output_dict(output_dict) return {'recipes': output_dict}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_recipe_details(recipe_links):\n cuisine_recipes = {}\n for r in recipe_links:\n recipe = {}\n recipe_link = r.a[\"href\"]\n print \"recipe link: \", recipe_link\n soup_recipe = get_recipe(recipe_link)\n if soup_recipe:\n recipe['r_link'] = recipe_link\n recipe['recipe title'] = get_recipe_title(soup_recipe)\n recipe['chef'] = get_recipe_chef(soup_recipe)\n recipe['description'] = get_description(soup_recipe)\n recipe['ingredient list'] = get_recipe_ingredients(soup_recipe)\n recipe['preperation steps'] = get_recipe_preperation(soup_recipe)\n recipe['prep_time'], recipe['cook_time'] = get_recipe_time(soup_recipe)\n recipe['servings'] = get_servings(soup_recipe)\n recipe['skill_level'] = get_skill_level(soup_recipe)\n recipe['rating'], recipe['rating count'] = get_recommendations(soup_recipe)\n recipe['nutritional_info'] = get_nutrition_per_serving(soup_recipe)\n recipe['image_source'] = get_image_source(soup_recipe)\n cuisine_recipes[recipe['recipe title']] = recipe\n return cuisine_recipes", "def get_recipe_details(recipe_links):\n cuisine_recipes = {}\n for r in recipe_links:\n soup_recipe = BeautifulSoup(r)\n if \"www.chowhound.com\" in r.a[\"href\"]:\n recipe = {}\n recipe['r_link'] = r.a[\"href\"]\n print \"recipe link: \", recipe['r_link']\n soup_recipe = get_recipe(recipe['r_link'])\n recipe['recipe title'] = get_recipe_title(soup_recipe)\n recipe['chef'] = get_recipe_chef(soup_recipe)\n recipe['description'] = get_description(soup_recipe)\n recipe['ingredient list'] = get_recipe_ingredients(soup_recipe)\n recipe['preperation steps'] = get_recipe_preperation(soup_recipe)\n recipe['total_time'], recipe['active_time'] = get_recipe_time(soup_recipe)\n recipe['servings'] = get_servings(soup_recipe)\n recipe['skill_level'] = get_recipe_difficulty(soup_recipe)\n recipe['rating'], recipe['rating count'] = get_ratings(soup_recipe)\n recipe['nutritional_info'] = get_nutrition_per_serving(soup_recipe)\n recipe['image_source'] = get_image_source(soup_recipe)\n cuisine_recipes[recipe['recipe title']] = recipe\n return cuisine_recipes", "def _format_recipe(recipe):\n # Some fields are not consistently returned from the API, make sure they exist first.\n if \"analyzedInstructions\" in recipe.keys() and len(\n recipe[\"analyzedInstructions\"]\n ):\n step_instructions = recipe[\"analyzedInstructions\"][0][\"steps\"]\n else:\n step_instructions = None\n\n return {\n \"spoonacular_id\": recipe[\"id\"],\n \"dish_name\": recipe[\"title\"],\n \"servings\": recipe.get(\"servings\", None),\n \"image\": recipe.get(\"image\", None),\n \"is_vegetarian\": recipe[\"vegetarian\"],\n \"is_vegan\": recipe[\"vegan\"],\n \"is_gluten_free\": recipe[\"glutenFree\"],\n \"is_dairy_free\": recipe[\"dairyFree\"],\n \"cook_time_min\": recipe.get(\"cookingMinutes\", None),\n \"prep_time_min\": recipe.get(\"preparationMinutes\", None),\n \"spoonacular_score\": recipe[\"spoonacularScore\"],\n \"ingredients\": [\n ingredient[\"originalName\"]\n for ingredient in recipe[\"extendedIngredients\"]\n ],\n \"instructions\": recipe.get(\"instructions\", None),\n \"step_instructions\": step_instructions,\n }", "def _recipe_details_generator(self, converted_content, overview_recipe):\n def get_cooking_shop_strings(lines):\n ret = []\n buf = None\n is_recipe_step_area = False\n for l in lines:\n if re.search(\"軒目\", l.strip()) or re.match(r\"^[①-⑳*].*『.*』\", l.strip()) or re.match(r\"^[①-⑳*].*「.*」\", l.strip()):\n if buf:\n ret.append(buf)\n buf = l.strip()\n continue\n\n if re.search(\"^(料理|万能調味料)\", l.strip()):\n 
is_recipe_step_area = False\n\n if re.search(\"^材料\", l.strip()):\n title, materials = re.search(\"(材料)(.*)\", l.strip()).groups()\n # buf += \"\\n\" + \"\\n\".join(l.strip().split(None, 1))\n buf += \"\\n\" + title + \"\\n\" + materials.strip()\n continue\n\n if re.search(\"^作り方\", l.strip()):\n is_recipe_step_area = True\n title, recipe_steps = re.search(\"(作り方)(.*)\", l.strip()).groups()\n # buf += \"\\n\" + \"\\n\".join(l.strip().split(None, 1))\n buf += \"\\n\" + title + \"\\n\" + recipe_steps.strip()\n continue\n \n if buf:\n if is_recipe_step_area:\n if re.match(r\"^[①-⑳*]\", l.strip()):\n buf += \"\\n\" + l.strip()\n else:\n buf += l.strip()\n else:\n buf += \"\\n\" + l.strip()\n if buf:\n ret.append(buf)\n\n return ret\n \n \n for ii, l in enumerate(converted_content.splitlines()):\n if ii == 1:\n overview_recipe.cooking_name_sub = l.strip()\n continue\n \n if -1 < l.find(\"初回放送\"):\n overview_recipe.program_date = dateutil.parser.parse(\"/\".join(re.search(r\"(\\d+)\\D+(\\d+)\\D+(\\d+)\\D+\", l).groups()))\n break\n\n cooking_shop_strings = get_cooking_shop_strings(converted_content.splitlines())\n\n logger.debug(\"-\" * 20)\n logger.debug(cooking_shop_strings)\n for shop_string in cooking_shop_strings:\n recipe_shop = None\n recipe = None\n is_material_area = False\n is_recipe_step_area = False\n for l in shop_string.splitlines():\n if len(l.strip()) == 0:\n continue\n \n if is_material_area == False and is_recipe_step_area == False:\n if re.search(\"軒目\", l.strip()) or re.match(r\"^[①-⑳*].*『.*』\", l.strip()) or re.match(r\"^[①-⑳*].*「.*」\", l.strip()):\n recipe_shop = copy.deepcopy(overview_recipe)\n recipe = None\n \n m = re.search(r\"「(.*)」\", l)\n if m:\n recipe_shop.cooking_name_sub += \"/\" + m.group(1)\n else:\n m2 = re.search(r\"『(.*)』\", l)\n if m2:\n recipe_shop.cooking_name_sub += \"/\" + m2.group(1)\n \n continue\n \n if re.search(\"^(料理|万能調味料)\", l.strip()):\n is_material_area = False\n is_recipe_step_area = False\n if recipe:\n yield recipe\n\n if recipe_shop:\n recipe = copy.deepcopy(recipe_shop)\n else:\n recipe = copy.deepcopy(overview_recipe)\n \n if -1 < l.find(\":\"):\n recipe.cooking_name = l.split(\":\")[1].strip()\n elif -1 < l.find(\":\"):\n recipe.cooking_name = l.split(\":\")[1].strip()\n elif re.search(r\"^(料理|万能調味料)[①-⑳]\", l.strip()):\n # https://www.nhk.or.jp/program/manpuku/recipe/dg0_200115.pdf\n # 料理①カルパッチョ\n recipe.cooking_name = l.strip()[3:].strip()\n else:\n recipe.cooking_name = l.split(None, 1)[1].strip()\n continue\n \n if re.search(\"^材料\", l.strip()):\n is_material_area = True\n is_recipe_step_area = False\n if l.strip() == \"材料\":\n continue\n \n if re.search(\"^作り方\", l.strip()):\n is_material_area = False\n is_recipe_step_area = True\n if l.strip() == \"作り方\":\n pass\n else:\n l = l.replace(\"作り方\", \"\", 1)\n # recipeがNoneの場合はエラーとして検出したい\n recipe.recipe_steps.append(RecipeText(l.strip()))\n continue\n \n \n if is_material_area:\n for material in l.strip().split(\"、\"):\n material = material.strip()\n if len(material):\n if material.startswith(\"(\"):\n recipe.materials.append(RecipeText(material))\n else:\n recipe.materials.append(RecipeText(material.replace(\"(\", \": \").replace(\")\", \"\")))\n \n if is_recipe_step_area:\n recipe.recipe_steps.append(RecipeText(l.strip()))\n if recipe:\n yield recipe", "def used_in_recipes(self):\n Recipe = apps.get_model('recipes','Recipe')\n values = {}\n rqset = Recipe.objects.filter(components__of_ingredient__pk=self.pk)\n\n while rqset.count(): # until no more child recipes\n 
values.update(rqset.values_list('slug','name')) # Add to return list\n rqset = Recipe.objects.filter(components__of_recipe__in=rqset) # Recurse\n\n return values", "def _check_recipes(self, recipes):\n\n ret = {}\n if type(recipes) is not dict:\n print(\"Error: recipes is not type 'dict'!\")\n return ret\n\n for (recipe, flavors) in recipes.items():\n if type(flavors) is not dict:\n print(\"Error: recipe %s does not contain a dict of flavors\"%recipe)\n continue\n ret[recipe] = {}\n for (flav, amount) in flavors.items():\n if type(amount) is not int and type(amount) is not float:\n print(\"Error: flavor %s has non-numeric amount: %s\"%(flav, amount))\n continue\n # always assume percent\n amount = amount / 100.0\n ret[recipe][flav] = amount\n\n return ret", "def get_recipes_dict(filename, mode_type, encode):\n with open(filename, mode_type, encoding=encode) as file:\n recipe_dict = dict()\n for line in file:\n dish = line.strip()\n amount = int(file.readline())\n buffer_list = list()\n for item in range(amount):\n ingredient, quantity, measure = file.readline().split('|')\n buffer_list.append(\n {'ingredient_name': ingredient.strip(), 'quantity': int(quantity), 'measure': measure.strip()}\n )\n recipe_dict[dish] = buffer_list\n file.readline()\n return recipe_dict", "def recipes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Recipe]:\n pass", "def _parse_ingredients(recipe):\n ingredients = []\n group_counter = 1\n counter = 0\n\n filtered_dict = {k: v for k, v in recipe.items() if \"ingredient\" in k}\n ingredient = {}\n\n for key, value in filtered_dict.items():\n if not value:\n continue\n\n elif key == f\"ingredient{group_counter}\":\n ingredient[\"name\"] = value\n\n elif key == f\"ingredientQuantity{group_counter}\":\n ingredient[\"quantity\"] = value\n\n elif key == f\"ingredientMeasurement{group_counter}\":\n ingredient[\"measurement\"] = value\n\n counter += 1\n if counter % 3 == 0:\n ingredients.append(ingredient)\n ingredient = {}\n group_counter += 1\n\n return ingredients", "def cleanData(self, recipes, url):\n ret = []\n for recipe in recipes:\n props = recipe['properties']\n for k, vals in props.items():\n new = []\n for v in vals:\n if type(v) is dict:\n if v.has_key('properties'):\n vv = ''\n for prop in v['properties'].values():\n vv += prop[0]\n v = vv\n else:\n continue\n lines = v.splitlines()\n vv = ' '.join([line.strip() for line in lines]).strip()\n new.append(vv)\n props[k] = new\n props['importedFromURL'] = url\n ret.append(recipe)\n return ret", "def get_datasets(recipe):\n # \"datasets\"\n return {dataset: get_instance(**par) for dataset, par in recipe.items()}", "def get_recipes_by_types(self, recipe_type): \n\t\tfor key, val in self.recipes_list.items():\n\t\t\tif key == recipe_type:\n\t\t\t\tfor a, b in val.items():\n\t\t\t\t\tprint(str(b))", "def process(input_list: list) -> dict:\n bag_requirements = {}\n for bag_requirement in input_list:\n outer_bag = bag_requirement.split(' bags ')[0].replace(' ', '_').lower()\n bag_requirements[outer_bag] = {}\n\n sub_bags = bag_requirement.split(' contain ')[1].split(', ')\n for bag in sub_bags:\n if bag and bag.strip(' ') != \"no other bags\":\n bag = bag.lower()\n parsed_bag = re.findall('([0-9]+) ([a-z]+ [a-z]+) ([a-z]+)', bag)\n if parsed_bag:\n parsed_bag = parsed_bag[0]\n n_bags = parsed_bag[0]\n bag_color = parsed_bag[1].replace(' ', '_')\n bag_requirements[outer_bag][bag_color] = n_bags\n return bag_requirements", "def RecipeToText(recipe):\n\n\tout = []\n\tworld = 
None\n\tfor (annotation, next_world) in recipe[1:]:\n\t\tcommand = annotation[0]\n\t\targuments = annotation[1]\n\n\t\trecipe_text = ''\n\t\tif command == 'create_ing':\n\t\t\t# TODO: When computing BLEU score, we may wish to ignore create_ing\n\t\t\t# commands since they are trivially translated\n\t\t\trecipe_text += '%s.' % arguments[1]\n\n\t\telif command == 'create_tool':\n\t\t\t# TODO: This is a horrible hack but we need some way to make sure that the\n\t\t\t# length of the outputted string is equal to that of the list of original\n\t\t\t# texts.\n\t\t\trecipe_text = '<create_tool>'\n\n\t\telif command == 'combine':\n\t\t\trecipe_text += 'Combine '\n\n\t\t\trecipe_text += ', '.join([world.I_d[ing] for ing in arguments[0]])\n\n\t\t\tif not IsNull(arguments[3]):\n\t\t\t\trecipe_text += ', %s' % arguments[3]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'separate':\n\t\t\trecipe_text += 'Separate '\n\t\t\trecipe_text += '%s and %s' % (world.I_d[arguments[0]], next_world.I_d[arguments[1]])\n\n\t\t\tif not IsNull(arguments[5]):\n\t\t\t\trecipe_text += ', %s' % arguments[5]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'put':\n\t\t\trecipe_text += 'Put %s in %s. ' % (world.I_d[arguments[0]], world.T_d[arguments[1]])\n\n\t\telif command == 'remove':\n\t\t\trecipe_text += 'Remove %s from %s. ' % (world.I_d[arguments[0]], world.T_d[arguments[1]])\n\n\t\telif command == 'cut':\n\t\t\trecipe_text += 'Chop %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'mix':\n\t\t\trecipe_text += 'Mix %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'cook':\n\t\t\trecipe_text += 'Cook %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'do':\n\t\t\trecipe_text += 'Taking %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'serve':\n\t\t\trecipe_text += 'Serve %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ', %s' % arguments[1]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'set':\n\t\t\trecipe_text += 'Set %s on %s. ' % (world.T_d[arguments[0]], arguments[1])\n\n\t\telif command == 'leave':\n\t\t\trecipe_text += 'Leave %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ', %s' % arguments[1]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'chefcheck':\n\t\t\trecipe_text += 'Check %s for %s. 
' % (world.I_d[arguments[0]], arguments[1])\n\n\t\tworld = next_world\n\t\tout.append(recipe_text)\n\n\treturn out", "def iterate_results(results, extract_fn):\n outputs = {}\n for environment, environment_results in results.items():\n if environment not in outputs:\n outputs[environment] = {}\n for experimental_setting, setting_results in environment_results.items():\n outputs[environment][experimental_setting] = []\n for config, seeds_results in setting_results.items():\n for seed, actual_results in seeds_results.items():\n output = extract_fn(actual_results)\n outputs[environment][experimental_setting].append(output)\n outputs[environment][experimental_setting] = np.array(outputs[environment][experimental_setting])\n return outputs", "def showData(self, recipes):\n for recipe in recipes:\n json.dump(recipe, self.stdout, indent=2)\n print\n print '/' + '*' * 50 + '/'", "def iter_recipes(self, pattern):\n raise NotImplementedError()", "def categorize_reads(f_dict, titer):\n for template in titer:\n for mate in template:\n mate['cat_list'] = mate.get('cat_list', []) + [k for k, f in f_dict.items() if f(mate)]\n yield template", "def list_recipes(environ, start_response):\n return list_entities(environ, start_response, 'list_recipes')", "def reconstitute():\n with open(TEXT_FPATH, 'w') as txt:\n for jfpath in json_fpaths():\n with open(jfpath) as f:\n jstruct = json.load(f)\n\n for recipe in jstruct.keys():\n _reconstitute_recipe(txt, jstruct[recipe])", "def make_cake_templates():\n tmpl = dict()\n\n # Attributes\n tmpl['Cooking time'] = ConditionTemplate(\n name=\"Cooking time\",\n description=\"The time elapsed during a cooking process\",\n bounds=RealBounds(0, 7 * 24.0, \"hr\")\n )\n tmpl[\"Oven temperature setting\"] = ParameterTemplate(\n name=\"Oven temperature setting\",\n description=\"Where the knob points\",\n bounds=RealBounds(0, 2000.0, \"K\")\n )\n tmpl[\"Oven temperature\"] = ConditionTemplate(\n name=\"Oven temperature\",\n description=\"Actual temperature measured by the thermocouple\",\n bounds=RealBounds(0, 2000.0, \"K\")\n )\n\n tmpl[\"Tastiness\"] = PropertyTemplate(\n name=\"Tastiness\",\n description=\"Yumminess on a fairly arbitrary scale\",\n bounds=IntegerBounds(lower_bound=1, upper_bound=10)\n )\n\n # Objects\n tmpl[\"Baking in an oven\"] = ProcessTemplate(\n name=\"Baking in an oven\",\n description='Using heat to promote chemical reactions in a material',\n allowed_labels=['precursor'],\n conditions=[(tmpl[\"Oven temperature\"], RealBounds(0, 700, \"degF\"))],\n parameters=[(tmpl[\"Oven temperature setting\"], RealBounds(100, 550, \"degF\"))]\n )\n\n tmpl[\"Taste test\"] = MeasurementTemplate(\n name=\"Taste test\",\n properties=[tmpl[\"Tastiness\"]]\n )\n\n tmpl[\"Dessert\"] = MaterialTemplate(\n name=\"Dessert\",\n properties=[tmpl[\"Tastiness\"]]\n )\n\n tmpl[\"Generic Material\"] = MaterialTemplate(name=\"Generic\")\n tmpl[\"Icing\"] = ProcessTemplate(name=\"Icing\",\n description='Applying a coating to a substrate',\n allowed_labels=['coating', 'substrate'])\n tmpl[\"Mixing\"] = ProcessTemplate(name=\"Mixing\",\n description='Physically combining ingredients',\n allowed_labels=['wet', 'dry', 'leavening', 'seasoning',\n 'sweetener', 'shortening', 'flavoring'])\n tmpl[\"Procurement\"] = ProcessTemplate(name=\"Procurement\",\n description=\"Buyin' stuff\")\n\n return tmpl", "def _yield_result_files(self, tpl, **kwargs):\n for sheet in self.shortcut_sheets:\n for ngs_library in sheet.all_ngs_libraries:\n extraction_type = 
ngs_library.test_sample.extra_infos[\"extractionType\"]\n if ngs_library.extra_infos[\"seqPlatform\"] in (\"ONP\", \"PacBio\"):\n suffix = \"_long\"\n else:\n suffix = \"\"\n yield from expand(\n tpl,\n mapper=self.config[\"tools\"][extraction_type.lower() + suffix],\n ngs_library=[ngs_library],\n **kwargs\n )", "def test_summarize_recipe(self):\n pass", "def recipes():\n if request.method == 'GET':\n return Response(\n json.dumps(recipebook.to_json_list()),\n mimetype=\"application/json\")\n elif request.method == 'POST':\n new_dict = request.get_json()\n recipebook.recipes.append(models.Recipe.from_json_dict(new_dict))\n write_out()\n return Response(status=200)", "def task_reformat_data():\n\n for data_type in data_sets:\n yield {\n 'actions': ['python reformat_weather_data.py %(dependencies)s > %(targets)s'],\n 'file_dep': ['UK_{}_data.txt'.format(data_type)],\n 'targets': ['UK_{}_data.reformatted.txt'.format(data_type)],\n }", "def main():\n records = get_block_of_records({\"keyword\": \"food\"})\n print (\"returned items: {}\".format(len(records)))\n\n processed_records = {}\n for item in records:\n meta = item[\"meta\"]\n umm = item[\"umm\"]\n cid = meta[\"concept-id\"]\n short_name = umm[\"ShortName\"]\n processed_records[cid] = short_name\n\n print (\"uniq keys: {}\".format(len(processed_records.keys())))", "def parseRemainingVariables(json_response, recipe):\n ready_in_minutes = json_response.get(\"readyInMinutes\")\n recipe.ready_in_minutes = int(ready_in_minutes)\n\n servings = json_response.get(\"servings\")\n recipe.servings = int(servings)\n\n vegetarian = json_response.get(\"vegetarian\")\n recipe.vegetarian = bool(vegetarian)\n\n source_url = json_response.get(\"sourceUrl\")\n recipe.source_url = str(source_url)\n\n aggregate_likes = json_response.get(\"aggregateLikes\")\n recipe.aggregate_likes = int(aggregate_likes)\n\n health_score = json_response.get(\"healthScore\")\n recipe.health_score = int(health_score)\n\n ingredients = []\n\n #parse all the ingredients and add them to the list of ingredients as a ingredient object\n if json_response.get(\"extendedIngredients\") is not None:\n for ingr in json_response.get(\"extendedIngredients\"):\n ingredient_name = ingr.get(\"name\")\n ingredient_id = ingr.get(\"id\")\n amount = ingr.get(\"amount\")\n unit = ingr.get(\"unit\")\n\n ingredient = Ingredient(ingredient_name, ingredient_id, amount, unit)\n ingredients.append(ingredient)\n\n recipe.ingredients = ingredients\n\n instructions = []\n\n #parse all the instructions and add them as instruction objects to the instructions list\n if json_response.get(\"analyzedInstructions\") is not None:\n\n if len(json_response.get(\"analyzedInstructions\")) != 0:\n\n if json_response.get(\"analyzedInstructions\")[0].get(\"steps\") is not None:\n\n for instr in json_response.get(\"analyzedInstructions\")[0].get(\"steps\"):\n instruction_number = instr.get(\"number\")\n step = instr.get(\"step\")\n\n ingred = []\n\n for json_ingred in instr.get(\"ingredients\"):\n for saved_ingred in ingredients:\n if saved_ingred.ingredient_id == json_ingred.get(\"id\"):\n ingred.append(saved_ingred)\n\n equipments = []\n\n for equip in instr.get(\"equipment\"):\n equipment_name = equip.get(\"name\")\n equipment_id = equip.get(\"id\")\n\n equipment = Equipment(equipment_name, equipment_id)\n\n equipments.append(equipment)\n\n instruction = Instruction(instruction_number, step, ingred, equipments)\n\n instructions.append(instruction)\n\n recipe.instructions = instructions\n\n return recipe", "def 
slot_mappings(self):\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n\n return {\"ingredient\": self.from_entity(entity=\"ingredient\",\n not_intent=\"greet\")}", "def recipe_dir(recipes_folder: py.path.local, tmpdir: py.path.local,\n case, recipe_data):\n recipe = deepcopy(recipe_data['meta.yaml'])\n if 'remove' in case:\n for remove in utils.ensure_list(case['remove']):\n path = remove.split('/')\n cont = recipe\n for p in path[:-1]:\n cont = cont[p]\n if isinstance(cont, list):\n for n in range(len(cont)):\n del cont[n][path[-1]]\n else:\n del cont[path[-1]]\n if 'add' in case:\n dict_merge(recipe, case['add'])\n\n recipe_dir = recipes_folder.mkdir(recipe_data['folder'])\n\n with recipe_dir.join('meta.yaml').open('w') as fdes:\n yaml.dump(recipe, fdes,\n transform=lambda l: l.replace('#{%', '{%').replace(\"#{{\", \"{{\"))\n\n if 'add_files' in case:\n for fname, data in case['add_files'].items():\n with recipe_dir.join(fname).open('w') as fdes:\n fdes.write(data)\n\n if 'move_files' in case:\n for src, dest in case['move_files'].items():\n src_path = recipe_dir.join(src)\n if not dest:\n if os.path.isdir(src_path):\n shutil.rmtree(src_path)\n else:\n os.remove(src_path)\n else:\n dest_path = recipe_dir.join(dest)\n shutil.move(src_path, dest_path)\n\n yield recipe_dir", "def reduce_recipe(self):\n\n self.recipe.reduce(self.crafting, self.crafting_stride)" ]
[ "0.6307142", "0.61831933", "0.6126512", "0.6043163", "0.5846334", "0.58092827", "0.57821715", "0.57819295", "0.57662284", "0.57036597", "0.5619611", "0.5571755", "0.5534036", "0.5516632", "0.5495394", "0.54889274", "0.5431682", "0.53547376", "0.53475016", "0.534165", "0.53280556", "0.5316552", "0.5310349", "0.5299557", "0.5291539", "0.5263479", "0.5183284", "0.5180608", "0.51764905", "0.5159943" ]
0.77564865
0
interest_param is a tuple containing a dict containing the satellite name, orbit parameters, and orbit classifications
def __init__(self, interest_param): self.name, self.orbit_param, self.orbit_class, self.orbit_type = interest_param # dict containing NEO, MEO, GEO, # apogee, perigee, or elliptical # inclination, period, # and eccentricity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Dilu_ratio[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"0.5*(1.+@0*@1)\",Afb, Dilu_ratio)')\n self.modelBuilder.factory_('expr::Rmn(\"0.5*(1.-@0*@1)\",Afb, Dilu_ratio)')", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"kappa_W[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_Z[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_tau[1,0.0,3.0]\")\n self.modelBuilder.doVar(\"kappa_mu[1,0.0,5.0]\") \n self.modelBuilder.factory_(\"expr::kappa_mu_expr(\\\"@0*@1+(1-@0)*@2\\\", CMS_use_kmu[0], kappa_mu, kappa_tau)\")\n self.modelBuilder.doVar(\"kappa_t[1,0.0,4.0]\")\n # additional kappa for the anomalous coupling\n self.modelBuilder.doVar(\"kappa_tilde_t[0.0,0.0,4.0]\")\n self.modelBuilder.doVar(\"kappa_b[1,0.0,3.0]\")\n if not self.resolved:\n self.modelBuilder.doVar(\"kappa_g[1,0.0,2.0]\")\n self.modelBuilder.doVar(\"kappa_gam[1,0.0,2.5]\")\n\tself.modelBuilder.doVar(\"BRinv[0,0,1]\")\n self.modelBuilder.out.var(\"BRinv\").setConstant(True)\n # adding additional kappa to list of parameters of interest\n pois = 'kappa_W,kappa_Z,kappa_tau,kappa_t,kappa_tilde_t,kappa_b'\n if not self.resolved:\n pois += ',kappa_g,kappa_gam'\n self.doMH()\n self.modelBuilder.doSet(\"POI\",pois)\n # use modified Higgs Builder\n self.SMH = AnomalousTopHiggsBuilder(self.modelBuilder)\n self.setup()", "def get_interest_map(far):\n\n # --- horizontal locations on 5 meter high in world coordinate\n height = -3.5\n x = np.arange(-4, 12, 1)\n x = x.reshape((-1, 1))\n high_horizon = np.concatenate([x, np.ones_like(x) * height, np.ones_like(x) * far], 1)\n\n # --- {3, 7, 11} meters right and 2.5 meter high in world coordinate\n height = -1.\n x = np.arange(3, 12, 4)\n x = x.reshape((-1, 1))\n right_candidate = np.concatenate([x, np.ones_like(x) * height, np.ones_like(x) * far], 1)\n\n p_world = np.concatenate([high_horizon, right_candidate], 0)\n p_img = project_pts3_to_image(p_world, K)\n\n # --- if close, search for top region in image coordinate\n if far < 8:\n x = np.arange(600, 1280, 50)\n x = x.reshape((-1, 1))\n y = 5\n close = np.concatenate([x, np.ones_like(x) * y], 1)\n p_img = np.concatenate([p_img, close], 0)\n\n # --- consider only locations in image\n ll = np.array([0, 0]) # lower-left\n ur = np.array([img_width, img_height]) # upper-right\n inidx = np.all(np.logical_and(ll <= p_img, p_img <= ur), axis=1)\n inbox = p_img[inidx]\n inbox = inbox.astype(np.int)\n\n interest = np.zeros((img_height, img_width))\n interest[inbox[:, 1], inbox[:, 0]] = 1\n interest = scipy.ndimage.morphology.distance_transform_edt(interest-1)\n interest = np.exp(-interest / 30**2)\n interest = (interest - np.min(interest)) / (np.max(interest) - np.min(interest))\n return interest", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def 
interests(self):\n if \"interests\" in self._prop_dict:\n return self._prop_dict[\"interests\"]\n else:\n return None", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.70,0.70]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"(1.+@0)\",Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(1.-@0)\",Afb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rdy,Rqcd\")", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n 
self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. 
+0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. + 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n 
self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n 
self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def doParametersOfInterest(self):\r\n if self.fg4fixed:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0]\")\r\n self.modelBuilder.doVar(\"r[1,0,4]\")\r\n print \"Fixing CMS_zz4l_fg4\"\r\n poi = \"r\"\r\n else:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4\"):\r\n print \"have fg4 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0.,-1,1]\")\r\n poi = \"CMS_zz4l_fg4\"\r\n if self.cPOI:\r\n if self.modelBuilder.out.var(\"cww_zz\"):\r\n print \"have czz_ww inside\"\r\n else:\r\n self.modelBuilder.doVar(\"cww_zz[0.5,-10,10]\")\r\n poi += \",cww_zz\"\r\n\r\n if self.fg2POI:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2\"):\r\n print \"have fg2 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2[0.,0,1]\")\r\n poi += \",CMS_zz4l_fg2\"\r\n if self.muFloating:\r\n self.modelBuilder.doVar(\"r[1,0,2000]\")\r\n if self.muAsPOI:\r\n print \"Treating r as a POI\"\r\n poi += \",r\"\r\n else:\r\n self.modelBuilder.out.var(\"r\").setAttribute(\"flatParam\")\r\n if self.phiFloating:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\"):\r\n print \"have fg4phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-3.1415926,3.1415926]\")\r\n if self.phiPOI:\r\n poi += \",CMS_zz4l_fg4phi\"\r\n else:\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\").setAttribute(\"flatParam\")\r\n if 
self.phi2Floating:\r\n #self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-math.pi,math.pi]\")\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\"):\r\n print \"have fg2phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2phi[0.,-3.1415926,3.1415926]\")\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\").setAttribute(\"flatParam\")\r\n \r\n self.modelBuilder.doSet(\"POI\",poi)", "def simple_interest(p,r,t):\n \n try:\n p, r, t = float(p), float(r), float(t)\n i = (p*r*t)/100\n A = p + i\n A = round(A, 2)\n return i, A\n except Exception as e:\n return e", "def __init__(self, name, value, start_date, end_date, period, interest): \n SavingPlan.__init__(self, name, value, start_date, end_date, period)\n self.interest = interest", "def interests(self, create, extracted, **kwargs):\n if not create:\n return\n if extracted:\n for interest in extracted:\n self.interests.add(interest)", "def open_interest(self, open_interest):\n\n self._open_interest = open_interest", "def __init__(self, principle=0, interest_rate=0.0, year=0):\n\n super().__init__(principle, interest_rate, year)\n self.principle = principle\n self.interest_rate = interest_rate\n self.year = year\n # private variable\n self.__date_of_calc = datetime.datetime.now()\n self.__percentage_interest = self.interest_rate / 100\n self.__months = self.year * 12\n # assert validation for the interest rate\n assert isinstance(interest_rate, float), 'is a not a float'", "def getParameterInfo(self):\n feature_class = arcpy.Parameter(\n name = 'in_features',\n displayName = 'In Features',\n direction = 'Input',\n datatype = 'GPFeatureLayer',\n parameterType = 'Required')\n\n field_mappings = arcpy.Parameter(\n name = 'in_fields',\n displayName = 'In Fields',\n direction = 'Input',\n datatype = 'GPFieldInfo',\n parameterType = 'Required')\n\n field_mappings.parameterDependencies = [feature_class.name]\n\n output_dir = arcpy.Parameter(\n name = 'output_dir',\n displayName = 'Output folder',\n direction = 'Input',\n datatype = 'DEFolder',\n parameterType = 'Required')\n\n output_name = arcpy.Parameter(\n name = 'output_name',\n displayName = 'Output filename',\n direction = 'Input',\n datatype = 'GPString',\n parameterType = 'Required')\n\n convert_4326 = arcpy.Parameter(\n name = 'convert_4326',\n displayName = 'Convert to WGS84?',\n direction = 'Input',\n datatype = 'Boolean',\n parameterType = 'Optional')\n convert_4326.value = 'True'\n\n convert_geojson = arcpy.Parameter(\n name = 'convert_geojson',\n displayName = 'Convert to GeoJSON?',\n direction = 'Input',\n datatype = 'Boolean',\n parameterType = 'Optional')\n convert_geojson.value = 'True'\n\n convert_kmz = arcpy.Parameter(\n name = 'convert_kmz',\n displayName = 'Convert to KMZ?',\n direction = 'Input',\n datatype = 'Boolean',\n parameterType = 'Optional')\n convert_kmz.value = 'True'\n\n convert_csv = arcpy.Parameter(\n name = 'convert_csv',\n displayName = 'Convert to CSV?',\n direction = 'Input',\n datatype = 'Boolean',\n parameterType = 'Optional')\n\n convert_metadata = arcpy.Parameter(\n name = 'convert_metadata',\n displayName = 'Convert metadata to markdown?',\n direction = 'Input',\n datatype = 'Boolean',\n parameterType = 'Optional')\n\n debug = arcpy.Parameter(\n name = 'debug',\n displayName = 'Debug',\n direction = 'Input',\n datatype = 'Boolean',\n parameterType = 'Optional')\n\n return [feature_class, field_mappings, output_dir, output_name,\n convert_4326, convert_geojson, convert_kmz, convert_csv,\n convert_metadata, debug]", "def 
doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def get_point_of_interest(self):\n if self.point_of_interest == \"centroid\":\n return (round(float(self.kf.x[0][0]), 2), round(float(self.kf.x[1][0]), 2))\n elif self.point_of_interest == \"botmid\":\n x1, y1, x2, y2 = convert_x_to_bbox(self.kf.x)[0]\n x = (x2 + x1) / 2\n y = y2\n return (round(float(x), 2), round(float(y), 2))\n elif self.point_of_interest == \"topmid\":\n x1, y1, x2, y2 = convert_x_to_bbox(self.kf.x)[0]\n x = (x2 + x1) / 2\n y = y1\n return (round(float(x), 2), round(float(y), 2))\n else:\n print(\"point of interest not devised\")\n exit()", "def add_interest_points_args(parser: argparse.ArgumentParser):\n # The following arguments characterise the track the user wants to get:\n parser.add_argument(\"waterfall\", help=\"1 if the wanted track should contain a waterfall, 0 otherwise.\", type=int)\n parser.add_argument(\"birding\", help=\"1 if the wanted track is good for bird-lovers, 0 otherwise.\", type=int)\n parser.add_argument(\"river\", help=\"1 if the wanted track should contain a river, 0 otherwise.\", type=int)\n parser.add_argument(\"cave\", help=\"1 if the wanted track should contain a cave, 0 otherwise.\", type=int)\n parser.add_argument(\"lake\", help=\"1 if the wanted track should contain a lake, 0 otherwise.\", type=int)\n parser.add_argument(\"spring\", help=\"1 if the wanted track should contain a spring, 0 otherwise.\", type=int)\n parser.add_argument(\"geo\", help=\"1 if the track should contain a geological interest point, 0 otherwise.\",\n type=int)\n parser.add_argument(\"historic\", help=\"1 if the track should contain a historic interest point, 0 otherwise.\",\n type=int)", "def setup_pair_register():\n register_param = {}\n si = SI.RegisterImagePair()\n register_param['si'] = si\n register_param['model0_name']= 'affine_map'\n register_param['model1_name']= 'svf_vector_momentum_map'\n\n return register_param", "def onInterest(self, prefix, interest, transport, registeredPrefixId):\n interestName = interest.getName()\n\n data = Data(interestName)\n data.setContent(\"Hello, \" + interestName.toUri())\n\n hourMilliseconds = 3600 
* 1000\n data.getMetaInfo().setFreshnessPeriod(hourMilliseconds)\n\n self.keyChain.sign(data, self.keyChain.getDefaultCertificateName())\n\n transport.send(data.wireEncode().toBuffer())\n\n dump(\"Replied to:\", interestName.toUri())", "def pinterest(self, pinterest):\n\n self._pinterest = pinterest", "def updateParameters(self, parameters):\r\n\t\tin_wikiplace_IRI = parameters[0]\r\n\t\tin_location_property = parameters[1]\r\n\t\tin_relation_degree = parameters[2]\r\n\t\tout_location = parameters[3]\r\n\t\tout_points_name = parameters[4]\r\n\t\t\r\n\t\tif in_wikiplace_IRI.value:\r\n\t\t\tinputFeatureClassName = in_wikiplace_IRI.valueAsText\r\n\t\t\tlastIndexOFGDB = inputFeatureClassName.rfind(\"\\\\\")\r\n\t\t\tfeatureClassName = inputFeatureClassName[(lastIndexOFGDB+1):]\r\n\t\t\tcurrentWorkspace = inputFeatureClassName[:lastIndexOFGDB]\r\n\r\n\t\t\tarcpy.env.workspace = currentWorkspace\r\n\t\t\tout_location.value = currentWorkspace\r\n\r\n\t\t\t# get all the IRI from input point feature class of wikidata places\r\n\t\t\tinplaceIRIList = []\r\n\t\t\tcursor = arcpy.SearchCursor(inputFeatureClassName)\r\n\t\t\tfor row in cursor:\r\n\t\t\t\tinplaceIRIList.append(row.getValue(\"URL\"))\r\n\t\t\t\r\n\t\t\t# get all the property URL which are used in the input feature class. their objects are geographic locations which have coordinates, I call them location common properties\r\n\t\t\tlocationCommonPropertyJSONObj = SPARQLQuery.locationCommonPropertyQuery(inplaceIRIList)\r\n\t\t\tlocationCommonPropertyJSON = locationCommonPropertyJSONObj[\"results\"][\"bindings\"]\r\n\r\n\t\t\tLocationPropertyPath.locationCommonPropertyURLList = []\r\n\t\t\tLocationPropertyPath.locationCommonPropertyCountList = []\r\n\t\t\tfor jsonItem in locationCommonPropertyJSON:\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyURLList.append(jsonItem[\"p\"][\"value\"])\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyCountList.append(jsonItem[\"NumofSub\"][\"value\"])\r\n\r\n\t\t\tlocationCommonPropertyCountDict = dict(zip(LocationPropertyPath.locationCommonPropertyURLList, LocationPropertyPath.locationCommonPropertyCountList))\r\n\r\n\t\t\t# get the english label for each location common property\r\n\t\t\tlocationCommonPropertyLabelJSON = SPARQLQuery.locationCommonPropertyLabelQuery(LocationPropertyPath.locationCommonPropertyURLList)\r\n\t\t\t# locationCommonPropertyLabelJSON = locationCommonPropertyLabelJSONObj[\"results\"][\"bindings\"]\r\n\r\n\t\t\t# a dictionary object: key: propertyNameCount, value: propertyURL\r\n\t\t\tLocationPropertyPath.locationCommonPropertyDict = dict()\r\n\t\t\tLocationPropertyPath.locationCommonPropertyNameCountList = []\r\n\t\t\tLocationPropertyPath.locationCommonPropertyURLList = []\r\n\t\t\tLocationPropertyPath.locationCommonPropertyCountList = []\r\n\r\n\t\t\tfor jsonItem in locationCommonPropertyLabelJSON:\r\n\t\t\t\tpropertyURL = jsonItem[\"p\"][\"value\"]\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyURLList.append(propertyURL)\r\n\r\n\t\t\t\tpropertyName = jsonItem[\"propertyLabel\"][\"value\"]\r\n\r\n\t\t\t\tpropertyCount = locationCommonPropertyCountDict[propertyURL]\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyCountList.append(propertyCount)\r\n\r\n\t\t\t\tpropertyNameCount = propertyName + \"(\" + propertyCount + \")\"\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyNameCountList.append(propertyNameCount)\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyDict[propertyNameCount] = propertyURL\r\n\r\n\t\t\tin_location_property.filter.list = 
LocationPropertyPath.locationCommonPropertyNameCountList\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif in_location_property.value and in_relation_degree.value and out_points_name.valueAsText == None:\r\n\t\t\t\tpropertyName = in_location_property.valueAsText\r\n\t\t\t\trelationdegree = in_relation_degree.valueAsText\r\n\r\n\t\t\t\tlastIndex = propertyName.rfind(\"(\")\r\n\t\t\t\tpropertyName = propertyName[:lastIndex]\r\n\r\n\t\t\t\tpropertyName = propertyName.replace(\" \", \"_\")\r\n\r\n\t\t\t\tif featureClassName.endswith(\".shp\"):\r\n\t\t\t\t\tlastIndex = featureClassName.rfind(\".\")\r\n\t\t\t\t\tfeatureClassNameNoShp = featureClassName[:lastIndex]\r\n\t\t\t\t\tout_points_name.value = featureClassNameNoShp + \"_D\" + relationdegree + \"_\" + propertyName + \".shp\"\r\n\t\t\t\telse:\r\n\t\t\t\t\tout_points_name.value = featureClassName + \"_D\" + relationdegree + \"_\" + propertyName\r\n\r\n\r\n\t\t\t\tif arcpy.Exists(out_points_name.valueAsText):\r\n\t\t\t\t\tarcpy.AddError(\"The output feature class name already exists in current workspace!\")\r\n\t\t\t\t\traise arcpy.ExecuteError\r\n\r\n\t\t\t\r\n\r\n\r\n\t\t\tif in_relation_degree.value:\r\n\t\t\t\trelationDegree = int(in_relation_degree.valueAsText)\r\n\t\t\t\tif relationDegree > 4:\r\n\t\t\t\t\tin_relation_degree.value = 4\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\r\n\r\n\t\treturn", "def add_params(traj):\n\n # We set the BrianParameter to be the standard parameter\n traj.v_standard_parameter=Brian2Parameter\n traj.v_fast_access=True\n\n # Add parameters we need for our network\n traj.f_add_parameter('Net.C',281*pF)\n traj.f_add_parameter('Net.gL',30*nS)\n traj.f_add_parameter('Net.EL',-70.6*mV)\n traj.f_add_parameter('Net.VT',-50.4*mV)\n traj.f_add_parameter('Net.DeltaT',2*mV)\n traj.f_add_parameter('Net.tauw',40*ms)\n traj.f_add_parameter('Net.a',4*nS)\n traj.f_add_parameter('Net.b',0.08*nA)\n traj.f_add_parameter('Net.I',.8*nA)\n traj.f_add_parameter('Net.Vcut','vm > 0*mV') # practical threshold condition\n traj.f_add_parameter('Net.N',50)\n\n eqs='''\n dvm/dt=(gL*(EL-vm)+gL*DeltaT*exp((vm-VT)/DeltaT)+I-w)/C : volt\n dw/dt=(a*(vm-EL)-w)/tauw : amp\n Vr:volt\n '''\n traj.f_add_parameter('Net.eqs', eqs)\n traj.f_add_parameter('reset', 'vm=Vr;w+=b')", "def __init__(self, instrumentsNAmounts, totalInvest):\n # add all necessary attributes here which link to portfolio and\n # will be tracked.\n self.portfolio = instrumentsNAmounts\n self.totalInvest = totalInvest", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[1,0,100]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)\", mu)')", "def runNN(selectables_interests=[[0.0, 0.1, 0.6, 0.7], [0.0, 0.5, 0.6, 1]], profile_interests = [0.0, 0.1, 0.2, 0.3]):\n #Intialise a single neuron neural network.\n # Apply vertical normalisation for the pageobject interests\n if len(selectables_interests)>0:\n selectables_interests = normalize(selectables_interests, axis=0, norm='l1')\n else:\n # If there is nothing to compare it to, return the current profile.\n return profile_interests\n # For the given size of the first element in the array (number of interests) add a dimension of such size\n Interest_Array_Inputs = [[] for i in range(len(selectables_interests[0]))]\n Interest_Array_Outputs = [[] for i in 
range(len(selectables_interests[0]))]\n # Creating instances of 3 dimensional Inputs/Outputs sets\n for ida, ll in enumerate(selectables_interests):\n # For the number of permutations\n current_fuzzy_element_permutation = fuzzy_permutation(ll)\n for index in range(len(current_fuzzy_element_permutation[0])):\n # Insert array inside column Interest for the input, output.\n Interest_Array_Inputs[current_fuzzy_element_permutation[0][index][0]].append(current_fuzzy_element_permutation[0][index][1][:])\n Interest_Array_Outputs[current_fuzzy_element_permutation[1][index][0]].append([current_fuzzy_element_permutation[1][index][1]])\n profile_Interest_Inputs = []\n # Creating the list of inputs (Leaving out an interest to be predicted)\n for index, inputList in enumerate(fuzzy_permutation(profile_interests)[0]):\n profile_Interest_Inputs.append(inputList[1])\n\n collected_interests = []\n # Training on each individual interest\n for i in range(len(profile_interests)):\n collected_interests.append(initialise_train_result(Interest_Array_Inputs[i], Interest_Array_Outputs[i], profile_Interest_Inputs[i]))\n well_ordered_interests = collected_interests[::-1]\n # Returns the outcoming interests.\n return well_ordered_interests" ]
[ "0.5597302", "0.55475473", "0.5455908", "0.53832096", "0.5357705", "0.5349089", "0.5325157", "0.5322769", "0.52929866", "0.5284521", "0.5183548", "0.5170347", "0.5084414", "0.49903712", "0.49777502", "0.4952482", "0.4888492", "0.48553658", "0.4845688", "0.4829843", "0.48280817", "0.4807605", "0.47908762", "0.47782326", "0.47756818", "0.47617537", "0.47614208", "0.47272325", "0.47029915", "0.4691032" ]
0.756182
0
Extend to encode random f and cr values onto each member of the population.
def __init__(self, *args, **kwargs): super(jDE, self).__init__(*args, **kwargs) for i in range(self.population.size): self.population.members[i].f = 0.1 + 0.9 * numpy.random.rand() self.population.members[i].cr = numpy.random.rand()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_values(self):", "def _generate_raw_environments(self, num, seed):", "def rand(self):\n raise NotImplementedError", "def mutate(self):\n\n if len(self.genes) < 250:\n for g in self.genes:\n\n if MUTATION_CHANCE < random.random(): # random.random() gives float in [0,1)\n g.mutate()\n\n else:\n k = int(MUTATION_CHANCE*len(self.genes))\n for g in random.sample(self.genes,int(k)): #int(k)\n g.mutate()\n\n #To add random gene\n if ADD_GENE_CHANCE < random.random():\n self.genes.append(Gene(self.size)) #Call to Gene to add to genes list\n\n #To randomly remove genes\n\n if REM_GENE_CHANCE < random.random() and len(self.genes)>0:\n self.genes.remove(random.choice(self.genes))", "def setRandom(self):\n pass # define each VarElement family", "def _seed_population(self):\n return [self._generate_weights() for x in range(self.population_size)]", "def __init__(self,outerPPRF):\n self.outerPPRF = outerPPRF\n\n self.g = random.randint(0,self.outerPPRF.N-1)", "def create_individual(self):\n self.genes = np.random.rand(self.chromosome_size)", "def generate_random_individual():\n genotype = []\n ### Your code here\n return {'genotype': genotype, 'fitness': None }", "def testEncoder(self):\n params = copy.copy(self.typical_instance)\n params.prob_f = 0.5\n params.prob_p = 0.5\n params.prob_q = 0.75\n\n rand_funcs = rappor.SimpleRandFuncs(params, MockRandom())\n rand_funcs.cohort_rand_fn = lambda a, b: a\n e = rappor.Encoder(params, 0, rand_funcs=rand_funcs)\n\n cohort, bloom_bits_irr = e.encode(\"abc\")\n\n self.assertEquals(0, cohort)\n self.assertEquals(0x000ffff, bloom_bits_irr)", "def test_random_with_custom_encoder(self):\n\n channel_count = 10\n gen = random_data(encoder=CustomEncoder(),\n channel_count=channel_count)\n\n data = [next(gen) for _ in range(100)]\n\n self.assertEqual(len(data), 100)\n for _count, record in data:\n self.assertEqual(len(record), channel_count)\n\n self.assertEqual(data[0][0], 1)\n self.assertEqual(data[99][0], 100)", "def randomize_value(self) -> None:", "def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. 
\"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))", "def selectNextGeneration(self, *args, **kwargs):\n super(jDE, self).selectNextGeneration(*args, **kwargs)\n self.f = numpy.mean([member.f for member in self.population.members])\n self.cr = numpy.mean([member.cr for member in self.population.members])", "def _build_genotypes(self):\n x = np.zeros(self.n)\n \n # Frequencies derived from HWE.\n num_hetero = 2 * self.maf * (1 - self.maf) * self.n\n num_homo_minor = self.maf ** 2 * self.n\n \n x[:num_hetero] = 1\n x[num_hetero:num_hetero+num_homo_minor] = 2\n np.random.shuffle(x)\n \n # Add noise for dosage values if needed.\n if self.dosage_var:\n x[x == 0] += np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 0]))\n )\n x[x == 1] += np.random.normal(0, self.dosage_var, len(x[x == 1]))\n x[x == 2] -= np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 2]))\n )\n\n # Mask some values if the call rate is not 1.\n if self.call_rate < 1:\n missing_rate = 1 - self.call_rate\n missing_number = missing_rate * self.n\n missing_idx = np.arange(0, self.n)\n np.random.shuffle(missing_idx)\n missing_idx = missing_idx[:missing_number]\n x[missing_idx] = np.nan\n \n return x", "def rand(self):\n raise NotImplementedError(\"Not implemented yet.\")", "def init_population(self):\n for idx in xrange(0, self.population_size):\n individual = self.individual_factory.create()\n self.population.append(individual)\n\n self.population_fitness = numpy.asarray(map(lambda individual: individual.get_fitness(), self.population))\n\n # In order to roulette wheel selection work with negative values, \n # we sum all fitness values to the absolute value of the most negative plus one\n most_negative = self.population_fitness.min()\n self.normalized_fitness = numpy.asarray(map(lambda fitness: 1/math.pow(fitness+numpy.absolute(most_negative)+1, 1), self.population_fitness))\n s = float(self.normalized_fitness.sum())\n self.normalized_fitness = numpy.asarray(map(lambda fitness: fitness/s, self.normalized_fitness))\n #print self.population_fitness.min()\n #print self.population_fitness\n #print self.normalized_fitness", "def create_individual(self):\n self.genes = np.random.rand(self.chromosome_size)\n self.personal_best = self.genes.copy", "def _prepare(self):\n for n in range(4):\n self._code += str(random.randint(1, 9))", "def totem_random():\n random_head()\n random_head()\n random_head()", "def experiment4():\n np.random.seed()\n state['result'] = np.random.rand(1)", "def generate(self):", "def randomize(self):\n self.size = randint(1,5)\n self.resource = randint(1,3)\n self.temperature = randint(20, 1000)\n self.gravity = randint(0, 10)\n for key in self.get_atmosphere().keys():\n setattr(self, key, randint(0, 5))\n for attribute_count in range(randint(0, 3)):\n pa = PlanetaryAttribute.objects.order_by('?')[0]\n self.attributes.add(pa)", "def randomValue(self, _lstDomain):\n # for each gene in the chromosome...\n for g in self.genes:\n # choose a random value for the gene\n g.value = g.randomValue()", "def genPopulation(self):\r\n self.population_list = []\r\n for i in xrange(0, self.pop_size):\r\n individual = 
bitarray(self.indv_size)\r\n # Loop for randomizing the 'individual' string.\r\n for j in xrange(0, self.board_size):\r\n vert_pos = random.randint(0, self.board_size-1)\r\n vert_pos_bitnum = toBitArray(vert_pos, self.pos_bits_size)\r\n # print \"\\t\\t\", j, vert_pos_bitnum, vert_pos\r\n for k in range(0, self.pos_bits_size):\r\n individual[j * self.pos_bits_size + k] = vert_pos_bitnum[k]\r\n self.population_list.append(individual)\r\n # print \"\\t\", i, individual\r", "def breed(self): \n while (len(self.population) <= self.pop_size):\n orga = random.choice(self.population)\n orgb = random.choice(self.population) # Asexualism works too :-p\n self.population.append(orga.breed(orgb)) # Add a new organism", "def _gen_pert(self, count, **kwargs):\n self._check_pert(**kwargs)\n pert = FairBetaPert(**kwargs)\n rvs = pert.random_variates(count)\n return rvs", "def generate_characteristics(self):\n self.__generate_genotype()\n self.__generate_fenotype()\n self.__calculate_fitness()\n \n return self", "def clone_rand(self):", "def setRandomInputs(self):\n prime_ips = [io[0] for io in self.listPrimeIos() if (io[1] == 'i' and (io[0] not in ['VCC', 'GND']))]\n bit_str = self.__randomStringGen(len(prime_ips))\n\n for idx, ip in enumerate(prime_ips):\n self.dGrph[ip][1] = int(bit_str[idx])" ]
[ "0.582015", "0.5631153", "0.55910796", "0.5561565", "0.553103", "0.5508012", "0.54818636", "0.54791534", "0.5465223", "0.5457774", "0.5436654", "0.5425668", "0.5397977", "0.53881395", "0.5383666", "0.53655213", "0.5344523", "0.5330343", "0.52958024", "0.5286964", "0.5282226", "0.52771795", "0.5271391", "0.52504617", "0.5238351", "0.52304703", "0.52283347", "0.52165776", "0.5207312", "0.5202901" ]
0.63107413
0
Profiles a given cuda application using the command provided. The profile data is returned as a dict of kernels with their metrics and the data for each call.
def ProfileApp(command): logging.info("Command to profile: {0}".format(" ".join(command))) kernelMetrics = dict() # get execution time first because for whatever reason # we need a different nvprof command # build the profiling command profileCommand = ["nvprof", "--print-gpu-trace", "--csv"] profileCommand.extend(command) logging.info("nvprof command: {0}".format(" ".join(profileCommand))) # setup so we capture stdout and stderr and run the command pipes = subprocess.Popen(profileCommand, stdout=subprocess.PIPE, stderr=subprocess.PIPE) std_out, std_err = pipes.communicate() if (pipes.returncode != 0): print("Error executing command {0}, return code {1}, exiting...".format(profileCommand, pipes.returncode)) exit(1) logging.debug("nvprof output") logging.debug("{0}".format(std_err.decode())) # take our output and store it in our dictionary of metrics processNvprofCSV(std_err.decode().splitlines(), kernelMetrics) for metric in nvMetricNames: std_out = "" std_err = "" # build the profiling command profileCommand = ["nvprof", "--metrics", metric, "--print-gpu-trace", "--csv"] profileCommand.extend(command) logging.info("nvprof command: {0}".format(" ".join(profileCommand))) # setup so we capture stdout and stderr and run the command pipes = subprocess.Popen(profileCommand, stdout=subprocess.PIPE, stderr=subprocess.PIPE) std_out, std_err = pipes.communicate() if (pipes.returncode != 0): print("Error executing command {0}, return code {1}, exiting...".format(profileCommand, pipes.returncode)) exit(1) # take our output and store it in our dictionary of metrics processNvprofCSV(std_err.decode().splitlines(), kernelMetrics) return kernelMetrics
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _run_profile(self, profile: Tuple[str, List[int], int, float]) -> List[int]:\n # pylint: disable=unused-variable\n name, command, signals, delay = profile\n # pylint: enable=unused-variable\n\n # print(\"\\trunning profile: %s, command %s, %d, delay %0.02f\" %\n # (name, [\"0x%02x\" % i for i in command], signals, delay))\n return self._i2c_read_words_from_cmd(command, delay, signals)", "def profile(args):\n\n if not args.first_batches_to_skip < args.max_batch_num:\n raise ValueError(\"arg 'first_batches_to_skip' must be smaller than \"\n \"'max_batch_num'.\")\n if not args.first_batches_to_skip >= 0:\n raise ValueError(\n \"arg 'first_batches_to_skip' must not be smaller than 0.\")\n\n _, avg_cost, accuracy = stacked_lstmp_model(\n frame_dim=args.frame_dim,\n hidden_dim=args.hidden_dim,\n proj_dim=args.proj_dim,\n stacked_num=args.stacked_num,\n class_num=args.class_num,\n parallel=args.parallel)\n\n optimizer = fluid.optimizer.Adam(\n learning_rate=fluid.layers.exponential_decay(\n learning_rate=args.learning_rate,\n decay_steps=1879,\n decay_rate=1 / 1.2,\n staircase=True))\n optimizer.minimize(avg_cost)\n\n place = fluid.CPUPlace() if args.device == 'CPU' else fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n\n ltrans = [\n trans_add_delta.TransAddDelta(2, 2),\n trans_mean_variance_norm.TransMeanVarianceNorm(args.mean_var),\n trans_splice.TransSplice(5, 5), trans_delay.TransDelay(5)\n ]\n\n data_reader = reader.AsyncDataReader(\n args.feature_lst, args.label_lst, -1, split_sentence_threshold=1024)\n data_reader.set_transformers(ltrans)\n\n feature_t = fluid.LoDTensor()\n label_t = fluid.LoDTensor()\n\n sorted_key = None if args.sorted_key is 'None' else args.sorted_key\n with profiler.profiler(args.device, sorted_key) as prof:\n frames_seen, start_time = 0, 0.0\n for batch_id, batch_data in enumerate(\n data_reader.batch_iterator(args.batch_size,\n args.minimum_batch_size)):\n if batch_id >= args.max_batch_num:\n break\n if args.first_batches_to_skip == batch_id:\n profiler.reset_profiler()\n start_time = time.time()\n frames_seen = 0\n # load_data\n (features, labels, lod, _) = batch_data\n features = np.reshape(features, (-1, 11, 3, args.frame_dim))\n features = np.transpose(features, (0, 2, 1, 3))\n feature_t.set(features, place)\n feature_t.set_lod([lod])\n label_t.set(labels, place)\n label_t.set_lod([lod])\n\n frames_seen += lod[-1]\n\n outs = exe.run(fluid.default_main_program(),\n feed={\"feature\": feature_t,\n \"label\": label_t},\n fetch_list=[avg_cost, accuracy]\n if args.print_train_acc else [],\n return_numpy=False)\n\n if args.print_train_acc:\n print(\"Batch %d acc: %f\" %\n (batch_id, lodtensor_to_ndarray(outs[1])[0]))\n else:\n sys.stdout.write('.')\n sys.stdout.flush()\n time_consumed = time.time() - start_time\n frames_per_sec = frames_seen / time_consumed\n print(\"\\nTime consumed: %f s, performance: %f frames/s.\" %\n (time_consumed, frames_per_sec))", "def profile_command(func, args, kwargs, func_results):\n (db, command) = args[:2]\n report_kvs = {}\n\n _add_connection_info(report_kvs, db)\n if not isinstance(command, basestring):\n command = _to_json(command)\n\n report_kvs['Command'] = command\n\n return report_kvs", "def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi --query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n 
cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode (or Prohibited), one context is allowed per device. This may prevent some openmmtools features from working. GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")", "def profile(profileOutputFile=None, dotOutputFile=None, imageOutputFile=None):\n\n try:\n __import__(\"gobject\")\n from thirdparty.gprof2dot import gprof2dot\n from thirdparty.xdot import xdot\n import gtk\n import pydot\n except ImportError as ex:\n errMsg = \"profiling requires third-party libraries ('%s') \" % getSafeExString(ex)\n errMsg += \"(Hint: 'sudo apt-get install python-pydot python-pyparsing python-profiler graphviz')\"\n logger.error(errMsg)\n\n return\n\n if profileOutputFile is None:\n profileOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.raw\")\n\n if dotOutputFile is None:\n dotOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.dot\")\n\n if imageOutputFile is None:\n imageOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.png\")\n\n if os.path.exists(profileOutputFile):\n os.remove(profileOutputFile)\n\n if os.path.exists(dotOutputFile):\n os.remove(dotOutputFile)\n\n if os.path.exists(imageOutputFile):\n os.remove(imageOutputFile)\n\n infoMsg = \"profiling the execution into file '%s'\" % profileOutputFile\n logger.info(infoMsg)\n\n # Start sqlmap main function and generate a raw profile file\n cProfile.run(\"start()\", profileOutputFile)\n\n infoMsg = \"converting profile data into a dot file '%s'\" % dotOutputFile\n logger.info(infoMsg)\n\n # Create dot file by using extra/gprof2dot/gprof2dot.py\n # http://code.google.com/p/jrfonseca/wiki/Gprof2Dot\n dotFilePointer = codecs.open(dotOutputFile, 'wt', UNICODE_ENCODING)\n parser = gprof2dot.PstatsParser(profileOutputFile)\n profile = parser.parse()\n profile.prune(0.5 / 100.0, 0.1 / 100.0)\n dot = gprof2dot.DotWriter(dotFilePointer)\n dot.graph(profile, gprof2dot.TEMPERATURE_COLORMAP)\n dotFilePointer.close()\n\n infoMsg = \"converting dot file into a graph image '%s'\" % imageOutputFile\n logger.info(infoMsg)\n\n # Create graph image (png) by using pydot (python-pydot)\n # http://code.google.com/p/pydot/\n pydotGraph = pydot.graph_from_dot_file(dotOutputFile)\n\n # Reference: http://stackoverflow.com/questions/38176472/graph-write-pdfiris-pdf-attributeerror-list-object-has-no-attribute-writ\n if isinstance(pydotGraph, list):\n pydotGraph = pydotGraph[0]\n\n try:\n pydotGraph.write_png(imageOutputFile)\n except OSError:\n errMsg = \"profiling requires graphviz installed \"\n errMsg += \"(Hint: 'sudo apt-get install graphviz')\"\n logger.error(errMsg)\n else:\n infoMsg = \"displaying interactive graph with xdot library\"\n logger.info(infoMsg)\n\n # Display interactive Graphviz dot file by using extra/xdot/xdot.py\n # http://code.google.com/p/jrfonseca/wiki/XDot\n win = xdot.DotWindow()\n win.connect('destroy', gtk.main_quit)\n win.set_filter(\"dot\")\n win.open_file(dotOutputFile)\n gtk.main()", "def test_profile_commands(self):\n with DockerHost('host', start_calico=False) as host:\n\n 
host.calicoctl(\"profile add TEST_PROFILE\")\n\n json_dict = {\"id\": \"TEST_PROFILE\",\n \"inbound_rules\": [\n {\"action\": \"allow\",\n \"src_tag\": \"TEST_PROFILE\"},\n {\"action\": \"deny\"}\n ],\n \"outbound_rules\": [{\"action\": \"deny\",\n \"dst_net\": \"192.168.0.0/16\"},\n {\n \"action\": \"allow\"\n }]}\n\n update = json.dumps(json_dict)\n cmd = \"/code/dist/calicoctl profile TEST_PROFILE rule update\"\n host.execute(\"echo '%s' | %s\" % (update, cmd))\n\n self.assertIn('1 deny',\n host.calicoctl(\"profile TEST_PROFILE rule show\"))\n\n result = host.calicoctl(\"profile TEST_PROFILE rule json\")\n rules = json.loads(result)\n self.assertDictEqual(rules, json_dict)\n\n # Test that adding and removing a tag works.\n self.assertNotIn(\"TEST_TAG\", self.show_tag(host))\n host.calicoctl(\"profile TEST_PROFILE tag add TEST_TAG\")\n self.assertIn(\"TEST_TAG\", self.show_tag(host))\n host.calicoctl(\"profile TEST_PROFILE tag remove TEST_TAG\")\n self.assertNotIn(\"TEST_TAG\", self.show_tag(host))", "def test_gpu_cuda_code() -> None:\n if get_from_environ(\"DISABLE_GPU_FOR_TESTING\") is not None:\n print(\"GPU payload disabled for testing\")\n return\n\n # if the command exists it can run on the hardware below\n proc = subprocess.Popen([\"nvidia-smi\"], stdout=subprocess.PIPE)\n stdout, _ = proc.communicate()\n str_stdout = stdout.decode()\n assert \"NVIDIA-SMI\" in str_stdout, str_stdout\n assert proc.returncode == 0\n # search the history for the CUDA implementation", "def auto_model_profiling(model_info, server_name, device_util_thd=0.01, device_memory_thd=0.01, period=10):\n\n different_kind_devices = collections.OrderedDict()\n for gpu in GPUtil.getGPUs():\n if gpu.name not in different_kind_devices:\n different_kind_devices[gpu.name] = gpu\n\n for device in list(different_kind_devices.values()):\n profiler = Profiler(model_info=model_info, server_name=server_name)\n monitor = UtilMonitor(device, profiler, period, device_util_thd, device_memory_thd)\n monitor.start()", "def cli(ctx: click.Context,\n experiment: str,\n devices: List[int],\n ) -> None:\n f = EXPERIMENTS[experiment]\n try:\n model, B, C, _devices = f(devices)\n except ValueError as exc:\n # Examples:\n # ValueError: too few devices to hold given partitions (devices: 1, paritions: 2)\n ctx.fail(str(exc))\n\n optimizer = SGD(model.parameters(), lr=0.1)\n\n in_device = _devices[0]\n out_device = _devices[-1]\n torch.cuda.set_device(in_device)\n\n input = torch.rand(32, 3, 192, 192, device=in_device)\n target = torch.rand(32, 1, 192, 192, device=out_device)\n\n # HEADER ======================================================================================\n\n title = f'{experiment}, U-Net ({B}, {C})'\n click.echo(title)\n\n if isinstance(model, GPipe):\n click.echo(f'balance: {model.balance}')\n\n click.echo('torchgpipe: %s, python: %s, torch: %s, cudnn: %s, cuda: %s, gpu: %s' % (\n torchgpipe.__version__,\n platform.python_version(),\n torch.__version__,\n torch.backends.cudnn.version(),\n torch.version.cuda,\n torch.cuda.get_device_name(in_device)))\n\n hr()\n\n # PARAMETERS ==================================================================================\n\n param_count = sum(p.storage().size() for p in model.parameters())\n param_size = sum(p.storage().size() * p.storage().element_size() for p in model.parameters())\n param_scale = 2 # param + grad\n\n click.echo(f'# of Model Parameters: {param_count:,}')\n click.echo(f'Total Model Parameter Memory: {param_size*param_scale:,} Bytes')\n\n # ACTIVATIONS 
=================================================================================\n\n try:\n torch.cuda.empty_cache()\n for d in _devices:\n torch.cuda.reset_max_memory_cached(d)\n\n for _ in range(2):\n output = model(input)\n output = cast(Tensor, output)\n loss = F.binary_cross_entropy_with_logits(output, target)\n loss.backward()\n optimizer.step()\n\n max_memory = 0\n for d in _devices:\n torch.cuda.synchronize(d)\n max_memory += torch.cuda.max_memory_cached(d)\n\n latent_size = max_memory - param_size*param_scale\n click.echo(f'Peak Activation Memory: {latent_size:,} Bytes')\n click.echo(f'Total Memory: {max_memory:,} Bytes')\n\n # MAX MEMORY PER DEVICE =======================================================================\n\n finally:\n hr()\n\n for d in _devices:\n memory_usage = torch.cuda.memory_cached(d)\n click.echo(f'{d!s}: {memory_usage:,} Bytes')", "def _run(self, command):\n # print command\n if self._profile:\n result = subprocess.Popen(command, shell=True, stdout=PIPE).stdout.read().splitlines()\n self._hash = result[0]\n cpu = result[1].split(':')[1].strip()\n wall = result[2].split(':')[1].strip()\n return {'digest': self._hash, 'cpu': cpu, 'wall': wall}\n\n self._hash = subprocess.Popen(command, shell=True, stdout=PIPE).stdout.read().strip()\n return self._hash", "def _profile_module(self):\n with open(self._run_object, 'r') as srcfile:\n src_code = srcfile.read()\n code = compile(src_code, self._run_object, 'exec')\n try:\n with _CodeHeatmapCalculator() as prof:\n exec(code, self._globs, None)\n except SystemExit:\n pass\n\n heatmaps = []\n for filename, heatmap in prof.heatmap.items():\n if os.path.isfile(filename):\n heatmaps.append(\n self._format_heatmap(\n filename, heatmap, prof.execution_count[filename]))\n\n run_time = sum(heatmap['runTime'] for heatmap in heatmaps)\n return {\n 'objectName': self._run_object,\n 'runTime': run_time,\n 'heatmaps': heatmaps\n }", "def get_profile_op_info():\r\n profiler_dir = get_profiler_dir(request)\r\n train_id = get_train_id(request)\r\n if not profiler_dir or not train_id:\r\n raise ParamValueError(\"No profiler_dir or train_id.\")\r\n\r\n search_condition = request.stream.read()\r\n try:\r\n search_condition = json.loads(search_condition if search_condition else \"{}\")\r\n except Exception:\r\n raise ParamValueError(\"Json data parse failed.\")\r\n validate_condition(search_condition)\r\n\r\n device_id = search_condition.get(\"device_id\", \"0\")\r\n profiler_dir_abs = os.path.join(settings.SUMMARY_BASE_DIR, train_id, profiler_dir)\r\n try:\r\n profiler_dir_abs = validate_and_normalize_path(profiler_dir_abs, \"profiler\")\r\n except ValidationError:\r\n raise ParamValueError(\"Invalid profiler dir\")\r\n\r\n op_type = search_condition.get(\"op_type\")\r\n\r\n analyser = AnalyserFactory.instance().get_analyser(\r\n op_type, profiler_dir_abs, device_id\r\n )\r\n\r\n op_info = analyser.query(search_condition)\r\n return jsonify(op_info)", "def cuda_info() -> str:\n\n def _cuda_devices_formatting(\n info_function: typing.Callable,\n formatting_function: typing.Callable = None,\n mapping_function: typing.Callable = None,\n ):\n def _setup_default(function):\n return (lambda arg: arg) if function is None else function\n\n formatting_function = _setup_default(formatting_function)\n mapping_function = _setup_default(mapping_function)\n\n return \" | \".join(\n mapping_function(\n [\n formatting_function(info_function(i))\n for i in range(torch.cuda.device_count())\n ]\n )\n )\n\n def _device_properties(attribute):\n return 
_cuda_devices_formatting(\n lambda i: getattr(torch.cuda.get_device_properties(i), attribute),\n mapping_function=lambda in_bytes: map(str, in_bytes),\n )\n\n cuda_cap = _cuda_devices_formatting(\n torch.cuda.get_device_capability,\n formatting_function=lambda capabilities: \".\".join(map(str, capabilities)),\n )\n return \"\\n\".join(\n [\n f\"Available CUDA devices count: {torch.cuda.device_count()}\",\n f\"CUDA devices names: {_cuda_devices_formatting(torch.cuda.get_device_name)}\",\n f\"Major.Minor CUDA capabilities of devices: {cuda_cap}\",\n f\"Device total memory (bytes): {_device_properties('total_memory')}\",\n f\"Device multiprocessor count: {_device_properties('multi_processor_count')}\",\n ]\n )", "def profiler(self):\r\n\r\n class Task(object):\r\n \"Private class to nicely wrap up the profile data\"\r\n def __init__(self, block, addr):\r\n self.block = block\r\n self.addr = addr\r\n self.name = None\r\n def tidy(self, sym):\r\n self.name = sym.varfind(self.addr).name\r\n self.CPU_FRAC = sym.constfind(\"$profiler.CPU_FRACTION_FIELD\").value\r\n def __repr__(self):\r\n if self.name is None:\r\n raise Exception(\"Need to call the tidy method before using\")\r\n return \"%-50s - %2.1f %%\" % (self.name, self.block[self.CPU_FRAC]/1000)\r\n\r\n\r\n # get the head of the list and a couple of constants\r\n head = self._core.sym.varfind(\"$profiler.last_addr\").addr\r\n NULL = self._core.sym.constfind(\"$profiler.LAST_ENTRY\").value\r\n SIZE = self._core.sym.constfind(\"$profiler.STRUC_SIZE\").value\r\n NEXT = self._core.sym.constfind(\"$profiler.NEXT_ADDR_FIELD\").value\r\n\r\n # get the first address\r\n curr = self._core.dm[head]\r\n\r\n # read all the structures off the chip as fast as we can\r\n tasks = []\r\n while curr != NULL:\r\n block = self._core.dm[curr:(curr+SIZE)]\r\n tasks.append(self.Task(block, curr))\r\n curr = block[NEXT]\r\n\r\n # now fill in the other bits\r\n for t in tasks:\r\n t.tidy(self._core.sym)\r\n\r\n # finally return\r\n return tasks", "def GetGPU():\n return option['device_id']", "def evaluate_cuda(self, cuda_threads_per_block=_default_cuda_threads_per_block, profiling_iterations=None):\n if not cuda_enabled:\n raise RuntimeError('CUDA is not enabled')\n\n # get the CUDA test function from it's .so (compiles if necessary)\n lib_path = Operator._make_generic_cuda(self.op_cuda_generic, self.op_name).encode('utf-8')\n fcn_name = (self.op_name + '_generic_cuda').encode('utf-8')\n self._define_eval_params(lib_path, fcn_name)\n\n num_inputs = len(self._input_types)\n num_outputs = len(self.output_types)\n\n if profiling_iterations is None:\n iters = 1\n else:\n if not isinstance(profiling_iterations, int) or profiling_iterations < 1:\n raise ValueError('Profiling iterations must be a positive int, but received: ' +\n str(profiling_iterations))\n iters = profiling_iterations\n\n eval_times_ms = np.empty(iters, dtype=np.float64)\n eval_times_ms[:] = np.nan\n\n # lazily compile testcudaop.cc\n if self._test_cuda_op is None:\n testlib_path = os.path.join(cache_directory, 'libtestcudaop.so.'+version)\n try:\n libtest = ctypes.cdll.LoadLibrary(testlib_path)\n except OSError:\n Operator._check_proto()\n this_file_path = os.path.abspath(__file__)\n this_directory = os.path.split(this_file_path)[0]\n\n # build the test framework library\n cc_path = os.path.join(this_directory, 'testcudaop.cc')\n o_path = os.path.join(cache_directory, 'testcudaop.o')\n nvcc_path = os.path.join(cuda_directory, 'bin/nvcc')\n try:\n subprocess.check_output([nvcc_path, '-O3', 
'--relocatable-device-code=true',\n '-x', 'cu', '--compile', '-Xcompiler',\n '-fPIC', '-std=c++11',\n '-I'+this_directory,\n '-I'+cache_directory,\n cc_path, '-o', o_path],\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n\n # relocatable device code has to be defined when linking in addition\n # to compiling. g++ has no concept of this, so we have to do an extra\n # device code link step with a dummy link file\n linko_path = os.path.join(cache_directory, 'link.o')\n subprocess.check_output([nvcc_path, '-dlink', '-Xcompiler', '-fPIC',\n '-o', linko_path, o_path],\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n subprocess.check_output(['g++', '-shared',\n '-o', testlib_path, o_path, linko_path],\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n except subprocess.CalledProcessError as exception:\n tf.logging.log(tf.logging.ERROR, 'nvcc error: ' + exception.output)\n raise\n\n # clean up .o files\n subprocess.call(['rm', o_path, linko_path])\n\n libtest = ctypes.cdll.LoadLibrary(testlib_path)\n\n self._test_cuda_op = libtest.testCUDAOperator\n self._test_cuda_op.restype = ctypes.c_int16\n self._test_cuda_op.argtypes = \\\n [ctypes.c_char_p, ctypes.c_char_p,\n ndpointer(dtype=_TensorParam, flags=\"C_CONTIGUOUS\"), ctypes.c_size_t,\n ndpointer(dtype=_TensorParam, flags=\"C_CONTIGUOUS\"), ctypes.c_size_t,\n ctypes.c_uint16,\n ndpointer(dtype=ctypes.c_double, flags=\"C_CONTIGUOUS\"), ctypes.c_size_t]\n\n err = self._test_cuda_op(lib_path, fcn_name,\n self._input_params, ctypes.c_size_t(num_inputs),\n self._output_params, ctypes.c_size_t(num_outputs),\n ctypes.c_uint16(cuda_threads_per_block),\n eval_times_ms, ctypes.c_size_t(iters))\n\n if err != 0 or np.isnan(eval_times_ms).any():\n tf.logging.log(tf.logging.ERROR, 'Test CUDA operator failed for Op ' + self.__class__.__name__)\n raise ValueError('Test CUDA operator failed for Op ' + self.__class__.__name__)\n\n if profiling_iterations is None:\n return Operator._unwrap_single(self._output_buffers)\n else:\n return Operator._unwrap_single(self._output_buffers), eval_times_ms", "def collect_cuda_device(cuda_id: int, mb_size: int = None) -> torch.Tensor:\r\n logger.info(f'Collect CUDA {cuda_id}')\r\n torch.cuda.set_device(cuda_id)\r\n if mb_size is None:\r\n q = GPUStatCollection.new_query()[cuda_id]\r\n mb_size = q.memory_total\r\n MB = 1024 * 1024\r\n size = int(0.80 * mb_size * MB / 4)\r\n block = torch.empty(size, dtype=torch.float32).cuda()\r\n return block", "def profile_module(self):\n return base_profiler.run_in_separate_process(self._profile_module)", "def main():\n # Manual seed for reproducibility\n torch.manual_seed(363636)\n\n # Global instances\n global args, use_cuda, device\n # Instantiating the parser\n args = parser.parse_args()\n # Global CUDA flag\n use_cuda = args.cuda and torch.cuda.is_available()\n # Defining device and device's map locationo\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n print('chosen device: ', device)\n\n # Defining loss function and printing CUDA information (if available)\n if use_cuda:\n print(\"PyTorch version: \")\n print(torch.__version__)\n print(\"CUDA Version: \")\n print(torch.version.cuda)\n print(\"cuDNN version is: \")\n print(cudnn.version())\n cudnn.benchmark = True\n criterion = nn.CrossEntropyLoss().cuda()\n else:\n criterion = nn.CrossEntropyLoss()\n\n # Dataloaders for CIFAR, ImageNet and MNIST\n if args.dataset == 'CIFAR100':\n\n normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],\n std=[x / 255.0 for x in [63.0, 62.1, 
66.7]])\n\n kwargs = {'num_workers': args.workers, 'pin_memory': True} if use_cuda else {}\n\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR100(root=args.data_path, train=True, transform=transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32, 4),\n transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.075),\n transforms.ToTensor(),\n normalize,\n Cutout(n_holes=1, length=16),\n ]), download=True),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.CIFAR100(root=args.data_path, train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.val_batch_size, shuffle=False, **kwargs)\n\n elif args.dataset == 'ImageNet':\n\n traindir = os.path.join(args.data_path, 'train')\n valdir = os.path.join(args.data_path, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(args.image_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True)\n\n image_size = args.image_size\n val_dataset = datasets.ImageFolder(\n valdir,\n transforms.Compose([\n transforms.Resize(image_size, interpolation=PIL.Image.BICUBIC),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n normalize,\n ]))\n val_loader = torch.utils.data.DataLoader(\n val_dataset, batch_size=args.val_batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n elif args.dataset == 'MNIST':\n\n kwargs = {'num_workers': args.workers, 'pin_memory': True} if use_cuda else {}\n\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(args.data_path, train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n val_loader = torch.utils.data.DataLoader(\n datasets.MNIST(args.data_path, train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.val_batch_size, shuffle=True, **kwargs)\n\n elif args.dataset == 'CIFAR10':\n\n normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],\n std=[x / 255.0 for x in [63.0, 62.1, 66.7]])\n\n kwargs = {'num_workers': args.workers, 'pin_memory': True} if use_cuda else {}\n\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10(root=args.data_path, train=True, transform=transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32, 4),\n transforms.ToTensor(),\n normalize,\n ]), download=True),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10(root=args.data_path, train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.val_batch_size, shuffle=False, **kwargs)\n\n # original grid = [(1.0, 1.0), (1.9, 1.0), (1.7, 1.1), (1.6, 1.1), (1.4, 1.2), (1.2, 1.3), (1.0, 1.4)]\n\n grid = [(args.grid[i], args.grid[i+1]) for i in range(0, len(args.grid), 2)]\n\n for coeff in grid:\n alpha = coeff[0] ** args.phi\n beta = coeff[1] ** args.phi\n grid_search(train_loader, val_loader, criterion, alpha, beta)", "def 
startProfile(profileName):\n \n checkRoot()\n\n # Enable bluetooth service if not enabled\n changeBluetoothService(enable=True)\n \n # Check if profile file exists\n if not os.path.isfile('/etc/bluectl/'+profileName):\n print('Profile with given name does not exist.')\n return\n\n # Load profile\n with open('/etc/bluectl/'+profileName, 'rt') as profileFile:\n for line in profileFile.readlines():\n if 'Controller=' in line:\n cntMAC = line.replace('Controller=', '').replace('\\n', '')\n if 'Device=' in line:\n deviceMAC = line.replace('Device=', '').replace('\\n', '')\n \n if not (checkMACAddress(cntMAC) or checkMACAddress(deviceMAC)):\n print('Profile file is corrupted. Please remove and create this profile again.')\n return\n \n print('\\nStarting bluetooth profile\\n')\n \n # Choose bluetooth controller\n blueSelectStdout = execCommand('bluetoothctl select {}'.format(cntMAC))\n \n # Power on bluetooth controller\n bluePoweronStdout = execCommand('bluetoothctl power on')\n \n # Connect bluetooth device\n blueConnectStdout = execCommand('bluetoothctl connect {}'.format(deviceMAC))\n \n if not 'Connection successful' in blueConnectStdout:\n print(blueConnectStdout)\n print('Is device powered on in the vicinity?\\n')\n return\n \n print('Profile was successfully started\\n')\n \n return", "async def profile(self, ctx:utils.Context):\n\n pass", "def cuda(self):\n if torch.cuda.is_available():\n self.automata = self.automata.cuda()\n self.inv_automata = self.inv_automata.cuda()\n self.action = self.action.cuda()\n self.inv_action = self.inv_action.cuda()", "def get_profile_device_list():\r\n profiler_dir = get_profiler_dir(request)\r\n train_id = get_train_id(request)\r\n if not profiler_dir or not train_id:\r\n raise ParamValueError(\"No profiler_dir or train_id.\")\r\n\r\n profiler_dir_abs = os.path.join(settings.SUMMARY_BASE_DIR, train_id, profiler_dir)\r\n try:\r\n profiler_dir_abs = validate_and_normalize_path(profiler_dir_abs, \"profiler\")\r\n except ValidationError:\r\n raise ParamValueError(\"Invalid profiler dir\")\r\n\r\n device_list = analyse_device_list_from_profiler_dir(profiler_dir_abs)\r\n return jsonify(device_list)", "def benchmark_profile(self):\n cb_bin = os.path.join(bin_path, 'compilebench')\n desc = \"benchmark\"\n test_name = \"compilebench_{0}\".format(to_safe_name(desc))\n test = TestProfile(\n name=test_name,\n desc=desc,\n test_path=self.test_path,\n bin_path=bin_path,\n command=\"{0} -D {1} -i 10 --makej\".format(cb_bin, self.test_path))\n\n return test", "def profile(self, data):\n\n # need to store the output of each morphism in the analysis, which forms the training data later on\n self.profile = {}\n cur_output = data\n for morph in self.analysis.morphisms:\n cur_name = morph.name\n cur_output = morph.apply(cur_output)\n self.profile[cur_name] = cur_output", "def cprofiler(fun, *args, **kwargs):\n print(f\"Profiling {fun.__name__}\")\n with cProfile.Profile() as pr:\n fun(*args, **kwargs)\n pr.print_stats()", "def add_vgpu(self, vgpu_profile):\n\n self.logger.info(\n \"Adding vGPU {0} for \" \"VM {1}\".format(vgpu_profile, self.vm_obj.name)\n )\n backing = vim.VirtualPCIPassthroughVmiopBackingInfo(vgpu=vgpu_profile)\n backing_obj = vim.VirtualPCIPassthrough(backing=backing)\n dev_config_spec = vim.VirtualDeviceConfigSpec(device=backing_obj)\n dev_config_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n config_spec = vim.vm.ConfigSpec()\n config_spec.deviceChange = [dev_config_spec]\n return self.vm_obj.ReconfigVM_Task(spec=config_spec)", "def 
stat_cuda(msg: str) -> None:\n print(f'-- {msg:<35} allocated: %dM, max allocated: %dM, cached: %dM, max cached: %dM' % (\n torch.cuda.memory_allocated() / 1024 / 1024,\n torch.cuda.max_memory_allocated() / 1024 / 1024,\n torch.cuda.memory_cached() / 1024 / 1024,\n torch.cuda.max_memory_cached() / 1024 / 1024\n ))", "def __call__(self, data):\n return self.model(data.cuda())", "def _profile(self) -> None:\n if self.use_case.profile:\n if self._profile_stats is None:\n self._profile_stats = pstats.Stats()\n if self._current_profiler is not None:\n self._current_profiler.disable()\n self._profile_stats.add(self._current_profiler)\n # TODO: use clear() instead of always creating a new profile\n self._current_profiler = cProfile.Profile()\n self._current_profiler.enable()" ]
[ "0.5960219", "0.58565557", "0.5716736", "0.5441635", "0.53858054", "0.5206173", "0.5170805", "0.51140755", "0.50826657", "0.5024006", "0.5007603", "0.49988145", "0.49846023", "0.4983805", "0.49742296", "0.4943996", "0.48566967", "0.48441958", "0.48424977", "0.4832195", "0.48304933", "0.48234773", "0.48166627", "0.48074797", "0.48015764", "0.48007673", "0.47901082", "0.47842902", "0.47667006", "0.47504246" ]
0.8090687
0
Takes a set of metrics and adds a set of derived metrics. A given set of throughput metrics will be converted to their equivalent counts using the duration given in statistics. The source metrics listed for each combined metric will then be summed together to generate a new combined metric.
def generateDerivedMetrics(kernelMetrics, statistics, throughputMetrics = {}, countMetrics = {}, combinedMetrics = {}): # combine single metrics for combinedMetric in combinedMetrics: for kernel in kernelMetrics: logging.debug("Combining metrics for kernel {}".format(kernel)) # iterate over each run, take the number of runs to be # the length of the first source metric if combinedMetrics[combinedMetric][0] in kernelMetrics[kernel]: combinedMetricCounts = [] sourceMetricMissing = False # go through each run for run in range(0, len(kernelMetrics[kernel][ combinedMetrics[combinedMetric][0] ])): combinedMetricRunCount = 0 # take all the source metrics and add them into the # combined metric for sourceMetric in combinedMetrics[combinedMetric]: if sourceMetric in kernelMetrics[kernel]: # TODO delete once debugged print("runs of {} {}".format(sourceMetric, kernelMetrics[kernel][sourceMetric])) combinedMetricRunCount = combinedMetricRunCount + kernelMetrics[kernel][sourceMetric][run] else: sourceMetricMissing = True logging.info("Source metric {} missing for combined metric {}, combined metric will not be" "added".format(sourceMetric, combinedMetric)) # append this run ot the end of the list combinedMetricCounts.append(combinedMetricRunCount) if not sourceMetricMissing: kernelMetrics[kernel][combinedMetric] = combinedMetricCounts # take throughputs and convert them to counts # doesn't use averages since that can skew results for throughputMetricName, countMetricName in zip(throughputMetrics, countMetrics): for kernel in kernelMetrics: logging.debug("Generating count metrics for {} in kernel {}".format(throughputMetricName, kernel)) if throughputMetricName in kernelMetrics[kernel]: counts = [] for run in range(0, len(kernelMetrics[kernel][throughputMetricName])): count = kernelMetrics[kernel][throughputMetricName][run] * kernelMetrics[kernel]["Duration"][run] counts.append(count) kernelMetrics[kernel][countMetricName] = counts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)", "def _aggregate_metrics(metrics, aggfunc, base):\n return base.Struct(**_UNCOMPRESSED_METRICS)(\n left_side_bearing=aggfunc(_m.left_side_bearing for _m in metrics),\n right_side_bearing=aggfunc(_m.right_side_bearing for _m in metrics),\n character_width=aggfunc(_m.character_width for _m in metrics),\n character_ascent=aggfunc(_m.character_ascent for _m in metrics),\n character_descent=aggfunc(_m.character_descent for _m in metrics),\n character_attributes=0,\n )", "def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])", "def aggregate_statistics(self, new_stats):\n \n if isinstance(new_stats,RunStatistics):\n new_stats = [new_stats, ]\n elif isinstance(new_stats,list):\n if any(not isinstance(_,RunStatistics) for _ in new_stats):\n raise MadGraph5Error, \"The 'new_stats' argument of the function \"+\\\n \"'updtate_statistics' must be a (possibly list of) \"+\\\n \"RunStatistics instance.\"\n \n keys = set([])\n for stat in [self,]+new_stats:\n keys |= set(stat.keys())\n\n new_stats = new_stats+[self,]\n for key in keys:\n # Define special rules\n if key=='max_precision':\n # The minimal precision corresponds to the maximal value for PREC\n self[key] = min( _[key] for _ in new_stats if key in _)\n elif key=='min_precision':\n # The maximal precision corresponds to the minimal value for PREC\n self[key] = max( _[key] for _ in new_stats if key in _)\n elif key=='averaged_timing':\n n_madloop_calls = sum(_['n_madloop_calls'] for _ in new_stats if\n 'n_madloop_calls' in _)\n if n_madloop_calls > 0 :\n self[key] = sum(_[key]*_['n_madloop_calls'] for _ in \n new_stats if (key in _ and 'n_madloop_calls' in _) )/n_madloop_calls\n else:\n # Now assume all other quantities are cumulative\n self[key] = sum(_[key] for _ in new_stats if key in _)", "def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n\n metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)", "def summerize_adapter_metrics(parsed_metrics: Dict[int, dict]) -> Dict[Tuple[str, str], dict]:\n\n summarized_metrics = {}\n for lane in parsed_metrics:\n # Iterate over all samples in lane\n summarized_metrics[lane] = summarized_metrics.get(lane, {})\n for value in parsed_metrics[lane].values():\n sample_id = value.get(\"Sample_ID\")\n summarized_metrics[lane][sample_id] = summarized_metrics[lane].get(sample_id, value)\n summarized_metrics[lane][sample_id][\n \"R\" + value.get(\"ReadNumber\") + \"_SampleBases\"\n ] = value.get(\"SampleBases\")\n\n return summarized_metrics", "def aggregate_metrics(metrics):\n if 
len(metrics) == 1:\n return metrics[0]\n else:\n agg_metrics = metrics[0]\n for metric in agg_metrics.keys():\n vals = [x[metric] for x in metrics]\n agg_metrics[metric] = [np.mean(vals), np.std(vals)]\n return agg_metrics", "def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics", "def _report_metrics(self, total_bytes, time_delta, num_files):\n # This recreates the gsutil throughput calculation so that metrics are 1:1.\n avg_speed = round(float(total_bytes) / float(time_delta))\n report(\n source_scheme=self._source_scheme,\n destination_scheme=self._destination_scheme,\n num_files=num_files,\n size=total_bytes,\n avg_speed=avg_speed,\n disk_io_time=self._calculate_disk_io())", "def metrics_group():", "def reduce_metrics(logging_outputs) -> None:\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n admloss_sum = sum(log.get('admloss', 0) for log in logging_outputs)\n margin_n = sum(log.get('margin_n', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n\n metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)\n metrics.log_scalar('admloss', admloss_sum / sample_size / math.log(2), sample_size, round=3)\n metrics.log_scalar('margin_norm', margin_n / nsentences, 32, round=3)\n metrics.log_derived('ppl', lambda meters: round(2**meters['loss'].avg, 3))", "def reduce_metrics(logging_outputs) -> None:\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n loss_sum_vad = sum(log.get('loss_vad', 0) for log in logging_outputs)\n\n ntokens_vad = sum(log.get('ntokens_vad', 0) for log in logging_outputs)\n total_n_ts = sum(log.get('n_ts', 0) for log in logging_outputs)\n total_n_pred_ts = sum(log.get('n_pred_ts', 0) for log in logging_outputs)\n\n nsentences_vad = 
sum(log.get('nsentences_vad', 0) for log in logging_outputs)\n\n sample_size_vad = sum(log.get('sample_size_vad', 0) for log in logging_outputs)\n sample_size_asr = sum(log.get('sample_size', 0) for log in logging_outputs)\n\n sample_size = sample_size_vad + sample_size_asr\n\n metrics.log_scalar('loss', loss_sum / sample_size, sample_size, round=3)\n\n if sample_size_vad:\n metrics.log_scalar('loss_vad', loss_sum_vad / sample_size_vad, sample_size_vad, round=3)\n else:\n metrics.log_scalar('loss_vad', loss_sum_vad / 999999999999, sample_size_vad, round=3)\n\n ncorrect_vad = sum(log.get('ncorrect_vad', 0) for log in logging_outputs)\n if nsentences_vad:\n metrics.log_scalar('accuracy_vad', 100.0 * ncorrect_vad / ntokens_vad, ntokens_vad, round=1)\n else:\n metrics.log_scalar('accuracy_vad', 100.0 * ncorrect_vad / 999999999999, ntokens_vad, round=1)\n\n if total_n_ts:\n ncorrect_ts = sum(log.get('ncorrect_ts', 0) for log in logging_outputs)\n metrics.log_scalar('recall_TS', 100.0 * ncorrect_ts / total_n_ts, total_n_ts, round=1)\n else:\n metrics.log_scalar('recall_TS', 100.0 * 0 / ntokens_vad, ntokens_vad, round=1)\n\n if total_n_pred_ts:\n ncorrect_ts = sum(log.get('ncorrect_ts', 0) for log in logging_outputs)\n metrics.log_scalar('precision_TS', 100.0 * ncorrect_ts / total_n_pred_ts, total_n_pred_ts, round=1)\n else:\n metrics.log_scalar('precision_TS', 100.0 * 0 / ntokens_vad, ntokens_vad, round=1)", "def get_metrics(self, add_metrics={}):\n tot_px_cnt = self.res * int(self.tensors['samples_evaluated'][0])\n\n if self.debug:\n sum_per_class = self.tensors['TP'] + self.tensors['TN'] + self.tensors['FP'] + self.tensors['FN']\n unique = sum_per_class.unique()\n assert len(unique) == 1, 'Expect to observe the exact same number for all classes.'\n assert unique[0] == self.tensors['PX_CNT'].sum() == tot_px_cnt, 'Expect exactly one type of prediction per pixel.'\n\n mask_non_observed = (self.tensors['PX_CNT']).bool()\n mask_bg = self.tensors['M']\n mask_combined = (self.tensors['M'] * mask_non_observed).bool() # in PyTorch 1.4 no logical AND\n\n if self.debug:\n assert mask_combined.sum() <= mask_bg.sum()\n assert mask_combined.sum() <= mask_non_observed.sum()\n \n accuracies = (self.tensors['TP'] + self.tensors['TN']) / tot_px_cnt\n acc = torch.mean(accuracies[mask_combined])\n acc_bg_included = torch.mean(accuracies[mask_non_observed])\n\n IoUs = self.tensors['TP'] / (tot_px_cnt - self.tensors['TN']) # per class: I/U, U = sum(TP,FP,FN) = all - TN\n mIoU = torch.mean(IoUs[mask_combined])\n mIoU_bg_included = torch.mean(IoUs[mask_non_observed])\n\n if self.debug:\n if torch.cuda.is_available():\n for i in [accuracies, acc, acc_bg_included, IoUs, mIoU, mIoU_bg_included]:\n assert i.is_cuda\n\n results = OrderedDict()\n\n for i in ['acc','mIoU']:\n for j in ['','_bg_included']:\n results[ i + j + '_' + self.fold ] = float(eval(i+j+'.cpu()'))\n\n for i in range(self.tensors['TP'].shape[0]):\n results['IoU_class_' + str(i) + '_' + self.fold] = float(IoUs[i].cpu())\n results['acc_class_' + str(i) + '_' + self.fold] = float(accuracies[i].cpu())\n\n if self.debug:\n for k in results:\n if isinstance(results[k], float) and not math.isnan(results[k]):\n # don't apply check to nans and str; we don't use exactly 1 due to smaller rounding error\n assert results[k] <= 1.0001, f'Failure for {k,results[k],type(results[k])}: any metric derived from the confusion matrix should be <= 1.'\n\n #for t in self.tensors:\n # results[t + '_' + self.fold] = self.tensors[t].cpu()\n\n if add_metrics:\n for k in 
add_metrics:\n results[k + '_' + self.fold] = float(add_metrics[k])\n\n return results", "def _create_metric_sum(a, b):\n metric_sum = GridSearchClassificationMetrics()\n metric_sum.accuracy = a.accuracy + b.accuracy\n metric_sum.precision = a.precision + b.precision\n metric_sum.f_measure = a.f_measure + b.f_measure\n metric_sum.recall = a.recall + b.recall\n metric_sum.confusion_matrix = a.confusion_matrix + b.confusion_matrix\n return metric_sum", "def sum_dstats(self, stats, smetrics):\n avg = {}\n\n for disk, metrics in stats.iteritems():\n for mname, metric in metrics.iteritems():\n if mname not in smetrics:\n continue\n if mname in avg:\n avg[mname] += metric\n else:\n avg[mname] = metric\n\n return avg", "def add_metrics(self,\n metrics_: Optional[Dict[str, Any]] = None,\n add_to_child_: bool = True,\n **kwargs: Any) -> None:\n if self._child_stack and add_to_child_:\n self._child_stack[-1].add_metrics(metrics_, **kwargs)\n else:\n def collect(target: Dict[str, Any]):\n if metrics_:\n for key, val in metrics_.items():\n key = stage_type.add_metric_prefix(key)\n target[key] = to_number_or_numpy(val)\n if kwargs:\n for key, val in kwargs.items():\n key = stage_type.add_metric_prefix(key)\n target[key] = to_number_or_numpy(val)\n\n stage_type = self._stage.type\n if self._stage.batch.is_active:\n collect(self._batch_metrics)\n elif self._stage.epoch is not None and self._stage.epoch.is_active:\n collect(self._epoch_metrics)\n else:\n collect(self._stage_metrics)\n self._stage.push_metrics(self._stage_metrics)", "def reduce_metrics(logging_outputs) -> None:\n loss_sum = utils.item(sum(log.get('loss', 0) for log in logging_outputs))\n sample_size = utils.item(sum(log.get('sample_size', 0) for log in logging_outputs))\n\n metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)\n metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['loss'].avg))", "def _build_metric_list_to_collect(self, additional_metrics):\n metrics_to_collect = {}\n\n # Defaut metrics\n for default_metrics in self.DEFAULT_METRICS.itervalues():\n metrics_to_collect.update(default_metrics)\n\n # Additional metrics metrics\n for option in additional_metrics:\n additional_metrics = self.AVAILABLE_METRICS.get(option)\n if not additional_metrics:\n if option in self.DEFAULT_METRICS:\n self.log.warning(\n u\"`%s` option is deprecated.\"\n u\" The corresponding metrics are collected by default.\", option\n )\n else:\n self.log.warning(\n u\"Failed to extend the list of metrics to collect:\"\n u\" unrecognized `%s` option\", option\n )\n continue\n\n self.log.debug(\n u\"Adding `%s` corresponding metrics to the list\"\n u\" of metrics to collect.\", option\n )\n metrics_to_collect.update(additional_metrics)\n\n return metrics_to_collect", "def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset", "def set_metrics(metric_dict, cd_loss, cd_corrects, cd_report):\n metric_dict['cd_losses'].append(cd_loss.item())\n metric_dict['cd_corrects'].append(cd_corrects.item())\n metric_dict['cd_precisions'].append(cd_report[0])\n metric_dict['cd_recalls'].append(cd_report[1])\n 
metric_dict['cd_f1scores'].append(cd_report[2])\n\n return metric_dict", "def get_metrics(self, objs_metrics):\n d = {}\n _S = DiffStatus\n\n for status in _S.iter():\n d[status] = [dict(obj_m) for obj_m in objs_metrics if obj_m['status'] == status]\n\n count_a_only = len(d.get(_S.deleted, []))\n count_b_only = len(d.get(_S.added, []))\n count_modified = len(d.get(_S.modified, []))\n count_unchanged = len(d.get(_S.unchanged, []))\n count_common = count_modified + count_unchanged\n\n count_a = count_common + count_a_only\n count_b = count_common + count_b_only\n\n d['count'] = {\n 'a': count_a,\n 'b': count_b,\n 'a_only': count_a_only,\n 'b_only': count_b_only,\n 'modified': count_modified,\n 'unchanged': count_unchanged,\n }\n\n d['summary'] = {\n _S.added: {\n 'count': count_b_only,\n 'relative_to': {\n 'a': count_b_only / count_a,\n 'b': count_b_only / count_b,\n }\n },\n _S.deleted: {\n 'count': count_a_only,\n 'relative_to': {\n 'a': count_a_only / count_a,\n 'b': count_a_only / count_b,\n },\n },\n _S.modified: {\n 'count': count_modified,\n 'relative_to': {\n 'a': count_modified / count_a,\n 'b': count_modified / count_b,\n }\n },\n _S.unchanged: {\n 'count': count_unchanged,\n 'relative_to': {\n 'a': count_unchanged / count_a,\n 'b': count_unchanged / count_b,\n }\n },\n }\n\n return d", "def update(self, outputs: torch.Tensor, targets: torch.Tensor) -> Tuple[Any, Any, Any, Any]:\n tn, fp, fn, tp, support = super().update(outputs=outputs, targets=targets)\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=tp, fp=fp, fn=fn, support=support, zero_division=self.zero_division,\n )\n return per_class, micro, macro, weighted", "def reduce_metrics(logging_outputs) -> None:\n\n loss_sum = utils.item(sum(log.get(\"loss\", 0) for log in logging_outputs))\n ntokens = utils.item(sum(log.get(\"ntokens\", 0) for log in logging_outputs))\n nsentences = utils.item(\n sum(log.get(\"nsentences\", 0) for log in logging_outputs)\n )\n sample_size = utils.item(\n sum(log.get(\"sample_size\", 0) for log in logging_outputs)\n )\n\n metrics.log_scalar(\n \"loss\", loss_sum / sample_size / math.log(2), sample_size, round=3\n )\n metrics.log_scalar(\"ntokens\", ntokens)\n metrics.log_scalar(\"nsentences\", nsentences)\n if sample_size != ntokens:\n metrics.log_scalar(\n \"nll_loss\", loss_sum / ntokens / math.log(2), ntokens, round=3\n )", "def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)", "def calculate_metrics(metrics_data: List[Tuple[Metric, DataType]]) -> List[float]:\n pass", "def prepare_multiple_perf_metrics(run_dict):\n multiple_perf_metrics = {}\n for run_label, run_name in run_dict.items():\n output_parser = OutputParser(run_name, use_most_recent=False)\n perf_metrics = performance_calculations.performance_metrics(output_parser)\n multiple_perf_metrics[run_label] = perf_metrics\n return multiple_perf_metrics", "def reduce_metrics(logging_outputs) -> None:\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n neg_elbo_sum = sum(log.get('neg_elbo', 0) for log in logging_outputs)\n recon_loss_sum = sum(log.get('recon_loss', 0) for log in logging_outputs)\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in 
logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n KLz_sum = sum(log.get('KLz', 0) for log in logging_outputs)\n KLt_sum = sum(log.get('KLt', 0) for log in logging_outputs)\n KLtheta_sum = sum(log.get('KLtheta', 0) for log in logging_outputs)\n\n if 'nll_iw' in logging_outputs[0]:\n nll_iw_sum = sum(log.get('nll_iw', 0) for log in logging_outputs)\n metrics.log_scalar('nll_iw_s', nll_iw_sum / nsentences, \n nsentences, round=3, priority=4)\n metrics.log_scalar('nll_iw_t', nll_iw_sum / ntokens / math.log(2), \n ntokens, round=3, priority=5) \n metrics.log_derived('ppl_iw', lambda meters: utils.get_perplexity(meters['nll_iw_t'].avg), priority=6)\n\n else:\n metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), \n sample_size, round=3, priority=3)\n\n metrics.log_scalar('neg_elbo_s', neg_elbo_sum / nsentences, \n nsentences, round=3, priority=4)\n metrics.log_scalar('recon_loss_s', recon_loss_sum / nsentences, \n nsentences, round=3, priority=4)\n\n metrics.log_scalar('neg_elbo_t', neg_elbo_sum / ntokens / math.log(2), \n ntokens, round=3, priority=5)\n metrics.log_scalar('recon_loss_t', recon_loss_sum / ntokens / math.log(2), \n ntokens, round=3, priority=5)\n\n metrics.log_scalar('KLz', KLz_sum / nsentences, nsentences, round=1, priority=8)\n metrics.log_scalar('KLt', KLt_sum / nsentences, nsentences, round=1, priority=8)\n metrics.log_scalar('KLtheta', KLtheta_sum / nsentences, nsentences, round=1, priority=8)\n\n metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['neg_elbo_t'].avg), priority=6)\n metrics.log_derived('recon_ppl', lambda meters: utils.get_perplexity(meters['recon_loss_t'].avg), priority=7)\n\n if 'active' in logging_outputs[0]:\n metrics.log_scalar('active', logging_outputs[0]['active'], weight=0, round=1, priority=10)\n metrics.log_scalar('percent', logging_outputs[0]['percent'], weight=0, round=2, priority=10)\n # metrics.log_scalar('nlow', logging_outputs[0]['nlow'], weight=0, priority=10)\n # metrics.log_scalar('nhigh', logging_outputs[0]['nhigh'], weight=0, priority=10)", "def result(\n metrics: Dict[metric_types.MetricKey, float]\n ) -> Dict[metric_types.MetricKey, float]:\n output = {}\n for threshold in thresholds:\n ptn = flip_count_metric_key_by_name_by_threshold[threshold][\n 'positive_to_negative']\n ntp = flip_count_metric_key_by_name_by_threshold[threshold][\n 'negative_to_positive']\n pos_examples = flip_count_metric_key_by_name_by_threshold[threshold][\n 'positive_to_negative_examples_ids']\n neg_examples = flip_count_metric_key_by_name_by_threshold[threshold][\n 'negative_to_positive_examples_ids']\n pos = flip_count_metric_key_by_name_by_threshold[threshold][\n 'positive_examples_count']\n neg = flip_count_metric_key_by_name_by_threshold[threshold][\n 'negative_examples_count']\n output[metric_key_by_name_by_threshold[threshold]\n ['overall']] = (metrics[ntp] + metrics[ptn]) / (\n metrics[pos] + metrics[neg])\n output[metric_key_by_name_by_threshold[threshold]\n ['positive_to_negative']] = metrics[ptn] / metrics[pos]\n output[metric_key_by_name_by_threshold[threshold]\n ['negative_to_positive']] = metrics[ntp] / metrics[neg]\n output[metric_key_by_name_by_threshold[threshold]\n ['positive_to_negative_examples_ids']] = metrics[pos_examples]\n output[metric_key_by_name_by_threshold[threshold]\n ['negative_to_positive_examples_ids']] = metrics[neg_examples]\n\n return output", "def calc_metric(output, metrics):\n score = []\n for metric in metrics:\n metric_mod = 
__import__(\"sklearn.metrics\", fromlist=[metric])\n metric_func = getattr(metric_mod, metric)\n score.append(metric_func(output[0], output[1]))\n return score, output" ]
[ "0.67379445", "0.6392101", "0.6263343", "0.6217148", "0.6202033", "0.6021694", "0.5909661", "0.588806", "0.58644867", "0.5835653", "0.5708207", "0.5707943", "0.5697257", "0.5673869", "0.56496733", "0.5629821", "0.56219655", "0.561071", "0.561009", "0.55759895", "0.55577517", "0.5543367", "0.5532502", "0.5493901", "0.54576707", "0.5452468", "0.5448231", "0.5436635", "0.5426086", "0.5407613" ]
0.6852241
0
Generates roofline points from a set of kernel metrics. The flops type is automatically selected to be the one with the highest throughput.
def generateRooflinePoints(kernelMetrics): rooflines = dict() memRooflines = dict() # one point for each kernel # runs are averaged for kernel in kernelMetrics: logging.debug("Starting roofline generation for kernel {}".format(kernel)) # figure out which flops is highest flops = dict() for flopsMetric in rooflineMetricsFlops: if flopsMetric in kernelMetrics[kernel]: flops[flopsMetric] = statistics.mean(kernelMetrics[kernel][flopsMetric]) * flopsMultipliers[flopsMetric] if len(flops) == 0: logging.debug("flops for {} empty skipping".format(kernel)) continue flopsMetric = max(flops, key=flops.get) # figure out which throughput is highest throughput = dict() for memMetric in rooflineMetricsMem: if memMetric in kernelMetrics[kernel]: throughput[memMetric] = statistics.mean(kernelMetrics[kernel][flopsMetric]) if len(throughput) == 0: logging.debug("Throughput for {} empty skipping".format(kernel)) continue memMetric = max(throughput, key=throughput.get) durationList = kernelMetrics[kernel]["Duration"] flopsList = kernelMetrics[kernel][flopsMetric] memList = kernelMetrics[kernel][memMetric] # really should use numpy for this but some systems don't have it installed flopsPerSecList = [flops / duration if duration > 0 else 0 for flops, duration in zip(flopsList, durationList)] throughputList = [mem / duration if duration > 0 else 0 for mem, duration in zip(flopsList, memList)] #[flops / duration for flops, duration in zip # calculate intensity for each memory type # and add it to the list for memMetric in rooflineMetricsMem: logging.debug("Working on memory metric {}".format(memMetric)) if memMetric in kernelMetrics[kernel]: #intensity = flops / statistics.mean(kernelMetrics[kernel][memMetric]) intensityList = [flops / data if data > 0 else 0 for flops, data in zip (flopsList, kernelMetrics[kernel][memMetric])] invIntensityList = [data / flops if flops > 0 else 0 for flops, data in zip(flopsList, kernelMetrics[kernel][memMetric])] #intensityList = flopsList / np.array(kernelMetrics[kernel][memMetric]) flopsInfo = abbrMetricNames[memMetric] + " " + abbrMetricNames[flopsMetric] + "/" + kernel #print("kernel info {}".format(flopsInfo)) intensityStdDev = 0 flopsPerSecStdDev = 0 if len(intensityList) > 1: intensityStdDev = statistics.stdev(intensityList) invIntensityStdDev = statistics.stdev(invIntensityList) flopsPerSecStdDev = statistics.stdev(flopsPerSecList) throughputStdDev = statistics.stdev(kernelMetrics[kernel][memMetric]) rooflines[flopsInfo] = [statistics.mean(intensityList), statistics.mean(flopsPerSecList), intensityStdDev, flopsPerSecStdDev] memRooflines[flopsInfo] = [statistics.mean(invIntensityList), statistics.mean(kernelMetrics[kernel][memMetric]), invIntensityStdDev, throughputStdDev] return rooflines, memRooflines
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def roofline_plot():\n\n def attainable_performance(operational_intensity):\n return min(PEAK_PERFORMANCE, MEMORY_BANDWIDTH * operational_intensity)\n\n oi_values = np.logspace(-4, 12, 1000, base=2)\n perf_values = [attainable_performance(oi) for oi in oi_values]\n fig, ax = viz_utils.setup_figure_1ax(x_label='Operational Intensity [Flops/Bytes]',\n y_label='Performance [Flops/Cycle]')\n ax.set_xscale(\"log\", basex=2)\n ax.set_yscale(\"log\", basey=2)\n ax.plot(oi_values, perf_values, linewidth=2.0, alpha=0.7)\n ax.set_aspect('equal', adjustable='datalim')\n\n ridge_point = PEAK_PERFORMANCE / MEMORY_BANDWIDTH\n ax.annotate(f'{{{ridge_point:0.1f}, {PEAK_PERFORMANCE:0.1f}}}',\n xy=(ridge_point, PEAK_PERFORMANCE),\n xytext=(-70, 15), textcoords='offset points',)\n return fig, ax", "def perf_in_roofline_plot(perf_metrics, label, color, fig=None, ax=None):\n perf_values = []\n input_size_values = []\n op_intensity_values = []\n for metrics in perf_metrics.values():\n perf_values.append(metrics['performance'])\n op_intensity_values.append(metrics['op_intensity'])\n input_size_values.append(metrics['config']['population'])\n\n if not fig and not ax:\n fig, ax = roofline_plot()\n ax.plot(op_intensity_values, perf_values, color=color, markersize=8, marker='o')\n ax.plot(op_intensity_values, perf_values, color=color, linewidth=1.5, alpha=0.8, label=label)", "def inp_kernel(r, ktype):\n \n if ktype == 'uniform':\n \n if r < 1.:\n return 1./((4./3.)*pi)\n else:\n return 0.\n \n elif ktype == 'sph-anarchy':\n \n if r <= 1.: return (21./(2.*pi)) * ((1. - r)*(1. - r)*(1. - r)*(1. - r)*(1. + 4.*r)) \n else: return 0. \n \n elif ktype == 'gadget-2':\n \n if r < 0.5: return (8./pi) * (1. - 6*(r*r) + 6*(r*r*r))\n elif r < 1.: return (8./pi) * 2 * ((1. - r)*(1. - r)*(1. - r))\n else: return 0.\n \n elif ktype == 'cubic':\n \n if r < 0.5: return (2.546479089470 + 15.278874536822 * (r - 1.0) * r * r)\n elif r < 1: return 5.092958178941 * (1.0 - r) * (1.0 - r) * (1.0 - r)\n else: return 0\n \n elif ktype == 'quintic':\n \n if r < 0.333333333: return 27.0*(6.4457752*r*r*r*r*(1.0-r) -1.4323945*r*r +0.17507044)\n elif r < 0.666666667: return 27.0*(3.2228876*r*r*r*r*(r-3.0) +10.7429587*r*r*r -5.01338071*r*r +0.5968310366*r +0.1352817016)\n elif r < 1: return 27.0*0.64457752*(-r*r*r*r*r +5.0*r*r*r*r -10.0*r*r*r +10.0*r*r -5.0*r +1.0)\n else: return 0\n \n else:\n \n print (\"Doesn't recognize the kernel. 
Input your own kernel in `inp_kernel`\")\n exit()", "def receiver_operation_curve(test_confidence, test_labels, label_range):\n\n test_confidence = np.array(test_confidence)\n\n # compute actual number of positive and negative instances\n num_instance = len(test_confidence)\n num_true_pos = sum(np.array([label_range[0] == test_labels[i] for i in range(num_instance)]))\n num_true_neg = num_instance - num_true_pos\n\n # for each threshold, compute the TP and FP\n ROC_array = []\n\n zipped = zip(test_confidence, test_labels)\n zipped.sort(key = lambda t: t[0]) # sort confidence and label based on confidence, ascending order\n zipped.reverse() # sort the confidence from high to low, descending order\n [test_confidence, test_labels] = zip(*zipped)\n\n # set cutoff at each point when the instance label changes\n cutoff = []\n cutoff.append(1)\n for i in range(num_instance):\n if i == 0:\n cutoff.append(test_confidence[0])\n current_state = test_labels[0]\n else:\n if current_state == test_labels[i]:\n continue\n else:\n current_state = test_labels[i]\n cutoff.append(test_confidence[i-1])\n cutoff.append(test_confidence[i])\n cutoff.append(0)\n\n for cf in cutoff:\n # compute true positive and false positive\n TP = 0\n FP = 0\n for i in range(num_instance):\n if test_confidence[i] < cf:\n break\n else:\n if label_range[0] == test_labels[i]:\n TP += 1\n elif label_range[0] != test_labels[i]:\n FP += 1\n TP_rate = 1.0 * TP / num_true_pos\n FP_rate = 1.0 * FP / num_true_neg\n ROC_array.append([FP_rate, TP_rate])\n\n return ROC_array", "def main():\n df = prof_conv_bwd_filter()\n df.to_csv(\"prof.cudnnConvBwdFilter.csv\")\n\n \"\"\"visualization, Roofline model\"\"\"\n df = pd.read_csv('prof.cudnnConvBwdFilter.csv', header=0, index_col=0)\n fig = plt.figure(figsize=(10,5))\n ax = fig.add_subplot(1, 1, 1)\n plot_rooline (ax, MACHINE_SPEC, PEAK_PERF, BAND_WIDTH)\n plot_result (ax, df)\n # fig.subplots_adjust(right=0.8)\n plt.subplots_adjust(left=0.1, right=0.6)\n plt.savefig('roofline.png')\n return", "def generate_data():\n # Set random seed number so results are consistent for grader\n # Comment this out if you'd like to see results on different random data!\n np.random.seed(0)\n # Generate some fake data to represent lane-line pixels\n ploty = np.linspace(0, 719, num=720) # to cover same y-range as image\n quadratic_coeff = 3e-4 # arbitrary quadratic coefficient\n # For each y position generate random x position within +/-50 pix\n # of the line base position in each case (x=200 for left, and x=900 for right)\n leftx = np.array([200 + (y ** 2) * quadratic_coeff + np.random.randint(-50, high=51)\n for y in ploty])\n rightx = np.array([900 + (y ** 2) * quadratic_coeff + np.random.randint(-50, high=51)\n for y in ploty])\n\n leftx = leftx[::-1] # Reverse to match top-to-bottom in y\n rightx = rightx[::-1] # Reverse to match top-to-bottom in y\n\n # Fit a second order polynomial to pixel positions in each fake lane line\n left_fit = np.polyfit(ploty, leftx, 2)\n right_fit = np.polyfit(ploty, rightx, 2)\n\n return ploty, left_fit, right_fit", "def procfs_timeline():\n\n return [\n {\n \"timestamp\": \"2021-09-14T12:37:37.168817\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:37.669237\",\n \"sensor\": 
\"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.170142\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.670338\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.171321\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.671572\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.172503\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.672693\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.173552\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.673815\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.174560\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.674690\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", 
\"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.175441\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.675743\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.176551\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.677307\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.178049\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.678310\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.179120\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.679308\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.180223\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.680468\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n 
\"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.181316\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.681683\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.182522\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.682731\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.183680\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.683812\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.184792\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.685027\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.185709\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.686065\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 
5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.186929\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.687190\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:54.188031\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:54.688674\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.59000000000001,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.189489\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.59000000000001,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.690299\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.59000000000001,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:56.191124\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.59000000000001,\n },\n ]", "def gen_lines(problem):\n ps0 = [[-0.4, -0.5, 0]]\n ps1 = [[0.4, -0.5, 0]]\n\n # Use enough points for higher order approximations.\n n_point = 30\n\n labels = ['%s -> %s' % (p0, p1) for p0, p1 in zip(ps0, ps1)]\n probes = []\n for ip in xrange(len(ps0)):\n p0, p1 = ps0[ip], ps1[ip]\n probes.append(LineProbe(p0, p1, n_point))\n\n return probes, labels", "def powerline_all(osm_path): \n return retrieve(osm_path,'lines',['power', 'voltage'])", "def main():\n housing = pd.read_csv(\"Data/train_original.csv\")\n housing[\"TotalSF\"] = (\n housing[\"TotalBsmtSF\"] + housing[\"1stFlrSF\"] + housing[\"2ndFlrSF\"]\n )\n training_features, testing_features, training_target, testing_target = impute_dummify_and_split(\n housing, drop_target=False\n )\n\n p_values = [\n (c, pearsonr(training_features[\"SalePrice\"], training_features[c])[1])\n for c in training_features.columns\n ]\n\n p_value_limits = [0.05]\n\n result = []\n ps_and_cols = {}\n\n for p_value_limit in p_value_limits:\n\n high_ps 
= list(\n map(lambda t: t[0], sorted(p_values, key=lambda t1: t1[1])[:15])\n )\n\n print(training_features[high_ps].corr())\n\n columns = [p[0] for p in p_values if p[1] < p_value_limit]\n\n training_features_restricted = training_features[columns].drop(\n \"SalePrice\", axis=\"columns\"\n )\n\n testing_features_restricted = testing_features[columns].drop(\n \"SalePrice\", axis=\"columns\"\n )\n\n for model in (\n linear_model.Lasso(alpha=2.1),\n linear_model.Ridge(alpha=2.1),\n ):\n\n model.fit(training_features_restricted, training_target)\n\n train_score = model.score(\n training_features_restricted, training_target\n )\n\n test_score = model.score(\n testing_features_restricted, testing_target\n )\n\n name = str(model).split(\"(\")[0]\n\n result = result + [\n (\n \"_2_restrict_features\",\n name,\n \"p value limit: {:.3f}, alpha: 2.1\".format(p_value_limit),\n train_score,\n test_score,\n )\n ]\n\n print(ps_and_cols)\n return training_features[high_ps].corr()", "def eg_sk():\n\n rxs = []\n a = []\n b = []\n c = []\n d = []\n e = []\n f = []\n g = []\n h = []\n i = []\n j = []\n\n for _ in range(1000):\n a.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n b.append(utils.gaussian(10.1, 1))\n\n for _ in range(1000):\n c.append(utils.gaussian(20, 1))\n\n for _ in range(1000):\n d.append(utils.gaussian(30, 1))\n\n for _ in range(1000):\n e.append(utils.gaussian(30.1, 1))\n\n for _ in range(1000):\n f.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n g.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n h.append(utils.gaussian(40, 1))\n\n for _ in range(1000):\n i.append(utils.gaussian(40, 3))\n\n for _ in range(1000):\n j.append(utils.gaussian(10, 1))\n\n for k, v in enumerate([a, b, c, d, e, f, g, h, i, j]):\n rxs.append(creation.RX(v, \"rx{}\".format(k)))\n\n for rx in stats.tiles(stats.scottKnot(rxs)):\n print(\"\", rx[\"rank\"], rx[\"name\"], rx[\"show\"], sep=\"\\t\")", "def generate_curves(self, seed=None):\n num_context = tf.random_uniform(\n shape=[], minval=3, maxval=self._max_num_context, dtype=tf.int32, seed=seed)\n\n # If we are testing we want to have more targets and have them evenly\n # distributed in order to plot the function.\n if self._testing:\n num_target = self._x_data.get_shape().as_list()[0]\n num_total_points = num_target\n # During training the number of target points and their x-positions are\n # selected at random\n else:\n num_target = tf.random_uniform(shape=(), minval=0, \n maxval=self._max_num_context - num_context,\n dtype=tf.int32, seed=seed)\n num_total_points = num_context + num_target\n\n # idx for x vals in target\n idxs = []\n # which instance to get y data from\n insts = []\n for i in range(self._batch_size):\n idxs.append( tf.random_shuffle(tf.range(self._num_pts_per_inst), seed=seed) )\n insts.append( tf.random_uniform(shape=[], minval=0, maxval=self._num_inst-1, dtype=tf.int32, seed=seed) )\n \n idxs = tf.stack(idxs)\n insts = tf.stack(insts)\n \n # batchsize x numtotalpoints x size (xsize or ysize)\n x_values = tf.stack([tf.expand_dims(tf.gather(self._x_uniq, idxs[tf.cast(i,tf.int32)][:tf.cast(num_total_points,tf.int32)]), axis=-1) for i in range(self._batch_size)])\n y_values = tf.stack([tf.expand_dims(tf.gather(self._y_data[insts[i]*self._num_pts_per_inst:(insts[i]+1)*self._num_pts_per_inst], idxs[i][:num_total_points]), axis=-1) for i in range(self._batch_size)])\n \n \n \n if self._testing:\n # Select the targets\n target_x = x_values\n target_y = y_values\n\n # Select the observations\n idx_ctxt = 
tf.random_shuffle(tf.range(num_target), seed=seed)\n context_x = tf.gather(x_values, idx_ctxt[:num_context], axis=1)\n context_y = tf.gather(y_values, idx_ctxt[:num_context], axis=1)\n\n else:\n # Select the targets which will consist of the context points as well as\n # some new target points\n target_x = x_values[:, :num_target + num_context, :]\n target_y = y_values[:, :num_target + num_context, :]\n\n # Select the observations\n context_x = x_values[:, :num_context, :]\n context_y = y_values[:, :num_context, :]\n \n context_x = tf.squeeze(context_x,-1)\n target_x = tf.squeeze(target_x,-1)\n\n query = ((context_x, context_y), target_x)\n\n return NPRegressionDescription(\n query=query,\n target_y=target_y,\n num_total_points=tf.shape(target_x)[1],\n num_context_points=num_context)", "def generate_curves(self, seed=None):\n num_context = tf.random_uniform(\n shape=[], minval=self._min_num_context, maxval=self._max_num_context, dtype=tf.int32, seed=seed)\n\n # If we are testing we want to have more targets and have them evenly\n # distributed in order to plot the function.\n if self._testing:\n num_target = self._num_pts_per_inst #self._x_data.get_shape().as_list()[0]\n num_total_points = num_target\n # During training the number of target points and their x-positions are\n # selected at random\n else:\n num_target = tf.random_uniform(shape=(), minval=0, \n maxval=self._max_num_context - num_context,\n dtype=tf.int32, seed=seed)\n num_total_points = num_context + num_target\n\n # idx for x vals in target\n idxs = []\n # which instance to get y data from\n insts = []\n for i in range(self._batch_size):\n idxs.append( tf.random_shuffle(tf.range(self._num_pts_per_inst), seed=seed) )\n insts.append( tf.random_uniform(shape=[], minval=0, maxval=self._num_inst-1, dtype=tf.int32, seed=seed) )\n \n idxs = tf.stack(idxs)\n insts = tf.stack(insts)\n \n # batchsize x numtotalpoints x size (xsize or ysize)\n x_values = tf.stack([tf.expand_dims(tf.gather(self._x_uniq, idxs[tf.cast(i,tf.int32)][:tf.cast(num_total_points,tf.int32)]), axis=-1) for i in range(self._batch_size)])\n y_values = tf.stack([tf.expand_dims(tf.gather(self._y_data[insts[i]*self._num_pts_per_inst:(insts[i]+1)*self._num_pts_per_inst], idxs[i][:num_total_points]), axis=-1) for i in range(self._batch_size)])\n \n \n \n if self._testing:\n # Select the targets\n target_x = x_values\n target_y = y_values\n\n # Select the observations\n idx_ctxt = tf.random_shuffle(tf.range(num_target), seed=seed)\n context_x = tf.gather(x_values, idx_ctxt[:num_context], axis=1)\n context_y = tf.gather(y_values, idx_ctxt[:num_context], axis=1)\n\n else:\n # Select the targets which will consist of the context points as well as\n # some new target points\n target_x = x_values[:, :num_target + num_context, :]\n target_y = y_values[:, :num_target + num_context, :]\n\n # Select the observations\n context_x = x_values[:, :num_context, :]\n context_y = y_values[:, :num_context, :]\n \n context_x = tf.squeeze(context_x,-1)\n target_x = tf.squeeze(target_x,-1)\n\n context_y = tf.squeeze(context_y,-1)\n target_y= tf.squeeze(target_y,-1)\n\n query = ((context_x, context_y), target_x)\n\n return NPRegressionDescription(\n query=query,\n target_y=target_y,\n num_total_points=tf.shape(target_x)[1],\n num_context_points=num_context)", "def plot_slice_wise_measures(labels, preds, args):\n\n cal_roc = [[], []]\n cal_prrcf1 = [[], [], []] # save PR, RC, F1 respectively\n noncal_prrcf1 = [[], [], []]\n thres_all = []\n noncal_roc = [[], []]\n n_slices = len(labels)\n for 
thres in range(500, -1, -5):\n print(\"[Threshold # of pixels: {}]\".format(thres))\n thres_all.append(thres)\n cal_pgt, cal_pp, cal_tp, noncal_pgt, noncal_pp, noncal_tp = \\\n plaque_detection_rate(labels, preds, thres=thres)\n\n\n cal_prrcf1[0].append(float(cal_tp) / cal_pp if cal_pp != 0 else 0.0)\n cal_prrcf1[1].append(float(cal_tp) / cal_pgt)\n cal_prrcf1[2].append(2.0 * cal_tp / (cal_pgt + cal_pp))\n noncal_prrcf1[0].append(float(noncal_tp) / noncal_pp if noncal_pp != 0 else 0.0)\n noncal_prrcf1[1].append(float(noncal_tp) / noncal_pgt)\n noncal_prrcf1[2].append(2.0 * noncal_tp / (noncal_pgt + noncal_pp))\n\n cal_roc[0].append((cal_pp - cal_tp) / (n_slices - cal_pgt)) # false negative ratio\n cal_roc[1].append(cal_tp / cal_pgt) # true positive ratio\n noncal_roc[0].append((noncal_pp - noncal_tp) / (n_slices - noncal_pgt)) # false negative ratio\n noncal_roc[1].append(noncal_tp / noncal_pgt) # true positive ratio\n\n print('Cal: PR - {:.4f} RC - {:.4f} F1 - {:.4f} Noncal: PR - {:.4f} RC - {:.4f} F1 - {:.4f}'.format(\n cal_prrcf1[0][-1], cal_prrcf1[1][-1], cal_prrcf1[2][-1],\n noncal_prrcf1[0][-1], noncal_prrcf1[1][-1], noncal_prrcf1[2][-1]))\n print('Cal: fpr - {:.4f} tpr - {:.4f} Noncal: fpr - {:.4f} tpr - {:.4f}'.format(\n cal_roc[0][-1], cal_roc[1][-1], noncal_roc[0][-1], noncal_roc[1][-1]))\n\n # plot the roc curve and calculate AUC\n fig_names = ['calcified', 'non-calcified']\n for plq_metrics, fig_name in zip([cal_roc, noncal_roc], fig_names):\n plt.figure()\n lw = 2\n auc_metric = auc(plq_metrics[0], plq_metrics[1])\n print(\"{} : {}\".format(fig_name, auc_metric))\n plt.plot(plq_metrics[0], plq_metrics[1], color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % auc_metric)\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('slice-wise ROC curve of {} plaques'.format(fig_name))\n plt.legend(loc=\"lower right\")\n plt.savefig(\"./{}/{}_roc.png\".format(args.fig_dir, fig_name))\n\n for plq_metrics, fig_name in zip([cal_prrcf1, noncal_prrcf1], fig_names):\n plt.figure()\n lw = 2\n plt.plot(thres_all, plq_metrics[0], color='r', lw=lw, label='precision')\n plt.plot(thres_all, plq_metrics[1], color='g', lw=lw, label='recall')\n plt.plot(thres_all, plq_metrics[2], color='b', lw=lw, label='f1')\n\n plt.xlim([min(thres_all), max(thres_all)])\n plt.ylim([0.0, 1.05])\n plt.xlabel('Threshold Number of Pixels')\n plt.title('{} measures under different thresholds'.format(fig_name))\n plt.legend(bbox_to_anchor=(1, 0.95), loc=\"upper right\")\n plt.savefig(\"./{}/{}_prrcf1.png\".format(args.fig_dir, fig_name))", "def get_best_threshold(model, x_test, y_test, metric, plot=True):\n\n #prediction = model(x_test, training=False)\n prediction = model.predict(x_test)\n proto_tensor = tf.make_tensor_proto(prediction)\n y_hat = tf.make_ndarray(proto_tensor)\n y_hat = y_hat.reshape(-1,)\n\n if metric == 'sensitivity_specitivity':\n \n fpr, tpr, thresholds = roc_curve(y_test.reshape(-1,), y_hat)\n gmeans = np.sqrt(tpr * (1-fpr)) # The Geometric Mean or G-Mean is a metric for imbalanced classification that, if optimized, will seek a balance between the sensitivity and the specificity.\n ix = argmax(gmeans)\n score = gmeans[ix]\n\n print('AUC:', auc(1-fpr, tpr))\n \n if plot:\n pyplot.plot(1-fpr, tpr, marker='.')\n pyplot.scatter(1-fpr[ix], tpr[ix], marker='o', color='black', label='Optimal Threshold')\n pyplot.xlabel('Specificity', fontsize=16)\n 
pyplot.ylabel('Sensitivity', fontsize=16)\n pyplot.legend(loc='upper left')\n pyplot.title('Sensitivity-Specitivity Curve', fontsize=20)\n pyplot.show()\n \n elif metric == 'precision_recall':\n \n precision, recall, thresholds = precision_recall_curve(y_test.reshape(-1,), y_hat)\n fscore = (2 * precision * recall) / (precision + recall) # If we are interested in a threshold that results in the best balance of precision and recall, then this is the same as optimizing the F-measure that summarizes the harmonic mean of both measures.\n ix = argmax(fscore)\n score = fscore[ix]\n print('AUC:', auc(recall, precision))\n\n if plot:\n pyplot.plot(recall, precision, marker='.')\n pyplot.scatter(recall[ix], precision[ix], marker='o', color='black', label='Optimal Threshold')\n pyplot.xlabel('Recall', fontsize=16)\n pyplot.ylabel('Precision', fontsize=16)\n pyplot.legend(loc='upper left')\n pyplot.title('Precision-Recall Curve', fontsize=20)\n pyplot.show()\n\n else:\n thresholds = np.arange(0, 1, 0.001)\n scores = [f1_score(y_test.reshape(-1,), round_using_t(y_hat, t)) for t in thresholds]\n ix = argmax(scores)\n score = scores[ix]\n\n return y_hat, thresholds[ix], score", "def operating_points(ranking):\n precision, recall = list(), list()\n for pos in range(len(ranking)):\n p, r = precision_recall_from_ranking(ranking, pos)\n precision.append(p)\n recall.append(r)\n return precision, recall", "def generate_curves(self, num_target=10, min_x=-2., max_x=2.):\n #num_context = tf.random_uniform(\n # shape=[], minval=3, maxval=self._num_context, dtype=tf.int32)\n num_context = self._num_context\n \n # If we are testing we want to have more targets and have them evenly\n # distributed in order to plot the function.\n if self._testing:\n num_total_points = num_target\n\n # During training the number of target points and their x-positions are\n # selected at random\n else:\n #num_target = tf.random_uniform(\n # shape=(), minval=2, maxval=self._max_num_context, dtype=tf.int32)\n #num_target = self._num_context\n num_total_points = num_context + num_target\n x_values = tf.random.uniform(\n [self._batch_size, num_total_points, self._x_size], min_x, max_x)\n\n # Set kernel parameters\n # Either choose a set of random parameters for the mini-batch\n if self._random_kernel_parameters:\n l1 = tf.random_uniform([self._batch_size, self._y_size,\n self._x_size], 0.1, self._l1_scale)\n sigma_f = tf.random_uniform([self._batch_size, self._y_size],\n 0.1, self._sigma_scale)\n # Or use the same fixed parameters for all mini-batches\n else:\n l1 = tf.ones(shape=[self._batch_size, self._y_size,\n self._x_size]) * self._l1_scale\n sigma_f = tf.ones(shape=[self._batch_size,\n self._y_size]) * self._sigma_scale\n \n # Pass the x_values through the Gaussian kernel\n # [batch_size, y_size, num_total_points, num_total_points]\n kernel = self._gaussian_kernel(x_values, l1, sigma_f)\n\n # Calculate Cholesky, using double precision for better stability:\n cholesky = tf.cast(tf.cholesky(\n tf.cast(kernel, tf.float64)), tf.float32)\n\n # Sample a curve\n # [batch_size, y_size, num_total_points, 1]\n y_values = tf.matmul(\n cholesky,\n tf.random_normal([self._batch_size, self._y_size, num_total_points, 1]))\n\n # [batch_size, num_total_points, y_size]\n y_values = tf.transpose(tf.squeeze(y_values, 3), [0, 2, 1])\n\n if self._testing:\n # Select the targets\n target_x = x_values\n target_y = y_values\n\n # Select the observations\n idx = tf.random_shuffle(tf.range(num_target))\n context_x = tf.gather(x_values, idx[:num_context], 
axis=1)\n context_y = tf.gather(y_values, idx[:num_context], axis=1)\n\n else:\n # Select the targets which will consist of the context points as well as\n # some new target points\n target_x = x_values[:, :num_target + num_context, :]\n target_y = y_values[:, :num_target + num_context, :]\n\n # Select the observations\n context_x = x_values[:, :num_context, :]\n context_y = y_values[:, :num_context, :]\n\n query = ((context_x, context_y), target_x)\n\n return NPRegressionDescription(\n query=query,\n target_y=target_y,\n num_total_points=tf.shape(target_x)[1],\n num_context_points=num_context)", "def _get_fprop_lrn(clss, compute_capability):\n code = r\"\"\"\n%(common)s\n\n__global__ void spool_fprop_lrn(\n const %(type)s* I, %(type)s* O, %(type)s* A,\n float alpha, float beta, float ascale, float bpower, int flags,\n int N, int W, int H, int D, int C,\n int WN, int HWN, int DHWN, int P, int Q,\n int magic_P, int shift_P, int QN, int PQN, int MPQN,\n int pad_c, int pad_d, int pad_h, int pad_w,\n int str_c, int str_d, int str_h, int str_w,\n int S, int RS, int RST, int JRST,\n int magic_S, int shift_S,\n int magic_RS, int shift_RS, int magic_RST, int shift_RST,\n int supP, int supQ, int shlP, int maskP, int shrP,\n int shlQ, int maskQ, int shrQ, int maskN, int shrN\n %(stats_args)s\n )\n{\n __shared__ float rcpWindowSize;\n extern __shared__ int lut[];\n\n int tid = threadIdx.x;\n\n // paralellism is over QMPK dimensions (output pixels and ofm's)\n int n = tid;\n int q = blockIdx.x;\n int mp = blockIdx.y;\n int k = blockIdx.z;\n\n int m = mp * magic_P; m >>= shift_P;\n int p = mp - m*P;\n\n // zigzag q back and forth to improve L2 cache perf\n if (p & 1)\n q = Q - q - 1;\n\n const %(type)s* IonO = I; // input pixel at output location\n I += n;\n IonO += k*MPQN + m*PQN + p*QN + q*N + n;\n O += k*MPQN + m*PQN + p*QN + q*N + n;\n A += k*MPQN + m*PQN + p*QN + q*N + n;\n\n float O_val = beta != 0.0f ? %(cvt)s(__ldg(O)) : 0.0f;\n\n if (tid < 32)\n {\n int kj = k * str_c - pad_c;\n int mt = m * str_d - pad_d;\n int pr = p * str_h - pad_h;\n int qs = q * str_w - pad_w;\n\n int window_size = 0;\n int jrst = tid;\n // this loop generates the LUT (same for pooling and normalization)\n while (jrst < JRST)\n {\n int j = jrst * magic_RST; j >>= shift_RST;\n int rst = jrst - j * RST;\n\n int t = rst * magic_RS; t >>= shift_RS;\n int rs = rst - t * RS;\n\n int r = rs * magic_S; r >>= shift_S;\n int s = rs - r*S;\n\n int x = qs + s;\n int y = pr + r;\n int z = mt + t;\n int c = kj + j;\n\n bool bounds_x = x >= 0 && x < W;\n bool bounds_y = y >= 0 && y < H;\n bool bounds_z = z >= 0 && z < D;\n bool bounds_c = c >= 0 && c < C;\n bool in_bounds = bounds_x && bounds_y && bounds_z && bounds_c;\n\n // Count the total valid slices\n window_size += __popc(__ballot(in_bounds));\n\n int sliceI = c*DHWN + z*HWN + y*WN + x*N;\n\n lut[jrst] = in_bounds ? sliceI : -1;\n jrst += 32;\n }\n\n if(tid == 0)\n {\n //rcpWindowSize = 1.0f / (float)window_size;\n rcpWindowSize = (float)RST/(float)JRST;\n }\n }\n __syncthreads();\n\n float out = 0.0f;\n float denom;\n float sumsquare = 0.0f;\n float input = 0.0f;\n int jrst = 0;\n while (jrst < JRST)\n {\n int slice0 = lut[jrst + 0];\n int slice1 = lut[jrst + 1];\n int slice2 = lut[jrst + 2];\n int slice3 = lut[jrst + 3];\n\n // TODO: May not need to load all slices if they are not used.\n input = jrst + 0 < JRST && slice0 >= 0 ? %(cvt)s(__ldg(I + slice0)) : 0.0f;\n sumsquare += jrst + 0 < JRST && slice0 >= 0 ? input * input: 0.0f;\n input = jrst + 1 < JRST && slice1 >= 0 ? 
%(cvt)s(__ldg(I + slice1)) : 0.0f;\n sumsquare += jrst + 1 < JRST && slice1 >= 0 ? input * input: 0.0f;\n input = jrst + 2 < JRST && slice2 >= 0 ? %(cvt)s(__ldg(I + slice2)) : 0.0f;\n sumsquare += jrst + 2 < JRST && slice2 >= 0 ? input * input: 0.0f;\n input = jrst + 3 < JRST && slice3 >= 0 ? %(cvt)s(__ldg(I + slice3)) : 0.0f;\n sumsquare += jrst + 3 < JRST && slice3 >= 0 ? input * input: 0.0f;\n\n jrst += 4;\n }\n\n denom = (1 + ascale*sumsquare*rcpWindowSize);\n out = %(cvt)s(__ldg(IonO)) / powf(denom, bpower);\n\n\n // convert back to fp to write out\n %(type)s temp_out = %(cvt_out)s( %(mul_by_scale)s (out*alpha + O_val*beta));\n\n // predicate write with no-op flag\n if (!(flags & 1)) {\n *O = temp_out;\n *A = %(cvt_out)s( %(mul_by_scale)s denom ); // write the denomiantor to address\n }\n\n // collect max abs stats\n int intermediate_max = max_abs(0, temp_out); // compute abs\n %(atomic_max)s\n}\n\"\"\"\n\n template_vals = prepare_template_vals(clss, compute_capability)\n code = code % template_vals\n module = SourceModule(code)\n kernel = module.get_function(\"spool_fprop_lrn\")\n kernel.prepare(\"3P 4f 34I 10I\" + flex_sig(clss[0])) # add superblocking parameter\n return kernel", "def main(arguments):\n # create aliases\n SPLIT = arguments.split\n IN_PATH = arguments.inference_path\n DATASET_ROOT = arguments.dataset_root\n DATASET = arguments.dataset\n SEG_THRESHOLD = arguments.segmentation_threshold\n IDX = arguments.index\n RANGE = arguments.range\n V2 = arguments.v2\n CKPT = arguments.ckpt_filename\n\n try:\n st, ed, num = map(float, RANGE.split(':'))\n num = int(num)\n except:\n log.error('Invalid range')\n\n # dataset = CoNSeP(download=False, root=DATASET_PATH)\n dataset = getattr(dataset_reader, DATASET)(download=False, root=DATASET_ROOT+DATASET+\"/\")\n\n metrics = VALID_METRICS.keys()\n\n aggregated_metrics = {}\n thresholds = []\n\n for step, k in enumerate(np.linspace(st, ed, num)):\n thresholds.append(k)\n mlflow.log_metric('threshold', k, step=step)\n output_map = get_instance_output(True, IDX, \n root=IN_PATH, split=SPLIT, \n h=SEG_THRESHOLD, k=k, \n ckpt=CKPT, dot_refinement=V2)\n label, _ = dataset.read_labels(IDX, SPLIT)\n s = score(output_map, label, *metrics)\n for metric in metrics:\n value = s[metric]\n if isinstance(value, dict):\n for key, val in value.items():\n if not isinstance(val, list):\n metric_name = metric + '_' + key\n mlflow.log_metric(metric_name, val, step=step)\n if metric_name not in aggregated_metrics:\n aggregated_metrics[metric_name] = []\n aggregated_metrics[metric_name].append(val)\n else:\n mlflow.log_metric(metric, value, step=step)\n if metric not in aggregated_metrics:\n aggregated_metrics[metric] = []\n aggregated_metrics[metric].append(value)\n\n for metric, score_list in aggregated_metrics.items():\n mlflow.log_metric(\"average_\" + metric, sum(score_list) / len(score_list))\n\n mlflow.log_metric(\"best_threshold\", thresholds[np.argmax(aggregated_metrics['DQ_point'])])", "def generateColdStartSystemSplits(filename, ratings, test_ratio, featurefile, ratios, time_stamps = False, fbots=False):\n if time_stamps == True:\n if(len(ratings[0]) < 4):\n print('Warning: No timestamps found')\n\n num_ratings = len(ratings)\n if not time_stamps: #Read ratings\n r = random.sample(range(len(ratings)), int(test_ratio*len(ratings))) #Randomly select test ratings\n y_test = [ratings[i] for i in r] #Put ratings in testset\n X_pool = [i for j, i in enumerate(ratings) if j not in r] #Put the remaining in the testset\n for i in range(len(ratios)): #For 
each training ratio supplied\n X_train = generateDatasetSplit(X_pool, ratios[i], num_ratings) #Generate a split of size ratios[i]\n num_ratings = len(X_train) \n if fbots:\n X_train = fb.addFilterBotRatings(X_train, featurefile, fbots)\n print('Added', len(X_train)-num_ratings, 'filterbot ratings', 'system', fbots, time_stamps)\n helpers.writeRatingsToFile('%s/%s_systemtrain%d.txt' %(folder, filename, i+1), X_train, delimiter='\\t')\n helpers.writeRatingsToFile('%s/%s_systemtest.txt' %(folder, filename), y_test, delimiter='\\t')\n\n else:\n ratings = sorted(ratings, key=itemgetter(3), reverse=True) #Sort ratings based on timestamps, the freshest being 'on top'\n num_test_ratings = int(len(ratings)*test_ratio) #Number of ratings to use for testing\n y_test = ratings[-num_test_ratings:] #Put freshest ratings in the testset\n X_pool = ratings[:-num_test_ratings] #Put the remainding in the training set pool\n for i in range(len(ratios)): #For each training ratio supplied\n X_train = generateDatasetSplit(X_pool, ratios[i], num_ratings) #Generate a split of size ratios[i]\n num_ratings = len(X_train) \n if fbots:\n X_train = fb.addFilterBotRatings(X_train, featurefile, fbots)\n print('Added', len(X_train)-num_ratings, 'filterbot ratings', 'system', fbots, time_stamps)\n helpers.writeRatingsToFile('%s/%s_systemtrain%d.txt' %(folder, filename, i+1), X_train, delimiter='\\t')\n helpers.writeRatingsToFile('%s/%s_systemtest.txt' %(folder, filename), y_test, delimiter='\\t')", "def rpfp(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['rpfp']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for i in xrange(1,4):\n label = \"RPFP{0}\".format(str(i))\n distillate_label = \"L{0}-E_C{1}\".format(str(i),str(i))\n lAng_label = 'L{0}ANG'.format(str(i))\n cAng_label = 'C{0}ANG'.format(str(i))\n lMag_label = 'C{0}MAG'.format(str(i))\n cMag_label = 'C{0}MAG'.format(str(i))\n distillate_label = get_distillate_label([lAng_label, cAng_label, lMag_label, cMag_label])\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_lAng_label = lAng_label\n dep_lAng_name = fields['deps'][0]\n dep_lAng_uuid = self.uuid_map[lAng_label]\n dep_cAng_label = cAng_label\n dep_cAng_name = fields['deps'][1]\n dep_cAng_uuid = self.uuid_map[cAng_label]\n dep_lMag_label = lMag_label\n dep_lMag_name = fields['deps'][2]\n dep_lMag_uuid = self.uuid_map[lMag_label]\n dep_cMag_label = cMag_label\n dep_cMag_name = fields['deps'][3]\n dep_cMag_uuid = self.uuid_map[cMag_label]\n \n deps = [[dep_lAng_label, dep_lAng_name, dep_lAng_uuid],\n [dep_lMag_label, dep_lMag_name, dep_lMag_uuid],\n [dep_cAng_label, dep_cAng_name, dep_cAng_uuid],\n [dep_cMag_label, dep_cMag_name, dep_cMag_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"RPFP\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map['REAC_PWR{0}'.format(i)] = emitted[-3][-36:]\n output_uuid_map['FUND_PWR{0}'.format(i)] = emitted[-2][-36:]\n\n filename = \"{0}/RPFP_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def __init__(self, flagTrackShape = 0):\n\n \"\"\" Nos 
interesa que el planner tenga una pista algo mas reducida de la real\n para conservar algo de robustez y no salirnos de la pista en el primer segundo. \"\"\"\n \n ### is HW is the half width of vehicle dimension + some saftey factor?\n ### what is slack??\n \n # HW = rospy.get_param(\"halfWidth\")+0.1\n HW = 0.4\n # print (\"HW\",HW)\n # if flagTrackShape == 0:\n # selectedTrack = rospy.get_param(\"trackShape\") # comentado para el testeo del planner\n # # selectedTrack = \"L_shape\"\n # else:\n # selectedTrack = \"oval\"\n\n selectedTrack = \"L_shape\"\n print (\"track selected\",selectedTrack)\n if selectedTrack == \"3110\":\n self.halfWidth = 0.6\n self.slack = 0.15\n spec = np.array([[60 * 0.03, 0],\n [80 * 0.03, +80 * 0.03 * 2 / np.pi],\n [20 * 0.03, 0],\n [80 * 0.03, +80 * 0.03 * 2 / np.pi],\n [40 * 0.03, -40 * 0.03 * 10 / np.pi],\n [60 * 0.03, +60 * 0.03 * 5 / np.pi],\n [40 * 0.03, -40 * 0.03 * 10 / np.pi],\n [80 * 0.03, +80 * 0.03 * 2 / np.pi],\n [20 * 0.03, 0],\n [80 * 0.03, +80 * 0.03 * 2 / np.pi],\n [80 * 0.03, 0]])\n\n elif selectedTrack == \"oval\":\n self.halfWidth = HW\n self.slack = 0.15\n spec = np.array([[1.0, 0],\n [4.5, 4.5 / np.pi],\n [2.0, 0],\n [4.5, 4.5 / np.pi],\n [1.0, 0]])\n\n # elif selectedTrack == \"L_shape\":\n # self.halfWidth = HW\n # self.slack = 0.01\n # lengthCurve = 4.5\n # spec = np.array([[1.0, 0],\n # [lengthCurve, lengthCurve / np.pi],\n # # Note s = 1 * np.pi / 2 and r = -1 ---> Angle spanned = np.pi / 2\n # [lengthCurve/2,-lengthCurve / np.pi ],\n # [lengthCurve, lengthCurve / np.pi],\n # [lengthCurve / np.pi *2, 0],\n # [lengthCurve/2, lengthCurve / np.pi]])\n\n elif selectedTrack == \"L_shape_n\":\n self.halfWidth = HW\n self.slack = 0.01\n lengthCurve = 4.5\n spec = np.array([[1.0, 0],\n [lengthCurve, lengthCurve / np.pi],\n [lengthCurve/2,-lengthCurve / np.pi ],\n [lengthCurve, lengthCurve / np.pi],\n [lengthCurve / np.pi *2, 0],\n [lengthCurve/2, lengthCurve / np.pi]])\n\n elif selectedTrack == \"L_shape_IDIADA\":\n self.halfWidth = HW\n self.slack = 6*0.45\n lengthCurve = 10*4.5\n spec = np.array([[1.0, 0],\n [lengthCurve, lengthCurve / np.pi],\n # Note s = 1 * np.pi / 2 and r = -1 ---> Angle spanned = np.pi / 2\n [lengthCurve/2,-lengthCurve / np.pi ],\n [lengthCurve, lengthCurve / np.pi],\n [lengthCurve / np.pi *2, 0],\n [lengthCurve/2, lengthCurve / np.pi]])\n\n elif selectedTrack == \"L_shape\":\n # elif selectedTrack == \"SLAM_shape1\":\n self.halfWidth = 0.4\n self.slack = 0.01\n lengthCurve = 1.5*(np.pi/2)\n spec = np.array([[2.5,0],\n [2*lengthCurve,(lengthCurve*2)/np.pi],\n [lengthCurve,-(lengthCurve*2) / np.pi],\n [1.0,0],\n [lengthCurve,lengthCurve*2/np.pi],\n [2.0,0],\n [lengthCurve,(lengthCurve*2)/np.pi],\n [4.0,0],\n [lengthCurve,(lengthCurve*2)/np.pi],\n [2.6,0]])\n\n\n elif selectedTrack == \"8_track\":\n self.halfWidth = 0.4\n self.slack = 0.15\n lengthCurve = 1.5*(np.pi/2)\n spec = np.array([[0.5,0],\n [lengthCurve,(lengthCurve*2)/np.pi],\n [1.0,0],\n [lengthCurve,-(lengthCurve*2) / np.pi],\n [lengthCurve,lengthCurve*2/np.pi],\n [lengthCurve,lengthCurve*2/np.pi],\n [1.0,0],\n [lengthCurve,(lengthCurve*2)/np.pi],\n [lengthCurve,-(lengthCurve*2)/np.pi],\n [lengthCurve,(lengthCurve*2)/np.pi],\n [1.0,0],\n [lengthCurve,lengthCurve*2/np.pi]])\n\n\n\n # Now given the above segments we compute the (x, y) points of the track and the angle of the tangent vector (psi) at\n # these points. For each segment we compute the (x, y, psi) coordinate at the last point of the segment. 
Furthermore,\n # we compute also the cumulative s at the starting point of the segment at signed curvature\n # PointAndTangent = [x, y, psi, cumulative s, segment length, signed curvature]\n\n ### what is cumulative s and signed curvature.?\n\n PointAndTangent = np.zeros((spec.shape[0] + 1, 6))\n for i in range(0, spec.shape[0]):\n if spec[i, 1] == 0.0: # If the current segment is a straight line\n l = spec[i, 0] # Length of the segments\n if i == 0:\n ang = 0 # Angle of the tangent vector at the starting point of the segment\n x = 0 + l * np.cos(ang) # x coordinate of the last point of the segment\n y = 0 + l * np.sin(ang) # y coordinate of the last point of the segment\n else:\n ang = PointAndTangent[i - 1, 2] # Angle of the tangent vector at the starting point of the segment\n x = PointAndTangent[i-1, 0] + l * np.cos(ang) # x coordinate of the last point of the segment\n y = PointAndTangent[i-1, 1] + l * np.sin(ang) # y coordinate of the last point of the segment\n psi = ang # Angle of the tangent vector at the last point of the segment\n\n # # With the above information create the new line\n # if i == 0:\n # NewLine = np.array([x, y, psi, PointAndTangent[i, 3], l, 0])\n # else:\n # NewLine = np.array([x, y, psi, PointAndTangent[i, 3] + PointAndTangent[i, 4], l, 0])\n #\n # PointAndTangent[i + 1, :] = NewLine # Write the new info\n\n if i == 0:\n NewLine = np.array([x, y, psi, PointAndTangent[i, 3], l, 0])\n else:\n NewLine = np.array([x, y, psi, PointAndTangent[i-1, 3] + PointAndTangent[i-1, 4], l, 0])\n\n PointAndTangent[i, :] = NewLine # Write the new info\n else:\n l = spec[i, 0] # Length of the segment\n r = spec[i, 1] # Radius of curvature\n\n\n if r >= 0:\n direction = 1\n else:\n direction = -1\n\n if i == 0:\n ang = 0 # Angle of the tangent vector at the\n # starting point of the segment\n CenterX = 0 \\\n + np.abs(r) * np.cos(ang + direction * np.pi / 2) # x coordinate center of circle\n CenterY = 0 \\\n + np.abs(r) * np.sin(ang + direction * np.pi / 2) # y coordinate center of circle\n else:\n ang = PointAndTangent[i - 1, 2] # Angle of the tangent vector at the\n # starting point of the segment\n CenterX = PointAndTangent[i-1, 0] \\\n + np.abs(r) * np.cos(ang + direction * np.pi / 2) # x coordinate center of circle\n CenterY = PointAndTangent[i-1, 1] \\\n + np.abs(r) * np.sin(ang + direction * np.pi / 2) # y coordinate center of circle\n\n spanAng = l / np.abs(r) # Angle spanned by the circle\n psi = wrap(ang + spanAng * np.sign(r)) # Angle of the tangent vector at the last point of the segment\n\n angleNormal = wrap((direction * np.pi / 2 + ang))\n angle = -(np.pi - np.abs(angleNormal)) * (sign(angleNormal))\n x = CenterX + np.abs(r) * np.cos(\n angle + direction * spanAng) # x coordinate of the last point of the segment\n y = CenterY + np.abs(r) * np.sin(\n angle + direction * spanAng) # y coordinate of the last point of the segment\n\n # With the above information create the new line\n # plt.plot(CenterX, CenterY, 'bo')\n # plt.plot(x, y, 'ro')\n\n # if i == 0:\n # NewLine = np.array([x, y, psi, PointAndTangent[i, 3], l, 1 / r])\n # else:\n # NewLine = np.array([x, y, psi, PointAndTangent[i, 3] + PointAndTangent[i, 4], l, 1 / r])\n #\n # PointAndTangent[i + 1, :] = NewLine # Write the new info\n\n if i == 0:\n NewLine = np.array([x, y, psi, PointAndTangent[i, 3], l, 1 / r])\n else:\n NewLine = np.array([x, y, psi, PointAndTangent[i-1, 3] + PointAndTangent[i-1, 4], l, 1 / r])\n\n PointAndTangent[i, :] = NewLine # Write the new info\n # plt.plot(x, y, 'or')\n\n # Now update 
info on last point\n # xs = PointAndTangent[PointAndTangent.shape[0] - 2, 0]\n # ys = PointAndTangent[PointAndTangent.shape[0] - 2, 1]\n # xf = PointAndTangent[0, 0]\n # yf = PointAndTangent[0, 1]\n # psif = PointAndTangent[PointAndTangent.shape[0] - 2, 2]\n #\n # # plt.plot(xf, yf, 'or')\n # # plt.show()\n # l = np.sqrt((xf - xs) ** 2 + (yf - ys) ** 2)\n #\n # NewLine = np.array([xf, yf, psif, PointAndTangent[PointAndTangent.shape[0] - 2, 3] + PointAndTangent[\n # PointAndTangent.shape[0] - 2, 4], l, 0])\n # PointAndTangent[-1, :] = NewLine\n\n\n xs = PointAndTangent[-2, 0]\n ys = PointAndTangent[-2, 1]\n xf = 0\n yf = 0\n psif = 0\n\n # plt.plot(xf, yf, 'or')\n # plt.show()\n l = np.sqrt((xf - xs) ** 2 + (yf - ys) ** 2)\n\n NewLine = np.array([xf, yf, psif, PointAndTangent[-2, 3] + PointAndTangent[-2, 4], l, 0])\n PointAndTangent[-1, :] = NewLine\n\n self.PointAndTangent = PointAndTangent\n self.TrackLength = PointAndTangent[-1, 3] + PointAndTangent[-1, 4]", "def eg1(r_train, r_test, N_train=1000, N_test=500):\n\n def eg1_kernel(r, N):\n X1 = np.random.randn(N)\n X2_1 = np.exp(X1)\n X2_2 = np.random.randn(N)\n X2_prob = np.random.uniform(0, 1, N)\n X2 = np.where(X2_prob < r, X2_1, X2_2)\n X3 = np.random.randn(N)\n X4 = np.random.randn(N)\n Y = 210 + 27.4 * X1 + 13.7 * X2 + 13.7 * X3 + 13.7 * X4 + np.random.randn(N)\n\n data = {}\n data['X1'] = X1\n data['X2'] = X2\n data['X3'] = X3\n data['X4'] = X4\n data['Y'] = Y\n return data\n\n data_train = eg1_kernel(r_train, N_train)\n data_test = eg1_kernel(r_test, N_test)\n\n return data_train, data_test", "def proposal_metrics(iou):\n # find best roi for each gt, for summary only\n best_iou = tf.reduce_max(iou, axis=0)\n mean_best_iou = tf.reduce_mean(best_iou, name='best_iou_per_gt')\n summaries = [mean_best_iou]\n with tf.device('/cpu:0'):\n for th in [0.3, 0.5]:\n recall = tf.truediv(\n tfv1.count_nonzero(best_iou >= th),\n tf.size(best_iou, out_type=tf.int64),\n name='recall_iou{}'.format(th))\n summaries.append(recall)\n add_moving_summary(*summaries)", "def _get_fpr(self, arg):", "def precompute_scoring():\n global volume_void_inclusion\n global attract_point_distances\n global perlin_values\n \n volume_void_inclusion = []\n for i,void in enumerate(volumes_void):\n inclusion = gh.PointInBrep(void,points_input,False)\n volume_void_inclusion.append(inclusion)\n \n attract_point_distances = []\n for i,point in enumerate(points_attractor):\n distances = gh.Division(gh.Distance(point,points_input),max_dist)\n attract_point_distances.append(distances)", "def multiple_perf_in_roofline_plot(multiple_perf_metrics, colormap, reverse_legend=False):\n fig, ax = roofline_plot()\n\n norm = matplotlib.colors.Normalize(vmin=0, vmax=len(multiple_perf_metrics))\n cmap = matplotlib.cm.get_cmap(colormap)\n\n idx = 0\n for run_label, perf_metrics in multiple_perf_metrics.items():\n print(f'Plotting roofline for {run_label}')\n perf_in_roofline_plot(perf_metrics, run_label, cmap(norm(idx)), fig, ax)\n idx += 1\n\n # Shrink current axis by 20%\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n if reverse_legend:\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(reversed(handles), reversed(labels), frameon=False, loc='center left', bbox_to_anchor=(1, 0.5))\n else:\n ax.legend(frameon=False, loc='center left', bbox_to_anchor=(1, 0.5))", "def 
rocs(test_set_y_org,test_set_y_pred_prob,methods,linestyles,classes_unique,plot_curve=False,filename=\"./fig_roc.pdf\",colors=None,positive_class_for_two_classes=None,figwidth=5,figheight=5):\n from sklearn.metrics import roc_curve\n #from sklearn.metrics import auc\n from sklearn.metrics import roc_auc_score\n from scipy import interp\n import matplotlib as mpl\n mpl.use(\"pdf\")\n import matplotlib.pyplot as plt\n \n n_classes=len(classes_unique)\n test_set_Y_org,test_set_y_org_unique=membership_vector_to_indicator_matrix(test_set_y_org)\n\n num_methods=len(methods)\n roc_aucs=[0]*num_methods\n names=[None]*num_methods\n for m in range(num_methods):\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n for c in range(n_classes):\n fpr[c], tpr[c], _ = roc_curve(test_set_Y_org[:, c], test_set_y_pred_prob[m][:, c])\n roc_auc[c] = roc_auc_score(test_set_Y_org[:, c], test_set_y_pred_prob[m][:, c])\n\n # Compute macro-average ROC curve and AUROC area\n # First aggregate all false positive rates\n all_fpr = np.unique(np.concatenate([fpr[c] for c in range(n_classes)]))\n # Then interpolate all ROC curves at this points\n mean_tpr = np.zeros_like(all_fpr)\n for c in range(n_classes):\n mean_tpr += interp(all_fpr, fpr[c], tpr[c])\n # Finally average it and compute AUC\n mean_tpr /= n_classes\n fpr[\"macro\"] = all_fpr\n tpr[\"macro\"] = mean_tpr\n #roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n\n # Compute micro-average PRC curve and PRC areas\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(test_set_Y_org.ravel(), test_set_y_pred_prob[m].ravel())\n roc_auc[\"macro\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m], average=\"macro\") # micro macro, weighted, or samples\n roc_auc[\"micro\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m],average=\"micro\") # micro macro, weighted, or samples\n roc_auc[\"weighted\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m], average=\"weighted\") # micro macro, weighted, or samples\n roc_auc[\"samples\"] = roc_auc_score(test_set_Y_org, test_set_y_pred_prob[m], average=\"samples\") # micro macro, weighted, or samples\n\n if plot_curve:\n if m==0:\n fig=plt.figure(num=1,figsize=(figwidth,figheight))\n ax=fig.add_subplot(1,1,1)\n ax.plot([0, 1], [0, 1], 'k--')\n if n_classes>2 or positive_class_for_two_classes is None:\n ax.plot(fpr[\"macro\"], tpr[\"macro\"], linestyle=linestyles[m],linewidth=1,color=colors[n_classes],label='{0}: macro-avg ROC (area={1:0.4f})'.format(methods[m], roc_auc[\"macro\"]))\n\n for c in range(n_classes):\n if positive_class_for_two_classes is None or (n_classes==2 and positive_class_for_two_classes==c):\n ax.plot(fpr[c], tpr[c],linestyle=linestyles[m],linewidth=1,color=colors[c],label='{0}: ROC of {1} (area={2:0.4f})'.format(methods[m], classes_unique[c], roc_auc[c]))\n\n # add some text for labels, title and axes ticks\n if m==num_methods-1:\n ax.set_ylim(0.0,1.0)\n ax.set_xlim(0.0,1.0)\n ax.set_ylabel(\"True Positive Rate\",fontsize=12)\n ax.set_xlabel(\"False Positive Rate\",fontsize=12) \n #ax.set_title(\"\",fontsize=15)\n ax.legend(loc=\"lower right\",fontsize=8)\n #plt.subplots_adjust(bottom=0.12) # may this is not working because of the following setting\n fig.savefig(filename,bbox_inches='tight')\n plt.close(fig)\n\n roc_auc_list=[roc_auc[c] for c in range(n_classes)]\n roc_auc_list.extend([roc_auc[\"macro\"],roc_auc[\"micro\"],roc_auc[\"weighted\"],roc_auc[\"samples\"]])\n roc_auc=np.array(roc_auc_list)\n name=[methods[m]+\"_AUROC_\" + c for c in classes_unique]\n 
name.extend([\"macro\",\"micro\",\"weighted\",\"samples\"])\n name=np.array(name)\n\n roc_aucs[m]=roc_auc\n names[m]=name\n \n return roc_aucs,names", "def generate_curves(self, seed=None):\n num_context = tf.random_uniform(\n shape=[], minval=3, maxval=self._max_num_context, dtype=tf.int32, seed=seed)\n\n # If we are testing we want to have more targets and have them evenly\n # distributed in order to plot the function.\n if self._testing:\n num_target = 400\n num_total_points = num_target\n x_values = tf.tile(\n tf.expand_dims(tf.range(-2., 2., 1. / 100, dtype=tf.float32), axis=0),\n [self._batch_size, 1])\n x_values = tf.expand_dims(x_values, axis=-1)\n # During training the number of target points and their x-positions are\n # selected at random\n else:\n num_target = tf.random_uniform(shape=(), minval=0, \n maxval=self._max_num_context - num_context,\n dtype=tf.int32, seed=seed)\n num_total_points = num_context + num_target\n x_values = tf.random_uniform(\n [self._batch_size, num_total_points, self._x_size], -2, 2, seed=seed)\n \n def w(x, x_min=-2, x_max=2):\n weight_vals = tf.stack([ [1/(i+1) if j <= i else 0 for j in range(self._num_gammas)] for i in range(self._num_gammas)])\n \n bucketsize = (x_max-x_min)/self._num_gammas\n buckets = (x-x_min)/bucketsize\n buckets = tf.reshape(buckets,[-1])\n \n mapped = tf.expand_dims(tf.expand_dims(tf.map_fn(lambda x: weight_vals[tf.cast(x,tf.int32)], buckets),-2),-2)\n\n return mapped \n\n # Set kernel parameters\n # Either choose a set of random parameters for the mini-batch\n if self._random_kernel_parameters:\n gammas = 3.14*tf.random_uniform([self._num_gammas, self._batch_size], 0.1, 2)\n gammas = tf.expand_dims(tf.expand_dims(gammas,-1),-1)\n # Or use the same fixed parameters for all mini-batches\n else:\n gammas = 3.14*tf.linspace(0.1,2,self._num_gammas)\n print(gammas)\n #gammas = tf.broadcast_to(gammas,[self._num_gammas, self._batch_size])\n gammas = tf.reshape(tf.tile(gammas,tf.constant([self._batch_size])),[self._num_gammas, self._batch_size])\n gammas = tf.expand_dims(tf.expand_dims(gammas,-1),-1)\n\n weights = w(x_values)\n \n weights = tf.reshape(weights, [self._batch_size, num_total_points,self._x_size,self._num_gammas])\n weights = tf.transpose(weights,[3,0,1,2])\n \n gammas = tf.broadcast_to(gammas,[self._num_gammas, self._batch_size, num_total_points, self._x_size])\n x_values_bcast = tf.expand_dims(x_values, 0)\n x_values_bcast = tf.broadcast_to(x_values_bcast,[self._num_gammas, self._batch_size, num_total_points, self._x_size])\n \n out = tf.math.multiply(gammas,x_values_bcast)\n out = tf.math.multiply(weights,tf.sin(out))\n out = tf.reduce_sum(out,axis=0)\n \n y_values = out\n y_values += tf.random.normal((self._batch_size,num_total_points,self._y_size),stddev = self._epsilon, seed=seed)\n\n if self._testing:\n # Select the targets\n target_x = x_values\n target_y = y_values\n\n # Select the observations\n idx = tf.random_shuffle(tf.range(num_target), seed=seed)\n context_x = tf.gather(x_values, idx[:num_context], axis=1)\n context_y = tf.gather(y_values, idx[:num_context], axis=1)\n\n else:\n # Select the targets which will consist of the context points as well as\n # some new target points\n target_x = x_values[:, :num_target + num_context, :]\n target_y = y_values[:, :num_target + num_context, :]\n\n # Select the observations\n context_x = x_values[:, :num_context, :]\n context_y = y_values[:, :num_context, :]\n\n query = ((context_x, context_y), target_x)\n\n return NPRegressionDescription(\n query=query,\n 
target_y=target_y,\n num_total_points=tf.shape(target_x)[1],\n num_context_points=num_context)", "def metrics(labels, preds_proba, print_metrics=True, plot=False, threshold=0.5, rounded=4):\n \n\n test_fpr, test_tpr, test_thresholds = roc_curve(labels, preds_proba)\n roc_auc = auc(test_fpr, test_tpr)\n preds = preds_proba_to_preds_class(preds_proba,threshold)\n precision = precision_score(labels, preds)\n recall = recall_score(labels, preds)\n accuracy = accuracy_score(labels, preds)\n f1 = f1_score(labels, preds)\n if plot:\n sns.set_style('darkgrid', {'axes.facecolor': '0.9'})\n plt.figure(figsize=(10, 8))\n lw = 2\n plt.plot(test_fpr, test_tpr, color='darkorange',\n lw=lw, label='ROC curve')\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.yticks([i/20.0 for i in range(21)])\n plt.xticks([i/20.0 for i in range(21)])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic (ROC) Curve')\n plt.legend(loc='lower right')\n plt.show()\n if print_metrics:\n print(f\"ROC AUC Score: {roc_auc}\\n\")\n print(f\"------- Metrics for threshold {threshold} -------\")\n print(f\"- Precision Score: {precision}\")\n print(f\"- Recall Score: {recall}\")\n print(f\"- Accuracy Score: {accuracy}\")\n print(f\"- F1 Score: {f1}\\n\")\n else:\n return {\"roc_auc\":roc_auc, \"precision\":precision, \"recall\":recall, \"accuracy\":accuracy, \"f1\":f1}" ]
[ "0.55515224", "0.5260893", "0.5233731", "0.5168344", "0.51133126", "0.5103171", "0.5094314", "0.5086308", "0.5072418", "0.50661457", "0.5039138", "0.50162923", "0.5015882", "0.50045204", "0.4940882", "0.49120685", "0.49088785", "0.48621792", "0.4859575", "0.4826404", "0.4822536", "0.48018596", "0.4797209", "0.47873545", "0.4772046", "0.47575015", "0.47532266", "0.47469366", "0.4744492", "0.47405344" ]
0.81111413
0
Generates an Aspen model based on kernel metrics; counts will be based on profile data
def generateAspenModel(kernelMetrics, modelName=None, rooflines=None):
    # metrics we care about and the mapping to aspen resources
    aspenMetricsFlops = {"flop_count_dp" : "as dp", "flop_count_sp" : "as sp"}
    aspenMetricsMem = {"dram_read_bytes" : "loads", "dram_bytes" : "stores", "l2_read_bytes" : "loads_l2", "l2_write_bytes" : "stores_l2", "shared_load_bytes" : "loads_shared", "shared_store_bytes" : "stores_shared"}

    if modelName is None:
        raise ValueError("Error, no modelname for aspen model given")
    if modelName == "":
        raise ValueError("Error, the modelname can't be blank")

    indent = 0
    modelFileName = modelName + ".aspen"
    with open(modelFileName, 'w') as aspenFile:
        # boilerplate
        aspenFile.write("// Aspen file generated automatically using cuda roofline tool\n")
        aspenFile.write("// All kernels have exact counts from profiling\n")
        aspenFile.write("// This model needs to know the number of processors to run on\n")
        aspenFile.write("\n\n")
        aspenFile.write("model {} {{\n".format(modelName))
        indent = indent + 1
        aspenFile.write("{}param numThreads = numProcessors\n".format("\t" * indent))

        # write out the individual kernel calls
        for kernel in kernelMetrics:
            # ignore library calls (usually start with [)
            if kernel[0] == "[":
                continue

            # first some kernel info
            aspenFile.write("{}// kernel {} average exec time {}\n".format("\t" * indent, formatKernel(kernel), statistics.mean(kernelMetrics[kernel]["Duration"])))
            if rooflines:
                aspenFile.write("{}// roofline points\n".format("\t" * indent))
                for roofline in rooflines:
                    if kernel in roofline:
                        aspenFile.write("{}// {} flops/byte {} gflops\n".format("\t" * indent, rooflines[roofline][0], rooflines[roofline][1] / 1.0e09))

            aspenFile.write("{}kernel {} {{\n".format("\t" * indent, formatKernel(kernel)))
            indent = indent + 1
            aspenFile.write("{}execute [ {} ] {{\n".format("\t" * indent, kernelMetrics[kernel]["callCount"]))
            indent = indent + 1

            # flops first
            for flopMetric in aspenMetricsFlops:
                if flopMetric in kernelMetrics[kernel]:
                    aspenFile.write("{}flops [ {} / numThreads ] {}\n".format("\t" * indent, statistics.mean(kernelMetrics[kernel][flopMetric]), aspenMetricsFlops[flopMetric]))
            aspenFile.write("\n")

            # memory next
            for memMetric in aspenMetricsMem:
                if memMetric in kernelMetrics[kernel]:
                    aspenFile.write("{}{} [ {} / numThreads]\n".format("\t" * indent, aspenMetricsMem[memMetric], statistics.mean(kernelMetrics[kernel][memMetric])))

            # close out kernels
            indent = indent - 1
            aspenFile.write("{}}}\n".format("\t" * indent))
            indent = indent - 1
            aspenFile.write("{}}}\n\n".format("\t" * indent))

        # do the main kernel
        aspenFile.write("{}kernel main {{\n".format("\t" * indent))
        indent = indent + 1
        # calls to all of the kernels
        for kernel in kernelMetrics:
            # skip library calls
            if kernel[0] == "[":
                continue
            aspenFile.write("{}call {}()\n".format("\t" * indent, formatKernel(kernel)))
        indent = indent - 1
        aspenFile.write("{}}}\n".format("\t" * indent))

        # end of the model
        aspenFile.write("}\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, dims, act = 'relu', random_seed = 201809, splitseed = 215, optimizer = Adam(),\n weights_dir = 'CarDEC Count Weights', n_features = 32, mode = 'HVG'):\n \n super(count_model, self).__init__()\n\n tf.keras.backend.clear_session()\n \n self.mode = mode\n self.name_ = mode + \" Count\"\n \n if mode == 'HVG':\n self.embed_name = 'embedding'\n else:\n self.embed_name = 'LVG embedding'\n \n self.weights_dir = weights_dir\n \n self.dims = dims\n n_stacks = len(dims) - 1\n \n self.optimizer = optimizer\n self.random_seed = random_seed\n self.splitseed = splitseed\n \n random.seed(random_seed)\n np.random.seed(random_seed)\n tf.random.set_seed(random_seed)\n \n self.activation = act\n self.MeanAct = lambda x: tf.clip_by_value(tf_exp(x), 1e-5, 1e6)\n self.DispAct = lambda x: tf.clip_by_value(tf.nn.softplus(x), 1e-4, 1e4)\n \n model_layers = []\n for i in range(n_stacks - 1, 0, -1):\n model_layers.append(Dense(dims[i], kernel_initializer = \"glorot_uniform\", activation = self.activation\n , name='base%d' % (i-1)))\n self.base = Sequential(model_layers, name = 'base')\n\n self.mean_layer = Dense(dims[0], activation = self.MeanAct, name='mean')\n self.disp_layer = Dense(dims[0], activation = self.DispAct, name='dispersion')\n\n self.rescale = Lambda(lambda l: tf.matmul(tf.linalg.diag(l[0]), l[1]), name = 'sf scaling')\n \n build_dir(self.weights_dir)\n \n self.construct(n_features, self.name_)", "def evaluate(cfg: DictConfig):\n\n # suppress TensorFlow and DALI warnings\n suppress_warnings()\n\n if cfg.USE_MULTI_GPUS.VALUE:\n # change number of visible gpus for evaluation\n set_gpus(cfg.USE_MULTI_GPUS.GPU_IDS)\n # update batch size according to available gpus\n data_generator.update_batch_size(cfg)\n\n if cfg.OPTIMIZATION.AMP:\n print(\"Enabling Automatic Mixed Precision(AMP) training\")\n policy = mixed_precision.Policy('mixed_float16')\n mixed_precision.set_global_policy(policy)\n\n if cfg.OPTIMIZATION.XLA:\n print(\"Enabling Automatic Mixed Precision(XLA) training\")\n tf.config.optimizer.set_jit(True)\n\n # create model\n strategy = None\n if cfg.USE_MULTI_GPUS.VALUE:\n # multi gpu training using tensorflow mirrored strategy\n strategy = tf.distribute.MirroredStrategy(\n cross_device_ops=tf.distribute.HierarchicalCopyAllReduce()\n )\n print('Number of visible gpu devices: {}'.format(strategy.num_replicas_in_sync))\n with strategy.scope():\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n else:\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n\n model.compile(\n optimizer=optimizer,\n loss=unet3p_hybrid_loss,\n metrics=[dice_coef],\n )\n\n # weights model path\n checkpoint_path = join_paths(\n cfg.WORK_DIR,\n cfg.CALLBACKS.MODEL_CHECKPOINT.PATH,\n f\"{cfg.MODEL.WEIGHTS_FILE_NAME}.hdf5\"\n )\n\n assert os.path.exists(checkpoint_path), \\\n 
f\"Model weight's file does not exist at \\n{checkpoint_path}\"\n\n # TODO: verify without augment it produces same results\n # load model weights\n model.load_weights(checkpoint_path, by_name=True, skip_mismatch=True)\n model.summary()\n\n # data generators\n val_generator = data_generator.get_data_generator(cfg, \"VAL\", strategy)\n validation_steps = data_generator.get_iterations(cfg, mode=\"VAL\")\n\n # evaluation metric\n evaluation_metric = \"dice_coef\"\n if len(model.outputs) > 1:\n evaluation_metric = f\"{model.output_names[0]}_dice_coef\"\n\n result = model.evaluate(\n x=val_generator,\n steps=validation_steps,\n workers=cfg.DATALOADER_WORKERS,\n return_dict=True,\n )\n\n # return computed loss, validation accuracy, and it's metric name\n return result, evaluation_metric", "def fit(self, X_raw, y_made_claim, y_claims_amount):\n\n # YOUR CODE HERE\n\n # Remember to include a line similar to the one below\n # X_clean = self._preprocessor(X_raw)\n \n # made_metrics = [tf.keras.metrics.AUC(name=\"auc\")]\n # def made_nn_model(metrics, input_shape, lr=0.001):\n # model = tf.keras.Sequential([\n # tf.keras.layers.Dense(256,activation=\"relu\",input_shape=(input_shape,),kernel_regularizer=l2(l=0.05)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(64,activation=\"relu\",kernel_regularizer=l2(l=0.01)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(8,activation=\"relu\",kernel_regularizer=l2(l=0.001)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(1,activation=\"sigmoid\")\n # ])\n\n # model.compile(\n # optimizer=tf.keras.optimizers.Adam(lr=lr),\n # loss=tf.keras.losses.BinaryCrossentropy(),\n # metrics=metrics)\n\n # return model\n\n # claim_metrics = [tf.keras.metrics.MeanSquaredError(name=\"mse\")]\n # def claim_nn_model(metrics, input_shape, lr=0.001):\n # model = tf.keras.Sequential([\n # tf.keras.layers.Dense(256,activation=\"relu\",input_shape=(input_shape,)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(16,activation=\"relu\",input_shape=(input_shape,)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(8,activation=\"relu\",input_shape=(input_shape,)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(1)\n # ])\n \n # model.compile(\n # optimizer=tf.keras.optimizers.Adam(lr=lr),\n # loss=tf.keras.losses.MeanSquaredError(),\n # metrics=metrics)\n # return model\n\n \n # X_1, X_1val, y_1, y_1val, y_2, y_2val = train_test_split(X_raw,y_made_claim,y_claims_amount,test_size=0.05)\n # X_1, drop_index = self._preprocessor(X_1, train=True)\n # y_1 = y_1.drop(drop_index).values\n # y_2 = y_2.drop(drop_index).values\n \n # X_1val, drop_index = self._preprocessor(X_1val, train=False)\n # y_1val = y_1val.drop(drop_index).values\n # y_2val = y_2val.drop(drop_index).values\n \n # self.scaler = StandardScaler()\n # X_1 = self.scaler.fit_transform(X_1)\n # X_1val = self.scaler.transform(X_1val)\n \n # #prepare for claim amount\n # X_2 = X_1[y_1==1]\n # y_2 = y_2[y_1==1]\n # X_2val = X_1val[y_1val==1]\n # y_2val = y_1val[y_1val==1]\n \n # self.y_mean = np.mean(y_2)\n # self.y_std = np.std(y_2)\n # y_2 = (y_2 - self.y_mean)/self.y_std\n # y_2val = (y_2val - self.y_mean)/self.y_std\n\n # #fit made claim\n # logdir = \"log\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n # tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)\n # early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, mode=\"min\", restore_best_weights=True)\n \n # self.Model_made = made_nn_model(made_metrics, 
X_1.shape[1], lr=0.0003)\n # History_made = self.Model_made.fit(X_1,y_1,\n # class_weight={0:1,1:10},\n # callbacks=[tensorboard_callback, early_stopping],\n # validation_data = (X_1val, y_1val),\n # epochs=200,\n # batch_size=512)\n\n # #fit claim amount\n # early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20, mode=\"min\", restore_best_weights=True)\n # logdir = \"log\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n # tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)\n \n # self.Model_claim = claim_nn_model(claim_metrics, X_2.shape[1], lr=0.0005)\n # History = self.Model_claim.fit(X_2,y_2,\n # callbacks=[tensorboard_callback, early_stopping],\n # validation_data=(X_2, y_2),\n # epochs=5000,\n # batch_size=512)\n \n \n X_1, drop_index = self._preprocessor(X_raw, train=True)\n y_1 = y_made_claim.drop(drop_index).values\n y_2 = y_claims_amount.drop(drop_index).values\n \n scaler = StandardScaler()\n clf_made = RandomForestClassifier(n_estimators=500,class_weight={0:1,1:10},n_jobs=-1,max_depth=10,max_features=33,min_samples_leaf=30)\n self.Model_made = Pipeline([(\"scale\",scaler),(\"clf\",clf_made)])\n self.Model_made.fit(X_1,y_1)\n #self.Model_made = fit_and_calibrate_classifier(self.Model_made, X_1, y_1)\n \n # #prepare for claim amount\n X_2 = X_1[y_1==1]\n y_2 = y_2[y_1==1]\n \n self.y_mean = np.mean(y_2)\n self.y_std = np.std(y_2)\n y_2 = (y_2 - self.y_mean)/self.y_std\n\n clf_claim = RandomForestRegressor(n_estimators=500,n_jobs=-1,max_depth=10,max_features=30,min_samples_leaf=70)\n self.Model_claim = Pipeline([(\"scale\",scaler),(\"clf\",clf_claim)])\n self.Model_claim.fit(X_2,y_2)\n \n\n return None", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.compat.v1.metrics.accuracy(label_ids, predicted_labels)\n #f1_score = tf.contrib.metrics.f1_score(\n # label_ids,\n # predicted_labels)\n #auc = tf.metrics.auc(\n # label_ids,\n # predicted_labels)\n #recall = tf.metrics.recall(\n # label_ids,\n # predicted_labels)\n #precision = tf.metrics.precision(\n # label_ids,\n # predicted_labels)\n #true_pos = tf.metrics.true_positives(\n # label_ids,\n # predicted_labels)\n #true_neg = tf.metrics.true_negatives(\n # label_ids,\n # predicted_labels)\n #false_pos = tf.metrics.false_positives(\n # label_ids,\n # predicted_labels)\n #false_neg = tf.metrics.false_negatives(\n # label_ids,\n # predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n # \"f1_score\": f1_score,\n #\"auc\": auc,\n # \"precision\": precision,\n # \"recall\": recall,\n # \"true_positives\": true_pos,\n # \"true_negatives\": true_neg,\n # \"false_positives\": false_pos,\n # \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n 
loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (subject_logits, property_logits, value_logits) = self.create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n params=params,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions, depth):\n one_hot_positions = tf.one_hot(\n positions, depth=depth, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n # subject, property, value로 나오도록\n subject_label = features[\"subject\"]\n property_label = features[\"property\"]\n value_label = features[\"value\"]\n res_length = params[\"res_length\"]\n ont_length = params[\"ont_length\"]\n\n subject_loss = compute_loss(subject_logits, subject_label, res_length)\n property_loss = compute_loss(property_logits, property_label, ont_length)\n value_loss = compute_loss(value_logits, value_label, res_length)\n\n total_loss = (subject_loss + property_loss + value_loss) / 3.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"subject_logits\": subject_logits,\n \"property_logits\": property_logits,\n \"value_logits\": value_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec", "def model_fn(features, labels, mode, params):\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n 
label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(\n label_ids,\n predicted_labels)\n auc = tf.metrics.auc(\n label_ids,\n predicted_labels)\n recall = tf.metrics.recall(\n label_ids,\n predicted_labels)\n precision = tf.metrics.precision(\n label_ids,\n predicted_labels)\n true_pos = tf.metrics.true_positives(\n label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(\n label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(\n label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(\n label_ids,\n predicted_labels)\n\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)", "def model_fn(features,labels,mode,params):\n tf.logging.info('*** Features ***')\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s ,shape = %s\" % (name,features[name].shape))\n\n input_ids = features['input_ids']\n input_mask = features['input_mask']\n segment_ids = features['segment_ids']\n label_ids = features['label_ids']\n if 'is_real_example' in features:\n is_real_example = tf.cast(features['is_real_example'],dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids),dtype=tf.float32)\n\n is_training = (mode == tf_estimator.estimator.ModeKeys.TRAIN)\n\n (total_loss,per_example_loss,probabilities,predictions) = \\\n create_model(albert_config,is_training,input_ids,input_mask,\n segment_ids,label_ids,num_labels,\n use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map,initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint,assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf_estimator.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(total_loss,\n learning_rate,\n num_train_steps,\n num_warmup_steps,use_tpu=False)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n 
mode=mode,\n loss=total_loss,\n train_op=train_op,\n )\n elif mode == tf_estimator.estimator.ModeKeys.EVAL:\n def metric_fn(per_example_loss,label_ids,logits,is_real_example):\n accuracy = tf.metrics.accuracy(\n labels=label_ids,predictions=predictions,\n weights=is_real_example\n )\n loss = tf.metrics.mean(\n values=per_example_loss,weights=is_real_example\n )\n return {\n 'eval_accuracy':accuracy,\n 'eval_loss':loss,\n }\n\n eval_metrics = metric_fn(per_example_loss,label_ids,predictions,is_real_example)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics\n )\n else:\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n predictions={\n 'probabilities':probabilities,\n 'predictions':predictions,\n },\n )\n\n return output_spec", "def generateDerivedMetrics(kernelMetrics, statistics, throughputMetrics = {}, countMetrics = {}, combinedMetrics = {}):\n\n # combine single metrics \n for combinedMetric in combinedMetrics:\n for kernel in kernelMetrics:\n logging.debug(\"Combining metrics for kernel {}\".format(kernel))\n # iterate over each run, take the number of runs to be\n # the length of the first source metric\n if combinedMetrics[combinedMetric][0] in kernelMetrics[kernel]:\n combinedMetricCounts = []\n sourceMetricMissing = False\n # go through each run\n for run in range(0, len(kernelMetrics[kernel][ combinedMetrics[combinedMetric][0] ])):\n\n combinedMetricRunCount = 0\n # take all the source metrics and add them into the\n # combined metric\n for sourceMetric in combinedMetrics[combinedMetric]:\n if sourceMetric in kernelMetrics[kernel]:\n # TODO delete once debugged print(\"runs of {} {}\".format(sourceMetric, kernelMetrics[kernel][sourceMetric]))\n combinedMetricRunCount = combinedMetricRunCount + kernelMetrics[kernel][sourceMetric][run]\n else:\n sourceMetricMissing = True\n logging.info(\"Source metric {} missing for combined metric {}, combined metric will not be\"\n \"added\".format(sourceMetric, combinedMetric))\n # append this run ot the end of the list\n combinedMetricCounts.append(combinedMetricRunCount)\n if not sourceMetricMissing:\n kernelMetrics[kernel][combinedMetric] = combinedMetricCounts\n\n # take throughputs and convert them to counts\n # doesn't use averages since that can skew results\n for throughputMetricName, countMetricName in zip(throughputMetrics, countMetrics):\n for kernel in kernelMetrics:\n logging.debug(\"Generating count metrics for {} in kernel {}\".format(throughputMetricName, kernel))\n if throughputMetricName in kernelMetrics[kernel]:\n counts = []\n for run in range(0, len(kernelMetrics[kernel][throughputMetricName])):\n count = kernelMetrics[kernel][throughputMetricName][run] * kernelMetrics[kernel][\"Duration\"][run]\n counts.append(count)\n kernelMetrics[kernel][countMetricName] = counts", "def model_fn(features,labels,mode,params):\n input_q = features[\"input_q\"] # query feature vector\n input_K = features[\"input_K\"] # Key set Matrix\n input_v = features[\"input_v\"] # image visual feature vector\n input_labels = features[\"input_labels\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = modeling.AMT(\n config = config,\n is_trainging = is_training, \n scope = \"AMT\",\n input_q = input_q,\n input_K = input_K,\n input_v = input_v\n )\n loss = model.loss\n q_doc_rank = model.get_predict()\n output_spec = None\n scaffold_fn = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer()\n output_spec = 
tf.estimator.EstimatorSpec(\n mode = mode,\n loss = loss,\n train_op = train_op,\n scaffold_fn = scaffold_fn)\n elif mode == tf.estimator.ModeKeys.EVAL:\n def metric_fn():\n return 0\n else:\n output_spec = tf.estimator.EstimatorSpec(\n mode = mode,\n predictions = q_doc_rank,\n scaffold_fn = scaffold_fn)\n return output_spec", "def create_models(self):\r\n self.all_ratings = AllRatingsWithCommon(\r\n experts=self.users,\r\n objects=self.videos,\r\n output_features=self.features,\r\n name=\"prod\",\r\n )\r\n\r\n print_memory(stage=\"DPLF:ratings_nodata_created\")\r\n\r\n # creating models\r\n self.user_to_model = {\r\n user: FeaturelessPreferenceLearningModel(\r\n expert=user, all_ratings=self.all_ratings\r\n )\r\n for user in self.users\r\n }\r\n\r\n print_memory(stage=\"DPLF:models_created\")\r\n\r\n # before creating the aggregator, filling models with data\r\n self.user_to_size = {\r\n user: self.fill_model_data(self.user_to_model[user], user)\r\n for user in tqdmem(self.users, desc=\"fill_data\")\r\n }\r\n\r\n # virtual 'common' data\r\n fplm_common = FeaturelessPreferenceLearningModel(\r\n expert=AllRatingsWithCommon.COMMON_EXPERT, all_ratings=self.all_ratings\r\n )\r\n fplm_common.on_dataset_end()\r\n\r\n print_memory(stage=\"DPLF:data_filled\")\r\n\r\n # resetting the model given the data\r\n self.all_ratings.reset_model()\r\n\r\n print_memory(stage=\"DPLF:model_reset_ok\")\r\n\r\n # aggregating models\r\n self.aggregator = FeaturelessMedianPreferenceAverageRegularizationAggregator(\r\n models=[self.user_to_model[u] for u in self.users]\r\n )\r\n self.aggregator.certification_status = self.user_certified\r\n\r\n print_memory(stage=\"DPLF:aggregator_created\")", "def classic_model_testing():\n dataset_path = \"/home/kateryna/Documents\"\n X_train, X_test, y_train, y_test = generate_embeddings_memory(dataset_path, classes=['normal', 'glare_small'])\n contam = 0.08\n models = [XGBOD(), OCSVM(contamination=contam), IForest(contamination=contam, n_estimators=150), XGBOD(learning_rate=0.01, n_estimators=150),\n COPOD(contamination=contam)]\n for model in models:\n model_name = model.__str__().split('(')[0]\n clf = model\n clf.fit(X_train, y_train)\n\n y_train_pred = clf.labels_\n y_train_scores = clf.decision_scores_\n\n # get the prediction on the test data\n # 0 stands for inliers and 1 for outliers.\n y_test_pred = clf.predict(X_test)\n y_test_scores = clf.decision_function(X_test)\n # y_probabilities = clf.predict_proba(X_test)\n print(\"\\nOn Training Data:\")\n evaluate_print(model_name, y_train, y_train_scores)\n print(\"\\nOn Test Data:\")\n evaluate_print(model_name, y_test, y_test_scores)\n print('roc auc', roc_auc_score(y_test, y_test_scores))\n\n conf_mtx_test = confusion_matrix(y_test, y_test_pred, labels=[0, 1])\n print(conf_mtx_test)\n conf_mtx_train = confusion_matrix(y_train, y_train_pred, labels=[0, 1])\n print(conf_mtx_train)\n print('~~~')", "def define_model(model):\n global log_data_likelihood, log_priors, num_params, file_labels, labels, prior_xs, prior_pdfs\n num_prior_pts = 1001\n pic50_lower = -4.\n pic50_upper = 14.\n hill_lower = 0.\n hill_upper = 6.\n if model == 1:\n num_params = 2\n log_data_likelihood = log_data_likelihood_model_1_capped\n log_priors = log_priors_model_1\n labels = [r\"$pIC50$\", r\"$\\sigma$\"]\n file_labels = ['pIC50','sigma']\n #prior_xs = [np.linspace(pic50_lower, pic50_upper, num_prior_pts),\n # np.linspace(sigma_uniform_lower,sigma_uniform_upper,num_prior_pts)]\n prior_xs = [np.linspace(pic50_exp_lower-2, pic50_exp_lower+23, 
num_prior_pts),\n np.linspace(0, 25, num_prior_pts)]\n #prior_pdfs = [st.logistic.pdf(prior_xs[0], loc=mu, scale=s),\n # np.ones(num_prior_pts)/(1.*sigma_uniform_upper-sigma_uniform_lower)]\n #prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n # np.concatenate(([0,0],np.ones(num_prior_pts)/(1.*sigma_uniform_upper-sigma_uniform_lower),[0,0]))]\n prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n st.gamma.pdf(prior_xs[1], sigma_shape, loc=sigma_loc, scale=sigma_scale)]\n elif model == 2:\n num_params = 3\n log_data_likelihood = log_data_likelihood_model_2_capped\n log_priors = log_priors_model_2\n labels = [r\"$pIC50$\", r\"$Hill$\", r\"$\\sigma$\"]\n file_labels = ['pIC50','Hill','sigma']\n #prior_xs = [np.linspace(pic50_lower, pic50_upper, num_prior_pts),\n # np.linspace(hill_lower, hill_upper, num_prior_pts),\n # np.linspace(sigma_uniform_lower,sigma_uniform_upper,num_prior_pts)]\n prior_xs = [np.linspace(pic50_exp_lower-2, pic50_exp_lower+23, num_prior_pts),\n np.concatenate(([hill_uniform_lower-2,hill_uniform_lower],\n np.linspace(hill_uniform_lower, hill_uniform_upper, num_prior_pts),\n [hill_uniform_upper,hill_uniform_upper+2])),\n np.linspace(0, 25, num_prior_pts)]\n #prior_pdfs = [st.logistic.pdf(prior_xs[0],loc=mu,scale=s),\n # st.fisk.pdf(prior_xs[1],c=beta,scale=alpha),\n # np.ones(num_prior_pts)/(1.*sigma_uniform_upper-sigma_uniform_lower)]\n #prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n # np.concatenate(([0,0],np.ones(num_prior_pts) / (1. * hill_uniform_upper - hill_uniform_lower),[0,0])),\n # np.concatenate(([0, 0], np.ones(num_prior_pts) / (1. * sigma_uniform_upper - sigma_uniform_lower), [0, 0]))]\n prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n np.concatenate(([0,0],np.ones(num_prior_pts) / (1. * hill_uniform_upper - hill_uniform_lower),[0,0])),\n st.gamma.pdf(prior_xs[2], sigma_shape, loc=sigma_loc, scale=sigma_scale)]", "def create_model(config, rng, example_batch):\n example_batch = train_utils.prepare_example_batch(example_batch)\n\n key0, rng = random.split(rng, 2)\n model, variables, metric_collector = MODEL_DICT[config.model.name](\n key0, example_batch, config\n )\n\n return model, variables, metric_collector", "def generate(args):\n\n # Using the data Augmentation in traning data\n\n normalizer = Normalizer()\n\n train_aug = tf.keras.preprocessing.image.ImageDataGenerator(\n #rescale=1. 
/ 255.,\n shear_range=args.shear_range,\n zoom_range=args.zoom_range,\n rotation_range=args.rotation_range,\n width_shift_range=args.width_shift_range,\n height_shift_range=args.height_shift_range,\n horizontal_flip=args.horizontal_flip,\n vertical_flip=args.vertical_flip,\n preprocessing_function=normalizer)\n\n\n validation_aug = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=normalizer)\n\n train_generator = train_aug.flow_from_directory(\n args.train_dir,\n target_size=(args.input_size, args.input_size),\n batch_size=args.batch_size,\n class_mode='categorical',\n shuffle=True)\n\n mean, std = [], []\n if args.mean is None or args.std is None:\n mean, std = normalizer.get_stats(args.train_dir, train_generator.filenames, (args.input_size, args.input_size))\n else:\n mean = [float(m.strip()) for m in args.mean.split(',')]\n std = [float(s.strip()) for s in args.std.split(',')]\n normalizer.set_stats(mean, std)\n\n if not os.path.exists('model'):\n os.makedirs('model')\n with open('model/stats.txt', 'w') as stats:\n stats.write(\"Dataset mean [r, g, b] = {}\\n\".format(mean))\n\n\n label_map = (train_generator.class_indices)\n label_map = dict((v,k) for k,v in label_map.items())\n\n with open('model/labels.csv', 'w') as csv_file:\n csv_writer = csv.writer(csv_file, lineterminator='\\n')\n csv_writer.writerows(label_map.items())\n\n validation_generator = validation_aug.flow_from_directory(\n args.validation_dir,\n target_size=(args.input_size, args.input_size),\n batch_size=args.batch_size,\n class_mode='categorical')\n\n return train_generator, validation_generator, train_generator.samples, validation_generator.samples, len(label_map)", "def model_fn(features, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"input_type_ids\"]\n # label_ids = features[\"label_ids\"]\n vocab = vocab_list\n vocab_size = len(vocab_list)\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n\n\n \n # TRAIN\n if not is_predicting:\n\n (loss, predictions, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=FLAGS.use_tpu)\n\n # if mode == tf.estimator.ModeKeys.TRAIN:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n ## else:\n # return tf.estimator.EstimatorSpec(mode=mode,\n # loss=loss,\n # eval_metric_ops=eval_metrics)\n else:\n (predictions, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, 
use_one_hot_embeddings)\n\n predictions = {\n 'probabilities': log_probs,\n 'predictions': predictions\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode, predictions=predictions, scaffold_fn=scaffold_fn)\n\n return output_spec if use_tpu else output_spec.as_estimator_spec()", "def budget_analysis(models, blocks, num_sweeps, sample_sizes, data, K, CUDA, device, batch_size=100):\n result_flags = {'loss_required' : False, 'ess_required' : True, 'mode_required' : False, 'density_required': True}\n\n ess = []\n density = []\n num_batches = int((data.shape[0] / batch_size))\n metrics = {'block' : [], 'num_sweeps' : [], 'sample_sizes' : [], 'ess' : [], 'density' : []}\n for block in blocks:\n for i in range(len(num_sweeps)):\n metrics['block'].append(block)\n time_start = time.time()\n num_sweep = int(num_sweeps[i])\n sample_size = int(sample_sizes[i])\n metrics['num_sweeps'].append(num_sweep)\n metrics['sample_sizes'].append(sample_size)\n resampler = Resampler(strategy='systematic',\n sample_size=sample_size,\n CUDA=CUDA,\n device=device)\n ess, density = 0.0, 0.0\n for b in range(num_batches):\n x = data[b*batch_size : (b+1)*batch_size].repeat(sample_size, 1, 1, 1)\n if CUDA:\n x = x.cuda().to(device)\n trace = apg_objective(models, x, result_flags, num_sweeps=num_sweep, block=block, resampler=resampler)\n ess += trace['ess'][-1].mean().item()\n density += trace['density'][-1].mean().item()\n metrics['ess'].append(ess / num_batches / sample_size)\n metrics['density'].append(density / num_batches)\n time_end = time.time()\n print('block=%s, num_sweep=%d, sample_size=%d completed in %ds' % (block, num_sweep, sample_size, time_end-time_start))\n return pd.DataFrame.from_dict(metrics)", "def model_fn(features, labels, mode, params):\n label_offset = 1\n total_loss, train_op, detections, export_outputs = None, None, None, None\n is_training = mode==tf.estimator.ModeKeys.TRAIN\n num_classes = run_config[\"num_classes\"]\n model = RetinaNetModel(is_training=is_training, num_classes=num_classes)\n if mode == tf.estimator.ModeKeys.TRAIN:\n # load pretrained model for checkpoint\n ckpt_file = run_config.get(\"finetune_ckpt\")\n if ckpt_file:\n asg_map = model.restore_map()\n available_var_map = (_get_variables_available_in_ckpt(asg_map, ckpt_file))\n tf.train.init_from_checkpoint(ckpt_file, available_var_map)\n # predict\n images = features[\"image\"]\n keys = features[\"key\"]\n predictions_dict = model.predict(images)\n # postprocess\n if mode in (tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT):\n detections = model.postprocess(predictions_dict, score_thres=default_params.get(\"score_thres\"))\n # unstack gt info\n if mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL):\n unstacked_labels = unstack_batch(labels)\n gt_boxes_list = unstacked_labels[\"gt_boxes\"]\n gt_labels_list = unstacked_labels[\"gt_labels\"]\n # -1 due to label offset\n gt_labels_onehot_list = [tf.one_hot(tf.squeeze(tf.cast(gt_labels-label_offset, tf.int32), 1), num_classes)\n for gt_labels in gt_labels_list]\n reg_loss, cls_loss, box_weights, cls_weights = model.loss(predictions_dict, gt_boxes_list, gt_labels_onehot_list)\n losses = [reg_loss * default_params.get(\"box_loss_weight\"), cls_loss]\n total_loss_dict = {\"Loss/classification_loss\": cls_loss, \"Loss/localization_loss\": reg_loss}\n # add regularization loss\n regularization_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n if regularization_loss:\n regularization_loss = tf.add_n(regularization_loss, 
name='regularization_loss')\n losses.append(regularization_loss)\n total_loss_dict[\"Loss/regularization_loss\"] = regularization_loss\n total_loss = tf.add_n(losses, name='total_loss')\n total_loss_dict[\"Loss/total_loss\"] = total_loss\n\n # optimizer\n if mode == tf.estimator.ModeKeys.TRAIN:\n lr = learning_rate_schedule(default_params.get(\"total_train_steps\"))\n optimizer = tf.train.MomentumOptimizer(lr, momentum=default_params.get(\"momentum\"))\n # batch norm need update_ops\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(total_loss, tf.train.get_global_step())\n else:\n train_op = None\n\n # predict mode\n if mode == tf.estimator.ModeKeys.PREDICT:\n export_outputs = {tf.saved_model.signature_constants.PREDICT_METHOD_NAME: detections}\n\n eval_metric_ops = {}\n # just for debugging\n logging_hook = [tf.train.LoggingTensorHook({\"gt_labels\": gt_labels_list[0], \"gt_boxes\": gt_boxes_list[0],\n 'norm_box_loss': reg_loss, 'norm_cls_loss': cls_loss,\n \"pred_box\": predictions_dict[\"box_pred\"],\n \"pred_cls\": predictions_dict[\"cls_pred\"]},\n every_n_iter=50)]\n if mode == tf.estimator.ModeKeys.EVAL:\n logging_hook = [tf.train.LoggingTensorHook({\"gt_labels\": gt_labels_list[0], \"gt_boxes\": gt_boxes_list[0],\n \"detection_boxes\": detections[\"detection_boxes\"],\n \"detection_classes\": detections[\"detection_classes\"],\n \"scores\": detections[\"detection_scores\"],\n \"num_detections\": detections[\"num_detections\"]},\n every_n_iter=50)]\n eval_dict = _result_dict_for_single_example(images[0:1], keys[0], detections,\n gt_boxes_list[0], tf.reshape(gt_labels_list[0], [-1]))\n if run_config[\"label_map_path\"] is None:\n raise RuntimeError(\"label map file must be defined first!\")\n else:\n category_index = create_categories_from_labelmap(run_config[\"label_map_path\"])\n coco_evaluator = CocoDetectionEvaluator(categories=category_index)\n eval_metric_ops = coco_evaluator.get_estimator_eval_metric_ops(eval_dict)\n eval_metric_ops[\"classification_loss\"] = tf.metrics.mean(cls_loss)\n eval_metric_ops[\"localization_loss\"] = tf.metrics.mean(reg_loss)\n return tf.estimator.EstimatorSpec(mode=mode,\n predictions=detections,\n loss=total_loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops,\n training_hooks=logging_hook,\n export_outputs=export_outputs,\n evaluation_hooks=logging_hook)", "def main_stats_model(y_train: pd.DataFrame, y_test: pd.DataFrame, y_pred: np.ndarray,\n model_name: str = '',\n model_parameters: dict = None,\n model_preprocessing: str = '',\n sequence_origin: str = '',\n primers_origin: str = '',\n taxonomy_level: Union[List[int], int] = '',\n selected_primer: Union[List[str], str] = '',\n test_size: float = 0.2,\n feature_importances: np.ndarray = None,\n k: int = 4,\n save_csv: bool = False,\n xgb_model=None,\n rf_model=None,\n save_model=False,\n save_tree: int = 0):\n model_path = folder_paths['model_results'] + model_name + '{}'.format(slash)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n\n folder_number = get_new_model_folder_number(model_name=model_name)\n analysis_path = model_path + '{:0>5d}_analysis_{}_{}{}'.format(folder_number, selected_primer, taxonomy_level, slash)\n os.makedirs(analysis_path)\n\n log_path = analysis_path + 'model_results.txt'\n logger = StatLogger(log_path=log_path)\n\n # Basic information on configuration\n test_size = get_model_info(y_test, model_name, model_parameters, model_preprocessing, sequence_origin,\n 
primers_origin, taxonomy_level, selected_primer, test_size, logger)\n\n # Metrics of model results\n main_class_prop, accuracy = get_metrics_model(y_train, y_test, y_pred, logger, feature_importances, k, save_tree,\n xgb_model,\n analysis_path=analysis_path)\n\n if save_csv:\n add_optimal_model_params(folder_number, selected_primer, taxonomy_level, accuracy, model_parameters,\n model_path=model_path)\n\n if save_model:\n if xgb_model is not None:\n xgb_model.save_model(analysis_path+'0001.model')\n if rf_model is not None:\n filename = analysis_path+'0001.model'\n pickle.dump(rf_model, open(filename, 'wb'))\n\n logger.close_file()\n\n return test_size, main_class_prop, accuracy", "def build_model(train_inputs,train_labels,model_params,model_mode='classification',\n model_type='naive_bayes'):\n if model_mode == \"classification\":\n if model_type == \"naive_bayes\":\n model = GaussianNB()\n if model_type == \"knn\":\n model = KNeighborsClassifier(n_neighbors=50)\n if model_type == \"svm\":\n model = SVC(kernel='poly', degree =27, coef0 =1, C=5)\n if model_type == \"decision_tree\":\n model = DecisionTreeClassifier(min_samples_split=45,min_samples_leaf=45,criterion=\"gini\")\n #model = RandomForestClassifier(n_estimators=500, n_jobs=-1)\n\n if model_mode == \"regression\":\n if model_type == \"knn\":\n model = KNeighborsRegressor()\n if model_type == \"svm\":\n model = SVR()\n if model_type == \"decision_tree\":\n model = DecisionTreeRegressor()\n\n\n model.fit(train_inputs, train_labels)\n # for name, score in zip(train_inputs.columns,model.feature_importances_):\n # print(name, score)\n\n return model", "def train(self):\n \n for kernel_name, kernel in self.kernel_dict.items():\n if self.verbose: print('Training with {:s} kernel'.format(kernel_name))\n model = BnpQedModel(self.x, self.y, kernel, self.labelFunc, \n self.labelLUT, self.mode, self.design)\n model.train(num_restarts=self.num_restarts, b=self.b) \n if self.verbose:\n print('Log Bayes factor in favor of discontinuity = {:0.2f}'.format(model.summary(b=self.b)['logbayesfactor']))\n print('Evidence: M_C = {:0.3f}, M_D = {:0.3f}'.format(model.summary(b=self.b)['evidence']['mc'], \n model.summary(b=self.b)['evidence']['md']))\n print('Posterior model probabilities: p(M_C|D) = {:0.3f}, p(M_D|D) = {:0.3f}'.format(model.summary(b=self.b)['pmp']['pmc'], \n model.summary(b=self.b)['pmp']['pmd']))\n print('') \n self.results[kernel_name] = model \n self.trained = True \n return self.results", "def evaluate_model(args, eval_runs, warm_runs, metrics=['psnr', 'ssim', 'fps']):\n upsampler = Upsampler(args)\n if warm_runs > 0:\n print(\"Warming up for evaluation\")\n for i in range(warm_runs):\n print(\"Performing warm-up run\", str(i+1))\n for sequence in ['foliage', 'walk', 'calendar', 'city']:\n bix_dir = os.path.join(VID4_DIR, 'BIx4', sequence)\n upsampler.run_dir(bix_dir, reset=False)\n \n time = 0.\n psnrs = []\n ssims = []\n for i in range(eval_runs):\n run_psnrs = []\n run_ssims = []\n print(\"Performing evaluation run\", str(i+1))\n for sequence in ['foliage', 'walk', 'calendar', 'city']:\n bix_dir = os.path.join(VID4_DIR, 'BIx4', sequence)\n gt_dir = os.path.join(VID4_DIR, 'GT', sequence)\n print(\"Evaluating on\", bix_dir)\n time += upsampler.run_dir(bix_dir, reset=False)\n vid_psnrs, vid_ssims = _eval_sr_perf(os.path.join(bix_dir, 'up'), gt_dir)\n run_psnrs += vid_psnrs\n run_ssims += vid_ssims\n if i == eval_runs-1:\n with open(os.path.join(upsampler.get_model_dir(), \"psnr.txt\"), \"w\") as f:\n f.writelines(str(psnr) + '\\n' for 
psnr in run_psnrs)\n with open(os.path.join(upsampler.get_model_dir(), \"ssim.txt\"), \"w\") as f:\n f.writelines(str(ssim) + '\\n' for ssim in run_ssims)\n psnrs += run_psnrs\n ssims += run_ssims\n\n fps = VID4_LENGTH/ (time/eval_runs)\n return Performance(psnr=psnrs, ssim=ssims, fps=fps)", "def learn(model: KW_Model,\n trainloader: DataLoader,\n testloader: DataLoader,\n optimizer: optim.Optimizer,\n nb_epoch: int,\n device: torch.device,\n eval_fn: Callable[[List[bool], List[Qid]], Dict[Qid, float]],\n mean_window: int = 50,\n entropy_lambda: float = 0.025,\n smt_lambda: float = 1.0,\n reinforce_lambda: float = 1.0,\n ) -> Tuple[nn.Module, Dict[str, List[torch.tensor]], Dict[str, List[torch.tensor]]]:\n print(\"Memory usage: %s (kb)\" % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n past_rewards = {str(q_id.long().item()): deque(maxlen=mean_window)\n for _, _, q_ids, _ in chain(trainloader, testloader)\n for q_id in q_ids}\n \n logs = [\"reward\",\n \"scaled_entropy\",\n \"scaled_reinforce\",\n \"scaled_smt\",\n \"total_loss\",\n \"accuracy\"]\n train_logs = {log: list() for log in logs}\n test_logs = {log: list() for log in logs}\n del logs\n \n for epoch in range(nb_epoch):\n running_loss, running_reward = [], []\n entropies, reinforces, smts = [], [], []\n nb_correct, nb_total = 0, 0\n print(f\"\\nEpoch {epoch}\")\n \n print(\"Begin epoch: %s (kb)\" % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n model.train()\n for x, y, q_id, masks in trainloader:\n x = x.to(device)\n y = y.to(device)\n masks = masks.to(device)\n\n # batch x seq , batch x seq , batch x seq\n sample, logits, entropy, params = model(x, masks)\n batch_reward, rewards = eval_fn(sample.detach().t().tolist(), q_id)\n losses = compute_losses(y, params, rewards, logits, past_rewards,\n entropy, entropy_lambda, reinforce_lambda, smt_lambda, device)\n\n # entropy_lambda = min(1.01*entropy_lambda, 0.025)\n # reinforce_lambda = min(1.01*reinforce_lambda, 1.0)\n # smt_lambda = max(0.99*smt_lambda, 0.05)\n loss, reinforce_loss, entropy, smt_loss = losses\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n temp = (sample.long() == y.detach().long()).float() * masks\n nb_correct += temp.byte().cpu().sum().tolist()\n nb_total += masks.cpu().sum().tolist()\n\n running_loss.append(loss.item())\n running_reward.extend(rewards.values())\n print(f\"\\rTr Loss {mean(running_loss): .3f} Rewa {mean(running_reward): .5f}\", end=\"\")\n \n reinforces.append(reinforce_loss.item())\n entropies.append(entropy.item())\n smts.append(smt_loss.item())\n\n # Logs\n train_logs[\"reward\"].append(mean(running_reward))\n train_logs[\"scaled_entropy\"].append(mean(entropies))\n train_logs[\"scaled_reinforce\"].append(mean(reinforces))\n train_logs[\"scaled_smt\"].append(mean(smts))\n train_logs[\"total_loss\"].append(mean(running_loss))\n train_logs[\"accuracy\"].append(nb_correct / nb_total)\n \n \n train_loss, train_reward = mean(running_loss), mean(running_reward)\n running_loss, running_reward = [], []\n entropies, reinforces, smts = [], [], []\n nb_correct, nb_total = 0, 0\n model.eval()\n for x, y, q_id, masks in testloader:\n x = x.to(device)\n y = y.to(device)\n masks = masks.to(device)\n\n # batch x seq , batch x seq , batch x seq\n sample, logits, entropy, params = model(x, masks)\n batch_reward, rewards = eval_fn(sample.detach().t().tolist(), q_id)\n\n losses = compute_losses(y, params, rewards, logits, past_rewards,\n entropy, entropy_lambda, reinforce_lambda, smt_lambda, device)\n loss, reinforce_loss, entropy, 
smt_loss = losses\n \n temp = (sample.long() == y.detach().long()).float() * masks\n nb_correct += temp.byte().sum().tolist()\n nb_total += masks.sum().tolist()\n\n running_loss.append(loss.item())\n running_reward.extend(rewards.values())\n print(f\"\\rTr Loss {train_loss: .3f} Rewa {train_reward: .3f}\",\n f\"Te Loss{mean(running_loss): .3f} Rewa {mean(running_reward): .3f}\",\n end=\"\")\n \n reinforces.append(reinforce_loss.item())\n entropies.append(entropy.item())\n smts.append(smt_loss.item())\n \n \n # Logs\n test_logs[\"reward\"].append(mean(running_reward))\n test_logs[\"scaled_entropy\"].append(mean(entropies))\n test_logs[\"scaled_reinforce\"].append(mean(reinforces))\n test_logs[\"scaled_smt\"].append(mean(smts))\n test_logs[\"total_loss\"].append(mean(running_loss))\n test_logs[\"accuracy\"].append(nb_correct / nb_total)\n \n\n return model, train_logs, test_logs", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings)\n\n # output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n # mode=tf.estimator.ModeKeys.PREDICT,\n # predictions=probabilities)\n output_spec = tf.estimator.EstimatorSpec(\n mode=tf.estimator.ModeKeys.PREDICT,\n predictions=probabilities\n )\n return output_spec", "def create_model(self, model_input, vocab_size, num_frames, **unused_params):\n num_frames_t=num_frames\n num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)\n feature_size = model_input.get_shape().as_list()[2]\n iterations=5#150\n attention_size=8\n if FLAGS.is_train: \n iterations=120\n model_input = utils.SampleRandomFrames(model_input[:,15:,:], num_frames-15-15,\n iterations)\n model_input=model_input+tf.random_normal(shape=tf.shape(model_input), mean=0.0, stddev=1e-3, dtype=tf.float32)\n\n aggregated_model = getattr(video_level_models,\n FLAGS.video_level_classifier_model)\n\n video_attention = MultiAttentionLayers(1024,iterations,256,attention_size)#256\n audio_attention = MultiAttentionLayers(128,iterations,256/4,attention_size)#256/4\n\n model_input = slim.batch_norm(\n model_input,\n center=True,\n scale=True,\n is_training=True,\n scope=\"model_input_bn\")\n\n with tf.variable_scope(\"video_Attention\"):\n attention_video = video_attention.forward(model_input[:,:,0:1024]) \n with tf.variable_scope(\"audio_Attention\"):\n attention_audio = audio_attention.forward(model_input[:,:,1024:])\n\n pooled=tf.concat([attention_video,attention_audio],axis=1)\n #instance_att#tf.reduce_mean(pooledi,axis=1)\n\n print('pooled is',pooled)\n pooled=tf.reshape(tf.transpose(pooled,perm=[0,2,1]),[-1,1152])\n dr2 = tf.get_variable(\"dr2\",\n [feature_size,1024],\n initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))\n pooled=tf.matmul(pooled,dr2)\n\n pooled = slim.batch_norm(\n pooled,\n center=True,\n scale=True,\n is_training=True,\n scope=\"pooled_bn\")\n\n gating_weights = tf.get_variable(\"gating_weights_2\",\n [1024, 1024],\n initializer = tf.random_normal_initializer(stddev=1 / 
math.sqrt(1024))) \n gates = tf.matmul(pooled, gating_weights) \n gates = slim.batch_norm(\n gates,\n center=True,\n scale=True,\n is_training=True,\n scope=\"gating_bn\")\n gates = tf.sigmoid(gates)\n pooled = tf.multiply(pooled,gates)\n\n results_temp=aggregated_model().create_model(\n model_input=pooled, vocab_size=vocab_size, **unused_params)\n results_temp['predictions']=tf.reduce_max(tf.reshape(results_temp['predictions'],[-1,attention_size,vocab_size]),axis=1)\n print(results_temp)\n return results_temp", "def test(model, dataloader, params, args, val):\n\n # evaluation mode\n model.eval()\n\n # initialise buffers\n dice_lv_buffer = []\n dice_myo_buffer = []\n dice_rv_buffer = []\n\n mcd_lv_buffer = []\n hd_lv_buffer = []\n mcd_myo_buffer = []\n hd_myo_buffer = []\n mcd_rv_buffer = []\n hd_rv_buffer = []\n\n mean_mag_grad_detJ_buffer = []\n negative_detJ_buffer = []\n\n\n with tqdm(total=len(dataloader)) as t:\n # iterate over validation subjects\n for idx, (image_ed_batch, image_es_batch, label_ed_batch, label_es_batch) in enumerate(dataloader):\n # (data all in shape of (c, N, H, W))\n\n # extend to (N, c, H, W)\n image_ed_batch = image_ed_batch.permute(1, 0, 2, 3).to(device=args.device)\n image_es_batch = image_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n label_es_batch = label_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n\n with torch.no_grad():\n # compute optical flow and warped ED images towards ES\n dvf = model(image_ed_batch, image_es_batch)\n\n # transform label mask of ES frame\n warped_label_es_batch = resample_transform(label_es_batch.float(), dvf, interp='nearest')\n\n\n \"\"\" Move data to device \"\"\"\n if args.cuda:\n # move data to cpu to calculate metrics\n # (the axis permutation is to comply with metric calculation code which takes input shape H, W, N)\n warped_label_es_batch = warped_label_es_batch.squeeze(1).cpu().numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.cpu().numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n else:\n # CPU version of the code\n warped_label_es_batch = warped_label_es_batch.squeeze(1).numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n \"\"\"\"\"\"\n\n \"\"\" Calculate the metrics (only works with SAX images) \"\"\"\n # (optional) extract 3 slices (apical, mid-ventricle and basal)\n if not args.all_slices:\n num_slices = label_ed_batch.shape[-1]\n apical_idx = int(round((num_slices - 1) * 0.75)) # 75% from basal\n mid_ven_idx = int(round((num_slices - 1) * 0.5)) # 50% from basal\n basal_idx = int(round((num_slices - 1) * 0.25)) # 25% from basal\n slices_idx = [apical_idx, mid_ven_idx, basal_idx]\n\n warped_label_es_batch = warped_label_es_batch[:, :, slices_idx]\n label_ed_batch = label_ed_batch[:, :, slices_idx]\n dvf = dvf[slices_idx, :, :, :] # needed for detJac\n\n # dice\n dice_lv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=1)\n dice_myo = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=2)\n dice_rv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=3)\n\n dice_lv_buffer += [dice_lv]\n dice_myo_buffer += [dice_myo]\n dice_rv_buffer += [dice_rv]\n\n # contour distances\n mcd_lv, hd_lv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=1, dx=params.pixel_size)\n mcd_myo, hd_myo = contour_distances_stack(warped_label_es_batch, 
label_ed_batch, label_class=2, dx=params.pixel_size)\n mcd_rv, hd_rv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=3, dx=params.pixel_size)\n\n # determinant of Jacobian\n mean_grad_detJ, mean_negative_detJ = detJac_stack(dvf)\n\n\n # update buffers\n mcd_lv_buffer += [mcd_lv]\n hd_lv_buffer += [hd_lv]\n mcd_myo_buffer += [mcd_myo]\n hd_myo_buffer += [hd_myo]\n mcd_rv_buffer += [mcd_rv]\n hd_rv_buffer += [hd_rv]\n\n mean_mag_grad_detJ_buffer += [mean_grad_detJ]\n negative_detJ_buffer += [mean_negative_detJ]\n\n t.update()\n\n # construct metrics dict\n metrics = {'dice_lv_mean': np.mean(dice_lv_buffer), 'dice_lv_std': np.std(dice_lv_buffer),\n 'dice_myo_mean': np.mean(dice_myo_buffer), 'dice_myo_std': np.std(dice_myo_buffer),\n 'dice_rv_mean': np.mean(dice_rv_buffer), 'dice_rv_std': np.std(dice_rv_buffer),\n\n 'mcd_lv_mean': np.mean(mcd_lv_buffer), 'mcd_lv_std': np.std(mcd_lv_buffer),\n 'mcd_myo_mean': np.mean(mcd_myo_buffer), 'mcd_myo_std': np.std(mcd_myo_buffer),\n 'mcd_rv_mean': np.mean(mcd_rv_buffer), 'mcd_rv_std': np.std(mcd_rv_buffer),\n\n 'hd_lv_mean': np.mean(hd_lv_buffer), 'hd_lv_std': np.std(hd_lv_buffer),\n 'hd_myo_mean': np.mean(hd_myo_buffer), 'hd_myo_std': np.std(hd_myo_buffer),\n 'hd_rv_mean': np.mean(hd_rv_buffer), 'hd_rv_std': np.std(hd_rv_buffer),\n\n 'mean_mag_grad_detJ_mean': np.mean(mean_mag_grad_detJ_buffer),\n 'mean_mag_grad_detJ_std': np.std(mean_mag_grad_detJ_buffer),\n\n 'negative_detJ_mean': np.mean(negative_detJ_buffer),\n 'negative_detJ_std': np.std(negative_detJ_buffer)\n }\n\n\n if not val:\n # testing only: save all metrics evaluated for all test subjects in pandas dataframe\n test_result_dir = os.path.join(args.model_dir, \"test_results\")\n if not os.path.exists(test_result_dir):\n os.makedirs(test_result_dir)\n\n # save metrics results mean & std\n xutils.save_dict_to_json(metrics,\n f\"{test_result_dir}/test_results_3slices_{not args.all_slices}.json\")\n\n # save accuracy metrics of every subject\n subj_id_buffer = dataloader.dataset.dir_list\n df_buffer = []\n column_method = ['DL'] * len(subj_id_buffer)\n for struct in ['LV', 'MYO', 'RV']:\n if struct == 'LV':\n ls_dice = dice_lv_buffer\n ls_mcd = mcd_lv_buffer\n ls_hd = hd_lv_buffer\n elif struct == 'MYO':\n ls_dice = dice_myo_buffer\n ls_mcd = mcd_myo_buffer\n ls_hd = hd_myo_buffer\n elif struct == 'RV':\n ls_dice = dice_rv_buffer\n ls_mcd = mcd_rv_buffer\n ls_hd = hd_rv_buffer\n\n ls_struct = [struct] * len(subj_id_buffer)\n data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'Structure': ls_struct,\n 'Dice': ls_dice,\n 'MCD': ls_mcd,\n 'HD': ls_hd}\n df_buffer += [pd.DataFrame(data=data)]\n # concatenate df and save\n metrics_df = pd.concat(df_buffer, axis=0)\n metrics_df.to_pickle(f\"{test_result_dir}/test_accuracy_results_3slices_{not args.all_slices}.pkl\")\n\n # save detJac metrics for every subject\n jac_data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'GradDetJac': mean_mag_grad_detJ_buffer,\n 'NegDetJac': negative_detJ_buffer}\n jac_df = pd.DataFrame(data=jac_data)\n jac_df.to_pickle(f\"{test_result_dir}/test_Jacobian_results_3slices{not args.all_slices}.pkl\")\n\n return metrics", "def evaluate_model():\n\n # Get the processed data (in proper format to evaluate the NER model)\n data = get_json_from_file_path(PROCESSED_DATA_PATH)\n # Split the dataset for training and test as we did for training\n train_data, test_data = train_test_split(data, train_size=0.7, \n random_state=4)\n\n # Load the model trained\n try:\n ner_model = 
spacy.load(OUTPUT_MODEL_PATH)\n except Exception as err:\n msg = f'Could not load the model. Error: {err}'\n raise Exception(msg)\n\n # Compute evaluation scores\n print('Computing metrics...')\n scores = evaluate(ner_model, test_data)\n # General metrics of the model\n f_score = scores.get('ents_f')\n precision = scores.get('ents_p')\n recall = scores.get('ents_r')\n print('\\nScoring:')\n print(f'F-score: {f_score}')\n print(f'Precision: {precision}')\n print(f'Recall: {recall}')\n\n # Get the specific scores for each entity \n scores_per_entity = scores.get('ents_per_type')\n # Get the F-score of the entities\n f_scores_of_entities = []\n for entity_scores in scores_per_entity.values():\n f_scores_of_entities.append(entity_scores['f'])\n # Compute the macro averaged F-score\n macro_avg_f_score = sum(f_scores_of_entities)/len(f_scores_of_entities)\n print(f'Macro averaged F-score: {macro_avg_f_score}')\n \n print('\\nScores per entity;')\n print('{:<15} {:<10} {:<10} {:<10}'.format('Entity','F-score','Precision','Recall'))\n for key, value in scores_per_entity.items():\n entity = key\n f, p, r = value['f'], value['p'], value['r']\n print('{:<15} {:<10.2f} {:<10.2f} {:<10.2f}'.format(entity, f, p, r))", "def app(data_file=None, \n metric=\"all\",\n include_diagnoses=(1,3), \n num_sub=None, \n feature_name=None, \n label_name=None, \n model_file=None):\n \n \n # Load model\n all_models = pickle.load(open(model_file, \"rb\"))\n \n # Get data and preprocessing\n \n # Feature Preprocessing\n \n # Predict\n predict_proba, predict_label = model.predict(all_models[\"merged_model\"], data)\n\n # Evaluation\n # acc, auc, f1, confmat, report = model.evaluate(predict_label, predict_proba, predict_label)\n # print(f\"Test dataset:\\nacc = {acc}\\nf1score = {f1}\\nauc = {auc}\\n\")\n\n return predict_proba, predict_label", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels,\n log_probs) = create_model(bert_model_hub, is_predicting,\n input_ids, input_mask, segment_ids,\n label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(loss,\n learning_rate,\n num_train_steps,\n num_warmup_steps,\n use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(label_ids,\n predicted_labels)\n auc = tf.metrics.auc(label_ids, predicted_labels)\n recall = tf.metrics.recall(label_ids, predicted_labels)\n precision = tf.metrics.precision(label_ids, predicted_labels)\n true_pos = tf.metrics.true_positives(label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(label_ids,\n predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return 
tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels,\n log_probs) = create_model(bert_model_hub, is_predicting,\n input_ids, input_mask, segment_ids,\n label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)", "def keras_model_fn_cpu(model_config, vocab_size, embedding_size, embeddings):\n ## hyperparams\n model_name = model_config['model_name']\n num_class = model_config['num_class']\n lstm_hs = model_config['lstm_hs']\n gru_hs = model_config['gru_hs']\n learning_rate = model_config['learning_rate']\n \n with tf.device('/cpu:0'):\n ## build model\n inputs = ks.Input(shape=(None,), dtype='int32', name='inputs')\n embedded_sequences_ft1 = layers.Embedding(vocab_size, embedding_size, trainable = False, mask_zero = False)(inputs)\n embedded_sequences_ft2 = layers.Embedding(vocab_size, embedding_size, trainable = False, mask_zero = False)(inputs)\n concat_embed = layers.concatenate([embedded_sequences_ft1 ,embedded_sequences_ft2])\n concat_embed = layers.SpatialDropout1D(0.5)(concat_embed)\n x = layers.Bidirectional(layers.LSTM(lstm_hs,recurrent_activation = 'sigmoid', return_sequences = True))(concat_embed)\n x, x_h, x_c = layers.Bidirectional(layers.GRU(gru_hs, reset_after = True, recurrent_activation = 'sigmoid', return_sequences = True, return_state = True))(x)\n x_1 = layers.GlobalMaxPool1D()(x)\n x_2 = layers.GlobalAvgPool1D()(x)\n x_out = layers.concatenate([x_1 ,x_2, x_h])\n x_out = layers.BatchNormalization()(x_out)\n outputs = layers.Dense(num_class, activation = 'softmax', name = 'outputs')(x_out) # outputs\n model = ks.Model(inputs, outputs, name = model_name)\n\n ## compile\n model.compile(loss = 'categorical_crossentropy', \n optimizer=ks.optimizers.Adam(lr=learning_rate, clipnorm=.25, beta_1=0.7, beta_2=0.99), \n metrics=['categorical_accuracy', ks.metrics.TopKCategoricalAccuracy(k=3)])\n return model", "def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : 
train_accu_log[idx_v],\n \"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%" ]
[ "0.6197215", "0.6179176", "0.61233026", "0.60849535", "0.5975204", "0.59584266", "0.5925668", "0.5914981", "0.5912443", "0.59088415", "0.587874", "0.583481", "0.5832889", "0.58285797", "0.5824648", "0.58075756", "0.5796022", "0.5794778", "0.579187", "0.57816434", "0.57709134", "0.5757443", "0.5751359", "0.57184255", "0.5718085", "0.5696702", "0.56942844", "0.5694188", "0.56842303", "0.56683457" ]
0.65697956
0
This function tries to cast the port to an integer. If that is not possible, the initial string value is returned.
def valid_port(ctx, param, value):
    try:
        value = int(value)
    except ValueError:
        pass
    return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _grab_port(self):\r\n port = \"\"\r\n while self._char != -1 and self._char in \"0123456789\":\r\n port += self._char\r\n self._get_char()\r\n if len(port) == 0:\r\n self._error(\"port empty\")\r\n return int(port)", "def convertInt(s):\n try:\n int(s)\n return \"INT\"\n except:\n return s", "def port(name):\n\n words = name.upper().split('-', 1)\n\n if len(words) == 1:\n words.append(words[0][1])\n\n return int(f\"{ord(words[0][0])}{ord(words[1][0])}\")", "def _convert_pin_port(self, pin):\n if pin in dir(port):\n port_num = getattr(port, pin)\n elif pin in dir(connector):\n port_num = getattr(connector, pin)\n else:\n errmsg = \"Unknown pin {}\".format(pin)\n self._logger.error(errmsg)\n return 0\n return port_num", "def tryCastToInt(number):\n try:\n return int(number)\n except:\n print(\"Error! Impossible to parse this variable\")\n return 0", "def Port(self) -> int:", "def try_int_cast(value):\n try: \n return int(value)\n except:\n return value", "def _translate_port(port):\n services = _get_services_mapping()\n if port in services and services[port][\"port\"]:\n return services[port][\"port\"][0]\n return port", "def try_to_convert(value):\n try:\n return int(value)\n except:\n return value", "def convert(s):\n\n try:\n\n return int(s)\n except (ValueError, TypeError) as e:\n print(\"conversion error {}\".format(str(e)), file=sys.stderr)\n pass\n return -1", "def _validate_port(port_value):\n try:\n port_value = int(port_value)\n except (ValueError, TypeError):\n return None\n\n if not (1024 <= port_value <= 49151):\n return None\n\n return port_value", "def _validate_port(port_value):\n try:\n port_value = int(port_value)\n except (ValueError, TypeError):\n return None\n\n if not (1024 <= port_value <= 49151):\n return None\n\n return port_value", "def port_number(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"port_number\")", "def parse_port():\n port = 8484\n try:\n port = int(sys.argv[1])\n except Exception as e:\n print(\"CLI argument for port could not be parsed: \" + str(e))\n print(\"Fall back on default port: \" + str(port))\n pass\n return '{}'.format(port)", "def verify_port(port_def):\n if re.match(r\"^\\d+\\+?$\", str(port_def)):\n port_def = str(port_def), str(port_def).replace(\"+\", \"\")\n elif re.match(r\"^(\\d+\\+?):(\\d+)$\", str(port_def)):\n port_def = tuple(re.findall(\"(\\d+\\+?):(\\d+)\", str(port_def))[0])\n else:\n raise ValueError(f\"invalid port def '{port_def}'\")\n return port_def", "def _get_nport(self):\n return self.__nport", "def _get_port(self):\n return self.__port", "def dec2int(r: str) -> int:", "def try_to_convert (id):\n converted = id\n try:\n converted = int(id)\n except ValueError:\n pass\n return converted", "def cast(val, regs):\n try:\n return int(val)\n except ValueError as ve:\n return regs[val]", "def _port(port):\n\n\tvalid_range = range(1, 65535 + 1)\n\n\ttry:\n\t\tport = int(port)\n\t\tif port not in valid_range:\n\t\t\traise argparse.ArgumentTypeError(\"Port must be 1-65535\")\n\t\treturn port\n\texcept ValueError:\n\t\traise argparse.ArgumentTypeError(\"Port must be 1-65535\")", "def port_parser(string):\n try:\n portnum = int(string)\n if portnum < MIN_TCP_PORT_NUM:\n print('?? TCP port value (%d) too low; changing to %d' % (portnum, MIN_TCP_PORT_NUM))\n elif portnum > MAX_TCP_PORT_NUM:\n print('?? 
TCP port value (%d) too high; changing to %d' % (portnum, max(TCP_PORT_RANGE)))\n return max(min(portnum, MAX_TCP_PORT_NUM), MIN_TCP_PORT_NUM)\n except:\n syndrome = 'invalid port count: %s\\ncount must be a positive integer in range %d - %d' % (\n string, MIN_TCP_PORT_NUM, MAX_TCP_PORT_NUM)\n raise argparse.ArgumentTypeError(syndrome)", "def convert_number(s):\n\n try:\n return int(s)\n except ValueError:\n return None", "def is_port(inString):\r\n if is_int(inString):\r\n intiger = int(inString)\r\n return intiger >= 0 and intiger < 65536\r\n #the 0 is acepted, beacuse later it will be modifyed\r\n else:\r\n return False", "def Int(val):\n try:\n return int(val)\n except ValueError:\n return ''", "def to_int(str_val: str) -> int:\n\n return int(str_val) if is_int(str_val) else None", "def port(self) -> int:", "def to_int(value):\n\n if isinstance(value, int):\n return value\n\n elif isinstance(value, string_types):\n return int(value) if value.isdigit() else None", "def port_from_hex(p_hex):\n return int(p_hex, 16)", "def _get_port_number(\n device_dict: _DeviceDictType,\n port_mapping: Optional[Dict[str, int]] = None,\n parent_device_dict: Optional[_DeviceDictType] = None) -> Optional[int]:\n if not port_mapping: # not connected to a usb hub\n return None\n if is_cambrionix(device_dict):\n return None\n if parent_device_dict:\n index = _get_cambrionix_port_using_parent_hub(\n device_dict, parent_device_dict)\n else:\n # get the appropriate digits\n stripped_location_id = _rstrip_location_id(device_dict)\n index = '{}.{}'.format(stripped_location_id[-2], stripped_location_id[-1])\n port = port_mapping[index]\n return port" ]
[ "0.7344312", "0.64872056", "0.64229685", "0.63615555", "0.63481104", "0.6235894", "0.62252676", "0.6177295", "0.61397123", "0.61028975", "0.60978687", "0.60978687", "0.60958785", "0.6053488", "0.6051167", "0.59981614", "0.5985547", "0.59640247", "0.59606385", "0.59353215", "0.59288836", "0.5925116", "0.59171265", "0.5898649", "0.5887986", "0.5887986", "0.58834994", "0.5871361", "0.5862435", "0.58595574" ]
0.6962645
1
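A minimal usage sketch for valid_port: the (ctx, param, value) signature matches the parameter-callback convention of the Click CLI library, so one plausible wiring is as an argument callback. The command and argument names below are illustrative assumptions, not taken from the original project.

    import click

    def valid_port(ctx, param, value):
        try:
            value = int(value)
        except ValueError:
            pass
        return value

    @click.command()
    @click.argument("port", callback=valid_port)
    def cli(port):
        # Prints either an int such as 22 or the original string such as "ssh".
        click.echo(f"port argument resolved to {port!r}")

    if __name__ == "__main__":
        cli()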
This function creates the SQL query depending on the specified port and the like option.
def get_ports(port, like=False):
    conn = sqlite3.connect(DATABASE_PATH)
    cursor = conn.cursor()
    where_field = "port" if isinstance(port, int) else "name"
    where_value = "%{}%".format(port) if like else port
    cursor.execute(BASE_SQL + where_field + " LIKE ?", (where_value,))
    return cursor
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(port, like):\n ports = get_ports(port, like)\n table = get_table(ports)\n print(table)", "def query(self, query, request_type=None):\n\n #encode to UTF-8\n try: query = query.encode(\"utf-8\")\n except: query = query.decode('raw_unicode_escape').encode(\"utf-8\")\n\n lowercase_query = query.lower()\n if lowercase_query.startswith(\"select\") or \\\n lowercase_query.startswith(\"describe\") or \\\n lowercase_query.startswith(\"show\") or \\\n request_type==\"GET\":\n\n return self._get(urllib.urlencode({'sql': query}))\n\n else:\n return self._post(urllib.urlencode({'sql': query}))", "def specific_ports(pattern):\n like = \"like\" in request.args\n return jsonify({\"ports\": get_ports(pattern, like)})", "def filter(self, **kwargs):\n kwargs['query'] += ' FROM {0}'\n return kwargs", "def _make_query(self):\r\n raise NotImplementedError()", "def make_query(self):", "def determine_query():\n return query if query is not None \\\n else f\"SELECT * FROM '{table}';\"", "def build_query_clauses(\n where: str = \"\", order: str = \"\", limit: int = 0, offset: int = 0\n ) -> str:\n return SqliteQueryBuilder.build_query_clauses(where, order, limit, offset)", "def test_query_sql_injection(self):\r\n\r\n q = '1%3D1;SELECT%20*%20FROM%20task%20WHERE%201=1'\r\n res = self.app.get('/api/task?' + q)\r\n error = json.loads(res.data)\r\n assert res.status_code == 415, error\r\n assert error['action'] == 'GET', error\r\n assert error['status'] == 'failed', error\r\n assert error['target'] == 'task', error\r\n\r\n q = 'app_id=1%3D1;SELECT%20*%20FROM%20task%20WHERE%201'\r\n res = self.app.get('/api/apappp?' + q)\r\n assert res.status_code == 404, res.data\r\n\r\n q = 'app_id=1%3D1;SELECT%20*%20FROM%20task%20WHERE%201'\r\n res = self.app.get('/api/' + q)\r\n assert res.status_code == 404, res.data\r\n\r\n q = 'app_id=1%3D1;SELECT%20*%20FROM%20task%20WHERE%201'\r\n res = self.app.get('/api' + q)\r\n assert res.status_code == 404, res.data", "def _execute_database_specific_connection_statements(self, conn):\n if self.conn_info[\"DATABASE\"] == Database.SQLITE.value:\n conn.execute(\"PRAGMA case_sensitive_like = ON;\")\n\n return conn", "def gen_q_stmt(name, query):\n return \"query {} `{}`;\\n\".format(name, query)", "def sql(self, q):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'sql')\r\n\r\n return http.Request('POST', url, params), parsers.parse_json", "def pp_query(query):\n print(format_query(query))", "def _sql_where(self, cursor, table, prefix=None, aggregate=False):\n assert False, \"subclass responsibility\"", "def run_query(where_clause, limit=1000):\n sql = \"SELECT * FROM catalog WHERE {} ORDER BY creators, title LIMIT {}\"\\\n .format(where_clause, limit)\n with sqlite3.connect(db_name) as db:\n results = pd.read_sql_query(sql, db)\n print_results(results)", "def query3() :", "def sql(self, method: str = 'select') -> str:", "def query(self, query):", "def sql(q, database_url):\r\n output, cur_description = Q(q, database_url, out=True, description=True)\r\n # print(cur_description)\r\n cols = [i[0] for i in cur_description]\r\n return pd.DataFrame(output, columns=cols)", "def make_query(table_name, cols, query):\n str_query = None\n if query == None:\n str_query = \"SELECT {} FROM {};\".format(cols, table_name)\n else:\n str_query = \"SELECT {} FROM {} {};\".format(cols, table_name, query)\n print(\">>>ejecutando: \", str_query)\n sistema.cursor.execute(str_query)\n for row in sistema.cursor.fetchall():\n print(row)", "def _create_query(cls, search: 
Search):\n\n search_columns = []\n for column_name in search.SearchBy.split(\",\"): # accepts multiple columns split by ,\n search_column = cls._get_column_from_name(column_name)\n if search_column is None:\n raise AppException(\"The column {} you are trying to search at don't exists.\".format(column_name))\n search_columns.append(search_column)\n\n find_values = []\n for value in search.SearchValue.split(\",\"): # accepts multiple values split by ,\n find_value = \"%{}%\".format(value.strip())\n find_values.append(find_value)\n\n # construct search filter.\n if search.MapColumnAndValue:\n # makes a 1:1 search for column:value\n search_filters = [sc.like(value) for sc, value in zip(search_columns, find_values)]\n else:\n # makes n:x search for column:value\n search_filters = [sc.like(value) for sc in search_columns for value in find_values]\n\n order_by_list = cls._create_order_by_list(search)\n\n # AND or OR\n if search.Use_AND_Operator:\n query = cls.query.filter(and_(*search_filters)).order_by(*order_by_list)\n else:\n query = cls.query.filter(or_(*search_filters)).order_by(*order_by_list)\n\n if search.TextualQuery:\n query = query.filter(text(search.TextualQuery)).order_by(*order_by_list)\n\n return query", "def get_query(self):\n columns = ','.join(['\"{}\"'.format(x) for x in self.columns])\n query = 'SELECT {} FROM \"{}\"'.format(columns, self.table)\n filter_params = []\n if self.filters:\n filter_sql, filter_params = filter_postgis(self.filters)\n query += ' WHERE {}'.format(filter_sql)\n query += ';'\n return str(text(query)), filter_params", "def build_query(db, request, tags):\n inner_query, clauses = build_inner_query(request, tags)\n if len(tags) and tags[-1][0] == 'uuid':\n # if we select uuid as the trailing tag we have to be special\n query = \"\"\"\nSELECT DISTINCT s.uuid \nFROM stream AS s\nWHERE s.id IN \"\"\" + inner_query\n elif len(tags) and (tags[-1][1] == None or tags[-1][1] == ''):\n # odd-numbered clasues, so we print matching values of tags\n t = escape_string(tags[-1][0])\n query = \"\"\"\nSELECT DISTINCT metadata -> %s AS svals FROM stream\nWHERE id IN %s AND metadata ? 
%s\nORDER BY svals ASC\"\"\" % (t, inner_query, t)\n else:\n # otherwise we print all tags matching the restriction\n query = \"\"\"\nSELECT DISTINCT skeys\nFROM (\n SELECT skeys(metadata) FROM stream\n WHERE id IN %s\n) AS skeys ORDER BY skeys ASC\"\"\" % inner_query\n\n log.msg(query)\n d = db.runQuery(query)\n d.addCallback(log_time, time.time())\n return d", "def _rewrite_stmt(self, stmt: str) -> str:\n stmt = re.sub(r'reserved_til=([0-9]+)WHERE', r'reserved_til=\\1 WHERE', stmt)\n stmt = re.sub(r'peer_id=([0-9]+)WHERE channels.id=', r'peer_id=\\1 WHERE channels.id=', stmt)\n return stmt", "def makeQueries(baseQuery, joiningChar):\n results = []\n searchQueries = sys.argv[2:]\n for query in searchQueries: # for every individual query\n queryList = query.split() # split individual terms in a query\n # join them back with the joining char between them\n formatedQuery = joiningChar.join(queryList)\n # append the structured query to the result\n results.append(baseQuery + formatedQuery)\n return results", "def sub_binds(sql_select):\n\n keywords = ['INNER','FROM','HAVING','WHERE',\"GROUP BY\",\", \"]\n\n (sql_command,binds) = tuple(sql_select)\n\n for b in binds: sql_command=sql_command.replace('?',repr(b),1)\n\n replace_dict = {x:('\\n\\t'+x) for x in keywords}\n\n print '\\n'+replacer(sql_command,replace_dict)+'\\n'", "def run(port, like, json):\n ports = get_ports(port, like)\n if not ports:\n print(\"No ports found for '{0}'\".format(port))\n if json:\n print(jsonmod.dumps(get_dict(ports), indent=4))\n else:\n table = get_table(ports)\n print(table)", "def querydb_util(self, cmd='queryDB_util', pattern='name:', re_flags=re.IGNORECASE):\n delay_factor = self.select_delay_factor(delay_factor=0)\n output = \"\"\n \"\"\"If current mode is Expert mode then let enter dbedit \"\"\"\n if self.check_enable_mode(check_string='#'):\n self.write_channel(self.normalize_cmd(cmd))\n time.sleep(.3 * delay_factor)\n pattern = re.escape(pattern)\n try:\n output += self.read_channel()\n if re.search(pattern, output, flags=re_flags):\n self.write_channel(self.normalize_cmd('\\n\\r'))\n output = self.read_channel()\n self.set_base_prompt()\n except socket.timeout:\n raise NetMikoTimeoutException(\"Timed-out reading channel, data not available.\")\n if not self.check_enable_mode(check_string='>'):\n raise ValueError(\"Failed to enter queryDB_util mode.\")\n return output", "def gen_sql(runtime, query_type, target_model=None):\n\n from_table = runtime.model.table_name\n\n # if target_model not given, use from_table instead\n if target_model is None:\n target_model = runtime.model\n\n target_table = target_model.table_name\n\n data = runtime.data # alias\n\n # quick mark for parse time functions\n _where = Compiler.parse_where(data['where'])\n _set = Compiler.parse_set(data['set'])\n _orderby = Compiler.parse_orderby(data['orderby'])\n _select = Compiler.parse_select(data['select'])\n _limit = Compiler.parse_limit(data['limit'])\n _groupby = Compiler.parse_groupby(data['groupby'])\n _having = Compiler.parse_having(data['having'])\n _distinct = Compiler.parse_distinct(data['distinct'])\n\n pattern = Compiler.SQL_PATTERNS[query_type]\n\n SQL = pattern.format(**{\n 'target': target_table,\n 'set': _set,\n 'from': from_table,\n 'where': _where,\n 'select': _select,\n 'limit': _limit,\n 'orderby': _orderby,\n 'groupby': _groupby,\n 'having': _having,\n 'distinct': _distinct,\n })\n\n return SQL", "def query(engine: sa.engine, query_params: Dict[str, Any]) -> sa.engine.ResultProxy:\r\n with engine.connect() as 
con:\r\n attr_col = [query_params[Toml.ATTR]]\r\n cols = [sa.Column(x) for x in attr_col + query_params[Toml.COLUMNS]]\r\n table = query_params[Toml.TABLE]\r\n s = sa.select(cols)\r\n s.append_from(sa.text(table))\r\n s.append_whereclause(sa.text(query_params[Toml.EXCLUDE_BY] + ' not in :exclude')\r\n .bindparams(sa.bindparam('exclude', expanding=True)))\r\n return con.execute(s, {\r\n 'exclude': query_params[Toml.EXCLUDE]\r\n })" ]
[ "0.5777958", "0.54624337", "0.54352665", "0.5378882", "0.5313509", "0.53130084", "0.5191155", "0.51026887", "0.50910205", "0.50606954", "0.5035756", "0.50239843", "0.5011402", "0.50092334", "0.5002485", "0.49994776", "0.4979458", "0.49748883", "0.49660277", "0.49547812", "0.49448493", "0.49255085", "0.4915786", "0.48915613", "0.48803636", "0.4878823", "0.48741558", "0.4866111", "0.48568603", "0.48519212" ]
0.6570871
0
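A usage sketch for get_ports: the function filters on the "port" column when it receives an integer and on the "name" column otherwise, and wraps the value in % wildcards only when like is true. DATABASE_PATH, BASE_SQL and the table schema below are assumptions made to keep the example self-contained; the real constants are not shown in this entry.

    import os
    import sqlite3
    import tempfile

    # Assumed stand-ins for the project's real constants.
    DATABASE_PATH = os.path.join(tempfile.gettempdir(), "ports_demo.sqlite")
    BASE_SQL = "SELECT name, port, protocol, description FROM ports WHERE "

    def make_demo_db():
        # Tiny throwaway table so the query below has something to hit.
        conn = sqlite3.connect(DATABASE_PATH)
        conn.execute("DROP TABLE IF EXISTS ports")
        conn.execute("CREATE TABLE ports (name TEXT, port TEXT, protocol TEXT, description TEXT)")
        conn.executemany(
            "INSERT INTO ports VALUES (?, ?, ?, ?)",
            [("ssh", "22", "tcp", "Secure Shell"),
             ("http", "80", "tcp", "Hypertext Transfer Protocol")],
        )
        conn.commit()
        conn.close()

    def get_ports(port, like=False):
        conn = sqlite3.connect(DATABASE_PATH)
        cursor = conn.cursor()
        where_field = "port" if isinstance(port, int) else "name"
        where_value = "%{}%".format(port) if like else port
        cursor.execute(BASE_SQL + where_field + " LIKE ?", (where_value,))
        return cursor

    make_demo_db()
    print(list(get_ports("ssh")))            # name lookup without wildcards
    print(list(get_ports("ht", like=True)))  # substring match via LIKE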
This function returns a pretty table used to display the port results.
def get_table(ports):
    table = PrettyTable(["Name", "Port", "Protocol", "Description"])
    table.align["Name"] = "l"
    table.align["Description"] = "l"
    table.padding_width = 1
    for p in ports:
        table.add_row(p)
    return table
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_table(ports):\n table = PrettyTable([\"Name\", \"Port\", \"Protocol\", \"Description\"])\n table.align[\"Name\"] = \"l\"\n table.align[\"Description\"] = \"l\"\n table.padding_width = 1\n\n for port in ports:\n table.add_row(port)\n\n return table", "def pretty_display(self):\n\t\tpretty_space = PrettyTable()\n\t\tpretty_space.field_names = range(self.space.shape[1])\n\t\tcount = 0\n\t\tpretty_row = []\n\t\tfor cell in self.space.flat:\n\t\t\tcount = count + 1\n\t\t\tpretty_row.append(cell.state)\n\t\t\tif count >= self.space.shape[1]:\n\t\t\t\tpretty_space.add_row(pretty_row)\n\t\t\t\tcount = 0\n\t\t\t\tpretty_row = []\n\t\tprint(pretty_space)", "def pretty(self):\n #table = [\"\".join([\"%8s \" % s for s in self.alpha.getSymbols()])]\n table = []\n for row in PWM.getFreq(self):\n table.append(\"\".join([\"%8.6f \" % y for y in row]))\n return table", "def __print_work_table(table):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % ('Act', 'Pred', 'Block', 'Dummy', 'Succ', 'start', 'end')\n for k, col in sorted(table.items()):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % tuple(\n [str(k)] + [list(col[0])] + [str(col[i]) for i in range(1, len(col))])", "def tabulate(self) -> str:\n items = [\n ('Number of stations', self._num_stations),\n ('Loss probability', self.drop_prob),\n ]\n\n for node in range(self._num_stations):\n items.append((f'[[ STATION #{node} ]]', ''))\n\n ssize = self.system_size[node]\n qsize = self.queue_size[node]\n busy = self.busy[node]\n\n ssize_pmf = [ssize.pmf(x) for x in range(ssize.truncated_at + 1)]\n qsize_pmf = [qsize.pmf(x) for x in range(qsize.truncated_at + 1)]\n busy_pmf = [busy.pmf(x) for x in range(busy.truncated_at + 1)]\n\n items.extend([\n ('System size PMF', str_array(ssize_pmf)),\n ('System size average', ssize.mean),\n ('System size std.dev.', ssize.std),\n ('Queue size PMF', str_array(qsize_pmf)),\n ('Queue size average', qsize.mean),\n ('Queue size std.dev.', qsize.std),\n ('Busy PMF', str_array(busy_pmf)),\n ('Utilization', self.get_utilization(node)),\n ('Drop probability', self.drop_prob[node]),\n ('Delivery probability', self.delivery_prob[node]),\n ('Departures, average', self.departures[node].avg),\n ('Departures, std.dev.', self.departures[node].std),\n ('Response time, average', self.response_time[node].avg),\n ('Response time, std.dev.', self.response_time[node].std),\n ('Wait time, average', self.wait_time[node].avg),\n ('Wait time, std.dev.', self.wait_time[node].std),\n ('End-to-end delays, average', self.delivery_delays[node].avg),\n ('End-to-end delays, std.dev.', self.delivery_delays[node].std),\n ])\n return tabulate(items, headers=('Param', 'Value'))", "def __repr__(self):\n result = \"%s (%s):\\n\" % (self.host, self.status)\n for port in self.ports:\n result += \"\\t%s %s %s %s %s\\n\" % (port[0], port[1], port[2], port[3], port[4])\n result += \"\\n\"\n return result", "def pprint_table(out, table):\n\n\tcol_paddings = []\n\n\tfor i in range(len(table[0])):\n\t\tcol_paddings.append(get_max_width(table, i))\n\n\tfor row in table:\n\t\t# left col\n\t\tout.write(str(row[0]).ljust(col_paddings[0] + 1))\n\t\t\n\t\t# rest of the cols\n\t\tfor i in range(1, len(row)):\n\t\t\tout.write(str(row[i]).rjust(col_paddings[i] + 2))\n\t\t\n\t\tout.write('\\n')", "def print_table(self):\n print(\"%-12s%-12s%-12s%-12s%-12s\" % (\"index\",\"balance\",\"payment\",\"interest\",\"amortization\"))\n print(\"-------------------------------------------------------------\")\n for i in self.table[\"index\"]:\n print(\"%-12i%-12i%-12i%-12i%-12i\" % 
(self.table[\"index\"][i],self.table[\"balance\"][i]\\\n ,self.table[\"payment\"][i],self.table[\"interest\"][i],\\\n self.table[\"amortization\"][i]))", "def genhtml(port):\n\n sfont='<font size=\"-1\">'\n\n efont='</font>'\n\n res='<table width=\"100%\"><tr><th width=\"20%\">Property<th width=\"40%\">Value<th width=\"40%\">Description</tr>\\n'\n\n keys=port.keys()\n\n keys.sort()\n\n for k in keys:\n\n if k.startswith('usb-') and not k.endswith('string'):\n\n continue\n\n res+='<tr><td valign=\"top\">'+sfont+k+efont+'</td><td valign=\"top\">\\n'\n\n if k=='active' or k=='available':\n\n if port[k]:\n\n res+=sfont+\"True\"+efont\n\n else:\n\n res+=sfont+\"False\"+efont\n\n elif k=='driverdate':\n\n res+=sfont+(\"%d-%d-%d\" % tuple(port[k]))+efont\n\n elif k=='driverstatus':\n\n res+=sfont+`port[k]`+efont \n\n else:\n\n if isinstance(port[k], type(\"\")):\n\n res+=sfont+htmlify(port[k])+efont\n\n else:\n\n res+=sfont+`port[k]`+efont\n\n res+='</td><td valign=\"top\">'\n\n if k=='name':\n\n res+=sfont+\"This is the name the port is known to your operating system as\"+efont\n\n elif k=='available':\n\n if port[k]:\n\n res+=sfont+\"It was possible to open this port\"+efont\n\n else:\n\n res+=sfont+\"It was not possible to open this port\"+efont\n\n elif k=='active':\n\n if port[k]:\n\n res+=sfont+\"Your operating system shows this driver and port is correctly configured and a device attached\"+efont\n\n else:\n\n res+=sfont+\"This driver/port combination is not currently running\"+efont\n\n elif k=='driverstatus':\n\n res+=sfont+\"\"\"This is low level detail. If problem is non-zero then you need to look in the\n control panel for an explanation as to why this driver/device is not working.\"\"\"+efont\n\n elif k=='hardwareinstance':\n\n res+=sfont+\"\"\"This is how the device is named internally. For example USB devices include\n the vendor (VID) and product (PID) identities\"\"\"+efont\n\n elif k==\"libusb\":\n\n res+=sfont+\"\"\"This indicates if the usb library is in use to access this device. 
Operating system\n device drivers (if any) are bypassed when BitPim talks to the device\"\"\"+efont\n\n elif k==\"driver-required\":\n\n res+=sfont+\"\"\"This indicates if you must use a device driver, not direct USB access\"\"\"+efont\n\n elif k==\"BitFling\":\n\n res+=sfont+\"\"\"This indicates that the port is being accessed from a remote machine via BitFling,\"\"\"+efont\n\n elif k==\"protocol\":\n\n res+=sfont+\"\"\"This is the protocol the USB device claims to speak\"\"\"+efont\n\n elif k==\"class\":\n\n if port[k]==\"serial\":\n\n res+=sfont+\"\"\"This is a serial connection\"\"\"+efont\n\n elif port[k]==\"modem\":\n\n res+=sfont+\"\"\"This is a modem connection\"\"\"+efont\n\n else:\n\n res+=sfont+\"\"\"The port type (serial, modem etc)\"\"\"+efont\n\n else:\n\n res+=\"&nbsp;\"\n\n res+=\"</td></tr>\\n\"\n\n res+=\"\\n</table>\"\n\n return res", "def format_prettytable(table):\r\n for i, row in enumerate(table.rows):\r\n for j, item in enumerate(row):\r\n table.rows[i][j] = format_output(item)\r\n ptable = table.prettytable()\r\n ptable.hrules = FRAME\r\n ptable.horizontal_char = '.'\r\n ptable.vertical_char = ':'\r\n ptable.junction_char = ':'\r\n return ptable", "def print_para_table(s):\n if MODE == 1:\n t = [['Parameter', 'Value', 'Unit'],\n ['Number of bends', NBENDS, '/'], \n ['Width', WIDTH, 'm'],\n ['Depth', DEPTH, 'm'],\n ['Length', LAMBDA*(NBENDS+1), 'm'],\n ['Arc wavelength', LAMBDA, 'm'],\n ['Slope', SLOPE, '/'],\n ['Streamwise resolution', DS, 'm'],\n ['Transverse resolution', np.around(INTERVAL, decimals=4), 'm'],\n ['Streamwise # of pts', s.size + 2*int(LAMBDA/2/DS), '/'],\n ['Transverse # of pts', NUM*2+1, '/']]\n elif MODE == 2:\n if FNAME[0].islower():\n f = FNAME[0].upper() + FNAME[1:]\n else:\n f = FNAME\n t = [['Parameter', 'Value', 'Unit'],\n ['River name', f.rsplit('.', 1)[0], '/'],\n ['Width', WIDTH, 'm'],\n ['Depth', DEPTH, 'm'],\n ['Length', np.round(s[-1], decimals=2), 'm'],\n ['Slope', SLOPE, '/'],\n ['Streamwise resolution', np.round(np.mean(np.diff(s)), decimals=2), 'm'],\n ['Transverse resolution', np.round(INTERVAL, decimals=2), 'm'],\n ['Streamwise # of pts', s.size, '/'],\n ['Transverse # of pts', NUM*2+1, '/']]\n print(tabulate(t, tablefmt='psql', stralign='right', headers='firstrow'))", "def remote_login_table_format(result):\n table = []\n for item in result:\n row = OrderedDict()\n row['ID'] = item['nodeId']\n row['IP'] = item['ipAddress']\n row['SSH Port'] = int(item['port'])\n table.append(row)\n return table", "def display_port(self):\n ports=os.popen(\"sudo netstat -ntlp\").read().strip().splitlines()[2:]\n for port in ports:\n split=re.split('[\\s]+',port)\n self.portDic[\"Protcol\"]=split[0]\n self.portDic[\"Receive Q\"]=split[1]\n self.portDic[\"Send Q\"]=split[2]\n split_port=split[3].split(\":\")\n if split_port[1]==\"\":\n self.portDic[\"port\"]=\"No Port\" \n else:\n self.portDic[\"port\"]=split_port[1]\n self.portDic[\"Foreign Address\"]=split[4]\n self.portDic[\"State\"]=split[5]\n split_ID=split[6].split(\"/\")\n self.portDic[\"PID\"]=split_ID[0]\n self.portDic[\"Programme Name\"]=split_ID[1]\n self.portList.append(self.portDic.copy())\n return self.portList", "def build_table(type_, test_type, device_name, thresholds):\n x = PrettyTable() \n x.field_names = [device_name] + thresholds\n \n \"Chrome,\" + test_type + \",\" + str(notAfter_date) + \",\" + thresholds[index], \",fail\"\n \n ##read all Chromep entries\n ##get all test_type rows\n ##loop rows\n ##show table", "def pretty_print(self):\n pt = PrettyTable()\n for i in 
self.files_summary:\n pt.field_names = [\"File Name\", \"Classes\", \"Functions\", \"Lines\", \"Characters\"]\n pt.add_row(list([i, self.files_summary[i][\"class\"], self.files_summary[i][\"function\"], self.files_summary[i][\"line\"], self.files_summary[i][\"char\"]]))\n print(pt) #Using a Print statement here because i tried to return self.pt and it didnt give me anything but the print works", "def print_tables(self):\n print \"------------------\\nTables\\n------------------\"\n cnt = 0\n for x in self.show_tables():\n cnt += 1\n print (\"{0}.) {1}\".format(cnt, x[0]))", "def ascii_table(self, tablefmt=\"pipe\"):\n methods = self.methods\n xvalues = self.xvalues\n plot_matrix = self.plot_matrix\n\n import tabulate\n # https://pypi.python.org/pypi/tabulate\n aug_table = np.hstack((np.array(methods)[:, np.newaxis], plot_matrix))\n return tabulate.tabulate(aug_table, xvalues, tablefmt=tablefmt)", "def print_table(table):\r\n print('/-----------------------------------------------------------------------------------\\\\')\r\n for item in table:\r\n\r\n while len(item[1]) <= 22:\r\n item[1] += ' '\r\n\r\n while len(item[2]) <= 27:\r\n item[2] += ' '\r\n\r\n while len(item[0]) <= 15:\r\n item[0] += ' '\r\n\r\n print('| '+item[0]+' | '+item[1]+'| '+item[2]+' |')\r\n\r\n print('\\\\-----------------------------------------------------------------------------------/')", "def print_table(self) -> None:\n if (self.probability_links == None):\n print(\"+--------+\")\n print(f\"| P({self.key:1s}) |\")\n print(\"+--------+\")\n print(f\"| {self.probability_values[0]:0.04f} |\")\n print(\"+--------+\")\n else:\n arg_len = 2 + len(' '.join(self.probability_links.keys()))\n param_len = 2 + \\\n max(6, len(\"P(A|)\" + \",\".join(self.probability_links.keys())))\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")\n print(\n f\"| {' '.join(self.probability_links.keys())} | P({self.key}|{','.join(self.probability_links.keys())}) |\")\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")\n for i in range(2**len(self.probability_links.keys())):\n # Gives us a string binary value to make truth table off of\n bool_key = f\"{i:0{len(self.probability_links.keys())}b}\"\n print(\n f\"| {' '.join(['T' if bool_key[j] == '0' else 'F' for j in range(len(self.probability_links.keys()))])} | {f'{self.probability_values[i]:0.04f}':<{param_len-1}s}|\")\n print(f\"+{'-'*arg_len}+{'-'*param_len}+\")", "def __str__(self):\n\n # Create grid headers for the table\n headers = [letter for letter in string.ascii_uppercase[:self.width]]\n\n board_state = []\n board_state.extend([[value for value in row] for i, row in enumerate(self.board_state)])\n\n for idx, row in enumerate(board_state):\n row.insert(0, idx + 1)\n\n return tabulate(board_state, headers, tablefmt=\"grid\")", "def to_html_table(self):\n td = '<td>'\n nwtd = '<td nowrap=\"true\">'\n ftd = '<td class=\"format\">'\n ctd = '<td class=\"cen\">'\n etd = '</td>'\n \n if self.is_power_onoff():\n out = td + 'Power On/Off' + etd\n else:\n out = nwtd + '<strong>' + self['target'].ljust(20) + '</strong>' + etd\n\n if 'Date' in self:\n out += ctd + self['Date'] + etd\n else:\n out += td + etd\n\n if 'UTstart' in self:\n out += ctd + self['UTstart'] + etd\n else:\n out += td + etd\n\n if 'UTend' in self:\n out += ctd + self['UTend'] + etd\n else:\n out += td + etd\n\n if 'exposure' in self:\n out += ctd + self['exposure'] + etd\n else:\n out += td + etd\n\n if 'sample' in self:\n out += ctd + self['sample'] + etd\n else:\n out += td + etd\n\n if 'nframe' in self:\n out += ctd + 
self['nframe'] + etd\n else:\n out += td + etd\n \n if self.is_power_onoff():\n out += (td + etd)*3\n else:\n speed = self['speed']\n out += ctd + self['filters'].ljust(11) + etd + ctd + self['x_bin'] + 'x' + self['y_bin'] + etd + ctd + speed + etd \n \n if self.number_windows() > 0:\n out += ctd + self['x1_size'].rjust(4) + 'x' + self['y1_size'].ljust(4) + etd + td + self['x1_start'].ljust(3) + etd + td + self['y1_start'].ljust(4) + etd\n else:\n out += (td + etd)*3\n \n if self.number_windows() > 1:\n out += ctd + self['x2_size'].rjust(4) + 'x' + self['y2_size'].ljust(4) + etd + td + self['x2_start'].ljust(3) + etd + td + self['y2_start'].ljust(4) + etd\n else:\n out += (td + etd)*3\n\n if 'grating' in self:\n out += ctd + self['grating'] + etd\n else:\n out += td + etd\n\n if 'slit_width' in self:\n out += ctd + self['slit_width'] + etd\n else:\n out += td + etd\n\n if 'slit_angle' in self:\n out += ctd + self['slit_angle'] + etd\n else:\n out += td + etd\n \n if 'ID' in self:\n out += ctd + self['ID'] + etd\n else:\n out += td + etd\n\n if 'PI' in self:\n out += ctd + self['PI'] + etd\n else:\n out += td + etd\n \n if 'Comment' in self:\n out += nwtd + self['Comment'] + etd\n else:\n out += td + etd\n\n return out", "def pprint_table(table, out=sys.stdout, rstrip=False):\n\n def max_width_col(table, col_idx):\n \"\"\"\n Get the maximum width of the given column index\n \"\"\"\n return max(len(row[col_idx]) for row in table)\n\n if rstrip:\n for row_idx, row in enumerate(table):\n table[row_idx] = [c.rstrip() for c in row]\n\n col_paddings = []\n ncols = len(table[0])\n for i in range(ncols):\n col_paddings.append(max_width_col(table, i))\n\n for row in table:\n # left col\n out.write(row[0].ljust(col_paddings[0] + 1))\n # rest of the cols\n for i in range(1, len(row)):\n col = row[i].rjust(col_paddings[i] + 2)\n out.write(col)\n out.write(\"\\n\")", "def prettyPrint(description, ip_comp, host, width):\n value = (len(ip_comp) + len(host))\n #When printing values wider than the second column, split and print them\n if value > (int(width/3)):\n print(\"| \" + description.ljust(int(width/3)) + \" |\" ), \n i=0\n wrapped=textwrap.wrap(value, 60) \n for loop in wrapped:\n print(\"Fail point 3 inside loop\")\n if i == 0:\n print(loop + \"|\".rjust(int(width/3-(len(loop)))))\n else: \n print(\"| \".ljust(int(width/3+3)) + \" | \" + loop + \"|\".rjust(int(width/3-(len(loop)))))\n i=i+1\n else: \n print( \"| \" + description.ljust(int(width/3)) + \" | \" + ip_comp.rjust(int(width/3-6)) + \" | \" + host.rjust(int(width/3+2)) + \"|\")", "def file_server_show_table_format(result):\n row = OrderedDict()\n row['Name'] = result['name']\n row['Resource Group'] = result['resourceGroup']\n row['Size'] = result['vmSize']\n disks = result['dataDisks']\n if disks:\n row['Disks'] = '{0} x {1} Gb'.format(disks['diskCount'], disks['diskSizeInGb'])\n mount_settings = result['mountSettings']\n if mount_settings:\n row['Public IP'] = mount_settings['fileServerPublicIp']\n row['Internal IP'] = mount_settings['fileServerInternalIp']\n row['Mount Point'] = mount_settings['mountPoint']\n return row", "def vm_table_view(vlab_api, info):\n vm_body = []\n vm_header = ['Name', 'IPs', 'Type', 'Version', 'Powered', 'Networks']\n for vm, data in info.items():\n body = {'url': data['console']}\n network = data.get('networks', ['?'])\n kind = data['meta']['component']\n version = data['meta']['version']\n power = data['state'].replace('powered', '')\n row = [vm, '\\n'.join(data['ips']), kind, version, power, 
','.join(network)]\n vm_body.append(row)\n if not vm_body:\n table = None\n else:\n table = tabulate(vm_body, headers=vm_header, tablefmt='presto')\n return table", "def print_table(table):\n # transpose the table:\n table = map(list, zip(*table))\n # get the column width:\n col_width = [max(len(str(x)) for x in col) for col in zip(*table)]\n # print it to screen:\n print\n for line in table:\n print \"| \" + \" | \".join(\"{:{}}\".format(x, col_width[i]) for i, x in enumerate(line)) + \" |\"\n print", "def __str__(self):\n tabuleiro = prettytable.PrettyTable(header=False)\n for linha in self.tabuleiro:\n tabuleiro.add_row(linha)\n return str(tabuleiro)", "def print_table(table):\n rest = table[1:]\n fmt = \"%-28s %-9s %-16s %s\"\n for row in rest:\n print(fmt % tuple(row))", "def list_ports(state):\n\tstate.report()", "def pprint(self):\n pad = 4\n upper = [x/10 for x in range(GRID_SIZE)]\n lower = [x%10 for x in range(GRID_SIZE)]\n print(' ' * pad + ' '+' '.join(map(str,upper)))\n print(' ' * pad + ' '+' '.join(map(str,lower)))\n print(' ' * pad + pprint_header())\n for row in range(GRID_SIZE):\n print(' {0:2}'.format(row), end=' ')\n for col in range(GRID_SIZE):\n cell = self.get_cell_rc(row, col)\n if cell is None:\n print('.', end=' ')\n elif cell == 0:\n print(' ', end=' ')\n else: # cell == 1\n print('#', end=' ')\n print() # end line\n print(' ' * pad + pprint_header(delim='+', pad='-'))" ]
[ "0.8009263", "0.6580488", "0.65752167", "0.65355736", "0.64888346", "0.6466678", "0.645486", "0.64460754", "0.6360644", "0.628148", "0.62409216", "0.6148639", "0.6144184", "0.6138973", "0.6137527", "0.6128025", "0.61141527", "0.6105768", "0.60711896", "0.6041017", "0.60379446", "0.6035905", "0.6028095", "0.6018037", "0.60157484", "0.6014832", "0.6009036", "0.60042423", "0.5990286", "0.5989194" ]
0.80720896
0
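For illustration, and consistent with the run() helpers that appear among the neighbouring entries, rows shaped like the cursor output of get_ports can be fed straight into get_table. PrettyTable here is the third-party prettytable package; the sample rows are made up.

    from prettytable import PrettyTable

    def get_table(ports):
        table = PrettyTable(["Name", "Port", "Protocol", "Description"])
        table.align["Name"] = "l"
        table.align["Description"] = "l"
        table.padding_width = 1
        for p in ports:
            table.add_row(p)
        return table

    rows = [("ssh", "22", "tcp", "Secure Shell"),
            ("http", "80", "tcp", "Hypertext Transfer Protocol")]
    print(get_table(rows))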
Return phase and composition.
def get_phase_and_composition(self):
    data = self.data
    total = data.sum()
    if total <= 0.:
        raise RuntimeError(f"'{phase_names[self.phase]}' phase does not exist")
    return self.phase, data / total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPhase(phase):", "def phase(self):\n return self.__phase", "def GetPhase(self):\n ...", "def phase(self):\n return self.data", "def phases(self):\r\n\r\n phase = tsa.cache_to_phase(self.cache, self.ij)\r\n\r\n return phase", "def phase(self):\n pass", "def phase(self):\n return np.arctan(np.sum(np.imag(self.values)) / np.sum(np.real(self.values)))", "def get_phase(self,):\n\n # for comparison\n initial_state = self._read('CPOW0')\n\n POW_step = 0.02197265\n POW = 0x00 | initial_state[0] << 8 | initial_state[1]\n phase = round(POW*POW_step, 2)\n\n print ('Latest phase set (i.e. currently in register):', phase)\n\n return self.phases", "def m_phase(self):\n return self._m_phase", "def phase(self):\r\n\r\n #XXX calcluate this from the standard output, instead of recalculating:\r\n\r\n tseries_length = self.input.data.shape[0]\r\n spectrum_length = self.spectrum.shape[-1]\r\n\r\n phase = np.zeros((tseries_length,\r\n tseries_length,\r\n spectrum_length))\r\n\r\n for i in range(tseries_length):\r\n for j in range(i, tseries_length):\r\n phase[i][j] = np.angle(\r\n self.spectrum[i][j])\r\n\r\n phase[j][i] = np.angle(\r\n self.spectrum[i][j].conjugate())\r\n return phase", "def Phase(self, *xpars):\n return np.angle(self.combineResult(*xpars))", "def iter_composition(self):\n array = self.data\n total = array.sum() or 1.\n return zip(self._phases, array/total)", "def phase(self, hjd):\n # 2009-09-28 14:07 IJC: Implemented object-oriented version\n return getorbitalphase(self, hjd)", "def phase(self):\n\n self.theta = np.arctan(np.sqrt(self.P / (1 - self.P)))\n return self", "def phases(self):\n return self._phases", "def phase(self):\r\n return 0.2 * self.weights", "def relative_phases(self):\r\n return np.angle(self.coherency)", "def relative_phases(self):\r\n return np.angle(self.coherency)", "def phase(self):\n return -self.attrs['RFphase']*2*np.pi", "def Closure_Phase(self, uv1, uv2, uv3):\n phi1 = self.Phase(*uv1)\n phi2 = self.Phase(*uv2)\n phi3 = self.Phase(*uv3)\n cphi = phi1 - phi2 + phi3\n cphi[cphi < -pi] += 2 * pi\n cphi[cphi > pi] -= 2 * pi\n return cphi", "def remote_getPhase(phase):", "def phaseChi2(self, pars):\n\t\treturn self.modeChi2(pars, PHASE)", "def phase(state, i):\n particles = bin(state >> i + 1).count(\"1\")\n return 1 if particles % 2 == 0 else -1", "def phase(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phase\")", "def phase(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phase\")", "def phase(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phase\")", "def calc_phase(p, t):\n\n return (t % p)/p", "def phase(self):\n return 0.0 * self.__weights", "def Phase(data):\r\n hil = signal.hilbert(data)\r\n return np.unwrap(np.arctan2(hil.imag, hil.real))", "def phase(self) -> Optional[str]:\n return pulumi.get(self, \"phase\")" ]
[ "0.7307", "0.7237978", "0.71048236", "0.7045281", "0.698043", "0.696836", "0.67255837", "0.6650661", "0.66206443", "0.66041034", "0.65987706", "0.65656793", "0.6487877", "0.644085", "0.63621134", "0.62020665", "0.61857206", "0.61857206", "0.6132078", "0.60938764", "0.60899353", "0.6086675", "0.6084505", "0.6041206", "0.6041206", "0.6041206", "0.6030405", "0.6022369", "0.60183716", "0.59915" ]
0.80427444
0
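A self-contained sketch of the behaviour: the method normalises the stored amounts so the returned composition sums to one, and raises when the phase holds nothing. phase_names, self.phase and self.data belong to the surrounding class, which this entry does not show, so the mapping and the small container below are assumptions.

    import numpy as np

    phase_names = {"g": "Gas", "l": "Liquid", "s": "Solid"}  # assumed mapping

    class PhaseData:
        def __init__(self, phase, data):
            self.phase = phase
            self.data = np.asarray(data, dtype=float)

        def get_phase_and_composition(self):
            data = self.data
            total = data.sum()
            if total <= 0.:
                raise RuntimeError(f"'{phase_names[self.phase]}' phase does not exist")
            return self.phase, data / total

    phase, x = PhaseData("l", [2.0, 1.0, 1.0]).get_phase_and_composition()
    print(phase, x)  # l [0.5  0.25 0.25]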
Iterate over phase-data pairs.
def __iter__(self):
    return zip(self._phases, self.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iteritems(self):\n\t\tself.filep.seek(self.start + 2048)\n\n\t\t# iterate until we hit the enddata marker\n\t\twhile self.filep.tell() < self.enddata - 1:\n\t\t\t# fetch the lengths of the key and value\n\t\t\t(klen, vlen) = unpack('<LL', self.filep.read(8))\n\n\t\t\t# yield the key and value as a tuple\n\t\t\tyield (self.filep.read(klen), self.filep.read(vlen))", "def __next__(self):\n for (k, v) in pairs(self._data):\n yield (v, k)", "def iter_params(self):\n for var, val in self._params.iteritems():\n yield var, val", "def _iter_items(data_sequence):\n for time, element in data_sequence:\n for item in element:\n yield time, item", "def __iter__(self) -> (str, np.ndarray):\n for k, v in self.fields.items():\n yield k, v", "def iter_over_pairs(pairs):\r\n if isinstance(pairs, dict):\r\n return pairs.iteritems()\r\n else:\r\n return pairs", "def __iter__(self):\n prefix = len(META_NS) + 2\n for key, value in self.stats.items():\n yield (key[prefix:-6], int(value))", "def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value", "def iteritems(self):\r\n for wr, value in self.data.iteritems():\r\n key = wr()\r\n if key is not None:\r\n yield key, value", "def variableIter(self):\n for (para, start), variable in self.variables.iteritems():\n yield para, start, variable", "def iter_composition(self):\n array = self.data\n total = array.sum() or 1.\n return zip(self._phases, array/total)", "def iteritems(self):\n for aVal, bValues in self._forwardMap.iteritems():\n for bVal in bValues:\n yield aVal, bVal\n\n return", "def iloop():\n \"\"\" they are in no particular order\"\"\"\n for attr, value in sdata.__dict__.iteritems():\n print(attr, value)", "def __iter__(self):\n for sample in self.data:\n yield sample", "def iter_components(self):\n for iv in range(len(self._var_names)):\n yield self._var_names[iv], self._vals[iv]", "def __iter__(self):\n for key, value in self.read():\n yield key, value", "def __iter__(self):\r\n for item in self._data:\r\n yield item # yield the KEY\r", "def parse_pairs(self):\n pass", "def __iter__(self):\n yield from chain.from_iterable(self.data.values())", "def items():\n for i in self._iter_restrict(zeros, ones):\n yield self.pcdata[i]", "def __iter__(self):\n start_times = (start for start, end in self.tss)\n names = (name.rstrip() for name in self.inps)\n for ind, (c, t) in enumerate(zip(names, start_times)):\n yield (c, t, ind)", "def items(self):\n for element, value in self.focals.items():\n yield (element, value)", "def values(self):\n\t\treturn iter(self.data)", "def __iter__(self):\n for i in range(len(self.data)):\n yield self.data[i]", "def pump_curves(self):\n for key in self._pump_curves:\n yield key, self._data[key]", "def pairs(self):\n return self.items() if self.is_a(dict) else self.chunks(2)", "def itervalues(self):\n for key in self:\n yield self[key]", "def __iter__(self):\n keys = [CoolProp.iDmass,CoolProp.iHmass,CoolProp.iP,CoolProp.iSmass,CoolProp.iT]\n for key in sorted(keys):\n yield key", "def pairs(self) -> Iterator[tuple[str, list[CommandParser]]]:\n for module, cmds in self._registry[\"by_module\"].items():\n yield (module, cmds)", "def iter_varints(data: bytes) -> Iterator[int]:\n pos = 0\n while pos < len(data):\n val, n_read = unpack_varint(data[pos:])\n pos += n_read\n yield val" ]
[ "0.61923057", "0.6127433", "0.6024104", "0.59248453", "0.5864485", "0.5809957", "0.5752815", "0.5745456", "0.5745456", "0.5681684", "0.566154", "0.56560236", "0.56503946", "0.5589269", "0.5587654", "0.55798745", "0.5524056", "0.55216944", "0.5510899", "0.54914224", "0.5477888", "0.54409075", "0.5439717", "0.5438345", "0.54372096", "0.54219675", "0.54161924", "0.5413385", "0.54068935", "0.53826576" ]
0.6936464
0
Iterate over phase-composition pairs.
def iter_composition(self):
    array = self.data
    total = array.sum() or 1.
    return zip(self._phases, array/total)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n return zip(self._phases, self.data)", "def iter_components(self):\n for iv in range(len(self._var_names)):\n yield self._var_names[iv], self._vals[iv]", "def pairs(self) -> Iterator[tuple[str, list[CommandParser]]]:\n for module, cmds in self._registry[\"by_module\"].items():\n yield (module, cmds)", "def iteritems(self):\n for aVal, bValues in self._forwardMap.iteritems():\n for bVal in bValues:\n yield aVal, bVal\n\n return", "def __iter__(self):\n for p in self.param_grid:\n # Always sort the keys of a dictionary, for reproducibility\n modstr = '%s__' % self.modality\n items = sorted([(k.replace('clf__'+modstr, ''), v) for k, v in p.items() if modstr in k])\n if not items:\n yield {}\n else:\n keys, values = zip(*items)\n for v in product(*values):\n params = dict(zip(keys, v))\n yield params", "def __iter__(self):\n for p in self.param_grid:\n # Always sort the keys of a dictionary, for reproducibility\n items = sorted(p.items())\n if not items:\n yield {}\n else:\n keys, values = zip(*items)\n for v in product(*values):\n params = dict(zip(keys, v))\n yield params", "def iter_components(self):\n return self.components.values()", "def iter_params(self):\n for var, val in self._params.iteritems():\n yield var, val", "def swp_combo_iter(self) -> Iterable[Tuple[Any, ...]]:\n return itertools.product(*(self._sweep_params[var] for var in self._swp_var_list))", "def iterate_layers(self, *args):\n for layer in range(self.num_layers):\n yield layer, (\n (\n direction,\n tuple(arg[self.num_directions * layer + direction] for arg in args),\n )\n for direction in range(self.num_directions)\n )", "def __iter__(self):\n from sage.misc.mrange import cartesian_product_iterator\n\n if self._cd._length == 1:\n if self._cd._degree == 1:\n yield self([[0]])\n return\n\n S = self._cd._sym\n profile = list(self._profile)[:-1]\n for p in cartesian_product_iterator([S.conjugacy_class(pi)\n for pi in profile]):\n if self._cd._connected and not perms_are_connected(p, self._cd._degree):\n continue\n c = self._cd(list(p) + [None], check=False)\n if c.profile() == self._profile:\n yield c", "def __iter__(self):\n yield from self.qc_mol\n yield from self.br_mol\n yield from self.pc_mol", "def __iter__(self):\n keys = [CoolProp.iDmass,CoolProp.iHmass,CoolProp.iP,CoolProp.iSmass,CoolProp.iT]\n for key in sorted(keys):\n yield key", "def params(self) -> Iterable[sympy.Symbol]:\n for i in range(self.iterations):\n for p in range(len(self.qubits)):\n if (self.include_all_z or not\n numpy.isclose(self.orbital_energies[p], 0)):\n yield LetterWithSubscripts('U', p, i)\n for p, q in itertools.combinations(range(len(self.qubits)), 2):\n if (self.include_all_cz or not\n numpy.isclose(self.hamiltonian.two_body[p, q], 0)):\n yield LetterWithSubscripts('V', p, q, i)", "def __iter__(self):\n for p in self.positions(): # use same order as positons()\n yield p.element() # but yield each element", "def createPhaseIntervals(self):\n if self.phaseIvals:\n self.notify.debug(\"not creating phase ivals again\")\n return\n self.phaseIvals = []\n for key,info in self.phaseInfo.iteritems():\n self.notify.debug(\"key=%s\"%key)\n if type(info[0]) == types.TupleType:\n ival = Sequence()\n for index,anims in enumerate(info[0]):\n animName = \"phase%d_%d\" % (key, index)\n animIval = self.node.actorInterval(animName)\n animIvalDuration = animIval.getDuration() \n soundIval = self.createSoundInterval(anims, animIvalDuration) \n soundIvalDuration = soundIval.getDuration() \n animAndSound = Parallel( soundIval, animIval)\n 
ival.append(animAndSound)\n self.phaseIvals.append(ival)\n else:\n animName = \"phase%d\" % key \n animIval = self.node.actorInterval( 'phase%d' % key)\n animIvalDuration = animIval.getDuration()\n soundIval = self.createSoundInterval(info[0], animIvalDuration ) \n soundIvalDuration = soundIval.getDuration()\n ival = Parallel(\n animIval,\n soundIval,\n )\n self.phaseIvals.append(ival)", "def variableIter(self):\n for (para, start), variable in self.variables.iteritems():\n yield para, start, variable", "def __iter__(self):\n return iter(self.transforms)", "def __iter__(self):\n return iter(self.transforms)", "def __iter__(self):\n for p in self.positions(): # use same order as positions()\n yield p.element() # but yield each element", "def generator(self) -> Iterator[Tuple[int, int, complex]]:\n for inda in range(self._core.lena()):\n alpha_str = self._core.string_alpha(inda)\n for indb in range(self._core.lenb()):\n beta_str = self._core.string_beta(indb)\n yield alpha_str, beta_str, self.coeff[inda, indb]", "def __iter__(self):\n for x in self._order:\n yield x", "def __iter__(self):\n for element in self.focals:\n yield element", "def __iter__(self):\n for p in self.positions():\n yield p.element()", "def __iter__(self):\n pt = (self.x, self.y)\n for i in pt:\n yield i", "def _iter_pairs(graph):\n for u, v in set(graph.edges_iter()):\n yield u, v", "def itercubes(self, **kwargs):\n for ifuslot in self.fplane.ifuslots:\n yield ifuslot, self.extract_ifu_sensitivity_cube(ifuslot, \n **kwargs)", "def __iter__(self):\n for i in self.loopindices:\n pid = self.frametracks.particle.values[i]\n yield pid, self.neighbors(pid)", "def __iter__(self):\n from sage.combinat.posets.posets import FinitePosets_n\n n = 0\n while True:\n for P in FinitePosets_n(n):\n yield P\n n += 1", "def iterCurves(self):\n for c in range(self.length()):\n yield self.curve(c)" ]
[ "0.64725065", "0.6385009", "0.5799649", "0.57750535", "0.5743196", "0.5727175", "0.56926465", "0.56606615", "0.56315726", "0.5597081", "0.55758834", "0.55486155", "0.55393696", "0.5531499", "0.55149543", "0.55137235", "0.5492203", "0.54808474", "0.54808474", "0.54404294", "0.5439582", "0.54104143", "0.5394193", "0.53886604", "0.5374433", "0.53668475", "0.53414184", "0.5332823", "0.53316003", "0.5313728" ]
0.71738464
0
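The two iterator entries above differ only in normalisation: __iter__ pairs each phase label with the raw stored amounts, while iter_composition divides by the overall total first (the "or 1." fallback avoids dividing by zero when everything is empty). A throwaway container, with assumed _phases labels and data shape, makes the difference visible:

    import numpy as np

    class PhaseIndexer:
        _phases = ("g", "l")  # assumed phase labels

        def __init__(self, data):
            self.data = np.asarray(data, dtype=float)  # one row of amounts per phase

        def __iter__(self):
            return zip(self._phases, self.data)

        def iter_composition(self):
            array = self.data
            total = array.sum() or 1.
            return zip(self._phases, array / total)

    pi = PhaseIndexer([[1.0, 3.0], [4.0, 2.0]])
    print(dict(pi))                      # raw amounts per phase
    print(dict(pi.iter_composition()))   # the same rows as fractions of the overall total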
Return a ChemicalMassFlowIndexer that references this object's molar data.
def by_mass(self):
    try:
        mass = self._data_cache['mass']
    except:
        chemicals = self.chemicals
        self._data_cache['mass'] = mass = \
            ChemicalMassFlowIndexer.from_data(
                SparseVector.from_dict(
                    MassFlowDict(self.data.dct, chemicals.MW),
                    chemicals.size
                ),
                self._phase, chemicals, False
            )
    return mass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def by_mass(self):\n try:\n mass = self._data_cache['mass']\n except:\n chemicals = self.chemicals\n size = chemicals.size\n MW = chemicals.MW\n self._data_cache['mass'] = mass = \\\n MassFlowIndexer.from_data(\n SparseArray.from_rows([\n SparseVector.from_dict(MassFlowDict(i.dct, MW), size)\n for i in self.data\n ]),\n self.phases, chemicals,\n False\n )\n return mass", "def mols(self) -> List[Chem.Mol]:\n return [d.mol for d in self.data]", "def molecule_data(self) -> Union[QMolecule, WatsonHamiltonian]:\n return self._molecule_data", "def get_mol_info(self):\n return", "def molecule(self):\n return self._molecule", "def create_index_molecules(self):\n logger.info(\"\\n\\n Processing data set:\")\n\n self.index_complexes = []\n\n # Training dataset\n desc = '{:25s}'.format(' Train dataset')\n if self.tqdm:\n data_tqdm = tqdm(self.train_database,\n desc=desc, file=sys.stdout)\n else:\n logger.info(' Train dataset')\n data_tqdm = self.train_database\n sys.stdout.flush()\n\n for fdata in data_tqdm:\n if self.tqdm:\n data_tqdm.set_postfix(mol=os.path.basename(fdata))\n try:\n fh5 = h5py.File(fdata, 'r')\n mol_names = list(fh5.keys())\n mol_names = self._select_pdb(mol_names)\n # to speed up in case of no filtering:\n if not self.dict_filter:\n self.index_complexes = [[fdata, k, None, None] for k in mol_names]\n else:\n for k in mol_names: \n if self.filter(fh5[k]):\n self.index_complexes += [(fdata,\n k, None, None)]\n for irot in range(self.data_augmentation):\n axis, angle = pdb2sql.transform.get_rot_axis_angle(\n self.rotation_seed)\n self.index_complexes += [\n (fdata, k, angle, axis)]\n fh5.close()\n except Exception:\n logger.exception(f'Ignore file: {fdata}')\n\n self.ntrain = len(self.index_complexes)\n self.index_train = list(range(self.ntrain))\n\n if self.ntrain == 0:\n raise ValueError(\n 'No avaiable training data after filtering')\n\n # Validation dataset\n if self.valid_database:\n\n desc = '{:25s}'.format(' Validation dataset')\n if self.tqdm:\n data_tqdm = tqdm(self.valid_database,\n desc=desc, file=sys.stdout)\n else:\n data_tqdm = self.valid_database\n logger.info(' Validation dataset')\n sys.stdout.flush()\n\n for fdata in data_tqdm:\n if self.tqdm:\n data_tqdm.set_postfix(mol=os.path.basename(fdata))\n try:\n fh5 = h5py.File(fdata, 'r')\n mol_names = list(fh5.keys())\n mol_names = self._select_pdb(mol_names)\n self.index_complexes += [(fdata, k, None, None)\n for k in mol_names]\n fh5.close()\n except Exception:\n logger.exception(f'Ignore file: {fdata}')\n\n self.ntot = len(self.index_complexes)\n self.index_valid = list(range(self.ntrain, self.ntot))\n self.nvalid = self.ntot - self.ntrain\n\n # Test dataset\n if self.test_database:\n\n desc = '{:25s}'.format(' Test dataset')\n if self.tqdm:\n data_tqdm = tqdm(self.test_database,\n desc=desc, file=sys.stdout)\n else:\n data_tqdm = self.test_database\n logger.info(' Test dataset')\n sys.stdout.flush()\n\n for fdata in data_tqdm:\n if self.tqdm:\n data_tqdm.set_postfix(mol=os.path.basename(fdata))\n try:\n fh5 = h5py.File(fdata, 'r')\n mol_names = list(fh5.keys())\n mol_names = self._select_pdb(mol_names)\n self.index_complexes += [(fdata, k, None, None)\n for k in mol_names]\n fh5.close()\n except Exception:\n logger.exception(f'Ignore file: {fdata}')\n\n self.ntot = len(self.index_complexes)\n self.index_test = list(\n range(self.ntrain + self.nvalid, self.ntot))\n self.ntest = self.ntot - self.ntrain - self.nvalid", "def make_molecule(self):\n\n self.data.molecules = self.data.Smiles.apply(Chem.MolFromSmiles)\n\n 
return self.data.molecules", "def mols(self) -> List[Chem.Mol]:\n return []", "def featurize(self):\n\n self.make_molecule()\n\n self.described_molecules = self.calculator.pandas(self.data.molecules)\n self.described_molecules.index = self.data.index\n\n self.described_molecules = pd.concat([self.data, self.described_molecules], axis=1)\n\n return self.described_molecules", "def assembleMol(self):\n\n\t\tnewMol = Molecule()\n\n\t\tfor atom in self.atomlist:\n\t\t\tres = atom.parentResidue\n\t\t\tchain = res.parentChain\n\n\t\t\tcurrChain = newMol.getChain(chain.name)\n\t\t\tif not currChain:\n\t\t\t\tcurrChain = newMol.newChain()\n\t\t\t\tcurrChain.copy(chain)\n\n\n\t\t\tcurrRes = currChain.getResidue(res.file_id)\n\t\t\tif not currRes:\n\t\t\t\tcurrRes = currChain.newResidue()\n\t\t\t\tcurrRes.copy(res)\n\n\t\t\tcurrRes.addAtom(atom)\n\n\t\treturn newMol", "def __init__(self, name, force_field):\n self.molecule = force_field.blocks[name]", "def mol_with_atom_index(mol: Chem.Mol) -> Chem.Mol:\n cp = Chem.Mol(mol)\n atoms = cp.GetNumAtoms()\n for idx in range(atoms):\n cp.GetAtomWithIdx(idx).SetProp('molAtomMapNumber', str(mol.GetAtomWithIdx(idx).GetIdx()))\n return cp", "def get_index(self):\n return self.index", "def get_index(self):\n return self.index", "def get_model(self):\n raise NotImplementedError(\n \"You must provide a 'get_model' method for the '%r' index.\" % self\n )", "def index_set(self):\n return self._index", "def Open(self):\n return _gmat_py.SolarFluxReader_Open(self)", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def index(self):\n return self._index", "def existing_mc(self) -> ManagedCluster:\n return self.__existing_mc", "def index_as_cube(self):\n return _IndexAsCubeSlicer(self)" ]
[ "0.56176096", "0.5561553", "0.5336587", "0.5316509", "0.52759135", "0.5150848", "0.5141311", "0.5130907", "0.5127069", "0.50799567", "0.4900331", "0.4872749", "0.48726016", "0.48726016", "0.48714966", "0.48510545", "0.47927132", "0.47504795", "0.47504795", "0.47504795", "0.47504795", "0.47504795", "0.47504795", "0.47504795", "0.47504795", "0.47504795", "0.47504795", "0.47504795", "0.47408396", "0.47363818" ]
0.5627419
0
Return a ChemicalVolumetricFlowIndexer that references this object's molar data.
def by_volume(self, TP):
    try:
        vol = self._data_cache['vol', TP]
    except:
        chemicals = self._chemicals
        V = [i.V for i in chemicals]
        phase = self._phase
        self._data_cache['vol', TP] = \
        vol = ChemicalVolumetricFlowIndexer.from_data(
            SparseVector.from_dict(
                VolumetricFlowDict(self.data.dct, TP, V, None, phase, {}),
                chemicals.size
            ),
            phase, chemicals,
            False
        )
    return vol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def by_volume(self, TP):\n try:\n vol = self._data_cache[TP]\n except:\n phases = self._phases\n chemicals = self._chemicals\n V = [i.V for i in chemicals]\n size = chemicals.size\n self._data_cache[TP] = \\\n vol = VolumetricFlowIndexer.from_data(\n SparseArray.from_rows([\n SparseVector.from_dict(VolumetricFlowDict(i.dct, TP, V, j, None, {}), size)\n for i, j in zip(self.data, self._phases)\n ]),\n phases, chemicals,\n False\n )\n return vol", "def Open(self):\n return _gmat_py.SolarFluxReader_Open(self)", "def index_as_cube(self):\n return _IndexAsCubeSlicer(self)", "def get_molar_volume(self):\n structure = self.structure\n volume = structure.volume\n _, units = structure.composition.get_reduced_composition_and_factor()\n unit_volume = volume / units\n\n return unit_volume", "def mols(self) -> List[Chem.Mol]:\n return [d.mol for d in self.data]", "def model(self):\n return _model_from_quantity(self._sliced_components, mesh=self.mesh)", "def volume(self):\n return _cantera.reactor_volume(self.__reactor_id)", "def getComponentVolume(self):\n lengthO = self.getDimension(\"lengthOuter\")\n widthO = self.getDimension(\"widthOuter\")\n heightO = self.getDimension(\"heightOuter\")\n lengthI = self.getDimension(\"lengthInner\")\n widthI = self.getDimension(\"widthInner\")\n heightI = self.getDimension(\"heightInner\")\n mult = self.getDimension(\"mult\")\n vol = mult * (lengthO * widthO * heightO - lengthI * widthI * heightI)\n return vol", "def Vc(self):\n return self.__central_volume", "def __init__(self, name, force_field):\n self.molecule = force_field.blocks[name]", "def get_index(self):\n return (np.sqrt(self.dielectric))", "def volume(self):\n return self.volume_array", "def molecule_data(self) -> Union[QMolecule, WatsonHamiltonian]:\n return self._molecule_data", "def open(self):\n return shelve.open(self.filename, 'c')", "def get_an_vol(self):\n\n structure = self.structure\n\n unit_volume = self.get_molar_volume()\n reduced_form = structure.composition.element_composition.to_reduced_dict\n anion, cations = self.ion_class()\n\n try:\n num_anions = np.float(reduced_form[anion])\n an_vol = unit_volume / num_anions\n except ZeroDivisionError:\n print(''.join(re.split(r'\\D', reduced_form[::-1])[::-1]))\n return None\n\n return an_vol", "def get_ref_index(self):\n total_pol = self.get_compound_pol()\n molar_volume = self.get_molar_volume()\n if not total_pol:\n return None\n ref_index = np.sqrt((4 * np.pi * total_pol) / ((2.26 - 4 * np.pi / 3) * total_pol + molar_volume) + 1)\n return ref_index", "def __init__(self, time_series=None, ij=(0, 0), method=None, lb=0, ub=None,\r\n prefer_speed_over_memory=True, scale_by_freq=True):\r\n\r\n BaseAnalyzer.__init__(self, time_series)\r\n #Initialize variables from the time series\r\n self.ij = ij\r\n\r\n #Set the variables for spectral estimation (can also be entered by\r\n #user):\r\n if method is None:\r\n self.method = {'this_method': 'welch'}\r\n\r\n else:\r\n self.method = method\r\n\r\n if self.method['this_method'] != 'welch':\r\n e_s = \"For SparseCoherenceAnalyzer, \"\r\n e_s += \"spectral estimation method must be welch\"\r\n raise ValueError(e_s)\r\n\r\n self.method['Fs'] = self.method.get('Fs', self.input.sampling_rate)\r\n\r\n #Additional parameters for the coherency estimation:\r\n self.lb = lb\r\n self.ub = ub\r\n self.prefer_speed_over_memory = prefer_speed_over_memory\r\n self.scale_by_freq = scale_by_freq", "def get_index(self):\n return self.vm.filesystems.index(self)", "def __init__(self):\n\n # The molecule-residue-spin 
object.\n self.mol = MoleculeList()\n\n # The interatomic data object.\n self.interatomic = InteratomList()\n\n # The data pipe type.\n self.pipe_type = None\n\n # Hybrid models.\n self.hybrid_pipes = []", "def mols(self) -> List[Chem.Mol]:\n return []", "def get_mol_info(self):\n return", "def getComponentVolume(self, cold=False):\n od = self.getDimension(\"od\", cold=cold)\n iD = self.getDimension(\"id\", cold=cold)\n mult = self.getDimension(\"mult\")\n vol = mult * 4.0 / 3.0 * math.pi * ((od / 2.0) ** 3 - (iD / 2.0) ** 3)\n return vol", "def molecule(self):\n return self._molecule", "def _isotope_data(self):\n # Data structure:\n # header\n # 1 int (M): no. blocks (i.e. masses)\n # M blocks:\n # (each block)\n # 1 int (N): no. points (i.e. frames)\n # N doubles: cumulative count time in s\n # N doubles: data\n self.fh.seek(self.header['header size'])\n blocks = unpack(self._bo + 'i', self.fh.read(4))[0]\n\n data = []\n for block in range(blocks):\n points = unpack(self._bo + 'i', self.fh.read(4))[0]\n d = np.fromfile(self.fh, dtype=self._bo+'f8', count=2*points).reshape(2, points)\n data.append(d[1])\n\n self._data_corr = xarray.DataArray(data,\n dims=('species', 'frame'),\n coords={'species': ('species', list(self.header['label list']))},\n attrs={'unit': 'counts/s'})", "def wheel(self):\n return FilletedSolid(\n built_from=\n TranslatedShape(shape_in=\n RotatedShape(shape_in=\n Cylinder(radius=\n self.wheels_properties[1],\n height=300.,\n position=self.position),\n rotation_point=self.position,\n vector=Vector(1, 0, 0),\n angle=radians(90)),\n displacement=Vector(self.wheels_properties[0],\n 300.,\n -self.positions[1][0])),\n radius=30.)", "def solar_meta(self):\n return self.data.solar_meta", "def featurize(self):\n\n self.make_molecule()\n\n self.described_molecules = self.calculator.pandas(self.data.molecules)\n self.described_molecules.index = self.data.index\n\n self.described_molecules = pd.concat([self.data, self.described_molecules], axis=1)\n\n return self.described_molecules", "def lofted_car(self):\n return LoftedSolid(profiles=self.chamfered_curve,\n mesh_deflection=1e-4)", "def pyscal_voronoi_volume(self):\n return analyse_voronoi_volume(atoms=self._structure)", "def model(self):\n return _model_from_quantity(self.table, self.mesh)" ]
[ "0.5716781", "0.5351351", "0.5267331", "0.52465904", "0.5042801", "0.50214684", "0.4959102", "0.4895627", "0.48869613", "0.47211167", "0.4713881", "0.47011477", "0.46425217", "0.4640899", "0.46223387", "0.4618841", "0.45911646", "0.45791003", "0.4566672", "0.454923", "0.4546791", "0.45457157", "0.452765", "0.45222276", "0.45201895", "0.44780633", "0.44388542", "0.44386715", "0.4434701", "0.44328195" ]
0.57009274
1
Return a VolumetricFlowIndexer that references this object's molar data.
def by_volume(self, TP):
    try:
        vol = self._data_cache[TP]
    except:
        phases = self._phases
        chemicals = self._chemicals
        V = [i.V for i in chemicals]
        size = chemicals.size
        self._data_cache[TP] = \
        vol = VolumetricFlowIndexer.from_data(
            SparseArray.from_rows([
                SparseVector.from_dict(VolumetricFlowDict(i.dct, TP, V, j, None, {}), size)
                for i, j in zip(self.data, self._phases)
            ]),
            phases, chemicals,
            False
        )
    return vol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def by_volume(self, TP):\n try:\n vol = self._data_cache['vol', TP]\n except:\n chemicals = self._chemicals\n V = [i.V for i in chemicals]\n phase = self._phase\n self._data_cache['vol', TP] = \\\n vol = ChemicalVolumetricFlowIndexer.from_data(\n SparseVector.from_dict(\n VolumetricFlowDict(self.data.dct, TP, V, None, phase, {}),\n chemicals.size\n ),\n phase, chemicals,\n False\n )\n return vol", "def index_as_cube(self):\n return _IndexAsCubeSlicer(self)", "def get_molar_volume(self):\n structure = self.structure\n volume = structure.volume\n _, units = structure.composition.get_reduced_composition_and_factor()\n unit_volume = volume / units\n\n return unit_volume", "def model(self):\n return _model_from_quantity(self._sliced_components, mesh=self.mesh)", "def Open(self):\n return _gmat_py.SolarFluxReader_Open(self)", "def volume(self):\n return self.volume_array", "def getComponentVolume(self):\n lengthO = self.getDimension(\"lengthOuter\")\n widthO = self.getDimension(\"widthOuter\")\n heightO = self.getDimension(\"heightOuter\")\n lengthI = self.getDimension(\"lengthInner\")\n widthI = self.getDimension(\"widthInner\")\n heightI = self.getDimension(\"heightInner\")\n mult = self.getDimension(\"mult\")\n vol = mult * (lengthO * widthO * heightO - lengthI * widthI * heightI)\n return vol", "def volume(self):\n return _cantera.reactor_volume(self.__reactor_id)", "def pyscal_voronoi_volume(self):\n return analyse_voronoi_volume(atoms=self._structure)", "def get_volume(cls) -> float:\n raise NotImplementedError", "def get_an_vol(self):\n\n structure = self.structure\n\n unit_volume = self.get_molar_volume()\n reduced_form = structure.composition.element_composition.to_reduced_dict\n anion, cations = self.ion_class()\n\n try:\n num_anions = np.float(reduced_form[anion])\n an_vol = unit_volume / num_anions\n except ZeroDivisionError:\n print(''.join(re.split(r'\\D', reduced_form[::-1])[::-1]))\n return None\n\n return an_vol", "def get_mesh(self):\n tsdf_vol, color_vol = self.get_volume()\n\n # Marching cubes\n verts, faces, norms, vals = measure.marching_cubes_lewiner(tsdf_vol, level=0)\n verts_ind = np.round(verts).astype(int)\n verts = verts * self._voxel_size + self._vol_origin # voxel grid coordinates to world coordinates\n\n # Get vertex colors\n rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]\n colors_b = np.floor(rgb_vals / self._color_const)\n colors_g = np.floor((rgb_vals - colors_b * self._color_const) / 256)\n colors_r = rgb_vals - colors_b * self._color_const - colors_g * 256\n colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T\n colors = colors.astype(np.uint8)\n return verts, faces, norms, colors", "def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi", "def Vc(self):\n return self.__central_volume", "def model(self):\n return _model_from_quantity(self.table, self.mesh)", "def compute(self):\n # this just initializes all gradients to the vector (0,0,0)\n self.data = [ZERO_GRADIENT] * (self.volume.dim_x * self.volume.dim_y * self.volume.dim_z)\n\n for i in range(1, self.volume.dim_x-1):\n for j in range(1, self.volume.dim_y-1):\n for k in range(1, self.volume.dim_z-1):\n d_x = 0.5 * (self.volume.get_voxel(i+1, j, k) - self.volume.get_voxel(i-1, j, k))\n d_y = 0.5 * (self.volume.get_voxel(i, j+1, k) - self.volume.get_voxel(i, j-1, k))\n d_z = 0.5 * (self.volume.get_voxel(i, j, k+1) - self.volume.get_voxel(i, j, k-1))\n self.set_gradient(i, j, k, VoxelGradient(d_x, d_y, d_z))", "def 
get_index(self):\n return self.vm.filesystems.index(self)", "def index(self):\n if not isinstance(self._index, pd.core.frame.DataFrame):\n self.load()\n return self._index", "def mesh(self):\n return self._mesh", "def mesh(self):\n return self._mesh", "def mesh(self):\n return self._mesh", "def mesh(self):\n return self._mesh", "def volume(self):\n return self.structure.volume", "def get_indexable(cls):\n return cls.get_model().get_objects()", "def mols(self) -> List[Chem.Mol]:\n return [d.mol for d in self.data]", "def get_volume(self):\n return self.__volume", "def get_ref_index(self):\n total_pol = self.get_compound_pol()\n molar_volume = self.get_molar_volume()\n if not total_pol:\n return None\n ref_index = np.sqrt((4 * np.pi * total_pol) / ((2.26 - 4 * np.pi / 3) * total_pol + molar_volume) + 1)\n return ref_index", "def get_index(self):\n return (np.sqrt(self.dielectric))", "def get_index(self):\n return self.index", "def get_index(self):\n return self.index" ]
[ "0.5805949", "0.56040126", "0.54907763", "0.5324066", "0.531043", "0.5271636", "0.5215782", "0.5140199", "0.4976544", "0.48717976", "0.48590884", "0.48520187", "0.48088706", "0.4799558", "0.47949484", "0.4783697", "0.4777927", "0.47577655", "0.472059", "0.472059", "0.472059", "0.472059", "0.47193956", "0.471029", "0.46943998", "0.46911198", "0.4683033", "0.4681757", "0.4677993", "0.4677993" ]
0.59221214
0
Returns first task in the individual robot buffer. Task is deleted.
def get_first_task(self, robot_id):
    individual_buffer = self.all_buffers[robot_id]
    task = individual_buffer[-1]
    individual_buffer = np.delete(individual_buffer, -1, 0)
    self.all_buffers[robot_id] = individual_buffer
    return task
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_first_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n return individual_buffer[-1]", "def get_task(self): \n task = self.buffer[0]\n self.buffer = np.delete(self.buffer, 0, 0)\n return task", "def get_last_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n data = individual_buffer[0]\n individual_buffer = np.delete(individual_buffer, 0, 0)\n self.all_buffers[robot_id] = individual_buffer\n return data", "def check_last_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n return individual_buffer[0]", "def first(self) -> Task:\n return self._tasks[0]", "def check_task(self): \n return self.buffer[0]", "def get_first_task(self):\n return self.get_first_step().get_last_task()", "def get_previous_task():\n return _tasks[0] if len(_tasks) != 0 else None", "def delete_task_by_id(self, robot_id, task_id): \n individual_buffer = self.all_buffers[robot_id]\n task_ids = individual_buffer[:, 0]\n task_idx = np.where(task_ids == task_id)\n if task_idx[0].size == 0:\n print(\"ERROR: Task was already deleted of was never in the buffer.\")\n individual_buffer = np.delete(individual_buffer, task_idx, 0)\n self.all_buffers[robot_id] = individual_buffer", "def get_last_task(self):\n return self.get_task_by_index(-1)", "def next(self) -> Optional[Task]:\n if len(self._pending_tasks) == 0:\n return None\n else:\n task = self._pending_tasks.pop(0)\n task.status = 'Running'\n self._dump_tasks_info()\n return task", "def getFirst(self, t):\n index = self._findFirst(t)\n if index >= 0:\n return self.jobs[index]\n else:\n return None", "def popFirst(self, t):\n index = self._findFirst(t)\n if index >= 0:\n return self.jobs.pop(index)", "def get_one_task_by_name(self, name: str) -> \"Task\": # noqa: F821\n tasks = self.get_tasks_by_name(name)\n if not tasks:\n raise PyDSTaskNoFoundException(f\"Can not find task with name {name}.\")\n return tasks.pop()", "def current_task(self):\n try:\n return self.active_item(remove=False)\n except queue.Empty:\n return None", "def get(self, task_id=None):\n if task_id:\n item = self.find(task_id)\n self.queue.remove(item)\n else:\n item = self.queue.get()\n return item", "def get_task(self):\n return self.queue.get()", "def peek(self):\r\n return self.queue[0]", "def peek(self):\r\n return self.queue[0]", "def check_task_by_deadline(self, robot_id, deadline): \n individual_buffer = self.all_buffers[robot_id]\n if individual_buffer.shape[0] == 1:\n task = self.check_last_task(robot_id)\n return task\n elif individual_buffer.shape[0] == 0:\n print(\"ERROR: buffer for robot \" + str(robot_id) + \" is empty!\")\n else:\n all_buff_dls = individual_buffer[:, -1]\n task_idxs = np.where(all_buff_dls <= deadline)\n if task_idxs[0].size == 0:\n # no task has a deadline that is earlier\n # goal of active task is returned\n # print(\"no task has earlier deadline\")\n # print(\"return goal of active task\")\n task = individual_buffer[-1]\n # print(\"task:\", task)\n return task\n else:\n relevant_task_idx = task_idxs[0][0]\n task = individual_buffer[relevant_task_idx, :]\n # print(\"task:\", task)\n return task", "def get_first(self):\n for u in self.user_order:\n if self.user_skip[u] == 0:\n return self.user_queue[u][0].obj\n return None", "def peek(self):\n return self.queue[0]", "def get_free_key_task(self):\n keys = []\n\n try:\n with open(self.path_to_task_file, 'r') as file:\n for line in file:\n current_task = Task()\n current_task.load(line)\n keys.append(current_task.key)\n except:\n pass\n\n while 
True:\n if len(keys) == 500:\n return None\n key = random.randint(0, 500)\n if key not in keys:\n return str(key)", "def dequeue(self) -> Any:\n if len(self.queue) <= 1:\n task = self.queue[0]\n self.queue = []\n\n return task\n \n last_index = len(self.queue) - 1\n self._swap_tasks(0, last_index)\n\n task = self.queue.pop()\n\n self._bubble_down_task()\n\n return task", "def get(self, block=True, timeout=None):\n if not self.connected:\n raise QueueNotConnectedError(\"Queue is not Connected\")\n\n if block:\n payload = self.__db.brpop(self._key, timeout=timeout)\n else:\n payload = self.__db.rpop(self._key)\n\n if not payload:\n return None\n\n task = self.task_class(payload[1])\n\n # if task was marked as unique then\n # remove the unique_hash from lock table\n if task.unique:\n self.__db.srem(self._lock_key, task.unique_hash())\n\n return task", "def peek(self):\n return self.the_queue[0]", "def read_command(self):\n if self._pending_commands:\n return self._pending_commands.popleft()\n\n msg = self._serializer.read_msg()\n\n while self._msg_is_taskid(msg):\n self._pending_taskids.append(msg)\n msg = self._serializer.read_msg()\n\n return msg", "def next_task(self, t = 0):\r\n \r\n if self.idle_playlist_counter >= len(self.idle_playlist)-1:\r\n self.idle_playlist_counter = 0\r\n if len(self.playlist) ==0:\r\n task = self.idle_playlist[self.idle_playlist_counter]\r\n self.idle_playlist_counter +=1\r\n\r\n if self.playlist_counter < len(self.playlist):\r\n while self.playlist[self.playlist_counter][0] - t < 0:\r\n self.playlist_counter +=1\r\n if abs(self.playlist[self.playlist_counter][0] - t ) < 2.0*1.1:\r\n task = self.playlist[self.playlist_counter]\r\n debug(task)\r\n self.playlist_counter +=1\r\n else:\r\n task = self.idle_playlist[self.idle_playlist_counter]\r\n self.idle_playlist_counter +=1\r\n else:\r\n task = self.idle_playlist[self.idle_playlist_counter]\r\n self.idle_playlist_counter +=1\r\n\r\n return task", "def get_current_element(self, ):\n n = jbscene.get_current_scene_node()\n if not n:\n return None\n tfid = cmds.getAttr(\"%s.taskfile_id\" % n)\n try:\n tf = djadapter.taskfiles.get(pk=tfid)\n return tf.task.element\n except djadapter.models.TaskFile.DoesNotExist:\n raise djadapter.models.TaskFile.DoesNotExist(\"Could not find the taskfile that was set on the scene node. Id was %s\" % tfid)", "def top(self): # O(1)\n if not self.queue:\n return None\n return self.queue[0]" ]
[ "0.80448747", "0.7857006", "0.7391221", "0.7325314", "0.7287126", "0.6595893", "0.65539056", "0.6505673", "0.64575756", "0.63990015", "0.6366266", "0.63554853", "0.6327331", "0.6190272", "0.6144666", "0.61099964", "0.6075297", "0.58957654", "0.58957654", "0.58465374", "0.5834985", "0.5807265", "0.57740206", "0.5762758", "0.5762033", "0.57381433", "0.5693094", "0.5679457", "0.56206524", "0.56138825" ]
0.8743162
0
Check first task info without deletion.
def check_first_task(self, robot_id):
    individual_buffer = self.all_buffers[robot_id]
    return individual_buffer[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_task(self): \n return self.buffer[0]", "def is_task_stagnant(task):", "def check_repeated_task(self, task):\n task_status = task in self.tasks_asked\n\n # append if never asked\n if task_status == False:\n self.tasks_asked.append(task)\n\n return task_status", "def task_scanned(now_task):", "def test_delete_task(self):\n check = False\n r = main.List.connection()\n main.List.add(r, \"ToDo\", 1, \"Buy apples\", 2, \"20.05.2015\")\n main.List.delete(r, \"ToDo\", 1)\n task = main.List.pull_from_redis(r, \"ToDo\", False)\n for key in task.iterkeys():\n if key == \"1\":\n check = True\n self.assertFalse(check, \"Deleting task failed.\")", "def first(self) -> Task:\n return self._tasks[0]", "def _get_task(self, task_id):\n if not task_id:\n return None\n task = objects.Transaction.get_by_uid(task_id, fail_if_not_found=False)\n if task and task.cluster_id == self.cluster.id:\n return task\n return False", "def _check_task(self, task: Task) -> bool:\n try:\n extents = list(fiemap(task.path, sync=task.frequency > 1))\n except OSError:\n self.logger.error('Error#%d %s', task.id, task.path, exc_info=True)\n return False\n\n if not extents:\n return False\n\n planner = Planner(self.planner_params, extents)\n clusters = planner.result()\n\n if not clusters:\n return False\n\n task.extents = extents\n task.clusters = clusters\n\n return True", "def __checkInited(self, date=None, tkey=None, ttype=None):\n if ttype == 'crontab':\n return False\n if date is None:\n date = self.ddate\n\n for doc in TaskHistory().search(task_day=date, task_key=tkey):\n if doc.get(\"status\") == \"waiting\":\n return True\n return False", "def get_first_task(self):\n return self.get_first_step().get_last_task()", "def test_get_all(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n task_list = self.task_storage.get_all()\n\n self.assertEqual(task_list[0], self.my_task)", "def is_task(self, task_id, tasks):\r\n for t in tasks:\r\n if t.id == task_id:\r\n return True\r\n return False", "def initTasks(self):\n if self.tkey is None:\n num = TaskHistory(date=self.ddate).search(\n count=True, task_day=self.ddate)\n hour = int(time.strftime(\"%H\"))\n # a hack, many times run init module\n if num > 20 and hour >= 1:\n self.log.info(\"Initialization has been completed\")\n return True\n tlist = TaskLibrary().allTask()\n else:\n tlist = TaskLibrary().getByKey(self.tkey)\n if not tlist:\n self.log.debug(\"no tasks\")\n return False\n\n ts = TaskHistory()\n for task in tlist:\n # status not 1, not init it.\n if int(task.get(\"status\", 0)) != 1:\n continue\n task = self.__parseTask(task)\n if self.__checkInited(task.get(\"task_day\"), task.get(\"task_key\"), task.get(\"task_type\")):\n continue\n ts.insert(task)\n\n self.log.info(\"init task finished\")\n return True", "def check_task_id(id):\n\n\t# Open connection and execute SQL to get a task\n\ttry:\n\t\tdb, cursor = connect()\n\t\t\n\t\tcursor.execute(\"\"\"SELECT id FROM tasks \n\t\t\t\t\t\tWHERE id=%s\"\"\" % id)\n\n\t\ttask = cursor.fetchone()\n\n\t# Get error messages\n\texcept catch_error(), e:\n\t\tprint \"Error %d: %s\" % (e.args[0],e.args[1])\n\n\t# Close connection\n\tfinally:\n\t\tif db:\n\t\t\tdb.close()\n\n\treturn task", "def __check_if_task_exists(self, server_id):\n if server_id in self.__migrating_tasks.keys():\n return True\n return False", "def helper_get_task_or_default(self):\n task_id = self.helper_retrieve_last_request_get_dict_key_val_index_zero_or_return_none(\"taskid\")\n alt_task_store_name = 
self.helper_retrieve_last_request_get_dict_key_val_index_zero_or_return_none(\"alt_task_store_name\")\n used_task_store = self.task_store\n # don't want to use sth like globals.get(alt_task_store) so that only approved stores can be used\n if alt_task_store_name == \"task_store_trash\":\n used_task_store = self.task_store_trash\n try:\n task = used_task_store.store_dict_id[task_id]\n except Exception as exc:\n # task_id is either None or it is not in store_dict_id\n util.dbgprint(\"exception in helper_get_task_or_default, semi-expected {}\".format(str(exc)))\n self.error_msg_queue_list.append(\"Couldn't retrieve requested note.\")\n return False, 0, 0, self.page_list_notes(no_history=True)\n return True, task_id, task, \"\"", "def get_previous_task():\n return _tasks[0] if len(_tasks) != 0 else None", "def test_delete(self):\n new_task = task.Task()\n self.my_task.key = self.task_storage.add(self.my_task)\n\n key = self.task_storage.delete(self.my_task.key)\n new_task = self.task_storage.find(key)\n\n self.assertIsNone(new_task)", "def test_22_get_specific_completed_task_anonymous(self):\r\n\r\n #model.rebuild_db()\r\n with self.flask_app.app_context():\r\n self.create()\r\n app = db.session.query(App).first()\r\n task = db.session.query(Task)\\\r\n .filter(App.id == app.id)\\\r\n .first()\r\n\r\n for i in range(10):\r\n task_run = TaskRun(app_id=app.id, task_id=task.id,\r\n user_ip=\"127.0.0.1\", info={'answer': 1})\r\n db.session.add(task_run)\r\n db.session.commit()\r\n\r\n ntask = Task(id=task.id, state='completed')\r\n\r\n assert ntask not in db.session\r\n db.session.merge(ntask)\r\n db.session.commit()\r\n\r\n res = self.app.get('app/%s/task/%s' % (app.short_name, task.id),\r\n follow_redirects=True)\r\n msg = 'You have already participated in this task'\r\n assert msg in res.data, res.data\r\n assert 'Try with another one' in res.data, res.data", "def test_find(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n new_task = self.task_storage.find(self.my_task.key)\n\n self.assertEqual(self.my_task, new_task)", "def test_search_not_found(self):\n self.task_storage.add(self.my_task)\n search_task = task.Task(title='title1', notes='note1')\n task_search_list = self.task_storage.search(search_task)\n\n self.assertEqual(task_search_list, None)", "def get_first_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n task = individual_buffer[-1]\n individual_buffer = np.delete(individual_buffer, -1, 0)\n self.all_buffers[robot_id] = individual_buffer\n return task", "def getFirst(self, t):\n index = self._findFirst(t)\n if index >= 0:\n return self.jobs[index]\n else:\n return None", "def __initTask(self, tkey, tday):\n inited = {}\n for task in TaskLibrary().getFollows(tkey):\n self.log.info(\"recursion rerun task:%s, task_day:%s\" %\n (task.get(\"task_key\"), tday))\n if task.get(\"task_key\") in inited:\n continue\n #mark finished task stats bad, avoid affect check upstream task status.\n self.__upFailure(task.get(\"task_key\"), tday)\n obj = TaskInit(tday, task.get(\"task_key\"), tday)\n obj.initTasks()\n self.__initTask(task.get(\"task_key\"), tday)\n inited[task.get(\"task_key\")] = 1\n\n return True", "def requireTask(self, name):\n t = self.getTask(name)\n if t is None:\n raise Exception(\"Task %s not found in service\" % name)\n return t", "def assertOneTask(self, args, expected_summary, expected_files):\n task_id = self.client.task_trigger(args)\n actual_summary, actual_files = self.client.task_collect(task_id)\n 
self.assertIsNotNone(actual_summary['shards'][0], actual_summary)\n performance_stats = actual_summary['shards'][0].pop('performance_stats')\n self.assertPerformanceStatsEmpty(performance_stats)\n bot_version = self.assertResults(expected_summary, actual_summary)\n self.assertEqual(expected_files, actual_files)\n return bot_version", "def task1(self):\n \n pass", "def check_indicator_files(tasks):\n\n for task in tasks:\n if task[\"status\"]==\"unknown\":\n if os.path.exists(task[\"result\"]):\n task[\"status\"]=\"previously completed\"\n else:\n task[\"status\"]=\"to do\"\n return", "def _validate_task_id(self, task_id):\n\n\t\tif task_id.isdigit() and int(task_id) <= len(self.tasklist.tasks):\n\t\t\treturn task_id\n\t\telse:\n\t\t\tprint('{} is not an existing task!'.format(task_id))\n\t\t\treturn None", "def find_trans_hung(attinfo, taskinfo):\n\n intro = False\n\n reptime = datetime.datetime.now()\n count = 0\n for attd in sorted(attinfo.values(), key=lambda x: x['start_time']):\n atid = attd['task_id']\n if atid in taskinfo:\n found = False\n for tdict in sorted(taskinfo[atid].values(), key=lambda x: x['start_time']):\n if (tdict['name'].startswith('trans_input') and (reptime - tdict['start_time']).total_seconds() > 2*60*60) or (tdict['name'].startswith('trans_output') and (reptime - tdict['start_time']).total_seconds() > 10*60*60):\n found = True\n if not found:\n break\n if not intro:\n h2(\"The following transfers may be hung\\n\")\n intro = True\n print \" %s_r%dp%02d %s %s %i\\n\" % (attd['unitname'], int(attd['reqnum']), int(attd['attnum']), attd['pfwid'], attd['archive_path'], atid)\n for tdict in sorted(taskinfo[atid].values(), key=lambda x: x['start_time']):\n if (tdict['name'].startswith('trans_input') and (reptime - tdict['start_time']).total_seconds() > 2*60*60) or (tdict['name'].startswith('trans_output') and (reptime - tdict['start_time']).total_seconds() > 10*60*60):\n print \" %s %s %s %s %s %s %s\" % (tdict['id'], tdict['name'], tdict['exec_host'], tdict['start_time'], tdict['request_time'], tdict['grant_time'], tdict['release_time'])\n count += 1\n if count == 0:\n print \"\\nNo hung transfers found.\\n\"" ]
[ "0.64242387", "0.62899816", "0.6108197", "0.60125405", "0.60099566", "0.600749", "0.5769892", "0.57686234", "0.5767578", "0.57550174", "0.57092655", "0.57058084", "0.5655717", "0.562933", "0.56269735", "0.5589191", "0.55790395", "0.55574554", "0.5546012", "0.5513527", "0.55095196", "0.550651", "0.550564", "0.5502002", "0.5501484", "0.5493583", "0.5489544", "0.5464258", "0.5463356", "0.5443266" ]
0.6405698
1
Returns last task in the individual robot buffer. Task is deleted.
def get_last_task(self, robot_id):
    individual_buffer = self.all_buffers[robot_id]
    data = individual_buffer[0]
    individual_buffer = np.delete(individual_buffer, 0, 0)
    self.all_buffers[robot_id] = individual_buffer
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_last_task(self):\n return self.get_task_by_index(-1)", "def check_last_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n return individual_buffer[0]", "def get_task(self): \n task = self.buffer[0]\n self.buffer = np.delete(self.buffer, 0, 0)\n return task", "def get_first_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n task = individual_buffer[-1]\n individual_buffer = np.delete(individual_buffer, -1, 0)\n self.all_buffers[robot_id] = individual_buffer\n return task", "def check_first_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n return individual_buffer[-1]", "def get_current_task(self):\n return self.get_current_step().get_last_task()", "def get_previous_task():\n return _tasks[0] if len(_tasks) != 0 else None", "def getLastWorker(self):\n return self.entries[-1]", "def current_task(self):\n try:\n return self.active_item(remove=False)\n except queue.Empty:\n return None", "def get_first_task(self):\n return self.get_first_step().get_last_task()", "def check_task(self): \n return self.buffer[0]", "def get_last_waypoint_command(self):\n return self._last_waypoint_command", "def get_task(self):\n return self.queue.get()", "def getLast(self):\n return self.dataBuffer[len(self.dataBuffer) - 1]", "def last(self):\n return self.deque[-1]", "def get_last_cmd(self):\r\n return self.command_manager.get_last_command()", "def get_last_activity(self):\n last_activities = self.get_last_activities(n=1)\n return last_activities[0]", "def get_last_cmd(self):\n self._data_available = False\n return self._cmd", "def check_task_by_deadline(self, robot_id, deadline): \n individual_buffer = self.all_buffers[robot_id]\n if individual_buffer.shape[0] == 1:\n task = self.check_last_task(robot_id)\n return task\n elif individual_buffer.shape[0] == 0:\n print(\"ERROR: buffer for robot \" + str(robot_id) + \" is empty!\")\n else:\n all_buff_dls = individual_buffer[:, -1]\n task_idxs = np.where(all_buff_dls <= deadline)\n if task_idxs[0].size == 0:\n # no task has a deadline that is earlier\n # goal of active task is returned\n # print(\"no task has earlier deadline\")\n # print(\"return goal of active task\")\n task = individual_buffer[-1]\n # print(\"task:\", task)\n return task\n else:\n relevant_task_idx = task_idxs[0][0]\n task = individual_buffer[relevant_task_idx, :]\n # print(\"task:\", task)\n return task", "def last_command(self):\n return self._last_command", "def get_last_log(conn):\n cursor = conn.cursor()\n cursor.execute('SELECT id, task, start_time, end_time FROM timelogs ORDER BY id DESC LIMIT 1')\n row = cursor.fetchone()\n return row", "def last(self):\n if self.is_empty():\n raise ValueError('Queue is empty!')\n return self.last_node().element().value()", "def last_node(self):\n return self._array[self._size-1]", "def get_last_query(self):\n return self.query_history[-1][0] if self.query_history else None", "def getLast(self):\r\n return self._data[-1]", "def delete_task_by_id(self, robot_id, task_id): \n individual_buffer = self.all_buffers[robot_id]\n task_ids = individual_buffer[:, 0]\n task_idx = np.where(task_ids == task_id)\n if task_idx[0].size == 0:\n print(\"ERROR: Task was already deleted of was never in the buffer.\")\n individual_buffer = np.delete(individual_buffer, task_idx, 0)\n self.all_buffers[robot_id] = individual_buffer", "def last_run(self):\n return self._last_run", "def read_command(self):\n if self._pending_commands:\n return self._pending_commands.popleft()\n\n msg = 
self._serializer.read_msg()\n\n while self._msg_is_taskid(msg):\n self._pending_taskids.append(msg)\n msg = self._serializer.read_msg()\n\n return msg", "def dequeue(self) -> Any:\n if len(self.queue) <= 1:\n task = self.queue[0]\n self.queue = []\n\n return task\n \n last_index = len(self.queue) - 1\n self._swap_tasks(0, last_index)\n\n task = self.queue.pop()\n\n self._bubble_down_task()\n\n return task", "def get_last_measurement(self, param):\n return self.__buffer[param][-1]" ]
[ "0.8099018", "0.8088282", "0.75244987", "0.7495541", "0.7351919", "0.6741725", "0.64481336", "0.62854195", "0.6222197", "0.6192808", "0.6151941", "0.6148577", "0.61086005", "0.6056386", "0.60546494", "0.60426307", "0.60120016", "0.59644395", "0.5953452", "0.59488744", "0.5934622", "0.58941656", "0.58761483", "0.5871603", "0.58563286", "0.58331996", "0.582848", "0.5825246", "0.58111763", "0.57472914" ]
0.83151853
0
Check last task info without deletion.
def check_last_task(self, robot_id):
    individual_buffer = self.all_buffers[robot_id]
    return individual_buffer[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_repeated_task(self, task):\n task_status = task in self.tasks_asked\n\n # append if never asked\n if task_status == False:\n self.tasks_asked.append(task)\n\n return task_status", "def check(self):\r\n boto.log.info('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed))\r\n\r\n if self.hourly and not self.last_executed:\r\n return 0\r\n \r\n if self.daily and not self.last_executed:\r\n if int(self.hour) == self.now.hour:\r\n return 0\r\n else:\r\n return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60\r\n\r\n delta = self.now - self.last_executed\r\n if self.hourly:\r\n if delta.seconds >= 60*60:\r\n return 0\r\n else:\r\n return 60*60 - delta.seconds\r\n else:\r\n if int(self.hour) == self.now.hour:\r\n if delta.days >= 1:\r\n return 0\r\n else:\r\n return 82800 # 23 hours, just to be safe\r\n else:\r\n return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60", "def is_new_task(self):\n return self.date_created >= timezone.now() - datetime.timedelta(days=1)", "def get_last_task(self):\n return self.get_task_by_index(-1)", "def checkRely(self, task):\n if not isinstance(task, dict):\n return False\n keys = task.get(\"rely\")\n #is empty or crontab, explain upstream is true\n if not keys or task.get(\"task_type\") == \"crontab\":\n return True\n\n keyl = []\n for k, v in keys.items():\n keyl.append(k)\n\n date = task.get(\"task_day\")\n if not date:\n date = self.date\n\n mkeys = [{\"task_key\": k} for k in keyl]\n tlist = {}\n for doc in self.mgdb.task_history.find({\"$or\": mkeys, \"task_day\": date}):\n tlist[doc.get(\"task_key\")] = doc\n\n if not tlist or len(tlist) != len(mkeys):\n #when debug, always return true.\n if self.config.get(\"is_debug\"):\n return True\n else:\n return False\n for c, d in tlist.iteritems():\n if d.get(\"status\") != \"finished\":\n return False\n\n return True", "def check_task(self): \n return self.buffer[0]", "def check_tasks(self):\n if self.able:\n notifications = []\n for task in self.tasks.all():\n if task.deadline is not None:\n if self._get_delta(task.deadline) < timezone.localtime():\n self.tasks.remove(task)\n notifications.append(Notification(\n title=Notifications.REMIND,\n info=f'{task.info} {self.__str__().replace(\"before\", \"after\")}'\n ))\n return notifications", "def test_22_get_specific_completed_task_anonymous(self):\r\n\r\n #model.rebuild_db()\r\n with self.flask_app.app_context():\r\n self.create()\r\n app = db.session.query(App).first()\r\n task = db.session.query(Task)\\\r\n .filter(App.id == app.id)\\\r\n .first()\r\n\r\n for i in range(10):\r\n task_run = TaskRun(app_id=app.id, task_id=task.id,\r\n user_ip=\"127.0.0.1\", info={'answer': 1})\r\n db.session.add(task_run)\r\n db.session.commit()\r\n\r\n ntask = Task(id=task.id, state='completed')\r\n\r\n assert ntask not in db.session\r\n db.session.merge(ntask)\r\n db.session.commit()\r\n\r\n res = self.app.get('app/%s/task/%s' % (app.short_name, task.id),\r\n follow_redirects=True)\r\n msg = 'You have already participated in this task'\r\n assert msg in res.data, res.data\r\n assert 'Try with another one' in res.data, res.data", "def is_task_taken(new_task, tasks):\n task_ids = [t.task_data.get('task_id') for t in tasks]\n new_task_id = new_task.get('task_id')\n if new_task_id is None:\n return False\n taken = new_task_id in task_ids\n if taken:\n logger.info('Task {} is already taken'.format(new_task_id))\n return taken", "def need_update(self, task: Union[Task, Path]) -> bool:\n if 
isinstance(task, Path):\n return not task.exists()\n if task.name not in self._database:\n return True\n task_time = self._database.get(task.name)\n return task.need_rerun(task_time)", "def test_delete_task(self):\n check = False\n r = main.List.connection()\n main.List.add(r, \"ToDo\", 1, \"Buy apples\", 2, \"20.05.2015\")\n main.List.delete(r, \"ToDo\", 1)\n task = main.List.pull_from_redis(r, \"ToDo\", False)\n for key in task.iterkeys():\n if key == \"1\":\n check = True\n self.assertFalse(check, \"Deleting task failed.\")", "def is_task_stagnant(task):", "def task_scanned(now_task):", "def check_time(cls, task):\n if not task:\n return\n cur_time = datetime.now().strftime(\"%H:%M %d/%m/%Y\")\n if task.period:\n # if this task has end time and period and end time has passed, we will move time this step period\n # while end time less then current time\n while datetime.strptime(cur_time, \"%H:%M %d/%m/%Y\") > datetime.strptime(task.time_last_copy, \"%H:%M %d/%m/%Y\"):\n task.time_last_copy = cls.date_translation(task.time_last_copy, task.period)\n new_task = Task()\n new_task.name = task.name\n new_task.parent = task.parent\n new_task.host = task.host\n new_task.key = cls.get_free_key_task()\n new_task.type_task = task.type_task\n new_task.admins = task.admins.copy()\n new_task.members = task.members.copy()\n new_task.priority = task.priority\n new_task.status = task.status\n new_task.start_time = task.start_time\n new_task.end_time = task.end_time\n new_task.period = ''\n cls.save_task(new_task)", "def check_first_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n return individual_buffer[-1]", "def _check_task(self, task: Task) -> bool:\n try:\n extents = list(fiemap(task.path, sync=task.frequency > 1))\n except OSError:\n self.logger.error('Error#%d %s', task.id, task.path, exc_info=True)\n return False\n\n if not extents:\n return False\n\n planner = Planner(self.planner_params, extents)\n clusters = planner.result()\n\n if not clusters:\n return False\n\n task.extents = extents\n task.clusters = clusters\n\n return True", "def find_trans_hung(attinfo, taskinfo):\n\n intro = False\n\n reptime = datetime.datetime.now()\n count = 0\n for attd in sorted(attinfo.values(), key=lambda x: x['start_time']):\n atid = attd['task_id']\n if atid in taskinfo:\n found = False\n for tdict in sorted(taskinfo[atid].values(), key=lambda x: x['start_time']):\n if (tdict['name'].startswith('trans_input') and (reptime - tdict['start_time']).total_seconds() > 2*60*60) or (tdict['name'].startswith('trans_output') and (reptime - tdict['start_time']).total_seconds() > 10*60*60):\n found = True\n if not found:\n break\n if not intro:\n h2(\"The following transfers may be hung\\n\")\n intro = True\n print \" %s_r%dp%02d %s %s %i\\n\" % (attd['unitname'], int(attd['reqnum']), int(attd['attnum']), attd['pfwid'], attd['archive_path'], atid)\n for tdict in sorted(taskinfo[atid].values(), key=lambda x: x['start_time']):\n if (tdict['name'].startswith('trans_input') and (reptime - tdict['start_time']).total_seconds() > 2*60*60) or (tdict['name'].startswith('trans_output') and (reptime - tdict['start_time']).total_seconds() > 10*60*60):\n print \" %s %s %s %s %s %s %s\" % (tdict['id'], tdict['name'], tdict['exec_host'], tdict['start_time'], tdict['request_time'], tdict['grant_time'], tdict['release_time'])\n count += 1\n if count == 0:\n print \"\\nNo hung transfers found.\\n\"", "def check_indicator_files(tasks):\n\n for task in tasks:\n if task[\"status\"]==\"unknown\":\n if 
os.path.exists(task[\"result\"]):\n task[\"status\"]=\"previously completed\"\n else:\n task[\"status\"]=\"to do\"\n return", "def has_task_changed(self, task):\n\n reloaded = self.get_task(task.id)\n\n if reloaded is None:\n return True\n\n if reloaded.when != task.when:\n return True\n\n return False", "def autodel(): #i hate this code so much\n today, tasks = datetime.today(), []\n to_remove_indexes = []\n deleted_tasks = 0\n\n with open(todofile, 'r') as todo:\n tasks = todo.readlines()\n for i, task in enumerate(tasks):\n try:\n task = json.loads(task)\n except json.decoder.JSONDecodeError:\n return False, False\n if task['deadline'] == \"None\": #because i converted to string in adding\n continue\n dline = datetime.strptime(task['deadline'], \"%Y-%m-%d %H:%M:%S\")\n if dline < today and not task['no_del']:\n to_remove_indexes.append(i)\n deleted_tasks += 1\n\n for index in to_remove_indexes[::-1]:\n del tasks[index]\n \n with open(todofile, 'w') as todo:\n for task in tasks:\n todo.write(task)\n \n return deleted_tasks, True", "def _add_task_action(self, task):\n if not task.is_alive():\n return", "def __check_if_task_exists(self, server_id):\n if server_id in self.__migrating_tasks.keys():\n return True\n return False", "def is_old_task_missing_healthchecks(task: MarathonTask, app: MarathonApp) -> bool:\n health_checks = app.health_checks\n if not task.health_check_results and health_checks and task.started_at:\n now_utc = datetime.datetime.now(pytz.utc)\n healthcheck_startup_time = datetime.timedelta(\n seconds=health_checks[0].grace_period_seconds\n ) + datetime.timedelta(seconds=health_checks[0].interval_seconds * 5)\n is_task_old = task.started_at + healthcheck_startup_time < now_utc\n return is_task_old\n return False", "def complete(self, verbose=False):\r\n #if self.scheduler_launch_time == INVALID_TIME: print \"scheduler launch\"\r\n #if self.node_monitor_launch_time == INVALID_TIME: print \"nm launch\"\r\n #if self.completion_time == INVALID_TIME: print \"completion\"\r\n if verbose:\r\n if self.node_monitor_get_task_time == INVALID_TIME:\r\n print \"Task %s incomplete: node monitor get_task time missing\" % self.id\r\n elif self.scheduler_launch_time == INVALID_TIME:\r\n print \"Task %s incomplete: Scheduler launch time missing\" % self.id\r\n elif self.node_monitor_launch_time == INVALID_TIME:\r\n print \"Task %s incomplete: Node monitor launch time missing\" % self.id\r\n elif self.completion_time == INVALID_TIME:\r\n print \"Task %s incomplete: Completion time missing\" % self.id\r\n return (self.node_monitor_get_task_time != INVALID_TIME and\r\n self.scheduler_launch_time != INVALID_TIME and\r\n self.node_monitor_launch_time != INVALID_TIME and\r\n self.completion_time != INVALID_TIME)", "def has_task_changed(self, task):\n raise NotImplementedError()", "def _check_completion(self):\n sleep(SONG_OVER_CHECK_TIME) ###Wait 10 seconds\n print(\"entering _check_completion\")\n csv_lastline = self._read_lastline()\n print('system time = {}'.format(strftime(\"%H:%M:%S\")))\n print(\"csv_lastline = \", csv_lastline)\n print(\"self._last_line = \", self._last_line)\n if csv_lastline == self._last_line or csv_lastline == \"Empty File\":\n self._song_over = True\n print(\"Song Over\")\n else:\n self._last_line = csv_lastline\n self._song_over = False\n print(\"Song Over = \", self._song_over)\n print('system time = {}'.format(strftime(\"%H:%M:%S\")))\n print(\"self._last_line = \", self._last_line)\n return", "def is_done_in_the_past(self):\n return any(self.hash == rec['hash'] 
for rec in self.records)", "def check_task_by_deadline(self, robot_id, deadline): \n individual_buffer = self.all_buffers[robot_id]\n if individual_buffer.shape[0] == 1:\n task = self.check_last_task(robot_id)\n return task\n elif individual_buffer.shape[0] == 0:\n print(\"ERROR: buffer for robot \" + str(robot_id) + \" is empty!\")\n else:\n all_buff_dls = individual_buffer[:, -1]\n task_idxs = np.where(all_buff_dls <= deadline)\n if task_idxs[0].size == 0:\n # no task has a deadline that is earlier\n # goal of active task is returned\n # print(\"no task has earlier deadline\")\n # print(\"return goal of active task\")\n task = individual_buffer[-1]\n # print(\"task:\", task)\n return task\n else:\n relevant_task_idx = task_idxs[0][0]\n task = individual_buffer[relevant_task_idx, :]\n # print(\"task:\", task)\n return task", "def status_check(self):\n from coordinator.tasks import cancel_release\n # Check if we hit the time limit\n last_update = self.events.order_by('-created_at')\\\n .first().created_at\n diff = datetime.datetime.utcnow() - last_update.replace(tzinfo=None)\n\n if diff.total_seconds() > settings.RELEASE_TIMEOUT:\n if self.state == 'canceling':\n return\n logger.error(f'canceling release {self.kf_id} for time out.')\n self.cancel()\n self.save()\n django_rq.enqueue(cancel_release, self.kf_id)\n return\n\n # Check if any contained tasks have failed/canceled\n for task in self.tasks.all():\n if task.state in ['failed', 'canceled', 'rejected']:\n if self.state == 'canceling':\n return\n logger.error(f'canceling release: {self.kf_id} task is ' +\n f'{task.state}')\n self.cancel()\n self.save()\n django_rq.enqueue(cancel_release, self.kf_id)\n return", "def complete(self):\r\n if self.scheduler_launch_time == INVALID_TIME:\r\n print \"Missing task scheduler launch time\"\r\n return False\r\n if self.node_monitor_launch_time == INVALID_TIME:\r\n\t print \"Missing task node monitor launch time\"\r\n\t return False\r\n\tif self.completion_time == INVALID_TIME:\r\n\t print \"Missing task completion time\"\r\n\t return False\r\n\tif self.clock_skew == INVALID_TIME_DELTA:\r\n print \"Missing task clock skew\"\r\n\t return False\r\n\treturn True" ]
[ "0.6505494", "0.63198066", "0.6284595", "0.62270695", "0.6204559", "0.616811", "0.6058152", "0.5923084", "0.59140325", "0.58929634", "0.58916634", "0.5885536", "0.58843505", "0.5878879", "0.57961357", "0.5782836", "0.5762973", "0.56852335", "0.5684497", "0.56831384", "0.5679931", "0.567021", "0.5663148", "0.5655246", "0.563529", "0.56317323", "0.5602281", "0.55849147", "0.55797446", "0.55644727" ]
0.6521614
0
Deletes task from robot's buffer by task id.
def delete_task_by_id(self, robot_id, task_id):
    individual_buffer = self.all_buffers[robot_id]
    task_ids = individual_buffer[:, 0]
    task_idx = np.where(task_ids == task_id)
    if task_idx[0].size == 0:
        print("ERROR: Task was already deleted of was never in the buffer.")
    individual_buffer = np.delete(individual_buffer, task_idx, 0)
    self.all_buffers[robot_id] = individual_buffer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def cancel_and_delete_task(task_id: TaskId):", "def delete(self, tube, task_id):\n cmd = tube.cmd('delete')\n args = (task_id,)\n\n return self.tnt.call(cmd, args)", "async def delete(self, task_id):\n args = (task_id,)\n res = await self.conn.call(self.__funcs['delete'], args)\n return self._create_task(res.body)", "def delete_task(self, tid):\n self.task_controller.delete(tid)", "def deleteTask(task):\n return jutdaapi.delete_ticket(task._ticket_id, 'yep, i really want to do this')", "def remove_task(self, id):\n raise NotImplementedError()", "def delete_task(self, task_id):\n\n request = \"\"\"<delete_task task_id=\"%s\" />\"\"\" % (task_id)\n\n self.make_xml_request(request, xml_result=True)", "def delete_task(task_id):\n db.task.delete_many({'_id': ObjectId(task_id)})", "def delete(self, id):\n if delete_task(get_db(), id):\n return \"\", 204\n api.abort(404, f\"Invalid task with id: {id}\")", "def del_task(self, tid: str):\n if tid in self.__tasks:\n task = self.__tasks.pop(tid)\n task.detach()\n self.__dump()", "def delete_task(task_id):\n task = Tasks.query.get(task_id)\n\n if not task:\n return redirect('/')\n\n db.session.delete(task)\n db.session.commit()\n return redirect('/')", "def remove_task(self, task_id):\n with self.lock:\n self.task_map.pop(task_id)", "def remove_task(task_id):\n response_object = {'status': 'success'}\n\n task = models.Task.query.filter(models.Task.id == task_id).first()\n print('removing task with id {}'.format(task.id))\n app = flask.current_app\n absolute_path = os.path.join(\n app.root_path, app.config['DATA_DIRECTORY'], task.result)\n helpers.remove_file(absolute_path)\n\n database.session.delete(task) # pylint: disable=no-member\n database.session.commit() # pylint: disable=no-member\n\n return jsonify(response_object)", "def delete(task_id):\n # Get the task requested from the db into session\n delete_task = TaskList.query.filter(TaskList.task_id == task_id).one_or_none()\n\n # Did we find the task?\n if delete_task is not None: \n db.session.delete(delete_task)\n db.session.commit()\n return make_response(\n \"Task {task_id} deleted\".format(task_id=task_id), 200\n )\n\n # Otherwise, nope, task to delete not found\n else:\n abort(\n 404, \"Task {task_id} not found\".format(task_id=task_id)\n )", "def _delete():\n\tquery = myTaskSession.query(WorkToolkitDB.db.Task)\n\n\tIDStr = myOpt.id\n\tIDs = re.split('\\s*,\\s*', IDStr)\n\n\tif len(IDs) == 0:\n\t\tprint('ERR: no deleting id input')\n\t\treturn 1\n\n\tfor ID in IDs:\n\t\tmyTask = query.get(ID)\n\t\tmyTaskSession.delete(myTask)\n\n\t\n\tmyTaskSession.commit()\n\n\treturn 0", "def delete_task(id):\n cursor = conn.cursor()\n cursor.execute(\"DELETE from tasks where id = %s;\", (id, ))\n conn.commit()\n print(\"Number of records deleted:\", cursor.rowcount)", "def delete(task_name):\n tasks.delete_one({'name': task_name})", "def remove(self, task_id):\n task_obj = self.find(task_id)\n self.queue.remove(task_obj)\n return task_obj", "def remove_task(id):\n\n if not id:\n raise InvalidAPIUsage(\"id is required\")\n\n collection = get_db_collection()\n\n task = get_task_or_404(collection, id)\n\n collection.delete_one({\"_id\": task[\"_id\"]})\n\n response = jsonify()\n response.status_code = 200\n return response", "def delete(id, name):\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/tasks\"}\n\n if name != None:\n click.echo(\"remove task by name is not supported yet\")\n sys.exit(1)\n try:\n dict_resp= estask.Task(kargs).delete_task_by_id(id)\n 
except Exception as e:\n sys.exit(\"failed to delete task: %s\" %str(e))\n\n if 'status' in dict_resp and dict_resp['status'].lower() != 'success':\n sys.exit(\"Fail: %s\"%str(dict_resp))\n\n try:\n click.echo(\"Success: %s\" %(str(dict_resp[\"message\"])))\n except Exception as e:\n sys.exit(\"Fail: %s %s\" %(str(e), str(dict_resp)))", "def delete_task_by_id(task_id):\n result = mongo.db.tasks.delete_one({\"_id\": ObjectId(task_id)})\n return {\"deleted_count\": result.deleted_count}", "def delete(self, task=None):\n if task is None:\n print(\"\\n*** Delete Task ***\\n\\nSelect a task index to delete:\")\n self.show()\n while 1:\n try:\n i = int(input(\"\\nIndex? (0 to cancel): \")) - 1\n if i >= 0:\n print(\"Deleted task \\\"\" + self.tasks.pop(i).name + \"\\\".\")\n self.save()\n elif i == -1:\n print(\"Deletion canceled. \")\n else:\n raise IndexError\n break\n except (ValueError, IndexError) as e:\n print(\"\\n\\\"\" + str(i+1) + \"\\\" is not a valid task index.\", type(e))\n print(\"*\"*19)\n else:\n pass", "def delete_todo_task(self, id):\n if self.is_todo_table_empty() == False:\n if id > self.get_largest_id() or id < 0:\n print(\"\\ntask with id %s does not exist\\n\" % id)\n else:\n self.conn.execute(\"\"\"DELETE FROM todo WHERE id=?;\"\"\", str(id))\n self.decrement_todo_task_ids(id)\n self.conn.commit()\n else:\n print(\"\\nno tasks to delete!\\n\")", "def delete(task_id):\n\n try:\n db_helper.remove_record_by_id(task_id)\n result = {'success': True, 'response': 'Removed task'}\n except:\n result = {'success': False, 'response': 'Something went wrong'}\n\n return jsonify(result)", "def delete(task_id, pfn):\n\n activated = PoolManager.db.query('UPDATE `standalone_deletion_tasks` SET `status` = \\'active\\' WHERE `id` = %s', task_id)\n if activated == 0:\n # task was cancelled\n return -1, None, None, '', '' \n\n return gfal_exec('unlink', (pfn,), deletion_nonerrors)", "def remove(self, task):\n pass", "def delete_task(self):\n tasks = self.session.query(self.Table).order_by(self.Table.deadline).all()\n if tasks:\n print('Chose the number of the task you want to delete:')\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}. {task.deadline.strftime(\"%d %b\")}')\n self.session.query(self.Table).filter(self.Table.id == tasks[int(input())-1].id).delete()\n self.session.commit()\n else:\n print('Nothing to delete!')\n print()", "def deleteTask():\n\tmarkOff(isdelete = 1)", "def delete(self, task_model):\n raise NotImplementedError()", "def remove_task(self, id):\n\n self.log(INFO, \"Removing task {}\".format(id))\n\n collection = self._get_collection()\n result = collection.delete_one({\"_id\": ObjectId(id)})\n\n return bool(result.deleted_count)" ]
[ "0.7585024", "0.7496407", "0.74108887", "0.73956794", "0.7229599", "0.7218389", "0.71924615", "0.71235603", "0.7020509", "0.6904965", "0.6901098", "0.6884655", "0.6880657", "0.68713504", "0.684092", "0.6819857", "0.680542", "0.6800942", "0.6792769", "0.6777958", "0.67621386", "0.67318314", "0.6703109", "0.66205364", "0.66003734", "0.65779334", "0.6545709", "0.65184736", "0.65014994", "0.64883304" ]
0.8852605
0
Prints the contents of all buffers.
def print_all_buffers(self):
    for robot in range(self.no_robots):
        print("Buffer for robot: " + str(robot) + ":")
        print("Task ids: X, Y goal: Z orientation: Deadline:")
        individual_buffer = self.all_buffers[robot]
        if isinstance(individual_buffer, np.float) or individual_buffer == []:
            print("Buffer is empty for robot " + str(robot) + "!")
        else:
            for buff_row in range(individual_buffer.shape[0]):
                task = individual_buffer[buff_row]
                print("%d %f, %f %f %f" % (int(task[0]), task[1], task[2], task[3], task[4]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dump(self):\n# self.partial_in=\"\"\n# for line in sys.stdin: \n# self.partial_in+=sys.stdin.read(1)\n sys.stdout = sys.__stdout__\n os.system('cls')\n for cb in self.buffers.values():\n cb.dump(sys.stdout)\n sys.stdout = self", "def genout(self):\n ch = self.buffer_output()\n while ch:\n print(ch, end='')\n ch = self.buffer_output()", "def _buffer_all(self):\n self._buffer()", "def print_trace_buffer_list():\n trace_buf_map = _get_trace_buffer_map()\n for key in list(trace_buf_map.keys()):\n print(key)", "def print_all(self):\r\n for e in self.channels:\r\n e.print()", "def print_all(f):\n print (f.read())", "def show(self):\r\n \r\n clear() \r\n print \" \" + \"-\" * self.__width + \" \"\r\n \r\n for row in self.__buffer:\r\n rowData = \"\".join(str(i) for i in row)\r\n print \"|\" + rowData + \"|\"\r\n\r\n print \" \" + \"-\" * self.__width + \" \"\r\n self.clearBuffer()", "def print_queue(self):\n for i in self.Obs:\n print(i)", "def flush(self) -> None:\n if not self._buffer:\n # Only flush stdout buffer. (It could be that Python still has\n # something in its buffer. -- We want to be sure to print that in\n # the correct color.)\n self.stdout.flush()\n return\n\n data = \"\".join(self._buffer)\n\n if _DEBUG_RENDER_OUTPUT:\n self.LOG.write((\"%r\" % data).encode(\"utf-8\") + b\"\\n\")\n self.LOG.flush()\n\n # Print characters one by one. This appears to be the best solution\n # in order to avoid traces of vertical lines when the completion\n # menu disappears.\n for b in data:\n written = DWORD()\n\n retval = windll.kernel32.WriteConsoleW(\n self.hconsole, b, 1, byref(written), None\n )\n assert retval != 0\n\n self._buffer = []", "def print_all():\n\n for i, context in enumerate(CONTEXT_GROUP):\n print('Group #{0:d}'.format(i))\n\n charmap = context[\"charmap\"]\n assert charmap is None or isinstance(charmap, dict)\n\n for j, item in enumerate(StringGeneratorPascalStyle(context)):\n text = process_dakuten(get_text(item[1], charmap, None))\n print('{index:04X}:{address:06X}:{data}'.format(\n index=j,\n address=item[0],\n data=text))", "def print_queue(self):\n for value in self.data:\n element = f'| {value} |'\n print(element)", "def print_all(f):\n\tprint f.read()", "def print_all_contents(self, *args, **kwargs):\n while self.has_to_print():\n # Try to print the first element in the queue.\n tar_to_print: str = self.print_queue[0].tar\n self.print_monitor.wait_turn(self, tar_to_print, *args, **kwargs)\n\n # Print all applicable values in the print_queue.\n while self.print_queue and (self.print_queue[0].tar == tar_to_print):\n msg: str = self.print_queue.popleft().msg\n print(msg, end=\"\", flush=True)\n\n # If True, then all of the output for extracting tar_to_print was in the queue.\n # Since we just finished printing all of it, we can move onto the next one.\n if self.is_output_done_enqueuing[tar_to_print]:\n # Let all of the other workers know that this worker is done.\n self.print_monitor.done_dequeuing_output_for_tar(self, tar_to_print)", "def print_out(self):\n for line in self.canvas:\n for char_style in line:\n colors.print_style_char(char_style)\n sys.stdout.write('\\n')", "def print_generator(app_iter):\n print(('*' * 40) + ' BODY')\n for part in app_iter:\n sys.stdout.write(part)\n sys.stdout.flush()\n yield part\n print()", "def write_display(self):\n for i, value in enumerate(self.buffer):\n self.bus.write_byte_data(self.address, i, value)", "def display():\n\n # Check the pipe setup.\n check_pipe_setup(sequence=True, j=True)\n\n # Call the write method with 
sys.stdout as the file.\n write(file=sys.stdout)", "def printme(self):\n sys.stdout.write(self._header)\n for k in range(len(self)):\n sys.stdout.write(self.line(k))", "def printall():\n print listAll()", "def print(self):\r\n for e in self.channels:\r\n print(e)", "def dumpBuf(self):\n pafBuf = \"\"\n for key in self.__keyList:\n pafBuf = pafBuf + self.__keyObjs[key].dumpBuf() + \"\\n\"\n for line in self.__trailer:\n pafBuf = pafBuf + line\n return pafBuf", "def display(self, buffer = None):\n raise NotImplementedError", "def pc_output_buffers_full(self, *args):\n return _spacegrant_swig.message_debug_sptr_pc_output_buffers_full(self, *args)", "def my_print(self):\n if self.__size == 0:\n print(\"\")\n return\n [print(\"\") for x in range(0, self.__position[1])]\n for i in range(0, self.__size):\n [print(\" \", end=\"\") for i in range(0, self.__position[0])]\n [print(\"#\", end=\"\") for j in range(0, self.__size)]\n print(\"\")", "def print_out():\n pass", "def print_all_from_jotter(self):\n try:\n with open(self._filename, 'r') as f:\n line = f.readline()\n while line:\n l = line.rstrip(\"\\n\")\n print(l)\n line = f.readline()\n except OSError:\n pass", "def print_all(self):\n print(\n \"\"\"\\nContents of hash table, with blank lines separating distinct\n linked lists:\"\"\".replace(' ', ''))\n\n for linked_list in self.main_array:\n print(linked_list)\n print('')", "def PrintLogs(self) -> None:\n assert self.Finished()\n for f, stream_name in (\n (self.stdout, \"STDOUT\"), (self.stderr, \"STDERR\")):\n f.flush()\n f.seek(0)\n # Since we collected binary data, we have to write binary data.\n encoded = (stream_name.encode(), str(self).encode())\n sys.stdout.buffer.write(b\"BEGIN %s of test %s\\n\" % encoded)\n sys.stdout.buffer.write(f.read())\n sys.stdout.buffer.write(b\"END %s of test %s\\n\" % encoded)\n sys.stdout.buffer.flush()", "def flush_buffers(self):\n\n for file_dictionary in [self.infiles, self.outfiles]:\n for name, fi in file_dictionary.items():\n fi.flush_buffers()", "def dump(self) -> NoReturn:\n index = self._head\n while index:\n print(index.data, end=\" \")\n index = index.next" ]
[ "0.69893146", "0.6602505", "0.6553516", "0.6457419", "0.6448137", "0.6313259", "0.6308344", "0.6298255", "0.61893225", "0.6152568", "0.60924226", "0.6045671", "0.59879345", "0.59661067", "0.59442204", "0.5944079", "0.5938131", "0.5934754", "0.59188473", "0.5822754", "0.58161813", "0.58125633", "0.58053726", "0.5793337", "0.5785709", "0.5778507", "0.5778044", "0.57691514", "0.5742822", "0.57319325" ]
0.702585
0
If all buffers are empty, True is returned.
def are_buffers_empty(self): i = 0 for i in range(self.no_robots): if self.is_buffer_empty_for_robot(i) is True: i += 1 else: return False if i >= self.no_robots: return True else: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isBufferEmpty(self):\n return self.ecg_buffer.empty()", "def is_buffer_empty(self): \n if self.buffer.shape == (0, 5):\n return True\n else:\n return False", "def is_empty(self):\r\n return self.buff==[]", "def is_full(self):\n return len(self) == self.buffer_size", "def is_full(self):\n return len(self) == self.buffer_size", "def is_full(self):\n return len(self) == self.buffer_size", "def is_full(self):\n return len(self) == self.buffer_size", "def bufferIsFull(self):\n return len(self.buffer) == self.bufferSize", "def is_buffer_empty_for_robot(self, robot_id): \n if isinstance(self.all_buffers[robot_id], float):\n return True\n elif isinstance(self.all_buffers[robot_id], np.ndarray):\n if self.all_buffers[robot_id].size == 0:\n return True\n else:\n return False\n else:\n return False", "def empty(self):\n return self.queue[0].empty() and self.queue[1].empty()", "def isFull(self):\n return self.__size == len(self.__buffer)", "def full(self):\n return len(self.future_buffer) == self.CAPACITY", "def is_empty(self):\n return self.count.addCallback(lambda x: x == 0)", "def empty(self):\r\n return self.queue == []", "def empty(self):\n return self.queue == []", "def empty(self) -> bool:\n return self.data.get_size() == 0", "def is_empty(self):\n return self.queue == []", "def is_full(self):\n return len(self._data) == 1", "def empty(self):\n return len(self.queue) == 0", "def empty(self):\n return 0 >= len(self.__data)", "def empty(self):\n return self.size == 0", "def empty(self):\n return self.size == 0", "def empty(self):\n return not self.queue", "def isFull(self):\n if len(self.batch) == self.__batch_size:\n return True\n return False", "def empty(self):\n return self.vcount == 0", "def is_empty(self):\n\t\treturn (self._size == 0)", "def has_any(self) -> bool:\n return any(\n BlockAccessor.for_block(b).num_rows() > 0 for b in self._buffer)", "def empty(self):\n return self._size is 0", "def is_empty(self):\n return self._size == 0", "def empty(self) -> bool:\n return self.data.empty()" ]
[ "0.8016782", "0.8015151", "0.7889439", "0.7832754", "0.7832754", "0.7832754", "0.7832754", "0.7737354", "0.756818", "0.7445005", "0.7407083", "0.73865944", "0.72423756", "0.7240853", "0.7211857", "0.7199953", "0.7176888", "0.7172532", "0.7171493", "0.71576613", "0.71323526", "0.71323526", "0.7132052", "0.7128452", "0.712198", "0.7108913", "0.71002847", "0.7097503", "0.70921904", "0.70807874" ]
0.81440246
0
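Editor's note (illustrative only, not a dataset row): the positive document above can be condensed to a single all() check; `manager` below is a hypothetical stand-in for the buffer-holding class and is assumed to expose the same no_robots attribute and is_buffer_empty_for_robot() method as the quoted code.

    def are_buffers_empty(manager):
        # True only if every robot's individual buffer is empty
        return all(manager.is_buffer_empty_for_robot(i) for i in range(manager.no_robots))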
Returns a task from the robot's individual buffer. The returned task has a deadline that ends before the given deadline. If there is no such task, the current active task is returned. The task is not deleted from the buffer.
def check_task_by_deadline(self, robot_id, deadline): individual_buffer = self.all_buffers[robot_id] if individual_buffer.shape[0] == 1: task = self.check_last_task(robot_id) return task elif individual_buffer.shape[0] == 0: print("ERROR: buffer for robot " + str(robot_id) + " is empty!") else: all_buff_dls = individual_buffer[:, -1] task_idxs = np.where(all_buff_dls <= deadline) if task_idxs[0].size == 0: # no task has a deadline that is earlier # goal of active task is returned # print("no task has earlier deadline") # print("return goal of active task") task = individual_buffer[-1] # print("task:", task) return task else: relevant_task_idx = task_idxs[0][0] task = individual_buffer[relevant_task_idx, :] # print("task:", task) return task
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_task(self): \n task = self.buffer[0]\n self.buffer = np.delete(self.buffer, 0, 0)\n return task", "def get_task(self):\n return self.queue.get()", "def get_first_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n task = individual_buffer[-1]\n individual_buffer = np.delete(individual_buffer, -1, 0)\n self.all_buffers[robot_id] = individual_buffer\n return task", "def current_task(self):\n try:\n return self.active_item(remove=False)\n except queue.Empty:\n return None", "def check_last_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n return individual_buffer[0]", "def next(self) -> Optional[Task]:\n if len(self._pending_tasks) == 0:\n return None\n else:\n task = self._pending_tasks.pop(0)\n task.status = 'Running'\n self._dump_tasks_info()\n return task", "def check_task(self): \n return self.buffer[0]", "def get(self, block=True, timeout=None):\n if not self.connected:\n raise QueueNotConnectedError(\"Queue is not Connected\")\n\n if block:\n payload = self.__db.brpop(self._key, timeout=timeout)\n else:\n payload = self.__db.rpop(self._key)\n\n if not payload:\n return None\n\n task = self.task_class(payload[1])\n\n # if task was marked as unique then\n # remove the unique_hash from lock table\n if task.unique:\n self.__db.srem(self._lock_key, task.unique_hash())\n\n return task", "def get_last_task(self):\n return self.get_task_by_index(-1)", "def check_first_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n return individual_buffer[-1]", "def get_last_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n data = individual_buffer[0]\n individual_buffer = np.delete(individual_buffer, 0, 0)\n self.all_buffers[robot_id] = individual_buffer\n return data", "def get_previous_task():\n return _tasks[0] if len(_tasks) != 0 else None", "def get_current_task(self):\n return self.get_current_step().get_last_task()", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self) -> base_model.BaseTask:\n return self._task", "def get_current_task(self) -> Tuple[TaskId, any]:\n with self.local_redis.pipeline() as pipe:\n while True:\n try:\n # optimistic locking: https://realpython.com/python-redis/#using-redis-py-redis-in-python\n pipe.watch(TASK_ID_KEY)\n task_id: TaskId = deserialize(retry_get(pipe, TASK_ID_KEY))\n if task_id == self.cached_task_id:\n # print(f'[worker] Returning cached task {task_id}')\n break\n pipe.multi()\n pipe.get(TASK_DATA_KEY)\n # print(f'[worker] Getting new task {task_id}. 
Cached task was {self.cached_task_id}')\n self.cached_task_id, self.cached_task_data = task_id, deserialize(pipe.execute()[0])\n break\n except redis.WatchError:\n continue\n return self.cached_task_id, self.cached_task_data", "def first(self) -> Task:\n return self._tasks[0]", "def _get_current_task():\r\n return current_task", "def _get_current_task():\r\n return current_task", "def get(self, task_id=None):\n if task_id:\n item = self.find(task_id)\n self.queue.remove(item)\n else:\n item = self.queue.get()\n return item", "def dequeue(self) -> Any:\n if len(self.queue) <= 1:\n task = self.queue[0]\n self.queue = []\n\n return task\n \n last_index = len(self.queue) - 1\n self._swap_tasks(0, last_index)\n\n task = self.queue.pop()\n\n self._bubble_down_task()\n\n return task", "def get_input_task(self, name='0'):\n port = self.get_input(name).other\n if port is None:\n return None\n return port.task", "def next_task(self, t = 0):\r\n \r\n if self.idle_playlist_counter >= len(self.idle_playlist)-1:\r\n self.idle_playlist_counter = 0\r\n if len(self.playlist) ==0:\r\n task = self.idle_playlist[self.idle_playlist_counter]\r\n self.idle_playlist_counter +=1\r\n\r\n if self.playlist_counter < len(self.playlist):\r\n while self.playlist[self.playlist_counter][0] - t < 0:\r\n self.playlist_counter +=1\r\n if abs(self.playlist[self.playlist_counter][0] - t ) < 2.0*1.1:\r\n task = self.playlist[self.playlist_counter]\r\n debug(task)\r\n self.playlist_counter +=1\r\n else:\r\n task = self.idle_playlist[self.idle_playlist_counter]\r\n self.idle_playlist_counter +=1\r\n else:\r\n task = self.idle_playlist[self.idle_playlist_counter]\r\n self.idle_playlist_counter +=1\r\n\r\n return task", "def __get_task_queue(self, task):\n state = task.execution.state\n if Run.State.NEW == state:\n return self._new\n elif state in [Run.State.SUBMITTED,\n Run.State.RUNNING,\n Run.State.UNKNOWN]:\n return self._in_flight\n elif Run.State.STOPPED == state:\n return self._stopped\n elif Run.State.TERMINATING == state:\n return self._terminating\n elif Run.State.TERMINATED == state:\n return self._terminated\n else:\n raise AssertionError(\n \"Unhandled state '%s' in gc3libs.core.Engine.\" % state)", "def current_task() -> Task:\n try:\n return _current_task.get()\n except LookupError:\n raise RuntimeError(\"Can only be called from an async function.\")", "def task(self, name):\n with self.db_lock:\n return self.rcon.hget(self.task_key, name)" ]
[ "0.6866474", "0.6741945", "0.63914305", "0.6383624", "0.6366651", "0.6301874", "0.6252061", "0.62437046", "0.616717", "0.6128929", "0.59898907", "0.59782624", "0.58996314", "0.5898423", "0.5898423", "0.5898423", "0.5898423", "0.5898423", "0.5896144", "0.5887958", "0.5851993", "0.58212656", "0.58212656", "0.57665706", "0.5762172", "0.57532156", "0.57239676", "0.56922936", "0.56662613", "0.5649731" ]
0.75464046
0
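Editor's note (illustrative only, not a dataset row): a minimal sketch of the deadline-selection idea in the positive document above, assuming tasks are stored one per NumPy row with the deadline in the last column, as in the quoted buffer class.

    import numpy as np

    def first_task_before_deadline(buffer_rows, deadline):
        # buffer_rows: 2-D array, one task per row, deadline stored in the last column
        deadlines = buffer_rows[:, -1]
        hits = np.where(deadlines <= deadline)[0]
        if hits.size == 0:
            # no task ends early enough -> fall back to the active task (last row)
            return buffer_rows[-1]
        return buffer_rows[hits[0], :]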
Add task to buffer.
def add_task(self, task): self.buffer = np.vstack((self.buffer, task)) return self.buffer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, task):\n pass", "def register(self, task: Task) -> None:\n self._buffer.add(task.future)\n task.add_ready_callback(self._add)", "def add_task(self, task):\n self.tasks.append(task)", "def add_task(self, task):\n self.tasks.append(task)", "def add_task(self, task):\n self.tasks.append(task)", "def add_task(self, task):\n self.tasks.append(task)", "def add(self, task):\n raise NotImplementedError()", "def add_task(self, task):\n assert isinstance(task, RenderTask), \"RenderQueue.add_task only accepts RenderTasks\"\n self.tasks.append(task)", "def add_task(self, task):\n self._tasks.append(task)", "def add_task(self, task):\n raise NotImplementedError()", "def add_task(self, robot_id, data):\n if isinstance(self.all_buffers[robot_id], np.float): # mesto je prazno\n individual_buffer = np.vstack((self.buffer_array, data))\n self.all_buffers[robot_id] = individual_buffer\n else:\n individual_buffer = self.all_buffers[robot_id]\n individual_buffer = np.vstack((individual_buffer, data))\n self.all_buffers[robot_id] = individual_buffer", "def add(self, task):\n self._count += 1\n path = os.path.join(self._root, \"%d_%s\" % (self._count, task.guid))\n j.sal.fs.writeFile(path, self._serialize_task(task))", "def push(self, task):\n self.stack.append(task)", "def addTask(self, task):\n self.tasklist.append(task)", "def put(self, task):\n self.put_idx += 1\n self.task_queue.put(task)", "def addTask(self, task, priority=0):\n self.queue.put((priority, task))", "def add_task(self, func, *args, **kwargs):\n self.queue.put((func, args, kwargs))", "def take_task(self, task):\n self._tasks_in_process.append(task)", "def apply_task(self, task):\n self.tasks.add(task)", "def _add(self, task: Task) -> None:\n with self._cond:\n if task not in self._task_set:\n self._task_set.add(task)\n self._tasks.append(task)\n self._cond.notify()", "def put(self, task):\n self.put_id += 1\n self.task_queue.put(task)", "def addTask(self, task):\n # Try to start task first. If it fails then we don't need to\n # undo adding it to taskset\n task.initialize(self)\n task.start()\n\n with self._lock_c:\n self.numtasks += 1\n self.taskset.append(task)", "def add(self, task):\n queue = self.__get_task_queue(task)\n if _contained(task, queue):\n # no-op if the task has already been added\n return\n # add task to internal data structures\n queue.append(task)\n if self._store:\n try:\n self._tasks_by_id[task.persistent_id] = task\n except AttributeError:\n gc3libs.log.warning(\"Task %s has no persistent ID!\", task)\n task.attach(self)\n self.__update_task_counts(task, task.execution.state, +1)", "def put(self, task):\n self.queue.put(task, task.priority)", "def add_task(self, task: \"Task\") -> None: # noqa: F821\n self.tasks[task.code] = task\n task._process_definition = self", "def add_task(self, func, *args, **kargs):\r\n self.tasks.put((func, args, kargs))", "def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))", "def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))", "def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))", "def add_task(self, func, *args, **kargs):\n self.tasks.put((func, args, kargs))" ]
[ "0.78761667", "0.7841464", "0.7740462", "0.7740462", "0.7740462", "0.7740462", "0.76750946", "0.75938886", "0.7581593", "0.7580112", "0.7267468", "0.7236128", "0.7230977", "0.71906996", "0.7155861", "0.71452564", "0.71336526", "0.7082641", "0.7055979", "0.7027959", "0.69195014", "0.6899716", "0.68098915", "0.6715116", "0.66984093", "0.66820544", "0.66765976", "0.66765976", "0.66765976", "0.66765976" ]
0.8252278
0
Returns first task from buffer. Task is deleted.
def get_task(self): task = self.buffer[0] self.buffer = np.delete(self.buffer, 0, 0) return task
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_first_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n task = individual_buffer[-1]\n individual_buffer = np.delete(individual_buffer, -1, 0)\n self.all_buffers[robot_id] = individual_buffer\n return task", "def first(self) -> Task:\n return self._tasks[0]", "def check_first_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n return individual_buffer[-1]", "def next(self) -> Optional[Task]:\n if len(self._pending_tasks) == 0:\n return None\n else:\n task = self._pending_tasks.pop(0)\n task.status = 'Running'\n self._dump_tasks_info()\n return task", "def check_task(self): \n return self.buffer[0]", "def popFirst(self, t):\n index = self._findFirst(t)\n if index >= 0:\n return self.jobs.pop(index)", "def get_previous_task():\n return _tasks[0] if len(_tasks) != 0 else None", "def get_last_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n data = individual_buffer[0]\n individual_buffer = np.delete(individual_buffer, 0, 0)\n self.all_buffers[robot_id] = individual_buffer\n return data", "def get_one_task_by_name(self, name: str) -> \"Task\": # noqa: F821\n tasks = self.get_tasks_by_name(name)\n if not tasks:\n raise PyDSTaskNoFoundException(f\"Can not find task with name {name}.\")\n return tasks.pop()", "def peek(self):\r\n return self.queue[0]", "def peek(self):\r\n return self.queue[0]", "def dequeue(self) -> Any:\n if len(self.queue) <= 1:\n task = self.queue[0]\n self.queue = []\n\n return task\n \n last_index = len(self.queue) - 1\n self._swap_tasks(0, last_index)\n\n task = self.queue.pop()\n\n self._bubble_down_task()\n\n return task", "def check_last_task(self, robot_id): \n individual_buffer = self.all_buffers[robot_id]\n return individual_buffer[0]", "def get_task(self):\n return self.queue.get()", "def peek(self):\n return self.queue[0]", "def get_first_task(self):\n return self.get_first_step().get_last_task()", "def getFirst(self, t):\n index = self._findFirst(t)\n if index >= 0:\n return self.jobs[index]\n else:\n return None", "def get(self, task_id=None):\n if task_id:\n item = self.find(task_id)\n self.queue.remove(item)\n else:\n item = self.queue.get()\n return item", "def peek(self):\n\n return self._queue[0]", "def peek(self):\n return self.the_queue[0]", "def current_task(self):\n try:\n return self.active_item(remove=False)\n except queue.Empty:\n return None", "def get_last_task(self):\n return self.get_task_by_index(-1)", "def peek(self):\r\n if self.size():\r\n return self.queue[0]\r\n else:\r\n return None", "def dequeue(self):\n\n # del self._queue[0]\n return self._queue.pop(0)", "def dequeue(self):\r\n return self.queue.pop(0)", "def dequeue(self):\n return self.queue.pop(0)", "def get(self, block=True, timeout=None):\n if not self.connected:\n raise QueueNotConnectedError(\"Queue is not Connected\")\n\n if block:\n payload = self.__db.brpop(self._key, timeout=timeout)\n else:\n payload = self.__db.rpop(self._key)\n\n if not payload:\n return None\n\n task = self.task_class(payload[1])\n\n # if task was marked as unique then\n # remove the unique_hash from lock table\n if task.unique:\n self.__db.srem(self._lock_key, task.unique_hash())\n\n return task", "def peek(self): # total: O(1)\n return self._queue[self._start] #O(1)", "def pop(self):\n return self.queue.pop(0)", "def pop(self):\r\n return self.queue.pop(0)" ]
[ "0.8032802", "0.75806236", "0.72843575", "0.6869653", "0.66180354", "0.6592223", "0.6541647", "0.6520322", "0.6501034", "0.6446639", "0.6446639", "0.64158726", "0.63821", "0.6381821", "0.63805944", "0.63433826", "0.6270199", "0.6243697", "0.62245923", "0.62143934", "0.61987156", "0.61960304", "0.6173798", "0.61077625", "0.60888565", "0.6071542", "0.60577804", "0.60509676", "0.5994061", "0.597706" ]
0.77799547
1
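Editor's note (illustrative only, not a dataset row): the last two positive documents (add_task / get_task) follow a simple vstack/delete FIFO pattern; a self-contained sketch, with the 5-column row shape taken as an assumption from the quoted code.

    import numpy as np

    class TaskBuffer:
        def __init__(self, n_cols=5):
            self.buffer = np.empty((0, n_cols))            # no tasks yet

        def add_task(self, task):
            self.buffer = np.vstack((self.buffer, task))   # append task as a new row

        def get_task(self):
            task = self.buffer[0]                          # oldest task first (FIFO)
            self.buffer = np.delete(self.buffer, 0, 0)     # remove it from the buffer
            return task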
Creates a dictionary with ROS publisher objects used for publishing goals.
def create_set_goal_pubs(self): pubs = {} for i in range(self.no_robots): pub_topic = '/robot_' + str(i) + '/move_base/goal' # pub_name = 'send_goal_robot_' + str(i) # key pub_name = str(i) # key pubs[pub_name] = rospy.Publisher(pub_topic, MoveBaseActionGoal, queue_size=10) # time.sleep(1.0) return pubs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_hand_publishers(self):\n hand_pub = {}\n\n for joint in [\"FFJ0\", \"FFJ3\", \"FFJ4\",\n \"MFJ0\", \"MFJ3\", \"MFJ4\",\n \"RFJ0\", \"RFJ3\", \"RFJ4\",\n \"LFJ0\", \"LFJ3\", \"LFJ4\", \"LFJ5\",\n \"THJ1\", \"THJ2\", \"THJ3\", \"THJ4\", \"THJ5\",\n \"WRJ1\", \"WRJ2\" ]:\n hand_pub[joint] = rospy.Publisher('/sh_'+joint.lower()+'_mixed_position_velocity_controller/command', Float64, \n latch = True)\n\n return hand_pub", "def make_doi_publishers(publisher: Publisher) -> List[Dict]:\n\n return [\n {\n \"identifier\": publisher.name,\n \"types\": [\"Publisher\"],\n \"name\": publisher.name,\n \"country\": None,\n \"country_code\": None,\n \"country_code_2\": None,\n \"region\": None,\n \"subregion\": None,\n \"coordinates\": None,\n \"members\": [],\n }\n ]", "def _create_pub(name, rostype, *args, **kwargs):\n # counting publisher instance per topic name\n if name in TopicBack.pub_instance_count.keys():\n TopicBack.pub_instance_count[name] += 1\n else:\n TopicBack.pub_instance_count[name] = 1\n\n return rospy.Publisher(name, rostype, *args, **kwargs)", "def _register_or_update_all_publishers(self, config):\n # type: (dict) -> None\n self._pub_audio = ros_utils.create_or_update_publisher(self._config, config, self._pub_audio, 'ROS_audio_msg_topic', Audio, queue_size=10)\n self._pub_balls = ros_utils.create_or_update_publisher(self._config, config, self._pub_balls, 'ROS_ball_msg_topic', BallInImageArray)\n self._pub_lines = ros_utils.create_or_update_publisher(self._config, config, self._pub_lines, 'ROS_line_msg_topic', LineInformationInImage, queue_size=5)\n self._pub_line_mask = ros_utils.create_or_update_publisher(self._config, config, self._pub_line_mask, 'ROS_line_mask_msg_topic', Image)\n self._pub_obstacle = ros_utils.create_or_update_publisher(self._config, config, self._pub_obstacle, 'ROS_obstacle_msg_topic', ObstacleInImageArray, queue_size=3)\n self._pub_goal_posts = ros_utils.create_or_update_publisher(self._config, config, self._pub_goal_posts, 'ROS_goal_posts_msg_topic', GoalPostInImageArray, queue_size=3)\n #self._pub_ball_fcnn = ros_utils.create_or_update_publisher(self._config, config, self._pub_ball_fcnn, 'ROS_fcnn_img_msg_topic', RegionOfInterestWithImage)\n self._pub_debug_image = ros_utils.create_or_update_publisher(self._config, config, self._pub_debug_image, 'ROS_debug_image_msg_topic', Image)\n self._pub_convex_field_boundary = ros_utils.create_or_update_publisher(self._config, config, self._pub_convex_field_boundary, 'ROS_field_boundary_msg_topic', PolygonStamped)\n #self._pub_debug_fcnn_image = ros_utils.create_or_update_publisher(self._config, config, self._pub_debug_fcnn_image, 'ROS_debug_fcnn_image_msg_topic', Image)\n self._pub_white_mask_image = ros_utils.create_or_update_publisher(self._config, config, self._pub_white_mask_image, 'ROS_white_HSV_mask_image_msg_topic', Image)\n self._pub_red_mask_image = ros_utils.create_or_update_publisher(self._config, config, self._pub_red_mask_image, 'ROS_red_HSV_mask_image_msg_topic', Image)\n self._pub_blue_mask_image = ros_utils.create_or_update_publisher(self._config, config, self._pub_blue_mask_image, 'ROS_blue_HSV_mask_image_msg_topic', Image)\n self._pub_field_mask_image = ros_utils.create_or_update_publisher(self._config, config, self._pub_field_mask_image, 'ROS_field_mask_image_msg_topic', Image)\n self._pub_dynamic_color_lookup_table_field_mask_image = ros_utils.create_or_update_publisher(self._config, config, self._pub_dynamic_color_lookup_table_field_mask_image, 
'ROS_dynamic_color_lookup_table_field_mask_image_msg_topic', Image)", "def get_publishers(self):", "def setupPublishers(self):\n\n self.rviz_marker_publisher = rospy.Publisher(\"/spartan_grasp/visualization_marker\",\n visualization_msgs.msg.Marker, queue_size=1)\n\n self.rviz_marker_array_publisher = rospy.Publisher(\"/grasp_supervisor/visualization_marker_array\",\n visualization_msgs.msg.MarkerArray, queue_size=1)\n\n self.grasp_pointcloud_publisher = rospy.Publisher(\"/grasp_supervisor/points\", sensor_msgs.msg.PointCloud2,\n queue_size=1)", "def process_custom_publishers(send_on_online=None, send_on_offline=None):\n def process(senders):\n pub_senders = []\n for sender in senders:\n msg_type = helpers.get_message_type_from_string(sender[\"message_type\"])\n publisher = rospy.Publisher(sender[\"topic\"], msg_type, queue_size=5)\n msg = msg_type()\n setattr(msg, sender[\"slot\"], sender[\"value\"])\n pub_senders.append(dict(publisher=publisher, message=msg))\n return pub_senders\n\n online_pubs = process(send_on_online)\n offline_pubs = process(send_on_offline)\n return online_pubs, offline_pubs", "def test_03_publisher(self):\n\n robj = publisher.Repository(\n collection_type=publisher.REPO_CTYPE_SUPPLEMENTAL,\n description=\"Provides only the best BobCat packages!\",\n legal_uris=[\n \"http://legal1.example.com\",\n \"http://legal2.example.com\"\n ],\n mirrors=[\n \"http://mirror1.example.com/\",\n \"http://mirror2.example.com/\"\n ],\n name=\"First Repository\",\n origins=[\n \"http://origin1.example.com/\",\n \"http://origin2.example.com/\"\n ],\n refresh_seconds=70000,\n registered=True,\n registration_uri=\"http://register.example.com/\",\n related_uris=[\n \"http://related1.example.com\",\n \"http://related2.example.com\"\n ],\n sort_policy=publisher.URI_SORT_PRIORITY,\n )\n\n r2obj = copy.copy(robj)\n r2obj.origins = [\"http://origin3.example.com\"]\n r2obj.name = \"Second Repository\"\n r2obj.reset_mirrors()\n\n pprops = {\n \"alias\": \"cat\",\n \"client_uuid\": \"2c6a8ff8-20e5-11de-a818-001fd0979039\",\n \"disabled\": True,\n \"meta_root\": os.path.join(self.test_root, \"bobcat\"),\n \"repository\": r2obj,\n }\n\n # Check that all properties can be set at construction time.\n pobj = publisher.Publisher(\"bobcat\", **pprops)\n\n # Verify that all properties provided at construction time were\n # set as expected.\n for p in pprops:\n self.assertEqual(pprops[p], getattr(pobj, p))\n\n # Verify that a copy matches its original.\n cpobj = copy.copy(pobj)\n for p in pprops:\n if p == \"repository\":\n # These attributes can't be directly compared.\n continue\n self.assertEqual(getattr(pobj, p), getattr(cpobj, p))\n\n # Assume that if the origins match, we have the right selected\n # repository.\n self.assertEqual(cpobj.repository.origins,\n r2obj.origins)\n\n # Compare the source_object_id of the copied repository object\n # with the id of the source repository object.\n self.assertEqual(id(pobj), cpobj._source_object_id)\n\n cpobj = None\n\n # Verify that individual properties can be set.\n pobj = publisher.Publisher(\"tomcat\")\n pobj.prefix = \"bobcat\"\n self.assertEqual(pobj.prefix, \"bobcat\")\n\n for p in pprops:\n if p == \"repositories\":\n for r in pprops[p]:\n pobj.add_repository(r)\n else:\n setattr(pobj, p, pprops[p])\n self.assertEqual(getattr(pobj, p), pprops[p])\n\n pobj.repository = robj\n self.assertEqual(pobj.repository, robj)\n\n # An invalid value shouldn't be allowed.\n self.assertRaises(api_errors.UnknownRepository, setattr,\n pobj, \"repository\", -1)\n\n 
pobj.reset_client_uuid()\n self.assertNotEqual(pobj.client_uuid, None)\n self.assertNotEqual(pobj.client_uuid, pprops[\"client_uuid\"])\n\n pobj.create_meta_root()\n self.assertTrue(os.path.exists(pobj.meta_root))\n\n pobj.remove_meta_root()\n self.assertFalse(os.path.exists(pobj.meta_root))", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k == 'POSSIBLE_METHODS':\n continue\n if k == 'keysamplers':\n properties[k] = [i.to_dict() for i in self.__dict__[k] if hasattr(i,'to_dict')]\n elif k in {'pooler'}:\n properties[k] = self.__dict__[k].to_dict()\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def make_publishers(\n *,\n n_publishers: int,\n doi_prefix: int,\n faker: Faker,\n min_journals_per_publisher: int = 1,\n max_journals_per_publisher: int = 3,\n) -> PublisherList:\n\n publishers = []\n for i, _ in enumerate(range(n_publishers)):\n n_journals_ = random.randint(min_journals_per_publisher, max_journals_per_publisher)\n journals_ = []\n for _ in range(n_journals_):\n journals_.append(Journal(str(uuid.uuid4()), name=faker.company(), license=random.choice(LICENSES)))\n\n publishers.append(Publisher(i, name=faker.company(), doi_prefix=doi_prefix, journals=journals_))\n doi_prefix += 1\n\n return publishers", "def serialize(self):\n return {\n 'title': self.title,\n 'first_author': self.first_author,\n 'second_author': self.second_author,\n 'publisher': self.publisher,\n 'year_of_publication': self.year_of_publication\n }", "def to_dict(self) -> dict:\n return {\n \"name\": self.package_name,\n \"mpy_version\": self.mpy_version,\n \"publish\": self._publish,\n \"pkg_version\": str(self.pkg_version),\n \"path\": self.package_path.name, # only store the folder name , as it is relative to the publish folder\n \"stub_sources\": [(name, Path(path).as_posix()) for (name, path) in self.stub_sources],\n \"description\": self.description,\n \"hash\": self.hash,\n \"stub_hash\": self.stub_hash,\n }", "def publisher(self, iTag, msgType, addr):\r\n return ROSPublisher(self, iTag, msgType, addr)", "def get_subscriptions(self):\n return {}", "def to_publishable_dict(self):\n res_dict = asdict(self)\n\n if bool(res_dict.get(\"machine_info\")) != bool(not res_dict[\"cluster_info\"]):\n warnings.warn(\n \"Run not publishable! 
`machine_info` xor `cluster_info` must be specified\"\n )\n\n validate_or_remove_github_commit_key(res_dict, strict=True)\n\n for attr in [\n \"name\",\n \"machine_info\",\n \"cluster_info\",\n \"finished_timestamp\",\n \"error_type\",\n \"error_info\",\n ]:\n if not res_dict[attr]:\n res_dict.pop(attr)\n\n return res_dict", "def publisher(self):\n return self.get(\"publisher\")", "def create_publisher():\n pub = rospy.Publisher(\"/number\", Int64, queue_size=10)\n rospy.set_param(\"/number_publish_frequency\", 2)\n pub_freq = rospy.get_param(\"/number_publish_frequency\")\n rate = rospy.Rate(pub_freq)\n \n rospy.set_param(\"/number_to_publish\",3)\n number = rospy.get_param(\"/number_to_publish\")\n rospy.set_param(\"/try_param\", \"what's up\")\n \n while not rospy.is_shutdown():\n msg = Int64()\n msg.data = number\n pub.publish(msg)\n rate.sleep()", "def build_response_dict(self):\n return {\n \"release\": self.settings['bookstore'][\"release\"],\n \"features\": self.settings['bookstore'][\"features\"],\n }", "def export(self) -> Dict[str, Any]:\n return {\n \"name\": self.name,\n \"channels\": [channel for channel in self.channels],\n \"packages\": self.packages.export(),\n \"logs\": [log for log in self.logs],\n \"actions\": [action for action in self.actions],\n \"debug\": [debug for debug in self.debug],\n }", "def __init__(self, *args, **kwargs):\n super(Publisher, self).__init__(*args, **kwargs)\n self._original = {\n 'status': self.status\n }", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n return {\n \"topic\": [\n self.from_text(),\n ],\n }", "def to_xml_dict(self):\n self._check_required_fields()\n\n root = odict[\n \"r:import\": odict[\n \"@xmlns:r\": \"http://resolver.nkp.cz/v3/\",\n \"r:monograph\": odict[\n \"r:titleInfo\": odict[\n \"r:title\": self.title,\n ],\n ],\n ]\n ]\n mono_root = root[\"r:import\"][\"r:monograph\"]\n\n # subtitle\n MonographComposer._assign_pattern(\n mono_root[\"r:titleInfo\"],\n \"r:subTitle\",\n self.subtitle\n )\n\n # handle ccnb, isbn, uuid\n self._add_identifier(mono_root, \"ccnb\")\n self._add_identifier(mono_root, \"isbn\")\n self._add_identifier(mono_root, \"other_id\", out=\"otherId\")\n\n # add form of the book\n MonographComposer._assign_pattern(\n mono_root,\n \"r:documentType\",\n self.document_type\n )\n\n mono_root[\"r:digitalBorn\"] = \"true\" if self.digital_born else \"false\"\n\n if self.author:\n mono_root[\"r:primaryOriginator\"] = odict[\n \"@type\": \"AUTHOR\",\n \"#text\": self.author\n ]\n\n if any([self.place, self.publisher, self.year]):\n publ = odict()\n\n MonographComposer._assign_pattern(\n publ,\n \"r:publisher\",\n self.publisher\n )\n MonographComposer._assign_pattern(publ, \"r:place\", self.place)\n MonographComposer._assign_pattern(publ, \"r:year\", self.year)\n\n mono_root[\"r:publication\"] = publ\n\n if self.format:\n format_dict = MonographComposer._create_path(\n root,\n odict,\n [\n \"r:import\",\n \"r:digitalDocument\",\n \"r:technicalMetadata\",\n \"r:format\",\n ]\n )\n\n format_dict[\"#text\"] = self.format\n\n return root", "def start_robot_publisher(self):\n print('Robot Pub Node started')\n\n #if GlobalSettings.USE_TEGA:\n msg_type = TegaAction\n msg_topic = ROSCORE_TO_TEGA_TOPIC\n #else:\n # msg_type = JiboAction\n # msg_topic = ROSCORE_TO_JIBO_TOPIC\n\n self.robot_commander = rospy.Publisher(msg_topic, msg_type, queue_size=10)\n rate = rospy.Rate(10) # spin at 10 Hz\n rate.sleep() # sleep to wait for subscribers", "def sample_publisher(name='testname', phone='09100000000', 
address='testaddress'):\n return Publisher.objects.create(name=name, phone=phone, address=address)", "def publish(self, kpi_dict):\n pass", "def publisher(self, publisher):\r\n return publishers.Publisher(self, publisher)", "def get_publisher(self):\n return self.publisher", "def Publisher(self, default=None):\n return self.data.get('publisher', default)", "def get_pardict(psrs, datareleases):\n pardict = {}\n for psr, dataset in zip(psrs, datareleases):\n pardict[psr.name] = {}\n pardict[psr.name][dataset] = {}\n for par, vals, errs in zip(\n psr.fitpars[1:],\n np.longdouble(psr.t2pulsar.vals()),\n np.longdouble(psr.t2pulsar.errs()),\n ):\n pardict[psr.name][dataset][par] = {}\n pardict[psr.name][dataset][par][\"val\"] = vals\n pardict[psr.name][dataset][par][\"err\"] = errs\n return pardict", "def to_dictionary(self):\n return {'pubkey': self.pubkey.to_dictionary(), 'T': self.T, 'y': self.y, 'pi': self.pi}" ]
[ "0.6675421", "0.61638", "0.60298526", "0.6007344", "0.5813939", "0.57426506", "0.55105156", "0.539758", "0.5318396", "0.5313484", "0.5274191", "0.5245611", "0.52223974", "0.52129316", "0.5200293", "0.5195122", "0.5172014", "0.51601124", "0.5127859", "0.51266104", "0.51221544", "0.5104773", "0.5090943", "0.5085107", "0.5016088", "0.5009294", "0.4994829", "0.4991198", "0.49807686", "0.49673492" ]
0.7441343
0
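Editor's note (illustrative only, not a dataset row): a hedged sketch of the per-robot publisher dictionary built in the positive document above; it assumes a ROS 1 environment with move_base_msgs available and keeps the same topic naming convention.

    import rospy
    from move_base_msgs.msg import MoveBaseActionGoal

    def create_goal_publishers(no_robots):
        pubs = {}
        for i in range(no_robots):
            topic = '/robot_' + str(i) + '/move_base/goal'
            # key is the robot index as a string, as in the quoted code
            pubs[str(i)] = rospy.Publisher(topic, MoveBaseActionGoal, queue_size=10)
        return pubs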
Returns current robot occupancy: 0 if the robot isn't moving, 1 if it is moving.
def get_robot_occupancy(self): occupancy = np.zeros(self.no_robots) for i in range(self.no_robots): status_topic = '/robot_' + str(i) + '/move_base/status' msg = rospy.wait_for_message(status_topic, GoalStatusArray) msg_list = msg.status_list if msg_list == []: occupancy[i] = 0 else: if len(msg_list) > 1: robot_status = msg_list[-1].status else: robot_status = msg_list[0].status if (robot_status == 1) or (robot_status == 0) or (robot_status == 7): # BUG pazi tuki je lahko se kaksna fora ker je teh statusov like 10 occupancy[i] = 1 # robot on move else: occupancy[i] = 0 # robot on goal return occupancy
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_occupant(self):\n\t\treturn self.occupant", "def get_occupant(self):\n\t\tpass", "def get_occupancy(self):\n # Compute logo for current alignment\n logo = self.get_logo()\n # Compute occupancy denominator by summing number of occurriencies\n den = np.sum(logo, axis=0)\n # Compute occupancy numerator by subtracting gaps from total\n num = den - logo[-1, :]\n # Compute occupancy as fraction of non-gap aligned residues\n return num / den", "def robotCode(self):\n objectDetected, objectDistances = self.senseObstacles()\n return objectDetected, objectDistances", "def get_joystick():\n if HAS_JOYSTICK:\n if joystick_y.value < 20000:\n # Up\n return 1\n if joystick_y.value > 44000:\n # Down\n return -1\n return 0", "def getRobotPosition(self):\n return self.position", "def getRobotPosition(self):\n return self.position", "def getNextLocation(self):\r\n try:\r\n return min(self._robots, key=lambda r: r.active)\r\n except ValueError:\r\n raise RobotProcessError('There is no free robot process.')", "def is_occupied(self):\n return self.occupied", "def robot_location(room):\r\n\r\n for r in range(WIDTH):\r\n for c in range(WIDTH):\r\n if room[r][c] == \"robot\":\r\n return (r, c)", "def get_current(self):\n return self.node.sdo[0x221c].phys # mA", "def getRobotPosition(self):\n return self.position\n #raise NotImplementedError", "def getRobotPosition(self):\n return self.position\n #raise NotImplementedError", "def robotiq_status(self):\r\n return self._arm.robotiq_status", "def reward(self):\n total = 0\n # Penalties for being close to obstacles:\n # -0.25 for being within some threshold\n # -1 for colliding (radii overlap)\n col_reward = -1\n close_reward = -0.25\n close_thresh = 0.2\n pred_mul = 0.05 # Factor to multiply collision penalty by\n ts = self.sim.getTimeStep()\n robot_pos = self.sim.getAgentPosition(self.robot_num)\n r_vel = self.sim.getAgentVelocity(self.robot_num)\n r_rad = self.sim.getAgentRadius(self.robot_num)\n pred_robot_pos = (robot_pos[0] + r_vel[0] * ts,\n robot_pos[1] + r_vel[1] * ts)\n for agent in self.agents:\n if agent != self.robot_num: # Don't care about self collisions\n # Check for collisions/closeness violations\n a_pos = self.sim.getAgentPosition(agent)\n a_vel = self.sim.getAgentVelocity(agent)\n a_rad = self.sim.getAgentRadius(agent)\n dist = self.dist(a_pos, robot_pos)\n if dist < r_rad + a_rad:\n total += col_reward\n elif dist < r_rad + a_rad + close_thresh:\n total += close_reward\n pred_pos = (a_pos[0] + a_vel[0] * ts,\n a_pos[1] + a_vel[1] * ts)\n # Check for predicted collisions\n if self.dist(pred_robot_pos, pred_pos) < r_rad + a_rad:\n total += pred_mul * col_reward\n elif self.dist(pred_robot_pos, pred_pos) < r_rad + a_rad + \\\n close_thresh:\n total += pred_mul * close_reward\n # Check for right hand rule violations (see\n # https://arxiv.org/pdf/1703.08862.pdf for more details)\n # Note the precise sizes of all the zones are determined by\n # the radius of the robot.\n a_ang = math.atan2(a_pos[1] - robot_pos[1], a_pos[0] -\n robot_pos[0])\n r_ang = math.atan2(r_vel[1], r_vel[0])\n a_rel_ang = a_ang - r_ang\n a_vel_ang = math.atan2(a_vel[1], a_vel[0])\n vel_ang_diff = a_vel_ang - r_ang\n # Transformed coordinates where x-axis is aligned along\n # robot's velocity vector\n x_rel = dist * math.cos(a_rel_ang)\n y_rel = dist * math.sin(a_rel_ang)\n ovtk_min_x = 0\n ovtk_max_x = 3 * r_rad\n ovtk_min_y = 0\n ovtk_max_y = 2 * r_rad\n pass_min_x = 1.5 * r_rad\n pass_max_x = 4 * r_rad\n pass_min_y = -3 * r_rad\n pass_max_y = 0\n cross_min_ang = 
-math.pi/4.0\n cross_max_ang = math.pi/4.0\n cross_max_dist = 4 * r_rad\n # Penalty for violating one of these rules. Same weight for\n # all rules right now\n # Can only incur a social penalty if the robot is moving\n if utils.dist(r_vel, (0.0, 0.0)) > 0.0:\n social_reward = 0.0\n else:\n social_reward = 0.0\n # Check both relative position of the other agent as well as\n # the angle between the agent's and robot's velocities\n if ovtk_min_x <= x_rel <= ovtk_max_x and ovtk_min_y <= \\\n y_rel <= ovtk_max_y and math.fabs(vel_ang_diff) < \\\n math.pi/4.0:\n total += social_reward\n if pass_min_x <= x_rel <= pass_max_x and pass_min_y <= y_rel \\\n <= pass_max_y and math.fabs(vel_ang_diff) > \\\n 3*math.pi/4.0:\n total += social_reward\n if -3*math.pi/4.0 <= vel_ang_diff <= -math.pi/4.0 and dist <\\\n cross_max_dist and cross_min_ang <= a_rel_ang <= \\\n cross_max_ang:\n total += social_reward\n for obs in self.obstacles:\n dist = 0\n if len(obs) > 1:\n # Polygonal obstacle\n o = Polygon(obs)\n p = Point(robot_pos)\n p1, p2 = nearest_points(o, p)\n if not o.contains(p):\n dist = self.dist((p1.x, p1.y), (p2.x, p2.y))\n else:\n # Point obstacle\n dist = self.dist(robot_pos, obs[0])\n if dist < self.sim.getAgentRadius(self.robot_num):\n total += col_reward\n elif (dist < self.sim.getAgentRadius(self.robot_num)\n + close_thresh):\n total += close_reward\n prev_action_ind = self.last_action_ind - 1\n prev2_action_ind = self.last_action_ind - 2\n while prev_action_ind < 0:\n prev_action_ind += self.last_action_capacity\n while prev2_action_ind < 0:\n prev2_action_ind += self.last_action_capacity\n prev_action_ind %= self.last_action_capacity\n prev2_action_ind %= self.last_action_capacity\n # Penalize non-still actions\n if len(self.last_actions) > 0 and self.last_actions[prev_action_ind] \\\n != 0:\n total += -0.01\n # Encourage smooth trajectories by penalizing changing actions,\n # except for starting up if the robot was previously stopped\n if len(self.last_actions) > 1 and self.last_actions[prev_action_ind] \\\n != self.last_actions[prev2_action_ind] and \\\n self.last_actions[prev2_action_ind] != 0:\n total += -0.01\n return torch.tensor([[total]], dtype=torch.float), False", "def find_goal(self):\n w, l, h = self.get_pos()\n gw, gl, gh = self.goal\n try:\n angle_deg = angle((w, l), (gw, gl))\n except ZeroDivisionError:\n if w > gw and l > gl:\n return 2\n elif w < gw and l < gl:\n return 5\n if -105 <= angle_deg <= -75:\n return 0\n elif -75 < angle_deg < 15:\n return 1\n elif -15 <= angle_deg <= 15:\n return 2\n elif 15 < angle_deg < 75:\n return 3\n elif 75 <= angle_deg <= 105:\n return 4\n else:\n return 5", "def value(self):\n return super(Robot, self).value", "def calculate_occupancy(self):\n # TODO will need to be fixed now that using a dict and changed thresholds\n self.occupancy = [list(x > self.t for x in y) for y in self.counts]\n return self.occupancy", "def get_force(self):\n # @todo: Probably need to check the state of the landing gear for this (e.g. 
are they on the track?).\n # Note: you can get the state of the landing gear by going through self.sim \n return 0.0", "def cargo_used(self) -> Union[float, int]:\n return self.proto.cargo_space_taken", "async def get_occupancy(self) -> bool | None:\n res, fail = await self.read_attributes([\"occupancy\"])\n self.debug(\"read 'occupancy' attr, success: %s, fail: %s\", res, fail)\n if \"occupancy\" not in res:\n return None\n return bool(self.occupancy)", "def robot_arm_acc(self):\n return self.sim.data.qacc[self.arm_index]", "def test_topo_current_occupants_int():\n instance = topo.Topography()\n assert type(instance.current_occupants()[\"Herbivores\"]) == int and type(\n instance.current_occupants()[\"Carnivores\"]) == int", "def obstacle_count(self):\n #scan area in front of robot\n self.scan()\n #Figure ot how many obstacles there were\n see_an_object = False\n count = 0", "def get_collisions(self) -> int:\n return 0 # no obstacles are spawned for Circle tasks", "def tower_status_radiant(self):\n return self._get(\"tower_status_radiant\")", "def getPos(self):\n bcast = myGlobals.actorState.actor.bcast\n\n if myGlobals.bypass.get(name='gangToPodium'):\n bcast.warn('text=\"Lying about the APOGEE gang connector being on the podium\"')\n return self.GANG_ON_PODIUM\n elif myGlobals.bypass.get(name='gangToCart'):\n bcast.warn('text=\"Lying about the APOGEE gang connector being on the cartridge\"')\n return self.GANG_ON_CARTRIDGE\n else:\n return self.getPhysicalPos()", "def get_num_gear(self):\n return self.__num_gear_collected", "def calculate_task_potential(self) -> float:\n cur_xy = self.agent.get_position()[:2]\n goal_xy = np.array([1e3, 0])\n return -np.linalg.norm(cur_xy - goal_xy) * 60", "def getMovement(self):\n return self.ma" ]
[ "0.64873034", "0.64286363", "0.6384329", "0.5948018", "0.58905077", "0.5843008", "0.5843008", "0.5824435", "0.5777446", "0.57264864", "0.57001376", "0.5696047", "0.5696047", "0.56312907", "0.56112075", "0.56098473", "0.5572753", "0.553214", "0.5531029", "0.55245584", "0.55160624", "0.5510763", "0.55065", "0.5504534", "0.54930985", "0.5458146", "0.54353577", "0.53995025", "0.53991514", "0.5389668" ]
0.8479907
0
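Editor's note (illustrative only, not a dataset row): a sketch of the status check behind the positive document above, using actionlib's GoalStatus codes (PENDING=0, ACTIVE=1); the quoted code additionally treats status 7 (RECALLING) as still moving.

    import rospy
    from actionlib_msgs.msg import GoalStatusArray

    def robot_is_moving(robot_id):
        topic = '/robot_' + str(robot_id) + '/move_base/status'
        msg = rospy.wait_for_message(topic, GoalStatusArray)
        if not msg.status_list:
            return False                        # no goal has ever been sent
        status = msg.status_list[-1].status     # status of the most recent goal
        return status in (0, 1)                 # PENDING or ACTIVE -> still moving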
Sets a goal for a robot according to its id.
def set_goal(self, robot_id, task, pub_msg): pub_names = self.goal_pubs.keys() pub_objs = self.goal_pubs.values() for i in range(len(pub_names)): if robot_id == int(pub_names[i]): Goal = MoveBaseActionGoal() Goal.header.stamp = rospy.Time.now() Goal.header.frame_id = '' Goal.goal_id.stamp = rospy.Time.now() Goal.goal_id.id = str(int(task[0])) Goal.goal.target_pose.header.stamp = rospy.Time.now() Goal.goal.target_pose.header.frame_id = 'map' Goal.goal.target_pose.pose.position.x = task[1] Goal.goal.target_pose.pose.position.y = task[2] z_rot_rad = task[3] * np.pi / 180 q = quaternion_from_euler(0, 0, z_rot_rad) Goal.goal.target_pose.pose.orientation.z = q[2] Goal.goal.target_pose.pose.orientation.w = q[3] pub_obj = pub_objs[i] pub_obj.publish(Goal) print("Goal set for robot " + str(robot_id) + ". Task id: " + str(int(task[0])) + ".") msg_str = "Goal set for robot " + str(robot_id) + ". Task id: " + str(int(task[0])) + ". Time: %s" % rospy.Time.now().to_sec() pub_msg.publish(msg_str) break else: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_goal(self, goal):\n self._pid_lock.acquire() # Acquire Lock\n self._goal = goal\n self._pid_lock.release() # Release Lock", "def set_goal(self, **kwargs):\n return self.env.set_goal(**kwargs)", "def set_goal(self, goal: GoalType) -> None:\n self.goal = goal", "def goal(self, goal):\n\n self._goal = goal", "def set_goal(goal_loc):\n BoardPath._goal_loc = goal_loc", "def set_goal(self, goal):\r\n self.goal = goal\r\n self.start_time = self.get_current_time()", "def set_robot(self, robot):\n self.robot = robot", "def set_goal(self, x):\n self.controllers[0].set_goal(x)\n self.controllers[1].set_goal(x)", "def _set_task(self, goal):\n if goal.actionID == 'dh_change':\n self.dh_change(goal)\n elif goal.actionID == 'set_rcvel':\n self.set_rcvel(goal)\n elif goal.actionID == 'gate_pass':\n self.gate_pass(goal)\n elif goal.actionID == 'object_center':\n self.object_center(goal)\n elif goal.actionID == 'arm':\n self.arm(goal.arm)\n elif goal.actionID == 'rc_off':\n self.rc_off()\n else:\n rospy.loginfo('%s actionID not recognized'%goal.actionID)", "def add_robot(self, robot_id):\n if robot_id not in self._locations.keys():\n self._locations[robot_id] = None\n self.predictors[robot_id] = KalmanPredictor()\n self._logger.info(\"Robot {} added\".format(str(robot_id)))", "def setGoalNode(self, newGoal):\r\n\t\tself.goalNode = newGoal", "def set_goal(self,pos):\n goal = MoveBaseGoal()\n goal.target_pose.header.frame_id = 'map'\n goal.target_pose.header.stamp = rospy.Time.now()\n mygoal = Pose(Point(pos[0],pos[1],0),Quaternion(0,0,0,1))\n goal.target_pose.pose = mygoal\n self.move_base.send_goal(goal)", "def set_goal(self, new_goal, updating=False):\n GOAL_QUERY = \"\"\"UPDATE Goal SET description = %s WHERE id = %s AND curriculum_name = %s\"\"\" if updating \\\n else \"\"\"INSERT INTO Goal (id, curriculum_name, description) VALUES (%s, %s, %s)\"\"\"\n\n if not updating:\n self.db_cursor.execute(\n GOAL_QUERY,\n (new_goal.id, new_goal.curriculum_name, new_goal.description))\n else:\n self.db_cursor.execute(\n GOAL_QUERY,\n (new_goal.description, new_goal.id, new_goal.curriculum_name))\n self.db_connection.commit()", "def goal(self, goal_id):\r\n return Goal(self, goal_id)", "def setPtr(self, newPtr):\n self.goalPtr = newPtr", "def update_goal(self):\n pass", "def goal(self, goal_id):\r\n return goals.Goal(self, goal_id)", "def set_course_goal(self, goal_id, course_name):\n self.db_cursor.execute(\n \"\"\"INSERT INTO CourseGoals (course_name, goal_id) VALUES (%s, %s)\"\"\",\n (course_name, goal_id))\n self.db_connection.commit()", "def SetToolId(self, id):\r\n\r\n self.tool_id = id", "def setBot(self, present, dir=-1):\n if dir == -1:\n # print(\"Bot set present at current node: \" + str(present))\n self.botPresent = present\n else:\n self.dirNodes[dir].botPresent = present", "def assign_goal(self, goal_index):\n gearbox_index = int(np.floor(goal_index / self.cables_per_gearbox))\n cable_index = goal_index - gearbox_index * self.cables_per_gearbox\n # Activate the goal\n self.gearboxes[gearbox_index].hub_cable_goals[cable_index] = 1.", "def set_id(self, player_id):\n pass", "def setID(self, id):\r\n raise NotImplementedError(\"must be implemented in subclass\")", "def set_id(self, id):\n self.id = id\n print(\"self id = \" + str(self.id))", "def set_id(self, id_):\n\n self.id_ = id_", "def set(self, id, key, val):\n try:\n id_settings = self.id_dict[id]\n except KeyError:\n id_settings = {}\n self.id_dict[id] = id_settings\n id_settings[key] = val", "def robotCode(self):\n 
self.keepGoal2(self.getRobotConf(self.bot), self.goal)", "def set_id(self, id):\n self.__id = id", "def goto(self, goal: Pose):\n self.navigation_lock.acquire()\n self.navigation_goal = goal\n self.navigation_lock.release()", "def set_id(self, id):\n self.data['id'] = id" ]
[ "0.67443836", "0.66931903", "0.65935475", "0.6387873", "0.638348", "0.6232021", "0.61506414", "0.5951643", "0.5907818", "0.5769154", "0.5733853", "0.56343824", "0.56246096", "0.55113935", "0.54608816", "0.544337", "0.5394208", "0.53780353", "0.5371298", "0.5362727", "0.53317165", "0.53135175", "0.53012735", "0.52495754", "0.5243574", "0.52031845", "0.5199907", "0.51994157", "0.5171707", "0.5140138" ]
0.711858
0
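Editor's note (illustrative only, not a dataset row): a sketch of building the MoveBaseActionGoal that the positive document above publishes, with the yaw given in degrees and converted via tf's quaternion_from_euler; a running ROS node is assumed so that rospy.Time.now() is valid.

    import numpy as np
    import rospy
    from move_base_msgs.msg import MoveBaseActionGoal
    from tf.transformations import quaternion_from_euler

    def make_goal(task_id, x, y, yaw_deg):
        goal = MoveBaseActionGoal()
        goal.header.stamp = rospy.Time.now()
        goal.goal_id.id = str(int(task_id))
        goal.goal.target_pose.header.stamp = rospy.Time.now()
        goal.goal.target_pose.header.frame_id = 'map'
        goal.goal.target_pose.pose.position.x = x
        goal.goal.target_pose.pose.position.y = y
        q = quaternion_from_euler(0, 0, yaw_deg * np.pi / 180.0)  # roll, pitch, yaw (rad)
        goal.goal.target_pose.pose.orientation.z = q[2]
        goal.goal.target_pose.pose.orientation.w = q[3]
        return goal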
Try to get an available user to download.
def get_user(self): user = None while user is None: user = self.use() if user is None: logging.info('Waiting for available user to download...') time.sleep(5) return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_one_user():", "def available(self, request):\n username = request.query_params['username']\n resp_data = {}\n if User.objects.filter(username=username).exists():\n resp_data['available'] = False\n else:\n resp_data['available'] = True\n return Response(resp_data, status=status.HTTP_200_OK)", "def available_user_resource_list(self):\n url = BASE_URL + '/availableUserResourceList'\n response = self.__client.get(url=url, headers=self.__headers)\n if response.status_code == HTTP_200_OK:\n return response\n return None", "def get_user(self, wanted_user):\n if self.passwd_file:\n return self._get_user_from_file(wanted_user)\n return self._get_user_from_host(wanted_user)", "def getInterestedUsers():", "def getResponsibleUsers():", "def get_user(self):\n return None", "async def get_user(self, user_target: str) -> Optional[User]:\n if len(user_target) >= 17:\n try:\n user_id = int(user_target)\n except ValueError:\n pass\n else:\n user: User = self.bot.get_user(user_id)\n if user is None:\n try:\n user = await self.bot.fetch_user(user_id)\n except NotFound:\n return None\n return user\n return None", "async def get(self, request, uid):\n return await super(User, self).get_item(request.app.pool, 'user', uid)", "def get_user(self):\n raise NotImplementedError", "def get_user():\n\treturn '1', 200", "def getuser(gh, user):\n return gh.users(user).get()", "def get_user(self):\n return self.get('users/self')", "def require_user( request ):\n\n db = get_db()\n\n if ( not 'users_id' in session ):\n return False;\n\n users_id = session[ 'users_id' ]\n\n user = db.execute( text( \"select users_id, name, email from users where users_id = :id and is_active\" ), id = users_id ).fetchone()\n\n if ( not user ):\n return False;\n\n return user", "def get(self, no):\n user = get_a_user(no)\n if not user:\n api.abort(404)\n else:\n return user", "def get(self):\r\n return get_user(request)", "async def getch_user(self, id: str):\n return self.get_user(id) or await self.fetch_user(id)", "def get_user(id):\n pass", "def user_loader(user_id):\r\n return User.query.get(user_id)", "def get_user(self, user_id):\n return None # noqa: WPS324", "async def get_user_account(self):\n uri = \"/v3/spot/assets\"\n success, error = await self.request(\"GET\", uri, auth=True)\n return success, error", "def test_get_user(self):\n print('(' + self.test_get_user.__name__+')', self.test_get_user.__doc__)\n # test for patient\n self.assertDictContainsSubset(\n self.connection.get_user(PATIENT_USERNAME), PATIENT)\n # test for doctor\n self.assertDictContainsSubset(\n self.connection.get_user(DOCTOR_USERNAME), DOCTOR)", "def __user_select(self) -> Union[db.User, CancelSignal]:\n log.debug(\"Waiting for a user selection...\")\n # Find all the users in the database\n users = self.session.query(db.User).order_by(db.User.user_id).all()\n # Create a list containing all the keyboard button strings\n keyboard_buttons = [[self.loc.get(\"menu_all_cancel\")]]\n # Add to the list all the users\n for user in users:\n keyboard_buttons.append([user.identifiable_str()])\n # Create the keyboard\n keyboard = telegram.ReplyKeyboardMarkup(keyboard_buttons, one_time_keyboard=True)\n # Keep asking until a result is returned\n while True:\n # Send the keyboard\n self.bot.send_message(self.chat.id, self.loc.get(\"conversation_admin_select_user\"), reply_markup=keyboard)\n # Wait for a reply\n reply = self.__wait_for_regex(\"user_([0-9]+)\", cancellable=True)\n # Propagate CancelSignals\n if isinstance(reply, CancelSignal):\n return reply\n # Find 
the user in the database\n user = self.session.query(db.User).filter_by(user_id=int(reply)).one_or_none()\n # Ensure the user exists\n if not user:\n self.bot.send_message(self.chat.id, self.loc.get(\"error_user_does_not_exist\"))\n continue\n return user", "def get_user_noreq(self, request):\n item = Item.objects.get(id=request.POST['item_id'])\n target_user = User.objects.filter(email=request.POST['email'])\n if not target_user.exists():\n # In this case we don't want to return to the initial page\n return JsonResponse({\n 'msg': \"ERROR: The user doesn't exist\"\n })\n if not item.can_be_borrowed():\n return self.init_and_toast(\"ERROR: The item is not available\")\n\n borrowing = Borrowing(user=target_user.first(), item=item, borrowing_by=request.user)\n borrowing.save()\n return self.init_and_toast(\"The item has been borrowed succesfully\")", "def fix_discovery(self):\r\n for item in self.collections['response']['collections']:\r\n if item['id'] == 'LDSO':\r\n try:\r\n self.collections['LDSO'] = self.get(['links']['self']['href'])\r\n except:\r\n self.lds_user = False\r\n else:\r\n self.lds_user = True\r\n else:\r\n self.collections[item['id']] = item['links']['self']['href']\r\n try:\r\n self.user = self.get_current_user()['response']['users'][0]\r\n except:\r\n self.user = \"\"", "def user_access(self):\n results = {}\n self.cache_hosts_wts_tokens(self.download_list)\n for hostname in self.known_hosts.keys():\n if self.known_hosts[hostname].available is False:\n logger.critical(\n f\"Was unable to get user authorization from {hostname}.\"\n )\n continue\n access_token = self.known_hosts[hostname].access_token\n authz = get_user_auth(hostname, access_token)\n results[hostname] = authz\n\n return results", "def get_remote_user(request):\n\n if settings.DEBUG:\n logger.debug(\"Getting Remote User\")\n me = {}\n\n me['url'] = settings.OAUTH_TEST_INFO['BASE']\n\n me['ask'] = \"/api/v1/me\" + \"?_format=json\"\n\n\n me = fhir_request(request, me)\n logger.debug(\"me...\", me)\n if 'errors' and 'code' in me:\n msg = build_message(request,me['errors'])\n return kickout(msg, me['code'])\n\n return me", "def get_user():\n try:\n userId = request.args.get('login_as')\n return users[int(userId)]\n except Exception:\n return None", "def getUserFromKey(key):\n\t\t#get(Key(key))\n\t\t#return None if no user found", "def get_optional_user(request: Request) -> Optional[User]:\n try:\n return _check_and_extract_user(request)\n except HTTPException:\n if request.headers.get(\"Authorization\"):\n raise" ]
[ "0.60041994", "0.5900717", "0.57394034", "0.5721573", "0.56907725", "0.56869805", "0.56774336", "0.5670875", "0.5665978", "0.55958027", "0.5578879", "0.55716324", "0.55705893", "0.55703914", "0.5529979", "0.55233115", "0.5515475", "0.5511456", "0.55111885", "0.5509604", "0.5466628", "0.54577625", "0.545162", "0.5449622", "0.5438064", "0.54364043", "0.5430796", "0.5426141", "0.54257894", "0.54246175" ]
0.7647943
0
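Editor's note (illustrative only, not a dataset row): the positive document above is a simple poll-and-sleep loop; a generic sketch where `acquire` is a hypothetical stand-in for the class's use() method, which returns a user or None.

    import logging
    import time

    def wait_until_available(acquire, poll_seconds=5):
        user = acquire()
        while user is None:
            logging.info('Waiting for available user to download...')
            time.sleep(poll_seconds)
            user = acquire()
        return user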
Set the current Appium session. |oAppiumInfo=${AppInfo}|
def aisappium_set_driver_instance(self, oAppiumInfo): self._cache.current = oAppiumInfo.driver
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def appium_init(self):\n desired_cups = {}\n desired_cups['platformName'] = 'Android'\n desired_cups['platformVersion'] = android_version\n desired_cups['deviceName'] = device_name\n desired_cups['appPackage'] = pkg_name\n desired_cups['appActivity'] = activity\n desired_cups['autoLaunch'] = 'false'\n desired_cups['noReset'] = 'true'\n desired_cups['automationName'] = 'uiautomator2'\n driver = webdriver.Remote('http://127.0.0.path:4723/wd/hub', desired_cups)\n global driver\n return driver", "def setAppInfo(self, name, version, devMode=False):\n self._appName = name\n self._appVersion = version\n self._devMode = devMode", "def switchToAppInstaller(dev):\n print('Switching to app install mode')\n SonyExtCmdCamera(dev).switchToAppInstaller()", "def initialize(self, device, platform, version, app, package, activity):\n desired_caps = dict()\n desired_caps['platformName'] = platform\n desired_caps['platformVersion'] = version\n desired_caps['deviceName'] = device\n desired_caps['app'] = app\n desired_caps['appPackage'] = package\n desired_caps['appActivity'] = activity\n server_url = 'http://{0}:{1}/wd/hub'.format(APPIUM_SERVER, APPIUM_PORT)\n print server_url\n self.driver = webdriver.Remote(server_url, desired_caps)\n print DEBUG_STRING, self.driver, id(self.driver)\n self.driver.implicitly_wait(5)\n return self.driver", "def aisappium_open_application(self, remote_url, alias=None, **kwargs):\n index = AppiumLibrary.open_application(self, remote_url, alias, **kwargs)\n driver = self._current_application()\n oAppiumInfo = AtlasInfo(index, alias, driver, remote_url)\n return oAppiumInfo", "def test_48_update_app_info(self, Mock, mock):\r\n html_request = FakeRequest(json.dumps(self.pkg_json_not_found), 200,\r\n {'content-type': 'application/json'})\r\n Mock.return_value = html_request\r\n\r\n self.register()\r\n self.new_application()\r\n app = db.session.query(App).first()\r\n err_msg = \"Task Presenter should be empty\"\r\n assert not app.info.get('task_presenter'), err_msg\r\n\r\n res = self.app.post('/app/sampleapp/tasks/taskpresentereditor',\r\n data={'editor': 'Some HTML code!'},\r\n follow_redirects=True)\r\n assert \"Sample App\" in res.data, \"Does not return to app details\"\r\n app = db.session.query(App).first()\r\n for i in range(10):\r\n key = \"key_%s\" % i\r\n app.info[key] = i\r\n db.session.add(app)\r\n db.session.commit()\r\n _info = app.info\r\n\r\n self.update_application()\r\n app = db.session.query(App).first()\r\n for key in _info:\r\n assert key in app.info.keys(), \\\r\n \"The key %s is lost and it should be here\" % key\r\n assert app.name == \"Sample App\", \"The app has not been updated\"\r\n error_msg = \"The app description has not been updated\"\r\n assert app.description == \"Description\", error_msg\r\n error_msg = \"The app long description has not been updated\"\r\n assert app.long_description == \"Long desc\", error_msg", "def open_app(device, package_name):\n\n device.shell('am start -n ' + package_name + '/' + package_name + \".modules.overview.screen\" +\n '.OverviewActivity')", "def select_app_launcher_app(self, app_name):\n locator = lex_locators[\"app_launcher\"][\"app_link\"].format(app_name)\n self.open_app_launcher()\n self.selenium.wait_until_page_contains_element(locator, timeout=30)\n self.selenium.set_focus_to_element(locator)\n elem = self.selenium.get_webelement(locator)\n link = elem.find_element_by_xpath(\"../../..\")\n self.selenium.set_focus_to_element(link)\n link.click()\n self.wait_until_modal_is_closed()", "def 
set_application(self, app):\n \n self.app = app", "def ios_app_info(self) -> Optional[pulumi.Input['IosAppInfoArgs']]:\n return pulumi.get(self, \"ios_app_info\")", "def ios_app_info(self) -> 'outputs.IosAppInfoResponse':\n return pulumi.get(self, \"ios_app_info\")", "def start_app(self):\n app_xpath = '//android.widget.FrameLayout[@content-desc=\\\"{app}\\\"]/android.widget.ImageView'\n LOGGER.info('Starting app now!')\n tex = app_xpath.format(app=self.app_name)\n try:\n self.click_element(el_type='xpath', text=tex, handle_error=False)\n except NoSuchElementException:\n LOGGER.exception('Cannot find {app} on home screen of the phone!'.format(\n app=self.app_name))\n sys.exit(1)\n LOGGER.debug(\"{app} is opened on {name}\".format(\n app=self.app_name, name=self.mobile_name))\n time.sleep(5)\n self.set_scroll_length()", "def main():\n configs = [\"show configuration sessions\"]\n with EOSDriver(**MY_DEVICE) as conn:\n conn.register_configuration_session(session_name=\"my-config-session\")\n # for configuration sessions we have to first \"register\" the session with scrapli:\n result = conn.send_configs(configs=configs, privilege_level=\"my-config-session\")\n\n # we should see our session name with an \"*\" indicating that is the active config session\n print(result[0].result)", "def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['deviceName'] = 'Android Emulator'\n desired_caps['app'] = os.path.abspath(os.path.join(os.getcwd(), 'apps/Android.apk'))\n desired_caps['appPackage'] = 'com.view.viewglass'\n desired_caps['appActivity'] = 'com.view.viewglass.Splash'\n desired_caps['autoGrantPermissions'] = True\n desired_caps['noReset'] = True\n desired_caps['clearSystemFiles'] = True\n self.driver = webdriver.Remote('http://localhost:4444/wd/hub', desired_caps)", "def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['deviceName'] = 'Android Emulator'\n desired_caps['app'] = os.path.abspath(os.path.join(os.getcwd(), 'apps/Android.apk'))\n desired_caps['appPackage'] = 'com.view.viewglass'\n desired_caps['appActivity'] = 'com.view.viewglass.Splash'\n desired_caps['autoGrantPermissions'] = True\n desired_caps['noReset'] = True\n desired_caps['clearSystemFiles'] = True\n self.driver = webdriver.Remote('http://localhost:4444/wd/hub', desired_caps)", "def setUp(self):\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['deviceName'] = 'Android Emulator'\n desired_caps['app'] = os.path.abspath(os.path.join(os.path.dirname(__file__), 'apps/Android.apk'))\n desired_caps['appPackage'] = 'com.view.viewglass'\n desired_caps['appActivity'] = 'com.view.viewglass.Splash'\n desired_caps['autoGrantPermissions'] = True\n desired_caps['noReset'] = True\n desired_caps['clearSystemFiles'] = True\n self.driver = webdriver.Remote('http://localhost:4444/wd/hub', desired_caps)", "def display_app_info(config, client, app_id):\n try:\n resp = client.get_app_full_info(config.username, app_id)\n result = resp.json()\n app_info = result[\"app_info\"]\n title = click.style(\"App Name : \", fg=\"blue\") + click.style(\n \"{}\".format(app_info[\"title\"]))\n\n if app_info[\"rating_count\"] == 0:\n rating = \"Not yet rated\"\n else:\n rating = \"{:.1f} ({} rating\".format(app_info[\"average_rating\"],\n int(app_info[\"rating_count\"]))\n if app_info[\"rating_count\"] > 1:\n rating += \"s\"\n rating += \")\"\n rating_row = click.style(\"Rating : \", fg=\"blue\") + click.style(\"{}\".format(rating))\n up_status = 
click.style(\"Status : \", fg=\"blue\")\n if app_info[\"is_up\"]:\n up_status += click.style(\"Up\")\n else:\n up_status += click.style(\"Down\")\n\n last_crawl_str = \"Not yet crawled\"\n if \"last_crawl\" in app_info:\n last_crawl_str = util.format_date(app_info[\"last_crawl\"])\n\n last_crawl = click.style(\"Last Crawl Time : \", fg=\"blue\") + click.style(\n \"{}\".format(last_crawl_str))\n version = click.style(\"Version : \", fg=\"blue\") + click.style(\n \"{}\".format(app_info[\"version\"]))\n\n last_updated_str = util.format_date(app_info[\"updated\"])\n last_update = click.style(\"Last Update : \", fg=\"blue\") + click.style(\n \"{}\".format(last_updated_str))\n\n availability = click.style(\"Availability : \", fg=\"blue\") + click.style(\n \"{:.2f}%\".format(app_info[\"average_uptime\"] * 100))\n\n app_url = click.style(\"Public App URL : \", fg=\"blue\") + click.style(\n \"{}\".format(app_info[\"app_url\"]))\n original_url = click.style(\"Private App URL : \", fg=\"blue\") + click.style(\n \"{}\".format(app_info[\"original_url\"]))\n category = click.style(\"Category : \", fg=\"blue\") + click.style(\n \"{}\".format(app_info[\"category\"]))\n\n desc = click.style(\"Description : \", fg=\"blue\") + click.style(\n \"{}\".format(app_info[\"description\"]))\n price = click.style(\"Price Range : \", fg=\"blue\") + click.style(\n \"{} - {} Satoshis\").format(\n app_info[\"min_price\"], app_info[\"max_price\"])\n doc_url = click.style(\"Docs URL : \", fg=\"blue\") + click.style(\n \"{}\".format(app_info[\"docs_url\"]))\n\n quick_start = click.style(\"Quick Start\\n\\n\", fg=\"blue\") + click.style(\n app_info[\"quick_buy\"])\n\n usage_docs = None\n if \"usage_docs\" in app_info:\n usage_docs = click.style(\"Detailed usage\\n\\n\", fg=\"blue\") + click.style(\n app_info[\"usage_docs\"])\n\n page_components = [title, \"\\n\",\n rating_row, up_status, availability, last_crawl, last_update, version,\n \"\\n\",\n desc, app_url, original_url, doc_url, \"\\n\",\n category, price, \"\\n\", quick_start, \"\\n\"]\n if usage_docs:\n page_components.append(usage_docs + \"\\n\")\n final_str = \"\\n\".join(page_components)\n logger.info(final_str, pager=True)\n\n except ServerRequestError as e:\n if e.status_code == 404:\n logger.info(\n \"The specified id for the app ({}) does not match any apps in the \"\n \"marketplace.\".format(app_id))\n else:\n raise e", "def set_application(self, app_id):\n if self._use_channel_info:\n self._channel = \"\"\n self._channel_name = app_id\n self._is_forced_val = True\n self._forced_count = 0", "def _set_app_info_Primary(self):\n self._Primary_app_info = self._fetch_app_info(self._Primary)", "def current_app_should_be(self, app_name):\n locator = lex_locators[\"app_launcher\"][\"current_app\"].format(app_name)\n elem = self.selenium.get_webelement(locator)\n assert app_name == elem.text, \"Expected app to be {} but found {}\".format(\n app_name, elem.text\n )", "def setAppID(self, appid):\n\t\tself.config.APP_ID = appid", "def __init__(__self__, *,\n ios_app_info: Optional[pulumi.Input['IosAppInfoArgs']] = None,\n ios_robo_test: Optional[pulumi.Input['IosRoboTestArgs']] = None,\n ios_test_loop: Optional[pulumi.Input['IosTestLoopArgs']] = None,\n ios_xc_test: Optional[pulumi.Input['IosXcTestArgs']] = None,\n test_timeout: Optional[pulumi.Input['DurationArgs']] = None):\n if ios_app_info is not None:\n pulumi.set(__self__, \"ios_app_info\", ios_app_info)\n if ios_robo_test is not None:\n pulumi.set(__self__, \"ios_robo_test\", ios_robo_test)\n if ios_test_loop is 
not None:\n pulumi.set(__self__, \"ios_test_loop\", ios_test_loop)\n if ios_xc_test is not None:\n pulumi.set(__self__, \"ios_xc_test\", ios_xc_test)\n if test_timeout is not None:\n pulumi.set(__self__, \"test_timeout\", test_timeout)", "def __init__(__self__, *,\n ios_app_info: 'outputs.IosAppInfoResponse',\n ios_robo_test: 'outputs.IosRoboTestResponse',\n ios_test_loop: 'outputs.IosTestLoopResponse',\n ios_xc_test: 'outputs.IosXcTestResponse',\n test_timeout: 'outputs.DurationResponse'):\n pulumi.set(__self__, \"ios_app_info\", ios_app_info)\n pulumi.set(__self__, \"ios_robo_test\", ios_robo_test)\n pulumi.set(__self__, \"ios_test_loop\", ios_test_loop)\n pulumi.set(__self__, \"ios_xc_test\", ios_xc_test)\n pulumi.set(__self__, \"test_timeout\", test_timeout)", "def __init__(__self__, *,\n android_app_info: 'outputs.AndroidAppInfoResponse',\n android_instrumentation_test: 'outputs.AndroidInstrumentationTestResponse',\n android_robo_test: 'outputs.AndroidRoboTestResponse',\n android_test_loop: 'outputs.AndroidTestLoopResponse',\n test_timeout: 'outputs.DurationResponse'):\n pulumi.set(__self__, \"android_app_info\", android_app_info)\n pulumi.set(__self__, \"android_instrumentation_test\", android_instrumentation_test)\n pulumi.set(__self__, \"android_robo_test\", android_robo_test)\n pulumi.set(__self__, \"android_test_loop\", android_test_loop)\n pulumi.set(__self__, \"test_timeout\", test_timeout)", "def create_driver(self, app_server):\n config = self.config[app_server]\n\n cmd = config['CMD']\n server_name = config['NAME']\n log_file_name = config['LOG_FILE_NAME']\n full_log_path = os.path.join(os.environ['basedir'], 'logs', 'appium', log_file_name)\n url = config['URL']\n desired_cap = config['DESIRED_CAP']\n self.mobile_name = config['MOBILE_NAME']\n\n with open(full_log_path, \"w\") as file:\n subprocess.Popen(cmd, shell=True, stdout=file, stderr=subprocess.STDOUT)\n LOGGER.info(\"{name} started !\".format(name=server_name))\n try:\n self.driver = webdriver.Remote(url, desired_cap)\n self.touch = TouchAction(self.driver)\n LOGGER.info(\"Connected to {mob}\".format(mob=self.mobile_name))\n except WebDriverException:\n LOGGER.error(\"{dev} is not connected!\".format(\n dev=self.mobile_name))\n time.sleep(3)", "def launch_application(path) -> WebDriver:\n try:\n exp_cap = {\n \"deviceName\": \"WindowsPC\",\n \"app\": path}\n exp_session = webdriver.Remote(\n command_executor='http://127.0.0.1:4723',\n desired_capabilities=exp_cap)\n return exp_session\n except Exception as ex:\n logger.info(f':Error in launching app ERROR: {ex}')", "def android_app_info(self) -> 'outputs.AndroidAppInfoResponse':\n return pulumi.get(self, \"android_app_info\")", "def installCommand(driverName=None, apkFile=None, appPackage=None, outFile=None, local=False):\n with importDriver(driverName) as driver:\n device = getDevice(driver)\n if device and not isinstance(device, SonyMtpAppInstaller):\n switchToAppInstaller(device)\n device = None\n\n print('Waiting for camera to switch...')\n for i in range(10):\n time.sleep(.5)\n try:\n devices = list(listDevices(driver, True))\n if len(devices) == 1 and isinstance(devices[0], SonyMtpAppInstaller):\n device = devices[0]\n break\n except:\n pass\n else:\n print('Operation timed out. 
Please run this command again when your camera has connected.')\n\n if device:\n installApp(device, apkFile, appPackage, outFile, local)", "def launch_an_app(appname,ui):\r\n ui = ui\r\n time.sleep(WAIT)\r\n \"\"\"Clicking on Launcher button\"\"\"\r\n ui.doDefault_on_obj('Launcher', False, role='button') \r\n time.sleep(WAIT)\r\n ui.doDefault_on_obj(name='Expand to all apps', role='button')\r\n time.sleep(WAIT)\r\n \"\"\"Launching an APK\"\"\"\r\n ui.doDefault_on_obj(appname, False, 'button') \r\n time.sleep(WAIT)", "def launch_app(self):\n os.system (\"adb shell am start -n com.tencent.mm/com.tencent.mm.ui.LauncherUI/\")\n time.sleep (5)" ]
[ "0.6601209", "0.59317946", "0.5883183", "0.5697906", "0.5663244", "0.5568987", "0.54523796", "0.5425999", "0.53608453", "0.5313601", "0.53051645", "0.52826554", "0.527116", "0.5247173", "0.5247173", "0.52119833", "0.5187687", "0.5146341", "0.51420695", "0.5116347", "0.5107598", "0.5104853", "0.50925577", "0.50673014", "0.5058235", "0.5038715", "0.50149894", "0.5003585", "0.4985837", "0.49656585" ]
0.60792583
1
Click element on mobile. |locator=xpath=//*[@id="id123"]|oAppiumInfo=${AppInfo}
def aisappium_click_element(self, locator, oAppiumInfo=None):
    self._info("Clicking mobile element '%s'." % locator)
    if oAppiumInfo is not None:
        self._element_find_atlas(locator, True, True, oAppiumInfo.driver).click()
    else:
        self._element_find(locator, True, True).click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_app_launcher_app(self, app_name):\n locator = lex_locators[\"app_launcher\"][\"app_link\"].format(app_name)\n self.open_app_launcher()\n self.selenium.wait_until_page_contains_element(locator, timeout=30)\n self.selenium.set_focus_to_element(locator)\n elem = self.selenium.get_webelement(locator)\n link = elem.find_element_by_xpath(\"../../..\")\n self.selenium.set_focus_to_element(link)\n link.click()\n self.wait_until_modal_is_closed()", "def launch_an_app(appname,ui):\r\n ui = ui\r\n time.sleep(WAIT)\r\n \"\"\"Clicking on Launcher button\"\"\"\r\n ui.doDefault_on_obj('Launcher', False, role='button') \r\n time.sleep(WAIT)\r\n ui.doDefault_on_obj(name='Expand to all apps', role='button')\r\n time.sleep(WAIT)\r\n \"\"\"Launching an APK\"\"\"\r\n ui.doDefault_on_obj(appname, False, 'button') \r\n time.sleep(WAIT)", "def open_mobile_application(self, platform):\n if platform == ANDROID:\n self.appium_lib.open_application(APPIUM_URL_ANDROID, platformName = APPIUM_DESIRED_CAPS['platformName'],\n deviceName = APPIUM_DESIRED_CAPS['udid'],\n app = APPIUM_DESIRED_CAPS['app'],\n appPackage = APPIUM_DESIRED_CAPS['appPackage'],\n appActivity = APPIUM_DESIRED_CAPS['appActivity'],\n noSign = APPIUM_DESIRED_CAPS['noSign'],\n fullReset = APPIUM_DESIRED_CAPS['fullReset'])\n self.appium_lib.wait_until_element_is_visible(RESULT)\n self.builtin_lib.sleep(1)\n else:\n self.builtin_lib.fail(\"Platform not correct\")\n raise AssertionError(\"Failed to open mobile application!\")\n self.current_driver = self.appium_lib._cache.current # type: WebDriver\n self.os = self.current_driver.capabilities['platformName']", "def appium_init(self):\n desired_cups = {}\n desired_cups['platformName'] = 'Android'\n desired_cups['platformVersion'] = android_version\n desired_cups['deviceName'] = device_name\n desired_cups['appPackage'] = pkg_name\n desired_cups['appActivity'] = activity\n desired_cups['autoLaunch'] = 'false'\n desired_cups['noReset'] = 'true'\n desired_cups['automationName'] = 'uiautomator2'\n driver = webdriver.Remote('http://127.0.0.path:4723/wd/hub', desired_cups)\n global driver\n return driver", "def open_app(device, package_name):\n\n device.shell('am start -n ' + package_name + '/' + package_name + \".modules.overview.screen\" +\n '.OverviewActivity')", "def click_button_xpath(driver, platform):\n xpath = settings.platforms[platform]['search_listings']['show_more_xpath']\n time.sleep(1)\n show_more_button = driver.find_elements_by_xpath(xpath)[0]\n driver.execute_script(\"arguments[0].click();\", show_more_button)", "def _ClickPrimaryActionButton(self):\n self._ExecuteOobeApi('Oobe.clickGaiaPrimaryButtonForTesting')", "def click_element(self,loc,img_name):\r\n self.wait_ele_visible(loc,img_name)\r\n ele=self.get_element(loc,img_name)\r\n try:\r\n ele.click()\r\n except:\r\n self.getScreenShot(img_name)\r\n logging.exception(\"********{}click{}element fail********\".format(img_name,loc))\r\n raise", "def navigate_to_system_then_to_failover(driver):\n\n driver.find_element_by_xpath('//mat-list-item[@ix-auto=\"option__System\"]').click()\n wait_on_element(driver, 0.5, 30, '//mat-list-item[@ix-auto=\"option__Failover\"]')\n driver.find_element_by_xpath('//mat-list-item[@ix-auto=\"option__Failover\"]').click()", "def click_create_an_app(self):\r\n self.log.info('clicking on create app button')\r\n button = self.driver.find_element_by_xpath(\"//a[contains(text(),'Create an App')]\")\r\n button.click()\r\n time.sleep(5)", "def find_element_by_selector(self, selector):\n return UiObject(selector, 
self.android_device_driver) if UiObject(\n selector, self.android_device_driver).verify_exist() else None", "def select_goto_application_settings_item(self):\n self.driver.click(\"go_to_application_settings_btn\")", "def click(self, x, y):\n # adb click 0,0 will have a weird behavior\n if x <= 0 and y <= 0:\n return\n cmd = \"shell input tap {x} {y}\".format(x=x, y=y)\n self.android_device_driver.adb.exec_adb_cmd(cmd).wait()", "def click(self, element):\n element.click()", "def elementClick(self,locator=\"\",locatorType='id',element=None):\n\n\n try:\n if locator:\n element=self.getElement(locator,locatorType)\n\n element.click()\n self.logger.info(\"clicked on element with locator\"+locator+\" locatorType: \"+locatorType)\n\n except:\n self.logger.info('Cannot click on element with locator '+locator+\" locatorType: \"+locatorType)\n print_stack()", "def start_app(self):\n app_xpath = '//android.widget.FrameLayout[@content-desc=\\\"{app}\\\"]/android.widget.ImageView'\n LOGGER.info('Starting app now!')\n tex = app_xpath.format(app=self.app_name)\n try:\n self.click_element(el_type='xpath', text=tex, handle_error=False)\n except NoSuchElementException:\n LOGGER.exception('Cannot find {app} on home screen of the phone!'.format(\n app=self.app_name))\n sys.exit(1)\n LOGGER.debug(\"{app} is opened on {name}\".format(\n app=self.app_name, name=self.mobile_name))\n time.sleep(5)\n self.set_scroll_length()", "def test_click_OpenToday(self):\n self.tc_id = \"Ts_017\"\n self.tc_desc = \"Verify user is able to click Location types\"\n self.tc_step = \"TC Start\"\n\n clickLocationTypes = clickOpenToday(self.driver)\n\n self.tc_step = \"Launch the url\"\n clickLocationTypes.launchUrl(\"https://massdtaiot.com/dtahip/\")\n self.tc_step = \"Select all cities\"\n clickLocationTypes.allcities()\n self.tc_step = \"Click the HIP CSA filter\"\n clickLocationTypes.clickopentoday()", "def clickDetails(self):\n self.waitForElement(locator=self._userProfile_detailsBtn, locatorType=\"xpath\")\n element = self.getElementList(locator=self._userProfile_detailsBtn, locatorType=\"xpath\")\n self.elementClick(element=element[0])\n pp.time.sleep(2)", "def click_laptop(self):\n self.functions.wait_for_element(self.SELECT_LAPTOP).click()\n laptop_selected = self.functions.wait_for_element(self.LAPTOP_SELECTED)\n assert laptop_selected.text == self.text, \"didn't selected laptop!\"", "def test_landing_page_tag(self, setup_landing, click, locate):\r\n locate.locate_text_part('Browse by tags')\r\n click.click_xpath(LocLandind.tag)\r\n locate.locate_text_part('Here we go with the icons related')\r\n import time\r\n time.sleep(5)\r\n locate.locate_xpath(LocLandind.icon_in_tag)", "def click(self, xpath):\n self.driver.find_element_by_xpath(xpath=xpath).click()", "def click(self, selector):\n el = self.locate_element(selector)\n el.click()", "def find_element(self, attrib_key, attrib_value, match_option=None):\n selector = UiSelector()\n selector.attributes(attrib_key, attrib_value, match_option)\n return UiObject(selector, self.android_device_driver) if UiObject(\n selector, self.android_device_driver).verify_exist() else None", "def tap():\n return \"I have clicked on the elements\"", "def step_imp1(context,message):\n context.driver.get(context.base_url)", "def navigate_to_network_then_to_interfaces(driver):\n driver.find_element_by_xpath('//mat-list-item[@ix-auto=\"option__Network\"]').click()\n wait_on_element(driver, 0.5, 30, '//mat-list-item[@ix-auto=\"option__Interfaces\"]')\n 
driver.find_element_by_xpath('//mat-list-item[@ix-auto=\"option__Interfaces\"]').click()", "def click(self, id):\n el = self.wait_n_get(By.ID, id)\n el.click()", "def click_by_location(self, elem, **kwargs):\n loc = elem.location\n size = elem.size\n screen_size = self.driver.get_window_size()\n if self.tablet:\n if kwargs['side'] == 'middle':\n x = loc['x'] + size['width'] / 2\n y = loc['y'] + size['height'] / 2\n\n elif kwargs['side'] == 'left':\n x = loc['x'] + size['width'] / 4\n y = loc['y'] + size['height'] / 2\n\n elif kwargs['side'] == 'right':\n x = loc['x'] + size['width'] - 50\n y = loc['y'] + 10\n else:\n x = loc['x'] + size['width'] / 2\n y = loc['y'] + size['height'] / 2\n\n elif self.phone:\n if kwargs['side'] == 'middle':\n x = loc['x'] + size['width'] / 2\n y = loc['y'] + size['height'] / 2\n\n elif kwargs['side'] == 'left':\n x = loc['x'] + size['width'] / 4\n y = loc['y'] + size['height'] / 2\n\n elif kwargs['side'] == 'right':\n x = screen_size['width'] - 40\n y = loc['y'] + 5\n else:\n x = loc['x'] + size['width'] / 2\n y = loc['y'] + size['height'] / 2\n\n # an array of tuples\n action = TouchAction(self.driver)\n action.tap(x=x, y=y).perform()", "def homeCustomScan(window,referenceid):\n try:\n allbuttons = getAppButtons(window)\n atomacclick(allbuttons[0])\n atomacclick(allbuttons[19])\n time.sleep(5)\n Runwindow = getChildwindows(referenceid)\n buttons = getAppButtons(Runwindow)\n atomacclick(buttons[1])\n newb = getAllObjects(Runwindow)\n time.sleep(4)\n atomacclick(newb[2])\n except Exception as er:\n print(\"Not able to click on HomeCustomScan\")\n return False", "def getWebDriverInstance(self):\n #baseURL = \"https://qa-test.avenuecode.com/\"\n if self.device == \"ios_mobile\":\n self.driver = appiumdriver.Remote('http://localhost:4723/wd/hub', self.capabilities)\n self.driver.implicitly_wait(10)\n #return driver\n #driver.get(baseURL)\n\n else:\n print(\"Hello\")\n # Setting Driver Implicit Time out for An Element\n self.driver = appiumdriver.Remote('http://localhost:4723/wd/hub', self.capabilities)\n self.driver.implicitly_wait(10)\n return self.driver\n # Maximize the window\n #driver.maximize_window()\n # Loading browser with App URL\n #driver.get(baseURL)\n #return driver" ]
[ "0.60016996", "0.57440853", "0.562351", "0.55657375", "0.5446922", "0.54291373", "0.5427778", "0.5360228", "0.53453577", "0.5330872", "0.528335", "0.52765787", "0.5207358", "0.5170354", "0.5145716", "0.51455855", "0.51274836", "0.51267576", "0.5112077", "0.5099592", "0.50914454", "0.5083622", "0.5064306", "0.5060537", "0.5028966", "0.50168324", "0.5000455", "0.49959344", "0.4995536", "0.4986385" ]
0.7645754
0
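For orientation, the sketch below shows roughly what this keyword boils down to at the WebDriver level once the locator is resolved, written against a plain Appium Python Client session rather than the library wrapper. The server URL, capabilities, and xpath are placeholder assumptions, and the older `find_element_by_xpath`/desired-capabilities style is used only to stay consistent with the client API that appears throughout these snippets.

```python
from appium import webdriver

# Placeholder capabilities: real values depend on the device and app under test.
caps = {
    'platformName': 'Android',
    'deviceName': 'Android Emulator',
    'appPackage': 'com.example.app',    # hypothetical package name
    'appActivity': '.MainActivity',     # hypothetical activity
    'automationName': 'uiautomator2',
}
driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', caps)

# Core of the Click Element keyword: locate the element by its locator, then click it.
driver.find_element_by_xpath('//*[@id="id123"]').click()
```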
Clears the text field identified by `locator`. |locator=xpath=//*[@id="id123"]|oAppiumInfo=${AppInfo}
def aisappium_clear_text(self, locator, oAppiumInfo=None):
    self._info("Clear text field '%s'" % locator)
    if oAppiumInfo is not None:
        self._element_clear_text_by_locator_atlas(locator, oAppiumInfo.driver)
    else:
        self._element_clear_text_by_locator(locator)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_put_value(self, locator):\n time.sleep(2)\n element = self.driver.find_element(*locator)\n element.send_keys(Keys.CONTROL + 'a')\n element.send_keys(Keys.DELETE)", "def aisappium_input_text(self, locator, text, oAppiumInfo=None):\n self._info(\"Typing text '%s' into text field '%s'\" % (text, locator))\n if oAppiumInfo is not None:\n self._element_input_text_by_locator_atlas(locator, text, oAppiumInfo.driver)\n else:\n self._element_input_text_by_locator(locator, text)", "def clear_send_keys(self, locator, text):\r\n t1 = time.time()\r\n try:\r\n el = self.find_element(locator)\r\n el.clear()\r\n el.send_keys(text)\r\n self.my_print(\"{0} Clear and type element: <{1}> content: {2}, Spend {3} seconds\".format(success,\r\n locator, text,time.time() - t1))\r\n except Exception:\r\n self.my_print(\"{0} Unable to clear and type element: <{1}> content: {2}, Spend {3} seconds\".format(fail,\r\n locator, text,time.time() - t1))\r\n raise", "def Clear_input(self):\r\n self.root.ids.place_name.text = '' # Clear input\r\n self.root.ids.place_country.text = ''\r\n self.root.ids.place_priority.text = ''", "def clear(self):\n logging.getLogger(__name__).info(\"Element input field cleared\\nby = {}\\nvalue = {}\".format(self.by, self.value))\n self.driver.find_element(self.by, self.value).clear()", "def aisappium_input_password(self, locator, text, oAppiumInfo=None):\n self._info(\"Typing password into text field '%s'\" % locator)\n if oAppiumInfo is not None:\n self._element_input_text_by_locator_atlas(locator, text, oAppiumInfo.driver)\n else:\n self._element_input_text_by_locator(locator, text)", "def clearText(self, element_tuple):\n self.log_info(f\"Browser.clearText: Clearing the text of {element_tuple}\")\n self.CORE.find_element(*self.format_element(element_tuple)).clear()\n return", "def fill_input_field(self, by, locator, value=\"\"):\n field = self.wait_until_visible(locator_type=by, locator=locator)\n field.clear()\n field.send_keys(value)", "def fill(self, xpath, text, send=False):\n element = self.driver.find_element_by_xpath(xpath)\n element.clear()\n element.send_keys(text + '\\n' if send else text)", "def clear_field():\n try:\n focused_element = driver.switch_to.active_element\n focused_element.clear()\n wait()\n except Exception as e:\n return \"Error: \" + str(e)\n return \"Success\"", "def aisappium_click_element(self, locator, oAppiumInfo=None):\n self._info(\"Clicking mobile element '%s'.\" % locator)\n if oAppiumInfo is not None:\n self._element_find_atlas(locator, True, True, oAppiumInfo.driver).click()\n else:\n self._element_find(locator, True, True).click()", "def clearEntry(*args, **kwargs):\n\targs[0].set_text('')", "def input(self, text):\n self.element.clear()\n self.element.send_keys(text)", "def clear_fields(self):\n self.root.ids.Title.text = \"\"\n self.root.ids.Artist.text = \"\"\n self.root.ids.Year.text = \"\"", "def on_clear_rcr_txt_btn_clicked(self):\n self.rcr_info_textEdit.clear()", "def clear_edit(self, value=None):\n self.my_text.edit_reset()", "def clear(self):\n for inp in self.inputlst:\n inp.setText(\"\")", "def clear_enter(self, selector, text):\n self._wait_element_localed(self.driver, selector)\n element = self.get_element(selector)\n element.clear()\n element.click()\n element.send_keys(text)", "def clear_txtbox(txtbox: WebElement):\n txtbox.click()\n txtbox.clear()\n return txtbox", "def wait_and_fill_input_field(self, locator_type, locator, text=''):\n field = self.wait.until(EC.visibility_of_element_located((locator_type, locator)))\n 
field.clear()\n field.send_keys(text)", "def clearTextElement(self, elementId):\n cmdId = self.executeCommand(Command.CLEAR_ELEMENT, {'id': elementId})\n return cmdId", "def test_click_clearsearch(self):\n self.tc_id = \"Ts_012\"\n self.tc_desc = \"Verify user is able to register into the application with existing email id\"\n self.tc_step = \"TC Start\"\n\n clearSearch = Clearsearch(self.driver)\n\n self.tc_step = \"Launch the url\"\n clearSearch.launchUrl(\"https://massdtaiot.com/dtahip/\")\n\n self.tc_step = \"Check the clear search button\"\n clearSearch.clickClearsearch()", "def clear_inputs(self):\n self.root.ids.input_title.text, self.root.ids.input_artist.text, self.root.ids.input_year.text = (\"\", \"\", \"\")", "def _force_clear(self, element):\n value = element.get_attribute(\"value\")\n actions = ActionChains(self.selenium.driver)\n actions.move_to_element(element).click().send_keys(Keys.END)\n for character in value:\n actions.send_keys(Keys.BACKSPACE)\n actions.perform()", "def for_description_enter_lagg_for_functional_testing_without_quotes(driver, description):\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Description\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Description\"]').send_keys(description)", "def clear(self) -> None:\n logging.info(f\"Clear input field. {self.desc}\")\n js = f\"\"\"var elm = document.querySelectorAll(\"{self.css}\")[{self.index}];\n elm.style.border=\"2px solid red\";\n elm.value = \"\";\"\"\"\n self._execute_javascript(js)", "def clear_text_by_resource_id(self,param,ignore_error_handle = False):\n message = {};\n step = 'clear text of element which resource id is \\'' + str(param.get('resourceid',None));\n resource_id = str(param.get('resourceid',None));\n try:\n # element = self.driver.find_element_by_id(resource_id);\n # element.clear();\n self.driver.clear_text(resource_id);\n message = self.feedback.feedback_action_ok(step);\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def clearField(self):\n self.field.setText(\"\")", "def _clear(self, element):\n\n element.clear()\n self.selenium.driver.execute_script(\"arguments[0].value = '';\", element)\n\n # Select all and delete just in case the element didn't get cleared\n element.send_keys(Keys.HOME + Keys.SHIFT + Keys.END)\n element.send_keys(Keys.BACKSPACE)\n\n if element.get_attribute(\"value\"):\n # Give the UI a chance to settle down. The sleep appears\n # necessary. Without it, this keyword sometimes fails to work\n # properly. With it, I was able to run 700+ tests without a single\n # failure.\n time.sleep(0.25)\n\n # Even after all that, some elements refuse to be cleared out.\n # I'm looking at you, currency fields on Firefox.\n if element.get_attribute(\"value\"):\n self._force_clear(element)", "def actionClear(self):\n self.setText(\"\")" ]
[ "0.63335514", "0.5878214", "0.58614963", "0.5519412", "0.5471537", "0.5400341", "0.5341203", "0.52341026", "0.51790804", "0.5178161", "0.5120839", "0.51207596", "0.5104932", "0.50769293", "0.50294954", "0.50251377", "0.5020689", "0.5019543", "0.5019147", "0.49580956", "0.49523422", "0.4950369", "0.4949766", "0.49003997", "0.48946062", "0.48740414", "0.48669901", "0.4843551", "0.48422322", "0.4837495" ]
0.80684006
0
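A minimal sketch of the clear-text behaviour, reusing the assumed `driver` session from the earlier sketch; at the WebDriver level this is just `clear()` on the located field (the xpath is again a placeholder).

```python
# Equivalent of the keyword's happy path: locate the field, then empty it.
text_field = driver.find_element_by_xpath('//*[@id="id123"]')
text_field.clear()
```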
Types the given `text` into text field identified by `locator`. |locator=xpath=//*[@id="id123"]|oAppiumInfo=${AppInfo} See `introduction` for details about locating elements.
def aisappium_input_text(self, locator, text, oAppiumInfo=None):
    self._info("Typing text '%s' into text field '%s'" % (text, locator))
    if oAppiumInfo is not None:
        self._element_input_text_by_locator_atlas(locator, text, oAppiumInfo.driver)
    else:
        self._element_input_text_by_locator(locator, text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aisappium_input_password(self, locator, text, oAppiumInfo=None):\n self._info(\"Typing password into text field '%s'\" % locator)\n if oAppiumInfo is not None:\n self._element_input_text_by_locator_atlas(locator, text, oAppiumInfo.driver)\n else:\n self._element_input_text_by_locator(locator, text)", "def wait_for_text(self, locator_type, locator, text):\n self.wait.until(EC.text_to_be_present_in_element((locator_type, locator), text))", "def aisappium_clear_text(self, locator, oAppiumInfo=None):\n self._info(\"Clear text field '%s'\" % locator)\n if oAppiumInfo is not None:\n self._element_clear_text_by_locator_atlas(locator, oAppiumInfo.driver)\n else:\n self._element_clear_text_by_locator(locator)", "def getText(self,locator=\"\",locatorType=\"id\", element=None, info=\"\"):\n try:\n if locator:\n self.logger.debug(\"In Locator condition\")\n element=self.getElement(locator,locatorType)\n\n self.logger.debug(\"Before finding Text\")\n text=element.text\n self.logger.debug(\"After finding element, size is\"+str(len(text)))\n if len(text) == 0:\n text = element.get_attribute(\"innerText\")\n if len(text)!=0:\n self.log.info(\"Getting text on element ::\"+ info)\n self.log.info(\"The text is ::'\"+text+\"'\")\n text=text.strip()\n\n except:\n self.log.error(\"Failed to get text on element\"+info)\n print_stack()\n text=None\n return text", "def enter_text_by_xpath(self, xpath, some_text):\n try:\n print(f\"xpath provided: {xpath}\")\n element = self.wwait.until(EC.element_to_be_clickable((By.XPATH, xpath)))\n\n utils.LOG.info(f\"entering the following text: {some_text}\")\n element.send_keys(some_text)\n except NoSuchElementException as err:\n utils.LOG.warning(f\"Entering Text failed by following xpath: {xpath}\")\n utils.LOG.error(err)\n self.take_screenshot('ErrorEnterText_')", "def get_locator(locator_text: str, locator_type: str = \"id\") -> tuple:\n locator = locator_type.upper()\n return getattr(By, locator), locator_text", "def input_text(self, element: Union[WebElement, Tuple[By, str]], text: str):\n element = self.find_element(element)\n element.send_keys(text)", "def get_text(self, locator=\"\", locator_type=\"id\", element: WebElement = None, info=\"\"):\n try:\n if locator: # This means if locator is not empty\n self.log.debug(\"In locator condition\")\n element = self.get_element_(locator, locator_type)\n self.log.debug(\"Before finding text\")\n text = element.text\n self.log.debug(\"After finding element, size is: \" + str(len(text)))\n if len(text) == 0:\n text = element.get_attribute(\"innerText\")\n if len(text) != 0:\n self.log.info(\"Getting text on element :: \" + info)\n self.log.info(\"The text is :: '\" + text + \"'\")\n text = text.strip()\n except:\n self.log.error(\"Failed to get text on element \" + info)\n print_stack()\n text = None\n return text", "def input_text(self, text):\n self.android_device_driver.adb.exec_adb_cmd(\"shell input text \" +\n text).wait()", "def doTypeText(self, text, timeout=10.0, name=None, tagName=None, className=None,\n id=None, xpath=None, linkText=None, partialLinkText=None, cssSelector=None,\n location=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n if text is None: \n raise TestAdapterLib.ValueException(TestAdapterLib.caller(), \"text argument cannot be equal to none\" )\n\n ret = True\n \n if not self.cfg['wait-until']:\n cmdId = self.implicitlyWait(timeout=timeout)\n if self.isWait(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret\n \n cmdId = 
self.findElement(elementId=None, name=name, tagName=tagName, className=className,\n id=id, xpath=xpath, linkText=linkText, partialLinkText=partialLinkText, \n cssSelector=cssSelector, location=location)\n rsp = self.hasElement(timeout=timeout, commandId=cmdId) \n if rsp is None: ret = False\n else:\n elementVall = rsp.get('GUI', 'value')\n elementId = elementVall.get('element-id')\n\n cmdId = self.typeTextElement(elementId=elementId, text=str(text) )\n if self.hasTextEntered(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret", "def input_text(self,loc,value,img_name):\r\n self.wait_ele_visible(loc,img_name)\r\n ele = self.get_element(loc,img_nameue)\r\n try:\r\n ele.send_keys(value)\r\n except:\r\n self.getScreenShot(img_name)\r\n logging.exception(\"********input text fail********\")\r\n raise", "def aisappium_click_element(self, locator, oAppiumInfo=None):\n self._info(\"Clicking mobile element '%s'.\" % locator)\n if oAppiumInfo is not None:\n self._element_find_atlas(locator, True, True, oAppiumInfo.driver).click()\n else:\n self._element_find(locator, True, True).click()", "def get_text(self,locator):\r\n t1 = time.time()\r\n try:\r\n element = self.find_element(locator)\r\n text = element.text\r\n self.my_print(\r\n \"{0} Get element text element: <{1}>, Spend {2} seconds\".format(success, locator, time.time() - t1))\r\n return text\r\n except Exception:\r\n self.my_print(\r\n \"{0} Unable to get element text element: <{1}>, Spend {2} seconds\".format(fail, locator, time.time() - t1))\r\n raise", "def find_element_by_text(self, text, wait_time=5):\n return self.wait_for_element_by_xpath(\"//*[contains(text(), '%s')]\" % text, wait_time)", "def enter_text(self, text):\n self.q(css='#fixture input').fill(text)", "def info_text(self, info_text):\n\n self._info_text = info_text", "def info_text(self, info_text):\n\n self._info_text = info_text", "def type_text(self, element, text):\n try:\n if element.is_displayed():\n element.clear()\n element.send_keys(text)\n print(text + \" is added to textbox\")\n else:\n print(element + \" is not displaying\")\n except Exception as e:\n print(str(e))", "def generic_input_text(self, element_id, text):\n self._generic_input(element_id, text)", "def wait_and_fill_input_field(self, locator_type, locator, text=''):\n field = self.wait.until(EC.visibility_of_element_located((locator_type, locator)))\n field.clear()\n field.send_keys(text)", "def enter(self, selector, text):\n try:\n self._wait_element_localed(self.driver, selector)\n element = self.get_element(selector)\n element.send_keys(text)\n except:\n raise NoSuchElementException(\"no such element found\")", "def set_text_by_element(self,param={},ignore_error_handle = False):\n message = {}\n step = 'set text with element'\n element = param.get('element',None);\n text = str(param.get('text',None));\n try:\n element.send_keys(text);\n message = self.feedback.feedback_action_ok(step)\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def verify_text(self, expected_text: str, *locator):\n e = self.driver.find_element(*locator)\n actual_text = e.text\n assert expected_text == actual_text, f\"Expected {expected_text} does not match actual {actual_text}\"", "def place_holder(self, xpath, loop_index, text_from_xml, text_index, ids, eng_list):\r\n try:\r\n element_text = self.object.element_get_property(self.util.client,\r\n xpath[loop_index]['zone'],\r\n xpath[loop_index]['xpath'],\r\n 
xpath[loop_index]['index'],\r\n \"placeholder\", self.logger_name)\r\n if element_text:\r\n self.logger.info(\"Testing StringID == \" + str(ids[text_index]))\r\n self.logger.info(\"English Text == \" + eng_list[text_index])\r\n self.util.text_compare2(self.common, text_from_xml[text_index], element_text, ids[text_index],\r\n self.logger_name)\r\n except:\r\n print\" Value not found\"", "def is_text_exists(self, locator_type, locator, text):\n try:\n self.wait_for_text(locator_type, locator, text)\n return True\n except TimeoutException:\n return False", "def output_info(text):\n if conf.eval_output:\n info_dict = {'type':'info', 'text' : text}\n output_message_eval(info_dict)\n else:\n output_message('[INFO] ' + text)", "def verify_text(self, expected_text: str, *locator):\n actual_text = self.driver.find_element(*locator).text\n assert actual_text == expected_text, f'Error. Expected {expected_text} does not match actual {actual_text}'", "def setText(self, element_tuple, text):\n self.log_info(f\"Browser.setText: Setting text of {element_tuple} to {text}\")\n\n self.disable_logging()\n self.clearText(element_tuple)\n self.revert_logging()\n\n self.CORE.find_element(*self.format_element(element_tuple)).send_keys(text)\n return", "def text(self, text, enter=True):\n self.ime.text(text)\n\n if enter:\n self.adb.shell_command(\"input keyevent ENTER\")", "def InputWithSelector(action_runner, input_text, input_selector):\n action_runner.WaitForElement(selector=input_selector)\n action_runner.Wait(0.5)\n # Focus the requested element first and then enter text using single\n # Keyboard events to bypass certain restrictions on websites.\n action_runner.ExecuteJavaScript(\n 'document.querySelector({{selector}}).focus()', selector=input_selector)\n # Wait a bit to make sure the focus is properly set, otherwise we'll end up\n # losing some characters.\n action_runner.Wait(1)\n action_runner.EnterText(input_text)" ]
[ "0.64829", "0.614725", "0.58976835", "0.5713828", "0.5712613", "0.5626021", "0.5529641", "0.5498303", "0.5493129", "0.53856707", "0.5320436", "0.5284056", "0.5233346", "0.5213037", "0.5164269", "0.51591915", "0.51591915", "0.5155974", "0.509612", "0.5091673", "0.50354683", "0.5020918", "0.5012406", "0.5000951", "0.49766868", "0.4943884", "0.49346775", "0.49144095", "0.48982614", "0.48962998" ]
0.7982116
0
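Likewise, the input-text keyword reduces to `send_keys` on the located element; a short sketch with the same assumed `driver` and a placeholder value.

```python
# Type the given text into the field identified by the locator.
driver.find_element_by_xpath('//*[@id="id123"]').send_keys('hello world')
```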
Types the given password into text field identified by `locator`. |locator=xpath=//*[@id="id123"]|oAppiumInfo=${AppInfo} Difference between this keyword and `Input Text` is that this keyword does not log the given password. See `introduction` for details about locating elements.
def aisappium_input_password(self, locator, text, oAppiumInfo=None):
    self._info("Typing password into text field '%s'" % locator)
    if oAppiumInfo is not None:
        self._element_input_text_by_locator_atlas(locator, text, oAppiumInfo.driver)
    else:
        self._element_input_text_by_locator(locator, text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aisappium_input_text(self, locator, text, oAppiumInfo=None):\n self._info(\"Typing text '%s' into text field '%s'\" % (text, locator))\n if oAppiumInfo is not None:\n self._element_input_text_by_locator_atlas(locator, text, oAppiumInfo.driver)\n else:\n self._element_input_text_by_locator(locator, text)", "def get_password(self,text):\r\n return self.driver.find_element(*SinginPage.password).send_keys(text)", "def input_user_pass(self, user_pass):\n self.locate_element_by_css_selector(PASSWORD_SELECTOR).send_keys(user_pass)", "def generic_input_password(self, element_id, password):\n self._generic_input(element_id, password, False)", "def log_in_password(self):\n password_elem = waiter.find_element(self.driver, 'password', by=NAME)\n return password_elem.get_attribute('value')", "def aisappium_clear_text(self, locator, oAppiumInfo=None):\n self._info(\"Clear text field '%s'\" % locator)\n if oAppiumInfo is not None:\n self._element_clear_text_by_locator_atlas(locator, oAppiumInfo.driver)\n else:\n self._element_clear_text_by_locator(locator)", "def log_in_password(self, password):\n waiter.find_write(self.driver, 'password', password, by=NAME)", "def set_Password(self, value):\n super(DownloadDocumentInputSet, self)._set_input('Password', value)", "def enter_password(self):", "def input_the_account_name_ad_user_the_password_ap_password(driver, ad_user, ad_password):\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Domain Account Name\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Domain Account Name\"]').send_keys(ad_user)\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Domain Account Password\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Domain Account Password\"]').send_keys(ad_password)", "def ask_password(ip):\n password_question = [\n {\n 'type': 'password',\n 'message': 'Enter the password of the machine with the IP address ' + ip + ':',\n 'name': 'password'\n }\n ]\n\n password_answer = prompt(password_question, style=style)\n pwd = password_answer[\"password\"]\n return pwd", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def password(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"password\")", "def create_pwd_login_internal(password, re_enter_password):\r\n if g.platform == 'android':\r\n 'Enter the password in Create password text box'\r\n flag1 = ui_controls.text_box(get_obj_identifier('login_createPassword_txt'), value=password)\r\n\r\n 'Checks for the keyboard presence and hides it'\r\n flag2 = ui_controls.hide_keyboard()\r\n\r\n 'Enter the confirm 
password in Confirm Password text box'\r\n flag3 = ui_controls.text_box(get_obj_identifier('login_confirmPassword_txt'), value=re_enter_password)\r\n\r\n 'Checks for the keyboard presence and hides it'\r\n flag4 = ui_controls.hide_keyboard()\r\n\r\n 'Enter password hint in Password Hint text box'\r\n #flag5 = ui_controls.text_box(get_obj_identifier('login_passwordHint_txt'), value=hint)\r\n\r\n 'Checks for the keyboard presence and hides it'\r\n #flag6 = ui_controls.hide_keyboard()\r\n\r\n 'Click on create password login button'\r\n flag7 = ui_controls.button(get_obj_identifier('login_createPasswordLogin_btn'))\r\n\r\n status = False if not (flag1 and flag2 and flag3 and flag4 and flag7) else True\r\n else:\r\n 'Setting values on Create Password text box in ios. Using set value as send_keys failing here'\r\n flag1 = ui_controls.setValue(get_obj_identifier('login_createPassword_txt'), password)\r\n sleep(3)\r\n\r\n 'Setting values on Confirm Password text box in ios.'\r\n flag2 = ui_controls.setValue(get_obj_identifier('login_confirmPassword_txt'), value=re_enter_password)\r\n\r\n 'Setting values on Password Hint text box in ios.'\r\n # flag3 = ui_controls.setValue(get_obj_identifier('login_passwordHint_txt'), hint)\r\n sleep(3)\r\n\r\n 'Click on Create Password button in IOS'\r\n flag4 = ui_controls.button(get_obj_identifier('login_createPasswordLogin_btn'))\r\n\r\n status = False if not (flag1 and flag2 and flag4) else True\r\n return status", "def enter_password(self):\n self.password.clear()\n self.password.click()\n self.password.send_keys(TestData.PASSWORD)\n sleep(TestData.DELAY)\n return self.password", "def OnTextCtrlPasswordText(self, event):\r\n\t\tself._password = event.GetString()", "def aisappium_click_element(self, locator, oAppiumInfo=None):\n self._info(\"Clicking mobile element '%s'.\" % locator)\n if oAppiumInfo is not None:\n self._element_find_atlas(locator, True, True, oAppiumInfo.driver).click()\n else:\n self._element_find(locator, True, True).click()", "def test_password_field(self):\n field = self.record.find('field[@name=\\'password\\']')\n self.assertEqual(field.text, 'adt', 'Incorrect password Field')", "def omm_forgot_password_login(password=g.password):\r\n msg, status = \"\", True\r\n try:\r\n sleep(5)\r\n 'Enters value in password text box based on the user inputs'\r\n flag1 = ui_controls.text_box(get_obj_identifier('login_forgotPwd_enterPassword_txt'), value=password)\r\n\r\n 'Clicks on the login button'\r\n flag2 = ui_controls.button(get_obj_identifier('login_forgotPasswordLogin_btn'))\r\n status = False if not (flag1 and flag2) else True\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg" ]
[ "0.68370265", "0.62763005", "0.59110856", "0.5762719", "0.5711707", "0.56723815", "0.5642231", "0.55876017", "0.54255587", "0.53628826", "0.53597707", "0.5320671", "0.5320671", "0.5320671", "0.5320671", "0.5320671", "0.5320671", "0.5320671", "0.5320671", "0.5320671", "0.5320671", "0.5320671", "0.5320671", "0.5320671", "0.5305522", "0.53042156", "0.52364844", "0.5235985", "0.5133217", "0.51154155" ]
0.84931135
0
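At the WebDriver level the password keyword makes the same `send_keys` call as Input Text; only the logging around it differs. A small illustrative wrapper (the function name and arguments are assumptions, not part of the library):

```python
def input_password(driver, xpath, password):
    # Identical driver call to Input Text; the point is that the secret value
    # is never written to the keyword log by the calling library.
    driver.find_element_by_xpath(xpath).send_keys(password)
```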
Hides the software keyboard on the device. (optional) In iOS, use `key_name` to press a particular key, ex. `Done`. In Android, no parameters are used.
def aisappium_hide_keyboard(self, oAppiumInfo=None, key_name=None):
    if oAppiumInfo is not None:
        driver = oAppiumInfo.driver
    else:
        driver = self._current_application()
    driver.hide_keyboard(key_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hide_keyboard(\n self, key_name: Optional[str] = None, key: Optional[str] = None, strategy: Optional[str] = None\n ) -> 'WebDriver':\n ext_name = 'mobile: hideKeyboard'\n try:\n self.assert_extension_exists(ext_name).execute_script(\n ext_name, {**({'keys': [key or key_name]} if key or key_name else {})}\n )\n except UnknownMethodException:\n # TODO: Remove the fallback\n data: Dict[str, Optional[str]] = {}\n if key_name is not None:\n data['keyName'] = key_name\n elif key is not None:\n data['key'] = key\n elif strategy is None:\n strategy = 'tapOutside'\n data['strategy'] = strategy\n self.mark_extension_absence(ext_name).execute(Command.HIDE_KEYBOARD, data)\n return cast('WebDriver', self)", "def hide(self):\n self._dev.hide()", "def hide(self):\n self.visible = False", "def hide(self):\n self.set_visible(False)", "def hide( self, event=None ):\n self.visible = 0\n self.withdraw()", "def hide( self, event=None ):\n self.visible = 0\n self.withdraw()", "def HideMe(self, event):\n self.Hide()", "def hide(self):\n\n self.stop_repl()\n self.visible = False\n self.withdraw() # Hide window", "def hide(self, event=None):\n self.visible = 0\n self.withdraw()", "def hide(self, event=None):\r\n self.visible = 0\r\n self.withdraw()", "def hide(self, event=None):\r\n self.visible = 0\r\n self.withdraw()", "def hide(self):\n self.window.run_command(\"hide_panel\", {\"panel\": self.full_name})", "def XPLoseKeyboardFocus(inWidget):\n pass", "def deactivate_license(key_name=None):\n\n _required_version = \"7.0.0\"\n if not __proxy__[\"panos.is_required_version\"](_required_version):\n return (\n False,\n \"The panos device requires version {} or greater for this command.\".format(\n _required_version\n ),\n )\n\n if not key_name:\n return False, \"You must specify a key_name.\"\n else:\n query = {\n \"type\": \"op\",\n \"cmd\": (\n \"<request><license><deactivate><key><features><member>{}</member></features>\"\n \"</key></deactivate></license></request>\".format(key_name)\n ),\n }\n\n return __proxy__[\"panos.call\"](query)", "def hide(self):\r\n if self.visible:\r\n nid = (self.hwnd, 0)\r\n Shell_NotifyIcon(NIM_DELETE, nid)\r\n self.visible = 0", "def hidepassword(cls):\n cls.right_passentry[\"show\"] = \"*\"\n cls.message_label['fg'] = \"green\"\n cls.message_label['text'] = \"Password hidded!\"\n cls.message_label.after(2000, God.messagecleanner)", "def hide(self):\r\n\t\tself.frame.Show(False)", "def hide_editor(self):\r\n self.frame.Hide()", "def hide(self):\n self.hide_token = True\n return self", "def unHide(self):\n self.visible = True", "def hide(self):\n self.root.withdraw()", "def keyboard_clear_focus(self) -> None:\n lib.wlr_seat_keyboard_clear_focus(self._ptr)", "def msg_hide(self,msg):\r\n self.frame.Hide()", "def off(key):\n # print(\"{0} released\".format(key), time.perf_counter())\n\n global keys, esc_count\n\n # caps, shift, etc. 
aren't automatically registered as strings\n if type(key) == Key:\n keys[esc_count].append((str(key), time.perf_counter(), \"released\"))\n else:\n keys[esc_count].append((key, time.perf_counter(), \"released\"))", "def hide (self):\n \n self.top.withdraw()", "def hide_gui():\n pass", "def hide(self):\n self.root.iconify() # self.root.withdraw()", "def Hide(self):\r\n \r\n return self.SetFlag(self.optionHidden, True)", "def hide_code(hidden=True):\n html(\"\"\"\\\n <script>\n code_show=%s;\n function code_toggle() {\n if (code_show){\n $('div.input').hide();\n } else {\n $('div.input').show();\n }\n code_show = !code_show\n }\n $(document).ready(code_toggle);\n </script>\n <form action=\"javascript:code_toggle()\">\n <input type=\"submit\" value=\"Toggle Code Cells\">\n </form>\"\"\" % (\"true\" if hidden else \"false\"))", "def toggle_pause(self) -> None:\n self.keyboard.press(Key.space)\n self.keyboard.release(Key.space)" ]
[ "0.77006596", "0.6021652", "0.54935855", "0.54915965", "0.5446831", "0.5446831", "0.54330564", "0.54146785", "0.5396251", "0.5385671", "0.5385671", "0.5351623", "0.5299294", "0.5295987", "0.5239104", "0.51872176", "0.5186777", "0.51774293", "0.51689065", "0.51521283", "0.51517546", "0.5146234", "0.5124672", "0.5121809", "0.5096068", "0.509147", "0.5064733", "0.5041957", "0.5035768", "0.5035559" ]
0.7916305
0
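The hide-keyboard keyword delegates to the Appium client's `hide_keyboard`; a sketch of both branches with the same assumed `driver` session.

```python
# Android: no arguments are needed to dismiss the keyboard.
driver.hide_keyboard()

# iOS: press a particular key, e.g. "Done", to dismiss it.
driver.hide_keyboard(key_name='Done')
```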
Verifies that element identified with locator is disabled. Key attributes for arbitrary elements are `id` and `name`. See `introduction` for details about locating elements.
def aisappium_element_should_be_disabled(self, locator, loglevel='INFO', oAppiumInfo=None):
    if oAppiumInfo is not None:
        element = self._element_find_atlas(locator, True, True, oAppiumInfo.driver)
    else:
        element = self._element_find(locator, True, True)
    if element.is_enabled():
        self.log_source(loglevel)
        raise AssertionError("Element '%s' should be disabled "
                             "but did not" % locator)
    self._info("Element '%s' is disabled ." % locator)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def field_is_not_read_only_xpath(driver, locator):\n elem = driver.find_element_by_xpath(locator)\n is_disabled = elem.get_attribute(\"disabled\")\n if is_disabled == 'true':\n log_to_file('Expected Read Only field to be enabled, but was still disabled', 'WARNING')\n return False\n else:\n print \"Read Only field enabled = true\"\n return True", "def field_is_not_read_only_css(driver, locator):\n elem = driver.find_element_by_css_selector(locator)\n is_disabled = elem.get_attribute(\"disabled\")\n if is_disabled == 'true':\n log_to_file('Expected Read Only field to be enabled, but was still disabled', 'WARNING')\n return False\n else:\n print \"Read Only field enabled = true\"\n return True", "def aisappium_element_should_be_enabled(self, locator, loglevel='INFO', oAppiumInfo=None):\n if oAppiumInfo is not None:\n element = self._element_find_atlas(locator, True, True, oAppiumInfo.driver)\n else:\n element = self._element_find(locator, True, True)\n if not element.is_enabled():\n self.log_source(loglevel)\n raise AssertionError(\"Element '%s' should be enabled \"\n \"but did not\" % locator)\n self._info(\"Element '%s' is enabled .\" % locator)", "def field_is_read_only_xpath(driver, locator):\n elem = driver.find_element_by_xpath(locator)\n is_disabled = elem.get_attribute(\"disabled\")\n if is_disabled == 'true':\n print \"Read Only field = true\"\n return True\n else:\n log_to_file('Expected Read Only field to be disabled, but was still enabled', 'WARNING')\n return False", "def wait_for_disabled(self, timeout=None):\n wait_until(lambda: not self.is_enabled(),\n \"Element '%s' was not disabled in <TIMEOUT>.\" % self._locator,\n timeout)", "def doWaitNotVisibleElement(self, timeout=10.0, name=None, tagName=None, className=None,\n id=None, xpath=None, linkText=None, partialLinkText=None, cssSelector=None,\n location=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n ret = True\n more = {}\n if self.cfg['wait-until']:\n more = {\"wait-until\": True, \"wait-until-timeout\": timeout}\n\n if not self.cfg['wait-until']:\n cmdId = self.implicitlyWait(timeout=timeout)\n if self.isWait(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret\n\n cmdId = self.findElement(elementId=None, name=name, tagName=tagName, className=className,\n id=id, xpath=xpath, linkText=linkText, partialLinkText=partialLinkText, cssSelector=cssSelector, \n location=location, more=more)\n rsp = self.hasElement(timeout=timeout+10, commandId=cmdId) \n if rsp is None: \n ret = False \n return ret\n elementVall = rsp.get('GUI', 'value')\n elementId = elementVall.get('element-id')\n\n more = {}\n if self.cfg['wait-until']:\n more = {\"wait-until\": True, \"wait-until-timeout\": timeout, \"wait-until-value\": False}\n\n cmdId = self.displayedElement(elementId=elementId, more= more)\n rsp = self.isElementDisplayed(timeout=timeout+10, commandId=cmdId) \n if rsp is None: \n ret = False \n return ret", "def test_focus_not_on_disabled(self):\n target = 'disable_field'\n field = self.form.fields.get(target, None)\n result_name = self.form.assign_focus_field(target)\n focused = self.find_focus_field()\n\n self.assertTrue(field.disabled)\n self.assertIn(target, self.form.fields)\n self.assertEqual(1, len(focused))\n self.assertNotEqual(target, focused[0])\n self.assertNotEqual(target, result_name)", "def verify_radio_dropdown_element_is_disabled(driver, locator, value, module, test, pass_message, fail_message):\n wait_for_element_XPATH(driver, locator)\n 
driver.find_element_by_xpath(locator).click()\n elem = driver.find_element_by_xpath(value)\n is_disabled = elem.get_attribute(\"disabled\")\n try:\n assert is_disabled == 'true'\n except AssertionError:\n log_to_file(''+module+':'+test+':'+fail_message+'', 'FAILED')\n print 'ERROR - ASSERTION EXCEPTION - ' + fail_message\n email_module.error_mail(module, test, fail_message, 'AssertionError')\n else:\n log_to_file(''+module+' Module:'+test+':'+pass_message+'', 'PASSED')\n print pass_message", "def wait_for_not_displayed_xpath(self, xpath):\n\n wait = ui.WebDriverWait(self.driver, self.wait_timeout)\n try:\n wait.until_not(lambda bool: self.driver.find_element_by_css_selector(xpath))\n return True\n except TimeoutException:\n return False", "def valid_att_in_label(arch, **kwargs):\n return not arch.xpath('//label[not(@for) and not(descendant::input)]')", "def test_textNotOperator(self):\n xp = XPathQuery(\"/foo[not(@nosuchattrib)]\")\n self.assertEqual(xp.matches(self.e), True)", "def wait_for_element_not_present(self, locator):\r\n for i in range(timeout_seconds):\r\n if self.driver.is_element_present(locator):\r\n time.sleep(1)\r\n else:\r\n break\r\n else:\r\n raise ElementVisiblityTimeout(\"%s presence timed out\" % locator)\r\n return True", "def field_is_not_hidden_xpath(driver, locator):\n elem = driver.find_element_by_xpath(locator)\n is_hidden = elem.get_attribute(\"style\")\n if is_hidden == 'display: none;':\n log_to_file('Hidden field displayed test failed', 'WARNING')\n return False\n else:\n print \"Hidden field displayed = true\"\n return True", "def disabled(name):\n return not enabled(name)", "def verify_element_not_present_XPATH(driver, locator, module, test, pass_message, fail_message):\n try:\n driver.find_element_by_xpath(locator)\n except NoSuchElementException:\n log_to_file(''+module+' Module:'+test+':'+pass_message+'', 'PASSED')\n print pass_message\n else:\n log_to_file(''+module+':'+test+':'+fail_message+'', 'FAILED')\n print 'ERROR WARNING - ' + fail_message\n email_module.error_mail(module, test, fail_message, 'NoSuchElementException')", "def field_is_read_only_css(driver, locator):\n elem = driver.find_element_by_css_selector(locator)\n is_disabled = elem.get_attribute(\"disabled\")\n if is_disabled == 'true':\n print \"Read Only field = true\"\n return True\n else:\n log_to_file('Expected Read Only field to be disabled, but was still enabled', 'WARNING')\n return False", "def test_error_nodes_disabled(self):\n with Nodes()as n:\n for node in n.nodes_error:\n self.assertFalse(\n node.checkbox.find_element_by_tag_name('input').\n is_enabled(),\n 'Node disabled')", "def is_not_present(self):\n logging.getLogger(__name__).info(\"Element is not present\\nby = {}\\nvalue = {}\".format(self.by, self.value))\n return not(self.driver.find_element(self.by, self.value))", "def no_see_tooltip(step, tooltip):\r\n elem = world.browser.find_elements_by_xpath(str(\r\n '//*[@title=\"%(tooltip)s\" or @data-original-title=\"%(tooltip)s\"]' %\r\n dict(tooltip=tooltip)))\r\n elem = [e for e in elem if e.is_displayed()]\r\n assert_true(step, not elem)", "def test_attribute_not_equal_no_quotes(self):\n\n # No quotes\n self.assert_selector(\n self.MARKUP,\n 'body [id!=\\\\35]',\n [\"div\", \"0\", \"1\", \"2\", \"3\", \"pre\", \"4\", \"6\"],\n flags=util.HTML5\n )", "def disabled(self):\n check1 = \"pf-m-disabled\" in self.browser.classes(self)\n check2 = \"pf-m-aria-disabled\" in self.browser.classes(self)\n return check1 or check2 or self.browser.get_attribute(\"disabled\", self) is 
not None", "def verify_element_not_present_CSS(driver, locator, module, test, pass_message, fail_message):\n try:\n driver.find_element_by_css_selector(locator)\n except NoSuchElementException:\n log_to_file(''+module+' Module:'+test+':'+pass_message+'', 'PASSED')\n print pass_message\n else:\n log_to_file(''+module+':'+test+':'+fail_message+'', 'FAILED')\n print 'ERROR WARNING - ' + fail_message\n email_module.error_mail(module, test, fail_message, 'NoSuchElementException')", "def test_default_hidden_not_in_attributes(self):\n self.assertNotIn(\n ATTR_HIDDEN,\n self.hass.states.get(self.entity.entity_id).attributes)", "def check_disabled(self):\n return None", "def valid_att_in_field(arch, **kwargs):\n return not arch.xpath('//field[not(@name)]')", "def checkbox_should_not_be_selected(self, locator):\n self._info(\"Verifying checkbox '%s' is not selected.\" % locator)\n if self._selenium.is_checked(self._parse_locator(locator)):\n raise AssertionError(\"Checkbox '%s' should not have been selected\"\n % locator)", "def element_not_focused(step, id):\r\n\r\n elem = world.browser.find_element_by_xpath(str('id(\"{id}\")'.format(id=id)))\r\n focused = world.browser.switch_to_active_element()\r\n\r\n assert_false(step, elem == focused)", "def test_offline_nodes_disabled(self):\n with Nodes()as n:\n for node in n.nodes_offline:\n self.assertTrue(\n node.checkbox.find_element_by_tag_name('input').\n is_enabled(),\n 'Node disabled')", "def wait_for_invisible(self, locator, timeout=2):\n try:\n WebDriverWait(self.driver, timeout).until(\n ec.invisibility_of_element_located(locator)\n )\n except (NoSuchElementException, TimeoutException):\n return False\n return True", "def test_attribute_not_equal_quotes(self):\n\n # Quotes\n self.assert_selector(\n self.MARKUP,\n \"body [id!='5']\",\n [\"div\", \"0\", \"1\", \"2\", \"3\", \"pre\", \"4\", \"6\"],\n flags=util.HTML5\n )" ]
[ "0.67634267", "0.6432889", "0.6097619", "0.5995026", "0.5835357", "0.5770251", "0.57616735", "0.5740132", "0.573144", "0.5729595", "0.5673569", "0.5664641", "0.56613714", "0.5625864", "0.5620968", "0.5611174", "0.5512022", "0.5500273", "0.54792833", "0.5465939", "0.54287297", "0.54224885", "0.5410716", "0.5399103", "0.5382695", "0.53813547", "0.5367416", "0.53493804", "0.5330789", "0.53264314" ]
0.77044797
0
Verifies that element identified with locator is enabled. Key attributes for arbitrary elements are `id` and `name`. See `introduction` for details about locating elements.
def aisappium_element_should_be_enabled(self, locator, loglevel='INFO', oAppiumInfo=None):
    if oAppiumInfo is not None:
        element = self._element_find_atlas(locator, True, True, oAppiumInfo.driver)
    else:
        element = self._element_find(locator, True, True)
    if not element.is_enabled():
        self.log_source(loglevel)
        raise AssertionError("Element '%s' should be enabled "
                             "but did not" % locator)
    self._info("Element '%s' is enabled ." % locator)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aisappium_element_should_be_disabled(self, locator, loglevel='INFO', oAppiumInfo=None):\n if oAppiumInfo is not None:\n element = self._element_find_atlas(locator, True, True, oAppiumInfo.driver)\n else:\n element = self._element_find(locator, True, True)\n if element.is_enabled():\n self.log_source(loglevel)\n raise AssertionError(\"Element '%s' should be disabled \"\n \"but did not\" % locator)\n self._info(\"Element '%s' is disabled .\" % locator)", "def isElementPresent(self,locator=\"\",locatorType='id', element=None):\n\n\n\n\n try:\n if locator:\n element = self.getElement(locator, locatorType)\n\n if element is not None:\n self.logger.info(\"Element found with locator \"+locator+\" LocatorType \"+locatorType)\n return True\n\n else:\n self.logger.info(\"Element not found with locator \" + locator + \" LocatorType \" + locatorType)\n return False\n\n except:\n print(\"Element not found\")\n return False", "def is_element(\n driver: WebDriver, locator_text: str, locator_type: str = \"id\", wait_time: int = 2\n) -> bool:\n\n try:\n locator = get_locator(locator_text, locator_type)\n WebDriverWait(driver, wait_time).until(EC.presence_of_element_located(locator))\n return True\n except:\n return False", "def assert_true_element_by_name(self):\n self.assertTrue(self.is_element_present(By.NAME, self.locator))", "def field_is_mandatory_xpath(driver, locator):\n elem = driver.find_element_by_xpath(locator)\n is_mandatory = elem.get_attribute(\"aria-required\")\n if is_mandatory == 'true':\n print \"Mandatory field = true\"\n else:\n log_to_file('Mandatory field test failed', 'WARNING')", "def is_element_available(self, locator):\r\n if self.driver.is_element_present(locator):\r\n if self.driver.is_visible(locator):\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False", "def verify_element_present(self, locator, msg=None):\r\n try:\r\n self.asset_element_present(locator, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def assert_visible(self, locator, msg=None):\r\n e = driver.find_elements_by_locator(locator)\r\n if len(e) == 0:\r\n raise AssertionError(\"Element at %s was not found\" % locator)\r\n assert e.is_displayed()", "def _is_element_clickable(self, locator):\n return self.wait.until(lambda x: self.ec.element_to_be_clickable(self.get_element(locator)))", "def is_element_exist(self, locator):\r\n t1 = time.time()\r\n try:\r\n self.driver.find_element(locator)\r\n self.my_print(\"{0} Element: <{1}> is exist, Spend {2} seconds\".format(success,locator, time.time() - t1))\r\n return True\r\n except TimeoutException:\r\n self.my_print(\"{0} Element: <{1}> is not exist, Spend {2} seconds\".format(fail, locator, time.time() - t1))\r\n return False", "def asset_element_present(self, locator, msg=None):\r\n e = driver.find_elements_by_locator(locator)\r\n if len(e) == 0:\r\n raise AssertionError(\"Element at %s was not found\" % locator)", "def verify_element_appears(self, xpath):\n assert self.wait_until_element_find(locator_type=By.XPATH, locator=xpath)", "def exists(self, **kwargs):\n if 'timeout' in kwargs:\n self.set_implicit_wait(kwargs['timeout'])\n\n if 'driver' in kwargs:\n d = kwargs['driver']\n else:\n d = self.driver\n\n if kwargs.has_key('element'):\n try:\n return kwargs['element']\n except:\n return False\n else:\n try:\n if 'accessibility_id' in kwargs:\n e = d.find_element_by_accessibility_id(kwargs['accessibility_id'])\n elif 'class_name' in 
kwargs:\n e = d.find_element_by_class_name(kwargs['class_name'])\n elif 'id' in kwargs:\n e = d.find_element_by_id(kwargs['id'])\n elif 'xpath' in kwargs:\n e = d.find_element_by_xpath(kwargs['xpath'])\n else:\n raise RuntimeError(\"exists() called with incorrect param. kwargs = %s\" % kwargs)\n\n return e\n except NoSuchElementException:\n return False\n finally:\n self.set_implicit_wait()", "def field_is_mandatory_css(driver, locator):\n elem = driver.find_element_by_css_selector(locator)\n is_mandatory = elem.get_attribute(\"aria-required\")\n if is_mandatory == 'true':\n print \"Mandatory field = true\"\n else:\n log_to_file('Mandatory field test failed', 'WARNING')", "def has_element(self, attrib_key, attrib_value, match_option=None):\n selector = UiSelector()\n selector.attributes(attrib_key, attrib_value, match_option)\n return UiObject(selector, self.android_device_driver).verify_exist()", "def isElementDisplayed(self, locator=\"\",locatorType='id', element=None):\n isDisplayed=False\n\n try:\n if locator:\n element=self.getElement(locator,locatorType)\n\n if element is not None:\n isDisplayed=element.is_displayed()\n self.logger.info(\"Element is displayed with locator\" + locator + \"LocatorType\" + locatorType)\n\n else:\n self.logger.info(\"Element is not displayed with locator\" + locator + \"LocatorType\" + locatorType)\n return isDisplayed\n\n except:\n print(\"Element not found\")\n return False", "def verify_visible(self, locator, msg=None):\r\n try:\r\n self.assert_visible(locator, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def verify_element_is_present_XPATH(driver, locator, module, test, pass_message, fail_message):\n try:\n driver.find_element_by_xpath(locator)\n except NoSuchElementException:\n log_to_file(''+module+':'+test+':'+fail_message+'', 'FAILED')\n print 'ERROR WARNING - ' + fail_message\n email_module.error_mail(module, test, fail_message, 'NoSuchElementException')\n else:\n log_to_file(''+module+' Module:'+test+':'+pass_message+'', 'PASSED')\n print pass_message", "def doCheckLocationElement(self, x, y, timeout=10.0, name=None, tagName=None, className=None,\n id=None, xpath=None, linkText=None, partialLinkText=None, cssSelector=None,\n location=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n if x is None: \n raise TestAdapterLib.ValueException(TestAdapterLib.caller(), \"x argument cannot be equal to none\")\n if y is None: \n raise TestAdapterLib.ValueException(TestAdapterLib.caller(), \"y argument cannot be equal to none\" )\n\n ret = False\n \n if not self.cfg['wait-until']:\n cmdId = self.implicitlyWait(timeout=timeout)\n if self.isWait(timeout=timeout, commandId=cmdId) is None:\n ret = False\n return ret\n \n cmdId = self.findElement(elementId=None, name=name, tagName=tagName, className=className,\n id=id, xpath=xpath, linkText=linkText, partialLinkText=partialLinkText, \n cssSelector=cssSelector, location=location)\n rsp = self.hasElement(timeout=timeout, commandId=cmdId) \n if rsp is None: ret = False\n else:\n elementVall = rsp.get('GUI', 'value')\n elementId = elementVall.get('element-id')\n \n cmdId = self.locationElement(elementId=elementId)\n rsp2 = self.isElementLocation(timeout=timeout, commandId=cmdId) \n if rsp2 is None: ret = False\n else:\n elementVall = rsp2.get('GUI', 'value')\n elementX = elementVall.get('x')\n elementY = elementVall.get('y')\n \n if int(x) == int(elementX) and int(y) == 
int(elementY):\n ret = True\n return ret", "def is_element_exists(self, locator_type, locator):\n try:\n self.wait_until_element_find(locator_type, locator)\n except TimeoutException:\n return False\n return True", "def isElement(self, elementXpath):\r\n try:\r\n self.browser.find_element_by_xpath(elementXpath)\r\n return True\r\n except:\r\n return False", "def is_element_displayed(self, locator=\"\", locator_type=\"id\", element=None):\n is_displayed = False\n try:\n if locator: # This means if locator is not empty\n element = self.get_element_(locator, locator_type)\n if element is not None:\n is_displayed = element.is_displayed()\n self.log.info(\"Element is displayed with locator: \" + locator +\n \" locatorType: \" + locator_type)\n else:\n self.log.info(\"Element not displayed with locator: \" + locator +\n \" locatorType: \" + locator_type)\n return is_displayed\n except:\n print(\"Element not found\")\n return False", "def wait_for_clickable(self, locator, timeout=2):\n try:\n WebDriverWait(self.driver, timeout).until(\n ec.element_to_be_clickable(locator)\n )\n except (NoSuchElementException, TimeoutException) as err:\n logging.error(f\"Exception Type: {type(err)}\")\n logging.info(f\"Element does not exist: {(locator,)} \")\n return False\n return True", "def test_is_an_element_name():\n for el in roentgen.elements['name']:\n assert(is_an_element(el))", "def check4Id(self, element):\n if self.__identifier in element.attrib:\n return True\n else:\n return False", "def field_is_read_only_xpath(driver, locator):\n elem = driver.find_element_by_xpath(locator)\n is_disabled = elem.get_attribute(\"disabled\")\n if is_disabled == 'true':\n print \"Read Only field = true\"\n return True\n else:\n log_to_file('Expected Read Only field to be disabled, but was still enabled', 'WARNING')\n return False", "def wait_presence_element(self, locator):\n try:\n return WebDriverWait(self.driver, 10).until(ec.presence_of_element_located(locator))\n except AttributeError as e:\n loger.error('定位元素出错')\n self.save_screen_shot()\n raise e", "def field_is_not_read_only_xpath(driver, locator):\n elem = driver.find_element_by_xpath(locator)\n is_disabled = elem.get_attribute(\"disabled\")\n if is_disabled == 'true':\n log_to_file('Expected Read Only field to be enabled, but was still disabled', 'WARNING')\n return False\n else:\n print \"Read Only field enabled = true\"\n return True", "def is_visible(self, locator, timeout=15):\n try:\n ui.WebDriverWait(self.driver, timeout).until(EC.visibility_of_element_located((By.CSS_SELECTOR, locator)))\n return True\n except TimeoutException:\n return False", "def wait_for_visible(self, locator, timeout=2):\n try:\n WebDriverWait(self.driver, timeout).until(\n ec.visibility_of_element_located(locator)\n )\n except (NoSuchElementException, TimeoutException) as err:\n logging.error(f\"Exception Type: {type(err)}\")\n logging.info(f\"Element does not exist: {(locator, )} \")\n return False\n return True" ]
[ "0.61864775", "0.6149874", "0.6101802", "0.6066546", "0.6007085", "0.5998578", "0.5989109", "0.5887555", "0.587811", "0.58647937", "0.57877517", "0.5772594", "0.57453614", "0.5663663", "0.5617136", "0.55940014", "0.55539906", "0.54727566", "0.541773", "0.53922576", "0.5383754", "0.53765905", "0.53285104", "0.5326006", "0.5290725", "0.5282673", "0.52752936", "0.5269877", "0.52473015", "0.524248" ]
0.68271935
0
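The enabled-check keyword above reduces to Selenium's is_enabled() on the located element. A minimal standalone sketch of the same pattern, assuming a plain Selenium/Appium WebDriver session and an XPath locator (the helper name and its arguments are illustrative assumptions, not part of the library):

def element_should_be_enabled(driver, xpath_locator):
    # Locate the element; find_element_by_xpath raises NoSuchElementException if nothing matches.
    element = driver.find_element_by_xpath(xpath_locator)
    # Mirror the keyword's check: fail loudly when the element is present but disabled.
    if not element.is_enabled():
        raise AssertionError("Element '%s' should be enabled but was not" % xpath_locator)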
Verifies that the name of the element identified with locator is equal to 'expected'. Key attributes for arbitrary elements are `id` and `name`. See `introduction` for details about locating elements.
def aisappium_element_name_should_be(self, locator, expected, oAppiumInfo=None):
    if oAppiumInfo is not None:
        element = self._element_find_atlas(locator, True, True, oAppiumInfo.driver)
    else:
        element = self._element_find(locator, True, True)
    if expected != element.get_attribute('name'):
        raise AssertionError("Element '%s' name should be '%s' "
                             "but it is '%s'." % (locator, expected, element.get_attribute('name')))
    self._info("Element '%s' name is '%s' " % (locator, expected))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assert_true_element_by_name(self):\n self.assertTrue(self.is_element_present(By.NAME, self.locator))", "def test_is_an_element_name():\n for el in roentgen.elements['name']:\n assert(is_an_element(el))", "def assert_equal_find_element_by_css_selector_attr_text(self):\n self.assertEqual(self.locator[0],\n self.driver.find_element_by_css_selector(\n self.locator[1]).get_attribute(self.locator[2]))", "def test_elements_by_name():\n m = openmc.Material()\n m.add_element('woLfrAm', 1.0)\n with pytest.raises(ValueError):\n m.add_element('uranum', 1.0)\n m.add_element('uRaNiUm', 1.0)\n m.add_element('Aluminium', 1.0)\n a = openmc.Material()\n b = openmc.Material()\n c = openmc.Material()\n a.add_element('sulfur', 1.0)\n b.add_element('SulPhUR', 1.0)\n c.add_element('S', 1.0)\n assert a._nuclides == b._nuclides\n assert b._nuclides == c._nuclides", "def verify_the_group_name_is_group_name(driver, group_name):\n assert wait_on_element(driver, 5, f'//div[text()=\"{group_name}\" and @class=\"item-value\"]')", "def aisappium_element_value_should_be(self, locator, expected, oAppiumInfo=None):\n if oAppiumInfo is not None:\n element = self._element_find_atlas(locator, True, True, oAppiumInfo.driver)\n else:\n element = self._element_find(locator, True, True)\n if expected != element.get_attribute('value'):\n raise AssertionError(\"Element '%s' value should be '%s' \"\n \"but it is '%s'.\" % (locator, expected, element.get_attribute('value')))\n self._info(\"Element '%s' value is '%s' \" % (locator, expected))", "def test_is_an_element_caseinsensitive_name():\n for el in roentgen.elements['name']:\n assert is_an_element(el.upper())\n assert is_an_element(el.lower())\n assert is_an_element(el.capitalize())", "def assertEmail(self,email,email_expected):\n for attr in email_expected.keys():\n try:\n found = getattr(email,attr)\n except AttributeError as e:\n raise AttributeError(\"Could not find attribute '{0}' for this email.\\\n are you sure it exists? 
- {1}\".format(attr,str(e)))\n expected = email_expected[attr]\n self.assertTrue(expected == found or is_subset(found,expected) or (expected in found),\n \"Expected to find '{0}' for email attribute \\\n '{1}' but found '{2}' instead\".format(expected,\n attr,\n found))", "def test_name_attribute_assignment(self):\n self.assertNotIn('aldous', self.__dict__)\n self.aldous\n self.assertIn('aldous', self.__dict__)\n self.assertIs(self.__dict__['aldous'], self.aldous)", "def verify_element_present(self, locator, msg=None):\r\n try:\r\n self.asset_element_present(locator, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def assert_valid_attribute(self, name):\n if name.startswith('_'):\n return\n self.assert_known_field(name)", "def test_name_field(self):\n field = self.record.find('field[@name=\\'name\\']')\n self.assertEqual(field.text, 'GUH-ADT', 'Incorrect Name Field')", "def check_attributes(self):\n self.assertEqual(type(self.amenity_1.name), str)", "def test_name(self):\n insta = Amenity()\n self.assertTrue(hasattr(insta, \"name\"))\n self.assertEqual(insta.name, \"\")", "def test_name(self):\n inst = Amenity()\n self.assertTrue(hasattr(inst, \"name\"))\n self.assertEqual(inst.name, \"\")", "def verify_text(self, expected_text: str, *locator):\n e = self.driver.find_element(*locator)\n actual_text = e.text\n assert expected_text == actual_text, f\"Expected {expected_text} does not match actual {actual_text}\"", "def verify_equal_find_element_by_css_selector_text(self):\n try:\n self.assertEqual(self.locator[0],\n self.driver.find_element_by_css_selector(\n self.locator[1]).text)\n except AssertionError as e:\n self.driver.get_screenshot_as_file(\n \"tests/reports/%s_FALSE.png\" % self.method)\n self.verificationErrors.append(str(e))", "def attrs_eq(received, **expected):\n for k, v in expected.iteritems():\n eq_(v, getattr(received, k))", "def verify_text(self, expected_text: str, *locator):\n actual_text = self.driver.find_element(*locator).text\n assert actual_text == expected_text, f'Error. 
Expected {expected_text} does not match actual {actual_text}'", "def test_attribute(self):\n xp = XPathQuery(\"/foo[@attrib1]\")\n self.assertEqual(xp.matches(self.e), True)", "def attribute_is_equal(self, attr_name, expected, observed):\n if attr_name != \"metrics\":\n return super().attribute_is_equal(attr_name, expected, observed)\n\n expected_names = [m.metric.mp_metric_name for m in expected]\n return (\n len(expected) == len(observed)\n and all(name in expected_names for name in observed)\n and all(m.value == observed.get(m.metric.mp_metric_name) for m in expected)\n )", "def update_element_name(self, items, new_name):\n if new_name != '':\n for i in items:\n if i.text() == new_name:\n #print(\"Name already exists\")\n msgBox = QMessageBox()\n msgBox.setIcon(QMessageBox.Information)\n msgBox.setText(\"Element with this name already exists.\")\n msgBox.setWindowTitle(\"QMessageBox Example\")\n msgBox.setStandardButtons(QMessageBox.Ok)\n msgBox.exec()\n return False\n return new_name\n else:\n if self.list_of_elements.count() == 0:\n new_name = self.element_name+\"_\"+str(0)\n return new_name\n\n for i in range(0, self.list_of_elements.count()+1):\n new_name = self.element_name+\"_\"+str(i)\n exists = self.list_of_elements.findItems(new_name,\n QtCore.Qt.MatchExactly)\n if len(exists) == 0:\n return new_name\n return False", "def the_name_should_reflect_in_the_state_of_the_device(name):\n assert web_app.check_value_in_state(\"name\",name)", "def html_tree_equal(received, expected):\r\n for attr in ('tag', 'attrib', 'text', 'tail'):\r\n if getattr(received, attr) != getattr(expected, attr):\r\n return False\r\n if len(received) != len(expected):\r\n return False\r\n if any(not html_tree_equal(rec, exp) for rec, exp in zip(received, expected)):\r\n return False\r\n return True", "def find_element_by_name(self, name):\n return self.driver.find_element_by_name(name)", "def test_name_properties_on_attribute():\n assert not Attribute(name=\"b\", path=\"a.b\", file_path=\"a.py\").name_properties\n assert \"private\" in Attribute(name=\"_b\", path=\"a._b\", file_path=\"a.py\").name_properties\n assert \"class-private\" in Attribute(name=\"__b\", path=\"a.__b\", file_path=\"a.py\").name_properties\n assert \"special\" in Attribute(name=\"__b__\", path=\"a.__b__\", file_path=\"a.py\").name_properties", "def test_set_name_attribute(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.name = given\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)", "def is_element_exist(self, locator):\r\n t1 = time.time()\r\n try:\r\n self.driver.find_element(locator)\r\n self.my_print(\"{0} Element: <{1}> is exist, Spend {2} seconds\".format(success,locator, time.time() - t1))\r\n return True\r\n except TimeoutException:\r\n self.my_print(\"{0} Element: <{1}> is not exist, Spend {2} seconds\".format(fail, locator, time.time() - t1))\r\n return False", "def asset_element_present(self, locator, msg=None):\r\n e = driver.find_elements_by_locator(locator)\r\n if len(e) == 0:\r\n raise AssertionError(\"Element at %s was not found\" % locator)", "def aisappium_element_attribute_should_match(self, locator, attr_name, match_pattern, regexp=False,\n oAppiumInfo=None):\n if oAppiumInfo is not None:\n elements = self._element_find_atlas(locator, False, True, oAppiumInfo.driver)\n else:\n elements = self._element_find(locator, False, True)\n if len(elements) > 1:\n self._info(\"CAUTION: '%s' matched %s elements - using the first element only\" % (locator, len(elements)))\n\n 
attr_value = elements[0].get_attribute(attr_name)\n\n # ignore regexp argument if matching boolean\n if isinstance(match_pattern, bool) or match_pattern.lower() == 'true' or match_pattern.lower() == 'false':\n if isinstance(match_pattern, bool):\n match_b = match_pattern\n else:\n match_b = ast.literal_eval(match_pattern.title())\n\n if isinstance(attr_value, bool):\n attr_b = attr_value\n else:\n attr_b = ast.literal_eval(attr_value.title())\n\n self._bi.should_be_equal(match_b, attr_b)\n\n elif regexp:\n self._bi.should_match_regexp(attr_value, match_pattern,\n msg=\"Element '%s' attribute '%s' should have been '%s' \"\n \"but it was '%s'.\" % (locator, attr_name, match_pattern, attr_value),\n values=False)\n else:\n self._bi.should_match(attr_value, match_pattern,\n msg=\"Element '%s' attribute '%s' should have been '%s' \"\n \"but it was '%s'.\" % (locator, attr_name, match_pattern, attr_value),\n values=False)\n # if expected != elements[0].get_attribute(attr_name):\n # raise AssertionError(\"Element '%s' attribute '%s' should have been '%s' \"\n # \"but it was '%s'.\" % (locator, attr_name, expected, element.get_attribute(attr_name)))\n self._info(\"Element '%s' attribute '%s' is '%s' \" % (locator, attr_name, match_pattern))" ]
[ "0.7359056", "0.6762324", "0.60369396", "0.5999753", "0.59545654", "0.565154", "0.56473446", "0.56425357", "0.56333536", "0.5630478", "0.5625946", "0.5557263", "0.55410373", "0.55222356", "0.5500503", "0.5445403", "0.54440606", "0.5409522", "0.53976715", "0.5382696", "0.5380621", "0.5377999", "0.5365032", "0.534222", "0.53362393", "0.53235096", "0.5312833", "0.5303346", "0.5300667", "0.5281623" ]
0.7756563
0
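The name assertion is the same pattern with a get_attribute('name') comparison instead of is_enabled(). A rough sketch under the same assumptions as before (plain WebDriver, illustrative helper and locator):

def element_name_should_be(driver, xpath_locator, expected):
    # Read the 'name' attribute of the first element matching the locator.
    actual = driver.find_element_by_xpath(xpath_locator).get_attribute('name')
    if actual != expected:
        raise AssertionError("Element '%s' name should be '%s' but it is '%s'." % (xpath_locator, expected, actual))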
Verifies that the value of the element identified with locator is equal to 'expected'. Key attributes for arbitrary elements are `id` and `name`. See `introduction` for details about locating elements.
def aisappium_element_value_should_be(self, locator, expected, oAppiumInfo=None):
    if oAppiumInfo is not None:
        element = self._element_find_atlas(locator, True, True, oAppiumInfo.driver)
    else:
        element = self._element_find(locator, True, True)
    if expected != element.get_attribute('value'):
        raise AssertionError("Element '%s' value should be '%s' "
                             "but it is '%s'." % (locator, expected, element.get_attribute('value')))
    self._info("Element '%s' value is '%s' " % (locator, expected))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assert_equal_find_element_by_css_selector_attr_text(self):\n self.assertEqual(self.locator[0],\n self.driver.find_element_by_css_selector(\n self.locator[1]).get_attribute(self.locator[2]))", "def verify_value(driver, locator, value, module, test, pass_message, fail_message):\n wait_for_element_XPATH(driver, locator)\n elem = driver.find_element_by_xpath(locator)\n text = elem.text\n try:\n assert text == value\n except AssertionError:\n log_to_file(''+module+':'+test+':'+fail_message+'', 'FAILED')\n print 'ERROR - ASSERTION EXCEPTION - ' + fail_message\n email_module.error_mail(module, test, fail_message, 'AssertionError')\n else:\n log_to_file(''+module+' Module:'+test+':'+pass_message+'', 'PASSED')\n print pass_message", "def aisappium_element_name_should_be(self, locator, expected, oAppiumInfo=None):\n if oAppiumInfo is not None:\n element = self._element_find_atlas(locator, True, True, oAppiumInfo.driver)\n else:\n element = self._element_find(locator, True, True)\n if expected != element.get_attribute('name'):\n raise AssertionError(\"Element '%s' name should be '%s' \"\n \"but it is '%s'.\" % (locator, expected, element.get_attribute('name')))\n self._info(\"Element '%s' name is '%s' \" % (locator, expected))", "def verify_text(self, expected_text: str, *locator):\n e = self.driver.find_element(*locator)\n actual_text = e.text\n assert expected_text == actual_text, f\"Expected {expected_text} does not match actual {actual_text}\"", "def verify_equal_find_element_by_css_selector_text(self):\n try:\n self.assertEqual(self.locator[0],\n self.driver.find_element_by_css_selector(\n self.locator[1]).text)\n except AssertionError as e:\n self.driver.get_screenshot_as_file(\n \"tests/reports/%s_FALSE.png\" % self.method)\n self.verificationErrors.append(str(e))", "def verify_text(self, expected_text: str, *locator):\n actual_text = self.driver.find_element(*locator).text\n assert actual_text == expected_text, f'Error. 
Expected {expected_text} does not match actual {actual_text}'", "def assert_true_element_by_name(self):\n self.assertTrue(self.is_element_present(By.NAME, self.locator))", "def verify_tool_tip_value(driver, locator, value, module, test, pass_message, fail_message):\n wait_for_element_XPATH(driver, locator)\n elem = driver.find_element_by_xpath(value)\n tool_tip = elem.get_attribute(\"data-original_title\")\n try:\n assert tool_tip == value\n except AssertionError:\n log_to_file(''+module+':'+test+':'+fail_message+'', 'FAILED')\n print 'ERROR - ASSERTION EXCEPTION - ' + fail_message\n email_module.error_mail(module, test, fail_message, 'AssertionError')\n else:\n log_to_file(''+module+' Module:'+test+':'+pass_message+'', 'PASSED')\n print pass_message", "def expected_value(expected, actual):\n assert expected == actual", "def _assertHtmlEqual(self, actual, expected):\r\n self.assertEqual(actual.tag, expected.tag)\r\n self.assertEqual(actual.attrib, expected.attrib)\r\n self.assertEqual(actual.text, expected.text)\r\n self.assertEqual(actual.tail, expected.tail)\r\n self.assertEqual(len(actual), len(expected))\r\n for actual_child, expected_child in zip(actual, expected):\r\n self._assertHtmlEqual(actual_child, expected_child)", "def test_attributeWithValue(self):\n xp = XPathQuery(\"/foo[@attrib1='value1']\")\n self.assertEqual(xp.matches(self.e), 1)", "def aisappium_element_attribute_should_match(self, locator, attr_name, match_pattern, regexp=False,\n oAppiumInfo=None):\n if oAppiumInfo is not None:\n elements = self._element_find_atlas(locator, False, True, oAppiumInfo.driver)\n else:\n elements = self._element_find(locator, False, True)\n if len(elements) > 1:\n self._info(\"CAUTION: '%s' matched %s elements - using the first element only\" % (locator, len(elements)))\n\n attr_value = elements[0].get_attribute(attr_name)\n\n # ignore regexp argument if matching boolean\n if isinstance(match_pattern, bool) or match_pattern.lower() == 'true' or match_pattern.lower() == 'false':\n if isinstance(match_pattern, bool):\n match_b = match_pattern\n else:\n match_b = ast.literal_eval(match_pattern.title())\n\n if isinstance(attr_value, bool):\n attr_b = attr_value\n else:\n attr_b = ast.literal_eval(attr_value.title())\n\n self._bi.should_be_equal(match_b, attr_b)\n\n elif regexp:\n self._bi.should_match_regexp(attr_value, match_pattern,\n msg=\"Element '%s' attribute '%s' should have been '%s' \"\n \"but it was '%s'.\" % (locator, attr_name, match_pattern, attr_value),\n values=False)\n else:\n self._bi.should_match(attr_value, match_pattern,\n msg=\"Element '%s' attribute '%s' should have been '%s' \"\n \"but it was '%s'.\" % (locator, attr_name, match_pattern, attr_value),\n values=False)\n # if expected != elements[0].get_attribute(attr_name):\n # raise AssertionError(\"Element '%s' attribute '%s' should have been '%s' \"\n # \"but it was '%s'.\" % (locator, attr_name, expected, element.get_attribute(attr_name)))\n self._info(\"Element '%s' attribute '%s' is '%s' \" % (locator, attr_name, match_pattern))", "def verify_element_present(self, locator, msg=None):\r\n try:\r\n self.asset_element_present(locator, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def html_tree_equal(received, expected):\r\n for attr in ('tag', 'attrib', 'text', 'tail'):\r\n if getattr(received, attr) != getattr(expected, attr):\r\n return False\r\n if len(received) != len(expected):\r\n return False\r\n if any(not 
html_tree_equal(rec, exp) for rec, exp in zip(received, expected)):\r\n return False\r\n return True", "def test_attribute(self):\n xp = XPathQuery(\"/foo[@attrib1]\")\n self.assertEqual(xp.matches(self.e), True)", "def attrs_eq(received, **expected):\n for k, v in expected.iteritems():\n eq_(v, getattr(received, k))", "def verify_element_appears(self, xpath):\n assert self.wait_until_element_find(locator_type=By.XPATH, locator=xpath)", "def hasAttributeElement(self, timeout=20.0, commandId=None, expectedText=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n valueLayer = TestTemplates.TemplateLayer(name=\"\")\n if expectedText is not None:\n valueLayer.addKey(name=\"value\", data=expectedText)\n return self.isActionAccepted(timeout=timeout, commandName=Command.GET_ELEMENT_ATTRIBUTE, \n commandId=commandId, expectedValue=valueLayer)", "def check_node_attribute(node, attribute_name: str, expected_value, default_value=None):\n value = default_value\n for attr in node.attribute:\n if attr.name == attribute_name:\n value = helper.get_attribute_value(attr)\n\n if isinstance(expected_value, list):\n return (isinstance(value, (ndarray, list))) and array_equal(expected_value, value, equal_nan=False)\n else:\n return value == expected_value", "def assertEmail(self,email,email_expected):\n for attr in email_expected.keys():\n try:\n found = getattr(email,attr)\n except AttributeError as e:\n raise AttributeError(\"Could not find attribute '{0}' for this email.\\\n are you sure it exists? - {1}\".format(attr,str(e)))\n expected = email_expected[attr]\n self.assertTrue(expected == found or is_subset(found,expected) or (expected in found),\n \"Expected to find '{0}' for email attribute \\\n '{1}' but found '{2}' instead\".format(expected,\n attr,\n found))", "def attribute_is_equal(self, attr_name, expected, observed):\n if attr_name != \"metrics\":\n return super().attribute_is_equal(attr_name, expected, observed)\n\n expected_names = [m.metric.mp_metric_name for m in expected]\n return (\n len(expected) == len(observed)\n and all(name in expected_names for name in observed)\n and all(m.value == observed.get(m.metric.mp_metric_name) for m in expected)\n )", "def test_attributeWithValueAny(self):\n xp = XPathQuery(\"/foo/*[@attrib2='value2']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.bar2])", "def assert_visible(self, locator, msg=None):\r\n e = driver.find_elements_by_locator(locator)\r\n if len(e) == 0:\r\n raise AssertionError(\"Element at %s was not found\" % locator)\r\n assert e.is_displayed()", "def testAntigenModDateLessEqualSearch(self):\n driver = self.driver\n #find the antigen Modification Date field and enter a date\n driver.find_element(By.ID, \"modificationDate\").send_keys(\"<=2005-06-02\")\n #find the Search button and click it\n driver.find_element(By.ID, 'searchButton').click()\n #waits until the element is located or 10 seconds\n WebDriverWait(self.driver, 10).until(EC.text_to_be_present_in_element((By.ID, 'resultsTable'), 'Ex54'))\n #find the Modified by field\n mod_by = driver.find_element(By.ID, 'modifiedBy').get_attribute('value')\n print(mod_by)\n #Assert the Modified By field returned is correct \n self.assertEqual(mod_by, 'csmith')\n #find the Modification Date field\n mod_date = driver.find_element(By.ID, 'modificationDate').get_attribute('value')\n print(mod_date)\n #Assert the Modification Date field returned is correct \n self.assertEqual(mod_date, '2005-06-02')", "def 
asset_element_present(self, locator, msg=None):\r\n e = driver.find_elements_by_locator(locator)\r\n if len(e) == 0:\r\n raise AssertionError(\"Element at %s was not found\" % locator)", "def wait_for_value(self, locator, text):\r\n for i in range(timeout_seconds):\r\n try:\r\n e = self.driver.find_element_by_locator(locator)\r\n if e.value == text:\r\n break\r\n except:\r\n pass\r\n time.sleep(1)\r\n else:\r\n raise ElementTextTimeout(\"%s value timed out\" % locator)\r\n return True", "def check_expected_values(self, expected_values, scraped_values):\n\n\t\tfor key in expected_values:\n\t\t\tself.assertIn(key, scraped_values)\n\t\t\tself.assertEqual(expected_values[key], scraped_values[key])", "def verify_dropdown_value(driver, locator, value, module, test, pass_message, fail_message):\n wait_for_element_XPATH(driver, locator)\n elem = Select(driver.find_element_by_xpath(locator))\n text = elem.first_selected_option.text\n try:\n assert text == value\n except AssertionError:\n log_to_file(''+module+':'+test+':'+fail_message+'', 'FAILED')\n print 'ERROR - ASSERTION EXCEPTION - ' + fail_message\n email_module.error_mail(module, test, fail_message, 'AssertionError')\n else:\n log_to_file(''+module+' Module:'+test+':'+pass_message+'', 'PASSED')\n print pass_message", "def test_is_an_element_name():\n for el in roentgen.elements['name']:\n assert(is_an_element(el))", "def test_equivalency(self):\n def compare_func(obj, node):\n # same id\n self.assertEqual(obj.id, node.get(\"id\"))\n\n # same html\n self.assertEqual(obj.html.prettify, node.prettify)\n\n # parents have same id (only for non-root elements)\n if not obj == self.document.root:\n self.assertEqual(obj.parent.id, node.parent.get(\"id\"))\n\n # same number of children\n child_nodes = self.get_children_of_node(node)\n self.assertEqual(len(obj.children), len(child_nodes))\n\n # children have same ids\n for (child_obj, child_node) in zip(obj.children, child_nodes):\n self.assertEqual(child_obj.id, child_node.get(\"id\"))\n\n self.recursively_compare_tree_against_html(compare_func)" ]
[ "0.6755333", "0.64595973", "0.64390033", "0.6227078", "0.6140919", "0.6100266", "0.6070944", "0.5988728", "0.5979792", "0.5908615", "0.5869544", "0.5841395", "0.57603246", "0.57437897", "0.5719058", "0.5705366", "0.5638438", "0.5589582", "0.55772203", "0.5566957", "0.5544057", "0.55369747", "0.5509849", "0.54995507", "0.5448499", "0.54469997", "0.54442734", "0.5433007", "0.5394317", "0.5388446" ]
0.7265637
0
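Since the name and value keywords differ only in which attribute they read, the comparison generalises to any attribute. The sketch below, including the commented usage with a made-up locator, is an illustration rather than library code:

def element_attribute_should_be(driver, xpath_locator, attr, expected):
    # Generalisation of the two keywords above: compare an arbitrary attribute against an expected value.
    actual = driver.find_element_by_xpath(xpath_locator).get_attribute(attr)
    if actual != expected:
        raise AssertionError("Element '%s' %s should be '%s' but it is '%s'." % (xpath_locator, attr, expected, actual))

# element_attribute_should_be(driver, "//input[@id='user']", 'value', 'admin')  # hypothetical usage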
Verify that an attribute of an element matches the expected criteria. The element is identified by _locator_. See `introduction` for details about locating elements. If more than one element matches, the first element is selected. The _attr_name_ is the name of the attribute within the selected element. The _match_pattern_ is used for the matching: if the match_pattern is a boolean or a 'True'/'true'/'False'/'false' string, a boolean match is applied; any other string causes a string match. The _regexp_ defines whether the string match is done using regular expressions (i.e. BuiltIn Library's
def aisappium_element_attribute_should_match(self, locator, attr_name, match_pattern, regexp=False, oAppiumInfo=None):
    if oAppiumInfo is not None:
        elements = self._element_find_atlas(locator, False, True, oAppiumInfo.driver)
    else:
        elements = self._element_find(locator, False, True)
    if len(elements) > 1:
        self._info("CAUTION: '%s' matched %s elements - using the first element only" % (locator, len(elements)))

    attr_value = elements[0].get_attribute(attr_name)

    # ignore regexp argument if matching boolean
    if isinstance(match_pattern, bool) or match_pattern.lower() == 'true' or match_pattern.lower() == 'false':
        if isinstance(match_pattern, bool):
            match_b = match_pattern
        else:
            match_b = ast.literal_eval(match_pattern.title())

        if isinstance(attr_value, bool):
            attr_b = attr_value
        else:
            attr_b = ast.literal_eval(attr_value.title())

        self._bi.should_be_equal(match_b, attr_b)

    elif regexp:
        self._bi.should_match_regexp(attr_value, match_pattern,
                                     msg="Element '%s' attribute '%s' should have been '%s' "
                                         "but it was '%s'." % (locator, attr_name, match_pattern, attr_value),
                                     values=False)
    else:
        self._bi.should_match(attr_value, match_pattern,
                              msg="Element '%s' attribute '%s' should have been '%s' "
                                  "but it was '%s'." % (locator, attr_name, match_pattern, attr_value),
                              values=False)
    # if expected != elements[0].get_attribute(attr_name):
    #     raise AssertionError("Element '%s' attribute '%s' should have been '%s' "
    #                          "but it was '%s'." % (locator, attr_name, expected, element.get_attribute(attr_name)))
    self._info("Element '%s' attribute '%s' is '%s' " % (locator, attr_name, match_pattern))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_attribute(self):\n xp = XPathQuery(\"/foo[@attrib1]\")\n self.assertEqual(xp.matches(self.e), True)", "def test_attributeWithValue(self):\n xp = XPathQuery(\"/foo[@attrib1='value1']\")\n self.assertEqual(xp.matches(self.e), 1)", "def has_element(self, attrib_key, attrib_value, match_option=None):\n selector = UiSelector()\n selector.attributes(attrib_key, attrib_value, match_option)\n return UiObject(selector, self.android_device_driver).verify_exist()", "def test_attributeWithValueAny(self):\n xp = XPathQuery(\"/foo/*[@attrib2='value2']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.bar2])", "def assert_equal_find_element_by_css_selector_attr_text(self):\n self.assertEqual(self.locator[0],\n self.driver.find_element_by_css_selector(\n self.locator[1]).get_attribute(self.locator[2]))", "def attribute_matches_value(attribute, value, components):\n\n if components[-1] == \"regex\":\n return re.match(value, attribute)\n possible_magic = f\"__{components[-1]}__\"\n if hasattr(attribute, possible_magic):\n return getattr(attribute, possible_magic)(value)\n return getattr(attribute, \"__eq__\")(value)", "def applyAttrPattern(*args, nodeType: AnyStr=\"\", patternName: AnyStr=\"\", **kwargs)->int:\n pass", "def _MatchDeviceAttribute(required_attr, device_attrs):\n if required_attr.name not in device_attrs:\n logging.debug(\n 'No %s in %s.',\n required_attr.name, device_attrs.get('device_serial'))\n return False\n operator = required_attr.operator or '='\n if operator not in _OPERATOR_TO_PREDICTOR:\n # This should never happen, since we check the expression in\n # request_api._ParseAttributeRequirement.\n raise ValueError('Operator \"%s\" is not supported.' % operator)\n\n device_attr_value = device_attrs[required_attr.name]\n required_value = required_attr.value\n if required_attr.operator in _LIST_OPERATORS:\n required_value = required_value.split(',')\n if required_attr.name in common.NUMBER_DEVICE_ATTRIBUTES:\n if isinstance(required_value, list):\n required_value = map(common.ParseFloat, required_value)\n else:\n required_value = common.ParseFloat(required_value)\n if required_value is None:\n # This should never happen, since we check the expression in\n # request_api._ParseAttributeRequirement.\n raise ValueError(\n \"%s can not compare to a non-number value '%s'\" %\n (required_attr.name, required_attr.value))\n device_attr_value = common.ParseFloat(device_attr_value)\n if device_attr_value is None:\n logging.debug(\n 'Device attr %s is a non-number \"%s\".',\n required_attr.name, device_attrs[required_attr.name])\n return False\n return _OPERATOR_TO_PREDICTOR[operator](device_attr_value, required_value)", "def hasAttributeElement(self, timeout=20.0, commandId=None, expectedText=None):\n TestAdapterLib.check_timeout(caller=TestAdapterLib.caller(), timeout=timeout)\n \n valueLayer = TestTemplates.TemplateLayer(name=\"\")\n if expectedText is not None:\n valueLayer.addKey(name=\"value\", data=expectedText)\n return self.isActionAccepted(timeout=timeout, commandName=Command.GET_ELEMENT_ATTRIBUTE, \n commandId=commandId, expectedValue=valueLayer)", "def find_element(self, attrib_key, attrib_value, match_option=None):\n selector = UiSelector()\n selector.attributes(attrib_key, attrib_value, match_option)\n return UiObject(selector, self.android_device_driver) if UiObject(\n selector, self.android_device_driver).verify_exist() else None", "def test_class_attribute_pattern(content, expected):\n match = 
champollion.parser.js_class._CLASS_ATTRIBUTE_PATTERN.search(\n content\n )\n if expected is None:\n assert match is None\n else:\n assert match.groupdict() == expected", "def check_node_attribute(node, attribute_name: str, expected_value, default_value=None):\n value = default_value\n for attr in node.attribute:\n if attr.name == attribute_name:\n value = helper.get_attribute_value(attr)\n\n if isinstance(expected_value, list):\n return (isinstance(value, (ndarray, list))) and array_equal(expected_value, value, equal_nan=False)\n else:\n return value == expected_value", "def attributes(self, attrib_key, attrib_value, match_option=None):\n selector = UiSelector()\n selector.attributes(attrib_key, attrib_value, match_option)\n return UiObject(selector, self.android_device_driver)", "def get_element_attribute_by_ele_xpath(page, xpath, attribute_name):\r\n if page is None or not xpath or not attribute_name:\r\n return ''\r\n ele = page.xpath(xpath)\r\n time.sleep(1)\r\n if ele and len(ele) > 0:\r\n try:\r\n return ele[0].attrib[attribute_name] \r\n except:\r\n printR(f' Error getting attribute {attribute_name}')\r\n return ''", "def get_attrs_with_name(self, name):\n return self.get_matches([lambda attr: attr.name == name])", "def is_element_attribute(element, attribute_name):\n return element.get(attribute_name) is not None", "def __check_pattern(node):\n if node.tag != \"discover_datasets\":\n return False\n if \"from_tool_provided_metadata\" in node.attrib and string_as_bool(\n node.attrib.get(\"from_tool_provided_metadata\", \"false\")\n ):\n return True\n if \"pattern\" not in node.attrib:\n return False\n pattern = node.attrib[\"pattern\"]\n regex_pattern = NAMED_PATTERNS.get(pattern, pattern)\n # TODO error on wrong pattern or non-regexp\n if \"(?P<ext>\" in regex_pattern:\n return True", "def checkattr(name):\n\n def check(obj):\n try:\n attrgetter(name)(obj)\n return True\n except AttributeError:\n return False\n\n return check", "def matches(self, attr):\n if attr is None: \n return False\n if attr.tablename and attr.tablename != self.tablename:\n return False\n if attr.typ and attr.typ != \"?\" and attr.typ != self.typ:\n return False\n return self.aname == attr.aname", "def validate_attribute(self, attr):\n self.attrs.validate_attribute(attr)", "def validateOneAttribute(self, ctxt, elem, attr, value):\n if ctxt is None: ctxt__o = None\n else: ctxt__o = ctxt._o\n if elem is None: elem__o = None\n else: elem__o = elem._o\n if attr is None: attr__o = None\n else: attr__o = attr._o\n ret = libxml2mod.xmlValidateOneAttribute(ctxt__o, self._o, elem__o, attr__o, value)\n return ret", "def _obj_attr_validator( # pylint: disable=too-many-arguments\n object_name: str,\n attr: str,\n exact_attr: Any,\n regex_attr: Any,\n in_attr: Any,\n exact_attr_ignore: Any = _IGNORE_OBJ_PARAM,\n regex_attr_ignore: Any = _IGNORE_OBJ_PARAM,\n in_attr_ignore: Any = _IGNORE_OBJ_PARAM,\n disallowed_attrs_regex: Optional[AbstractSet[str]] = None,\n) -> Optional[ValidatorFn]:\n\n def get_obj_attr(v: Any, attr: str = attr) -> Any:\n return getattr(v, attr)\n\n if exact_attr is not exact_attr_ignore:\n\n @pred_to_validator(\n f\"{object_name} attribute '{attr}' value '{{value}}' is not '{exact_attr}'\",\n complement=True,\n convert_value=get_obj_attr,\n )\n def obj_attr_equals(v: EmailAddress) -> bool:\n return get_obj_attr(v) == exact_attr\n\n return obj_attr_equals\n\n elif regex_attr is not regex_attr_ignore:\n\n if disallowed_attrs_regex is not None and attr in disallowed_attrs_regex:\n raise ValueError(\n 
f\"Cannot define regex spec for {object_name} attribute '{attr}'\"\n )\n\n if not isinstance(regex_attr, str):\n raise TypeError(\n f\"{object_name} attribute '{attr}_regex' must be a string value\"\n )\n\n pattern = re.compile(regex_attr)\n\n @pred_to_validator(\n f\"{object_name} attribute '{attr}' value '{{value}}' does not \"\n f\"match regex '{regex_attr}'\",\n complement=True,\n convert_value=get_obj_attr,\n )\n def obj_attr_matches_regex(v: EmailAddress) -> bool:\n return bool(re.fullmatch(pattern, get_obj_attr(v)))\n\n return obj_attr_matches_regex\n\n elif in_attr is not in_attr_ignore:\n\n if not isinstance(in_attr, (frozenset, set)):\n raise TypeError(\n f\"{object_name} attribute '{attr}_in' must be set or frozenset\"\n )\n\n @pred_to_validator(\n f\"{object_name} attribute '{attr}' value '{{value}}' not in {in_attr}\",\n complement=True,\n convert_value=get_obj_attr,\n )\n def obj_attr_is_allowed_value(v: EmailAddress) -> bool:\n return get_obj_attr(v) in in_attr\n\n return obj_attr_is_allowed_value\n else:\n return None", "def validate_attribute(self, attr):\n self.validate(attr)", "def get_attribute(self, selector, attribute):\n el = self.locate_element(selector)\n return el.get_attribute(attribute)", "def look_for_element(self, selector, locator=By.CSS_SELECTOR, attribute=None, timeout=5):\n print(locator)\n try:\n logger.debug(\"Look for element using '{}' locator and '{}' selector\".format(locator, selector))\n WebDriverWait(self.driver, timeout).until(EC.visibility_of_element_located((locator, selector)))\n if attribute:\n logger.debug(\"Return attributes of the element which has '{}' selector\".format(selector))\n return self.driver.find_element(locator, selector).get_attribute(attribute)\n return self.driver.find_element(locator, selector)\n except TimeoutException:\n logger.exception(\"Element '{}' is not appeared in {} seconds\".format(selector, timeout))\n allure.attach(body=self.driver.get_screenshot_as_png(),\n attachment_type=allure.attachment_type.PNG)\n allure.attach.file(source=PATH_TO_LOGS + \"chrome_logs.log\",\n attachment_type=allure.attachment_type.TEXT)\n raise AssertionError(\"Element '{}' is not appeared in {} seconds\".format(selector, timeout))", "def get_attribute(self, selector, attribute):\n self._wait_element_localed(self.driver, selector)\n element = self.get_element(selector)\n attr = element.get_attribute(attribute)\n\n return attr", "def getAttr(node, name):\n path = \"./attributelist/attribute[@name='%s']/@value\" % name\n n = node.xpathEval2(path)\n if len(n):\n return n[0].content\n else:\n return None", "def Match(context, pattern, arg=None):\n if not arg:\n arg = context.node\n arg = Conversions.StringValue(arg)\n bool = re.match(pattern, arg) and boolean.true or boolean.false\n return bool", "def validateOneAttribute(self, doc, elem, attr, value):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n if elem is None: elem__o = None\n else: elem__o = elem._o\n if attr is None: attr__o = None\n else: attr__o = attr._o\n ret = libxml2mod.xmlValidateOneAttribute(self._o, doc__o, elem__o, attr__o, value)\n return ret", "def attr_matches(self, text):\n m = re.match(r\"(\\w+(\\.\\w+)*)\\.(\\w*)\", text)\n if not m:\n return []\n expr, attr = m.group(1, 3)\n object = eval(expr, self.namespace)\n words = dir(object)\n if hasattr(object,'__class__'):\n words.append('__class__')\n words = words + get_class_members(object.__class__)\n matches = []\n n = len(attr)\n for word in words:\n if word[:n] == attr:\n matches.append(\"%s.%s\" % (expr, 
word))\n return matches" ]
[ "0.6057533", "0.5604119", "0.5601969", "0.54801065", "0.544838", "0.54421836", "0.540534", "0.5361275", "0.5326216", "0.525746", "0.5247057", "0.5226185", "0.51961404", "0.50717026", "0.5016522", "0.4971332", "0.4966717", "0.49417576", "0.4921396", "0.49141058", "0.48743615", "0.48613456", "0.48563412", "0.4852782", "0.4825989", "0.48158547", "0.47985333", "0.47813568", "0.47768113", "0.4775584" ]
0.7767435
0
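The matching logic of the attribute keyword has three branches: a boolean comparison, a regular-expression match, and a glob-style string match delegated to BuiltIn. Below is a condensed, WebDriver-free sketch of just that decision; fnmatch stands in for BuiltIn's Should Match here, so it approximates rather than reproduces the library's exact behaviour:

import fnmatch
import re

def attribute_matches(attr_value, match_pattern, regexp=False):
    # Boolean comparison when the pattern is a boolean or a 'true'/'false' string.
    if isinstance(match_pattern, bool) or str(match_pattern).lower() in ('true', 'false'):
        expected = match_pattern if isinstance(match_pattern, bool) else str(match_pattern).lower() == 'true'
        actual = attr_value if isinstance(attr_value, bool) else str(attr_value).lower() == 'true'
        return expected == actual
    # Regular-expression match when regexp is requested (compare Should Match Regexp).
    if regexp:
        return re.search(match_pattern, attr_value) is not None
    # Otherwise a glob-style string match with '*' and '?' wildcards (compare Should Match).
    return fnmatch.fnmatchcase(attr_value, match_pattern)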
Sets the network connection status. Android only.
def aisappium_set_network_connection_status(self, connectionStatus, oAppiumInfo=None):
    if oAppiumInfo is not None:
        driver = oAppiumInfo.driver
    else:
        driver = self._current_application()
    return driver.set_network_connection(int(connectionStatus))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True", "def SetConnectionStatus(self, state, info):\n self.connection_state = state\n self.connection_info = info", "def set_connection_status(self, connection_status: Literal[ConnectionState]) -> None:\n self.connection_status = connection_status\n self.publish(self.key_gen(\"connection_status\"), connection_status)", "def set_desired_connection_status(self, connection_status: Literal[ConnectionState]) -> None:\n self.desired_connection_status = connection_status\n self.publish(self.key_gen(\"desired_connection_status\"), connection_status)", "def connection_status(self, connection_status):\n allowed_values = [\"Unknown\", \"Success\", \"Failure\"]\n if connection_status not in allowed_values:\n raise ValueError(\n \"Invalid value for `connection_status` ({0}), must be one of {1}\"\n .format(connection_status, allowed_values)\n )\n\n self._connection_status = connection_status", "def set_status(self, status):\n if status == \"offline\":\n self._status.set_message(\"N\")\n self._status.set_foreground_color(\"red\")\n \n elif status == \"online\":\n self._status.set_message(\"Y\")\n self._status.set_foreground_color(\"Green\")\n \n elif status == \"away\":\n self._status.set_message(\"A\")\n self._status.set_foreground_color(\"Grey\")\n \n elif status == \"busy\":\n self._status.set_message(\"B\")\n self._status.set_foreground_color(\"Yellow\")", "def set_on_tunnel(self, status: bool):\n self._is_on_tunnel = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def new_connection_status(self, connection_status):\n _LOGGER.debug(\n \"[%s %s] Received cast device connection status: %s\",\n self.entity_id,\n self._cast_info.friendly_name,\n connection_status.status,\n )\n if connection_status.status == CONNECTION_STATUS_DISCONNECTED:\n self._attr_available = False\n self._invalidate()\n self.schedule_update_ha_state()\n return\n\n new_available = connection_status.status == CONNECTION_STATUS_CONNECTED\n if new_available != self.available:\n # Connection status callbacks happen often when disconnected.\n # Only update state when availability changed to put less pressure\n # on state machine.\n _LOGGER.debug(\n \"[%s %s] Cast device availability changed: %s\",\n self.entity_id,\n self._cast_info.friendly_name,\n connection_status.status,\n )\n self._attr_available = new_available\n if new_available and not self._cast_info.is_audio_group:\n # Poll current group status\n for group_uuid in self.mz_mgr.get_multizone_memberships(\n self._cast_info.uuid\n ):\n group_media_controller = self.mz_mgr.get_multizone_mediacontroller(\n group_uuid\n )\n if not group_media_controller:\n continue\n self.multizone_new_media_status(\n group_uuid, group_media_controller.status\n )\n self.schedule_update_ha_state()", "def SetStatus(self, status):\r\n self.status = status", "def set_online(self, status: bool):\n self.ui.get_object('statusbar_icon').set_from_icon_name(util.online_icons[status], Gtk.IconSize.BUTTON)\n self.ui.get_object('statusbar_icon').set_tooltip_text(util.online_tooltips[status])\n self.config['local_db'] = not status\n self.save_config()", "async def async_set_wifi_led_on(self):\n return", "def set_remote_status(self, mode):\n status = {\n 0: \"Local and locked\",\n 1: \"Remote and locked\",\n 
2: \"Local and unlocked\",\n 3: \"Remote and unlocked\",\n }\n logging.info(__name__ + ' : Setting remote control status to %s' % status.get(mode, \"Unknown\"))\n self._execute('C%s' % mode)", "def setstatus(self, status):\n with self.lock:\n self.status = status", "def set_status(self, status):\n # TODO log to db\n self.status = status", "async def set_status(self, ctx, *, status: str = \"online\"):\n\n try:\n status = discord.Status[status.lower()]\n except KeyError:\n await ctx.error(\"Invalid Status\", \"Only `online`, `idle` or `dnd` statuses are available.\")\n else:\n await self.bot.change_presence(status=status, activity=ctx.me.activity)\n await ctx.success(f\"Status changed to {status}.\")", "def set_vpn_state(self, status):\n if hasattr(self, status):\n self.change_to(getattr(self, status))", "def sendConnStatus(self):\n self.sendRunStatus({'name': 'dataConn', 'val': self.dataConns})\n self.sendRunStatus({'name': 'subjectConn', 'val': self.subjectConns})", "def networkMode(self, networkMode):\n\n # Setting the network mode can take a bit of time, so give it 10 seconds\n # to finish\n response = self.at.sendCommand(f\"AT+CFUN={networkMode}\", timeout = 10)\n\n if not response:\n raise modem.AtError(response, \"Failed to set network mode\")", "def set_online_status(self, status):\r\n name = \"label\"\r\n label = self.label\r\n label_sign = self.lbl_online\r\n text = [\"ONLINE\", \"OFFLINE\"]\r\n if status:\r\n label.setStyleSheet(\"#label{color: green;}\")\r\n label.setText(text[0])\r\n pixmap = QPixmap(os.path.abspath(os.path.join(self.app.path, \"PySkeletonViewer\", \"images\", \"green_dot.png\")))\r\n else:\r\n label.setStyleSheet(\"#\"+name+\"{color: red;}\")\r\n label.setText(text[1])\r\n pixmap = QPixmap(os.path.abspath(os.path.join(self.app.path, \"PySkeletonViewer\", \"images\", \"red_dot.png\")))\r\n image = pixmap.scaled(QSize(30, 30))\r\n label_sign.setPixmap(image)", "def set_node_status(self, status):\n self._node.status = status", "def update_status(self):\n num_nbrs = len(self.neighbors)\n if not 2 <= num_nbrs <= 3:\n self.status = 0\n elif num_nbrs == 3:\n self.status = 1", "def switch_network(self,type = None):\n network_type = self.appconfig(type,\"Settings\")\n self.logger.debug(\"Switch network to %s:%s.\" % (type,network_type))\n if self.enter_settings(u\"More…\"):\n if self.device(text=\"Mobile networks\").exists:\n self.device(text=\"Mobile networks\").click()\n if self.device(text=\"Preferred network mode\").wait.exists(timeout=self.timeout):\n self.device(text=\"Preferred network mode\").click()\n if self.device(resourceId=\"android:id/buttonPanel\").wait.exists(timeout=self.timeout):\n self.device(text=network_type).click()\n print self._is_connected(type)\n self.back_to_home()", "def change_status(self, inf, status):\n self.interfaces[inf]['status'] = status", "def set_network_connection_type(self,param={},ignore_error_handle = False):\n message = {};\n step = 'set network connection type \\'' + str(param.get('network_type',0)) + '\\'';\n network_type = param.get('network_type',0);\n try:\n if network_type == 0:\n self.driver.set_network_connection(ConnectionType.NO_CONNECTION);\n elif network_type == 1:\n self.driver.set_network_connection(ConnectionType.AIRPLANE_MODE);\n elif network_type == 2:\n self.driver.set_network_connection(ConnectionType.WIFI_ONLY);\n elif network_type == 4:\n self.driver.set_network_connection(ConnectionType.DATA_ONLY);\n elif network_type == 6:\n self.driver.set_network_connection(ConnectionType.ALL_NETWORK_ON);\n else:\n 
self.driver.set_network_connection(ConnectionType.NO_CONNECTION);\n message = self.feedback.feedback_action_ok(step);\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def sync_status_to_vc(status, context):\n conn = self._vc_connection\n conn.vip.health.set_status(status, context)", "def status(ctx):\n return show_network_status()", "async def change_status(self, status: str) -> int:\n data = {'status': str(status)}\n r = await self.request.request(url='https://www.roblox.com/home/updatestatus', method='POST', data=j.dumps(data))\n return r.status_code" ]
[ "0.6762321", "0.6707563", "0.6549716", "0.6297312", "0.6239823", "0.62142426", "0.60997283", "0.5929492", "0.5929492", "0.5929492", "0.59281015", "0.5899058", "0.5855906", "0.58518493", "0.5836226", "0.5801515", "0.5731722", "0.5730484", "0.57180864", "0.56975853", "0.5677831", "0.56459975", "0.5637389", "0.56244975", "0.5600393", "0.55973667", "0.5582251", "0.5552729", "0.5503448", "0.549201" ]
0.7377089
0
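The negatives closing this record branch on raw network_type values (0, 1, 2, 4 and 6). As a point of reference — and as an assumption about their origin rather than something stated in the record — these match the Appium/Android network-connection bitmask, which the small self-contained sketch below mirrors:

```python
# A self-contained sketch, not part of the dataset: it assumes the raw values
# 0/1/2/4/6 used by the snippets above follow the Appium/Android
# network-connection bitmask (airplane = 1, wifi = 2, mobile data = 4),
# which is why "all network on" is 6 rather than 3 or 5.
from enum import IntFlag


class ConnectionType(IntFlag):
    NO_CONNECTION = 0
    AIRPLANE_MODE = 1
    WIFI_ONLY = 2
    DATA_ONLY = 4
    ALL_NETWORK_ON = WIFI_ONLY | DATA_ONLY  # 2 | 4 == 6


def has_wifi(mode: int) -> bool:
    return bool(ConnectionType(mode) & ConnectionType.WIFI_ONLY)


def has_mobile_data(mode: int) -> bool:
    return bool(ConnectionType(mode) & ConnectionType.DATA_ONLY)


if __name__ == "__main__":
    for value in (0, 1, 2, 4, 6):
        print(value, "wifi:", has_wifi(value), "data:", has_mobile_data(value))
```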
Return elements that match the search criteria. The element is identified by _locator_. See `introduction` for details about locating elements. If _first_element_ is set to 'True', only the first matching element is returned. If _fail_on_error_ is set to 'True', this keyword fails if the search returns nothing.
def aisappium_get_elements(self, locator, first_element_only=False, fail_on_error=True, oAppiumInfo=None): if oAppiumInfo is not None: element = self._element_find_atlas(locator, first_element_only, fail_on_error, oAppiumInfo.driver) else: element = self._element_find(locator, first_element_only, fail_on_error) return element
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find(self, selector=\"*\", containing=None, clean=False, first=False,\n _encoding=None):\n\n # Convert a single containing into a list.\n if isinstance(containing, str):\n containing = [containing]\n if not isinstance(selector, str):\n raise TypeError(\"Expected string, got %r\" % type(selector))\n\n encoding = _encoding or self.encoding\n elements = [\n Element(element=found, url=self.url, default_encoding=encoding)\n for found in self.pq(selector)\n ]\n\n if containing:\n elements_copy = list(elements)\n elements = []\n\n for element in elements_copy:\n if any([c.lower() in element.full_text.lower() for c in containing]):\n elements.append(element)\n\n elements.reverse()\n\n # Sanitize the found HTML.\n if clean:\n elements_copy = list(elements)\n elements = []\n\n for element in elements_copy:\n element.raw_html = lxml.html.tostring(cleaner.clean_html(element.lxml))\n elements.append(element)\n\n if first and len(elements) > 0:\n return elements[0]\n else:\n return elements", "def query_xpath(self, query, element=None, find_one=False):\n if element is not None:\n root = self.get_selenium_element(element)\n else:\n root = self._driver\n if find_one:\n try:\n return self._get_element(selenium_element=root.find_element_by_xpath(query))\n except NoSuchElementException:\n return None\n else:\n return {self._get_element(selenium_element=el) for el in root.find_elements_by_xpath(query)}", "def findXpaths(self, query):\t\t\t\t\t\t\t## Multiple Elements\n\t\ttry:\n\t\t\tassert(type(query)) == str or Pattern\n\t\t\treturn self.driver.find_elements_by_xpath(query)\n\t\texcept Exception as e:\n\t\t\tprint(\"Unable to find xpath {}\\n\\n{}\".format(query, e))\n\t\t\treturn -1", "def find_element(**kwargs):\r\n elements = find_elements(**kwargs)\r\n\r\n if not elements:\r\n raise ElementNotFoundError(kwargs)\r\n\r\n if len(elements) > 1:\r\n exception = ElementAmbiguousError(\r\n \"There are {0} elements that match the criteria {1}\".format(\r\n len(elements),\r\n six.text_type(kwargs),\r\n )\r\n )\r\n\r\n exception.elements = elements\r\n raise exception\r\n\r\n return elements[0]", "def check_and_get_all_elements_by_xpath(element, xpath):\r\n if element is None or not xpath:\r\n return []\r\n try:\r\n return element.find_elements_by_xpath(xpath)\r\n except NoSuchElementException:\r\n return []", "def find_elements(self, locator, parent=None):\n return self._element_finder.find(locator, first_only=False,\n required=False, parent=parent)", "def _find(self, finder, finder_kwargs=None):\n finder_kwargs = finder_kwargs or {}\n\n elements = None\n elem_list = []\n\n try:\n elements = finder(**finder_kwargs)\n if not isinstance(elements, list):\n elements = [elements]\n\n except (\n NoSuchElementException,\n StaleElementReferenceException,\n ):\n # This exception is sometimes thrown if the page changes\n # quickly\n pass\n\n if elements:\n elem_list = [\n self.element_class(element, self, finder_kwargs) for element in elements\n ]\n\n return elem_list", "def all_first(self, selector: Union[str, Tuple[str, str]]) -> Collection:\n by = to_by(selector)\n\n # TODO: consider implement it through calling self.collected\n # because actually the impl is self.collected(lambda element: element.element(selector))\n\n return Collection(\n Locator(\n f'{self}.all_first({by})',\n lambda: [webelement.find_element(*by) for webelement in self()],\n ),\n self.config,\n )", "def find_elements(self, by=By.ID, value=None):\n # Get result from the original implementation of the underlying driver.\n results = 
self._original_methods['find_elements'](by, value)\n # Wrap all returned elements.\n if results:\n updated_results = []\n for element in results:\n updated_results.append(EyesWebElement(element, self._driver))\n results = updated_results\n return results", "def locate_elements(self, selector):\n if ',' not in selector:\n return self.base_driver.find_elements_by_id(selector)\n\n selector_by = selector.split(',')[0].strip()\n selector_value = selector.split(',')[1].strip()\n\n if selector_by == \"i\" or selector_by == 'id':\n elements = self.base_driver.find_elements_by_id(selector_value)\n elif selector_by == \"n\" or selector_by == 'name':\n elements = self.base_driver.find_elements_by_name(selector_value)\n elif selector_by == \"c\" or selector_by == 'class_name':\n elements = self.base_driver.find_elements_by_class_name(selector_value)\n elif selector_by == \"l\" or selector_by == 'link_text':\n elements = self.base_driver.find_elements_by_link_text(selector_value)\n elif selector_by == \"p\" or selector_by == 'partial_link_text':\n elements = self.base_driver.find_elements_by_partial_link_text(selector_value)\n elif selector_by == \"t\" or selector_by == 'tag_name':\n elements = self.base_driver.find_elements_by_tag_name(selector_value)\n elif selector_by == \"x\" or selector_by == 'xpath':\n elements = self.base_driver.find_elements_by_xpath(selector_value)\n elif selector_by == \"s\" or selector_by == 'css_selector':\n elements = self.base_driver.find_elements_by_css_selector(selector_value)\n else:\n raise NameError(\"Please enter a valid type of targeting elements.\")\n\n return elements", "def first_match(self, selectors, element):\n assert isinstance(selectors, list), 'selectors must be of type list!'\n\n for selector in selectors:\n if selector:\n try:\n match = self.advanced_css(selector, element=element)\n if match:\n return match\n except IndexError:\n pass\n return False", "def _find_element(locator, timeout=1, type = By.XPATH):\n elements = _find_elements(locator, timeout, type)\n if elements:\n if len(elements) > 1:\n logger.warning(f\"There is more than one element matching the locator {locator}.\"\n \"Try a more specific locator, or use _find_elements if this is expected.\")\n return None\n return elements[0]\n else:\n logger.warning(\"Could not find element with the locator [%s]\"%(locator))\n return None", "def find_by_xpath(self, element):\n for context_elements in self.within_selector:\n final_elements = context_elements.find_by_xpath(element)\n return final_elements", "def get_elements(self, by, criteria):\n # Need reuse criteria\n return self._find_by_locator().find_elements(by, criteria)", "def _find_element_possibilities(self):\n logger.debug(\n \"Finding element possibilities with selector string:{0}\".format(self._selector))\n possible_elements = []\n strategies = [\n By.CSS_SELECTOR,\n By.ID,\n By.CLASS_NAME,\n By.LINK_TEXT,\n By.PARTIAL_LINK_TEXT,\n By.TAG_NAME,\n By.XPATH,\n By.NAME]\n for strategy in strategies:\n try:\n logger.debug(\"Strategy:{0}\".format(strategy))\n possible_elements.append(\n self._driver.find_element(\n strategy, self._selector))\n except NoSuchElementException as e:\n logger.debug(\n \"Not found using Strategy:{0}. 
Exception:{1}\".format(\n strategy, e))\n logger.debug(possible_elements)\n if len(possible_elements) == 1:\n return possible_elements[0]\n else:\n raise Exception(\n \"No unique element found using smart search for selector: {0}\".format(\n self._selector))", "def xpath(self, selector, clean=False, first=False, _encoding=None):\n if not isinstance(selector, str):\n raise TypeError(\"Expected string, got %r\" % type(selector))\n\n selected = self.lxml.xpath(selector)\n\n elements = [\n Element(element=selection, url=self.url, default_encoding=_encoding or self.encoding)\n if not isinstance(selection, lxml.etree._ElementUnicodeResult) else str(selection)\n for selection in selected\n ]\n\n # Sanitize the found HTML.\n if clean:\n elements_copy = list(elements)\n elements = []\n\n for element in elements_copy:\n element.raw_html = lxml.html.tostring(cleaner.clean_html(element.lxml))\n elements.append(element)\n\n if first and len(elements) > 0:\n return elements[0]\n else:\n return elements", "def find_by_locator(webdriver_or_element, locator, find_all_elements=False):\n # handle backwards compatibility to support new Locator class\n if isinstance(locator, loc.Locator):\n locator = '{by}={locator}'.format(by=locator.by, locator=locator.locator)\n\n # use the appropriate find method given the locator type;\n # locators should follow the convention \"css=.class\" or \"xpath=//div\"\n # if locator type is unspecified, it will default to css\n if (locator.count('css=') > 0 or locator.count('css_selector=')) and len(locator.split('=', 1)) > 1:\n if find_all_elements:\n return webdriver_or_element.find_elements_by_css_selector(locator.split('=', 1)[-1])\n else:\n return webdriver_or_element.find_element_by_css_selector(locator.split('=', 1)[-1])\n\n elif locator.count('id=') > 0 and len(locator.split('=')) > 1:\n if find_all_elements:\n return webdriver_or_element.find_elements_by_id(locator.split('=', 1)[-1])\n else:\n return webdriver_or_element.find_element_by_id(locator.split('=', 1)[-1])\n\n elif locator.count('xpath=') > 0 and len(locator.split('=')) > 1:\n if find_all_elements:\n return webdriver_or_element.find_elements_by_xpath(locator.split('=', 1)[-1])\n else:\n return webdriver_or_element.find_element_by_xpath(locator.split('=', 1)[-1])\n\n elif locator.count('class_name=') > 0 and len(locator.split('=')) > 1:\n if find_all_elements:\n return webdriver_or_element.find_elements_by_class_name(locator.split('=', 1)[-1])\n else:\n return webdriver_or_element.find_element_by_class_name(locator.split('=', 1)[-1])\n\n elif locator.count('link_text=') > 0 and len(locator.split('=')) > 1:\n if find_all_elements:\n return webdriver_or_element.find_elements_by_link_text(locator.split('=', 1)[-1])\n else:\n return webdriver_or_element.find_element_by_link_text(locator.split('=', 1)[-1])\n\n elif locator.count('partial_link_text=') > 0 and len(locator.split('=')) > 1:\n if find_all_elements:\n return webdriver_or_element.find_elements_by_partial_link_text(locator.split('=', 1)[-1])\n else:\n return webdriver_or_element.find_element_by_partial_link_text(locator.split('=', 1)[-1])\n\n elif locator.count('name=') > 0 and len(locator.split('=')) > 1:\n if find_all_elements:\n return webdriver_or_element.find_elements_by_name(locator.split('=', 1)[-1])\n else:\n return webdriver_or_element.find_element_by_name(locator.split('=', 1)[-1])\n\n elif locator.count('tag_name=') > 0 and len(locator.split('=')) > 1:\n if find_all_elements:\n return 
webdriver_or_element.find_elements_by_tag_name(locator.split('=', 1)[-1])\n else:\n return webdriver_or_element.find_element_by_tag_name(locator.split('=', 1)[-1])\n\n else: # default to css\n if find_all_elements:\n return webdriver_or_element.find_elements_by_css_selector(locator)\n else:\n return webdriver_or_element.find_element_by_css_selector(locator)", "def find_elements(self, elements_locator: Tuple[By, str], wait_time=10, skip_exception=False) -> List[WebElement]:\n try:\n return WebDriverWait(self.driver, wait_time).until(EC.presence_of_all_elements_located(elements_locator),\n message=f\"Can't find elements with {elements_locator}\")\n except TimeoutException as err:\n if not skip_exception:\n print(f\"Elements was not found in {wait_time} seconds\")\n raise err\n return []", "def find_elements(self, xpath:str):\n try:\n elements = self.driver.find_elements_by_xpath(xpath)\n \n except NoSuchElementException:\n elements = []\n \n return elements", "def check_and_get_all_elements_by_css_selector(element, selector):\r\n if element is None or not selector:\r\n return [] \r\n try:\r\n return element.find_elements_by_css_selector(selector)\r\n except NoSuchElementException:\r\n return []", "def get_element(\n driver: webdriver, locator_text: str, locator_type: str = \"id\", many: bool = None\n):\n\n locator_type = locator_type.upper()\n if hasattr(By, locator_type):\n try:\n locator = get_locator(locator_text, locator_type)\n is_multiple = \"s\" if many else \"\"\n func = getattr(driver, f\"find_element{is_multiple}\")\n return func(*locator)\n except NoSuchElementException:\n return None\n else:\n raise SToolException(\"INVALID_SELECTOR\")", "def find_elements_by_partial_text(self,param,ignore_error_handle = False):\n message = {};\n step = 'find elements by partial text:' + str(param.get('partial_text',None));\n partial_text = str(param.get('partial_text',None));\n try:\n elements = self.driver.find_elements_by_partial_text(partial_text);\n message = self.feedback.feedback_action_ok(step);\n message['elements'] = elements;\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle)\n finally:\n return message;", "def _find_elements(locator, timeout=1, type = By.XPATH):\n try:\n logger.debug(\"Looking for elements with locator [%s]\"%(locator))\n return WebDriverWait(driver, timeout).until(EC.visibility_of_all_elements_located((type, locator)))\n except TimeoutException:\n logger.warning(f\"No elements with locator {locator} were visible within {timeout} seconds\")\n return None", "def wait_until_elements_find(self, locator_type, locator):\n self.wait.until(EC.presence_of_element_located((locator_type, locator)))\n return self.driver.find_elements(by=locator_type, value=locator)", "def find_by(\n self,\n finder,\n finder_kwargs=None,\n original_find: str = None,\n original_query: str = None,\n wait_time: int = None,\n):\n elem_list = []\n\n find_by = original_find or finder_kwargs[\"by\"]\n query = original_query or finder_kwargs.get(\"value\")\n\n # Zero second wait time means only check once\n if wait_time == 0:\n elem_list = _find(self, finder, finder_kwargs)\n else:\n wait_time = wait_time or self.wait_time\n end_time = time.time() + wait_time\n\n while time.time() < end_time:\n elem_list = _find(self, finder, finder_kwargs)\n\n if elem_list:\n break\n\n return ElementList(elem_list, find_by=find_by, query=query)", "def find_element(self, xpath:str):\n try:\n element = self.driver.find_element_by_xpath(xpath)\n except 
NoSuchElementException:\n element = []\n return element", "def get_elements_list(self, locator):\n try:\n elem_list = self.driver.find_elements(*locator)\n except NoSuchElementException:\n raise NoSuchElementException('Element with locator [{}] is not found'.format(locator))\n return elem_list", "def element(self):\n if isinstance(self._selector, tuple):\n return self._driver.find_element(*self._selector)", "def find(elem, xpath):\n if elem is None:\n return None\n return next(iter(elem.xpath(xpath)), None)", "def perform_search(self):\n\n self.implicitly_wait(5)\n html_element = self.find_element_by_xpath(\n '/html/body').get_attribute('outerHTML')\n soup = Scraper(html_element)\n target = soup.find_search_field()\n\n for elem in target:\n for attr, value in elem.items():\n placeholder = self.find_elements_by_css_selector(\n f'input[{attr}=\"{value}\"]'\n )\n for element in placeholder:\n try:\n element.send_keys(self.keywords)\n element.send_keys(Keys.RETURN)\n print(colored(':: Placeholder fullfilled ::', 'green'))\n return\n except:\n print(\n colored('Can\\'t type inside the search input', 'yellow'))" ]
[ "0.6258854", "0.6246094", "0.5891519", "0.58794206", "0.5748197", "0.56835234", "0.5626673", "0.55710834", "0.5544823", "0.5541678", "0.5529285", "0.5511625", "0.5486597", "0.5482396", "0.5468115", "0.54622185", "0.54425055", "0.5405061", "0.5363894", "0.5330292", "0.5313725", "0.5308185", "0.53074425", "0.5296847", "0.52940804", "0.52808565", "0.5244356", "0.52280843", "0.5211555", "0.51975197" ]
0.662032
0
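The keyword in this record delegates to `_element_find` and `_element_find_atlas`, neither of which is shown here. The sketch below is therefore an illustrative assumption: it collapses both helpers into one search routine so that the dispatch on `oAppiumInfo`, `first_element_only` and `fail_on_error` can be run end to end.

```python
class FakeDriver:
    def find_elements(self, locator):
        # Pretend every locator matches exactly one element.
        return [f"<element matching {locator}>"]


class FakeSession:
    """Stands in for the oAppiumInfo object: it only needs a .driver attribute."""
    def __init__(self):
        self.driver = FakeDriver()


class ElementKeywordsSketch:
    def __init__(self, default_driver):
        self._default_driver = default_driver

    def aisappium_get_elements(self, locator, first_element_only=False,
                               fail_on_error=True, oAppiumInfo=None):
        # Same dispatch as the record's keyword: prefer the driver carried by
        # the explicit session object, otherwise fall back to the default one.
        driver = oAppiumInfo.driver if oAppiumInfo is not None else self._default_driver
        found = driver.find_elements(locator)
        if not found and fail_on_error:
            raise AssertionError(f"No element matched {locator!r}")
        return found[0] if (first_element_only and found) else found


keywords = ElementKeywordsSketch(FakeDriver())
print(keywords.aisappium_get_elements("id=login", first_element_only=True))
print(keywords.aisappium_get_elements("id=login", oAppiumInfo=FakeSession()))
```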
Integration test for the crowdsource scenario. Alice is the evaluator; the others are trainers.
def test_crowdsource(): alice = CrowdsourceClient( "Alice", alice_data, alice_targets, XORModel, F.mse_loss, 0, deploy=True) bob = CrowdsourceClient( "Bob", bob_data, bob_targets, XORModel, F.mse_loss, 1, contract_address=alice.contract_address) charlie = CrowdsourceClient( "Charlie", charlie_data, charlie_targets, XORModel, F.mse_loss, 2, contract_address=alice.contract_address) david = CrowdsourceClient( "David", david_data, david_targets, XORModel, F.mse_loss, 3, contract_address=alice.contract_address) eve = CrowdsourceClient("Eve", eve_data, eve_targets, XORModel, F.mse_loss, 4, contract_address=alice.contract_address) # alice is evaluator # others are trainers trainers = [bob, charlie, david, eve] alice.set_genesis_model( round_duration=ROUND_DURATION, max_num_updates=len(trainers) ) # Training threads = [ threading.Thread( target=trainer.train_until, kwargs=TRAINING_HYPERPARAMS, daemon=True ) for trainer in trainers ] # Evaluation threads.append( threading.Thread( target=alice.evaluate_until, args=(TRAINING_ITERATIONS, EVAL_METHOD), daemon=True ) ) # Run all threads in parallel for t in threads: t.start() for t in threads: t.join() print_token_count(bob) print_token_count(charlie) print_token_count(david) print_token_count(eve) assert bob.get_token_count() > david.get_token_count( ), "Bob ended up with fewer tokens than David" assert bob.get_token_count() > eve.get_token_count( ), "Bob ended up with fewer tokens than Eve" assert charlie.get_token_count() > david.get_token_count( ), "Charlie ended up with fewer tokens than David" assert charlie.get_token_count() > eve.get_token_count( ), "Charlie ended up with fewer tokens than Eve" alice_global_model = alice.get_current_global_model() bob_global_model = bob.get_current_global_model() assert same_weights( alice_global_model, bob_global_model ), "Alice and Bob ran the same aggregation but got different model weights" assert str(alice_global_model.state_dict()) == \ str(bob_global_model.state_dict()), \ "Alice and Bob ran the same aggregation but got different model dicts"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_integration1(self):\n self._test_integration(1)", "def test_get_scenarios(self):\n pass", "def test_get_scenario(self):\n pass", "def test_create_scenario1(self):\n pass", "def test_integration3(self):\n self._test_integration(3)", "def test_cases():\n CasesTestCase.generate_tests()\n yield CasesTestCase\n yield DocTestsTestCase", "def test_flow(client, unrestricted_case, elasticsearch):\n # start with case\n response = client.get(api_reverse(\"cases-detail\", args=[unrestricted_case.id]))\n check_response(response)\n content = response.json()\n # onwards to court\n court_url = content.get(\"court\")[\"url\"]\n assert court_url\n response = client.get(court_url)\n check_response(response)\n # onwards to jurisdiction\n jurisdiction_url = content.get(\"jurisdiction\")[\"url\"]\n assert jurisdiction_url\n response = client.get(jurisdiction_url)\n check_response(response)\n content = response.json()\n assert content.get(\"name\") == unrestricted_case.jurisdiction.name", "def test_integration2(self):\n self._test_integration(2)", "def test_single_test_case():\n pass", "def TestOneStep(self):\n pass", "def client():\n\n gcomics_scrape.APP.config['TESTING'] = True\n test_client = gcomics_scrape.APP.test_client()\n\n yield test_client", "def test_for_client():", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def test_run():\n # Only few steps for test\n timesteps = 128\n\n # Compute all sub testing conf\n envs = ['CartPole-v0']\n ml_platforms = ['torch', 'tf']\n agents = ['dqn', 'a2c']\n\n test_combinations = list(it.product(\n envs,\n ml_platforms,\n agents\n )\n )\n\n # Finally test them all\n for conf in test_combinations:\n env_str, ml_platform_str, agent_str = conf\n run(\n agent_str,\n ml_platform_str,\n env_str,\n 'dense',\n timesteps,\n './target/')", "def test_create_scenario(self):\n pass", "def test_01_visit(self):", "def test_basic_execution(self):", "def test_train(self):\n print \"x=\",self.trainer.train()", "def test_Demo(self):\n self._run(self._example_scenarios, \"Demo\")", "def test_integration(self):\n self.assertTrue(return_true())", "def test_training(self):\n\t\tpass", "def runtest(self):", "def test_integration():\n\n p = pipeline.Pipeline(\n FX_TRAIN,\n FX_TEST,\n FX_LOOKUP,\n RESULTS_DIR\n )\n\n p.run()\n\n assert os.path.isfile(p.path(pipeline.PRED_FILE))", "def test_search_city(self):\n self.tc_id = \"Ts_011\"\n self.tc_desc = \"Verify user is able to register into the application with existing email id\"\n self.tc_step = \"TC Start\"\n\n searchbycity = SearchCity(self.driver)\n\n self.tc_step = \"Launch the url\"\n searchbycity.launchUrl(\"https://massdtaiot.com/dtahip/\")\n\n self.tc_step =\"Search the city\"\n searchbycity.chooseActon()\n # searchbycity.chooseVendor()", "def _run_ci_integration_test():\n _run_install(False)\n _run_integration_tests_on_github(False)", "def runTest(self):\r\n self.setUp()\r\n self.test_CreateROI1()", "def test_strategy(self):\n self.first_play_test(C)", "def runTest(self):\n self.setUp()\n self.test_JupyterNotebooks1()", "def test_functionality(self):\n self.browserObject = globalVars.browserObject\n\n self.preRunSetup()\n \n self.runTestCase()\n \n self.postRunCleanup()", "def test_continuous_traffic(self,setup_suite):\n logger.info('Test Traffic in continuous Mode')\n kwargs = { 'vs_names':['vs-1', 'vs-2']}\n traffic_obj = traffic_start(**kwargs)\n logger.info('Waiting for 10 sec while 
traffic is flowing ...')\n time.sleep(10)\n traffic_expect_no_errors(traffic_obj, vs_names=['vs-1', 'vs-2'])\n traffic_get_stats(traffic_obj)\n traffic_stop()" ]
[ "0.61749154", "0.58339995", "0.58300877", "0.57939094", "0.5758004", "0.5741501", "0.569589", "0.5691282", "0.56248826", "0.56186783", "0.55778545", "0.5574797", "0.55571", "0.5537148", "0.5529656", "0.5519395", "0.5519279", "0.5517526", "0.5515743", "0.5509292", "0.54952955", "0.54952323", "0.54724276", "0.54645425", "0.5450487", "0.5443788", "0.5432544", "0.5411115", "0.54103214", "0.5378916" ]
0.6831406
0
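The test in this record relies on fixtures the record does not include (`XORModel`, the per-client data shards, `same_weights`, the hyperparameter constants, and the loss it passes in, `F.mse_loss`). The sketch below shows plausible definitions for a few of them; these are assumptions about the shape of the missing fixtures, not the project's actual code.

```python
# Hedged sketch of fixtures the crowdsource test assumes but does not define here.
import torch
import torch.nn as nn


class XORModel(nn.Module):
    """A tiny network able to fit the XOR truth table."""
    def __init__(self):
        super().__init__()
        self.hidden = nn.Linear(2, 4)
        self.out = nn.Linear(4, 1)

    def forward(self, x):
        return self.out(torch.sigmoid(self.hidden(x)))


# Each client would hold some shard of the XOR truth table, e.g.:
alice_data = torch.tensor([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
alice_targets = torch.tensor([[0.0], [1.0], [1.0], [0.0]])


def same_weights(model_a, model_b, atol=1e-6):
    """True when two models hold (numerically) identical parameters."""
    return all(
        torch.allclose(pa, pb, atol=atol)
        for pa, pb in zip(model_a.parameters(), model_b.parameters())
    )
```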
r""" Perform an iterative procedure to find the optimal weights for K direct spectral estimators of DPSS tapered signals.
def _adaptive_weights(yk, eigvals, sides='onesided', max_iter=150): from multitaper_spectral import mtm_cross_spectrum K = len(eigvals) if sides not in [ 'one_sided', 'two_sided' ]: warnings.warn('Warning: strange input: sides', UserWarning) if max_iter <= 0: warnings.warn('Warning: strange input: iterations', UserWarning) if K < 3: warnings.warn('Warning--not adaptively combining the spectral ' 'estimators due to a low number of tapers.', UserWarning) # we'll hope this is a correct length for L N = yk.shape[ -1 ] L = N / 2 + 1 if sides == 'onesided' else N return (np.multiply.outer(np.sqrt(eigvals), np.ones(L)), 2 * K) rt_eig = np.sqrt(eigvals) # combine the SDFs in the traditional way in order to estimate # the variance of the timeseries N = yk.shape[ 1 ] sdf = mtm_cross_spectrum(yk, yk, eigvals[ :, None ], sides=sides) L = sdf.shape[ -1 ] var_est = np.sum(sdf, axis=-1) / N bband_sup = (1 - eigvals) * var_est # The process is to iteratively switch solving for the following # two expressions: # (1) Adaptive Multitaper SDF: # S^{mt}(f) = [ sum |d_k(f)|^2 S_k(f) ]/ sum |d_k(f)|^2 # # (2) Weights # d_k(f) = [sqrt(lam_k) S^{mt}(f)] / [lam_k S^{mt}(f) + E{B_k(f)}] # # Where lam_k are the eigenvalues corresponding to the DPSS tapers, # and the expected value of the broadband bias function # E{B_k(f)} is replaced by its full-band integration # (1/2pi) int_{-pi}^{pi} E{B_k(f)} = sig^2(1-lam_k) # start with an estimate from incomplete data--the first 2 tapers sdf_iter = mtm_cross_spectrum(yk[ :2 ], yk[ :2 ], eigvals[ :2, None ], sides=sides) err = np.zeros((K, L)) # for numerical considerations, don't bother doing adaptive # weighting after 150 dB down min_pwr = sdf_iter.max() * 10**(-150 / 20.) default_weights = np.where(sdf_iter < min_pwr)[ 0 ] adaptiv_weights = np.where(sdf_iter >= min_pwr)[ 0 ] w_def = rt_eig[ :, None ] * sdf_iter[ default_weights ] w_def /= eigvals[ :, None ] * sdf_iter[ default_weights ] + bband_sup[ :, None ] d_sdfs = np.abs(yk[ :, adaptiv_weights ])**2 if L < N: d_sdfs *= 2 sdf_iter = sdf_iter[ adaptiv_weights ] yk = yk[ :, adaptiv_weights ] for n in range(max_iter): d_k = rt_eig[ :, None ] * sdf_iter[ None, : ] d_k /= eigvals[ :, None ] * sdf_iter[ None, : ] + bband_sup[ :, None ] # Test for convergence -- this is overly conservative, since # iteration only stops when all frequencies have converged. # A better approach is to iterate separately for each freq, but # that is a nonvectorized algorithm. # sdf_iter = mtm_cross_spectrum(yk, yk, d_k, sides=sides) sdf_iter = np.sum(d_k**2 * d_sdfs, axis=0) sdf_iter /= np.sum(d_k**2, axis=0) # Compute the cost function from eq 5.4 in Thomson 1982 cfn = eigvals[ :, None ] * (sdf_iter[ None, : ] - d_sdfs) cfn /= (eigvals[ :, None ] * sdf_iter[ None, : ] + bband_sup[ :, None ])**2 cfn = np.sum(cfn, axis=0) # there seem to be some pathological freqs sometimes .. # this should be a good heuristic if np.percentile(cfn**2, 95) < 1e-12: break else: # If you have reached maximum number of iterations # Issue a warning and return non-converged weights: e_s = 'Breaking due to iterative meltdown in ' e_s += 'multitaper_utils._adaptive_weights.' warnings.warn(e_s, RuntimeWarning) weights = np.zeros((K, L)) weights[ :, adaptiv_weights ] = d_k weights[ :, default_weights ] = w_def nu = 2 * (weights**2).sum(axis=-2) return weights, nu
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_weights():\n weights = {}\n\n\n # estimate run time of step 1 (fast sweep)\n f_range = sweeper_script.settings['stop'] - sweeper_script.settings['start']\n N_samples = sweeper_script.settings['samplecount']\n df = f_range / N_samples\n\n t = N_samples / df\n\n weights['quick scan'] = t\n\n # estimate run time of step 2 (high res sweep)\n df = self.settings['high_res_df']\n N_samples = self.settings['high_res_N']\n\n t = N_samples / df\n\n weights['high res scan'] = t\n\n\n total_time = sum([v for k, v in weights.iteritems()])\n\n weights = {k: v/total_time for k, v in weights.iteritems()}\n\n print('weights',weights)\n\n return weights", "def adaptive_weights(yk, eigvals, sides='onesided', max_iter=150):\r\n from nitime.algorithms import mtm_cross_spectrum\r\n K = len(eigvals)\r\n if len(eigvals) < 3:\r\n print(\"\"\"\r\n Warning--not adaptively combining the spectral estimators\r\n due to a low number of tapers.\r\n \"\"\")\r\n # we'll hope this is a correct length for L\r\n N = yk.shape[-1]\r\n L = N / 2 + 1 if sides == 'onesided' else N\r\n return (np.multiply.outer(np.sqrt(eigvals), np.ones(L)), 2 * K)\r\n rt_eig = np.sqrt(eigvals)\r\n\r\n # combine the SDFs in the traditional way in order to estimate\r\n # the variance of the timeseries\r\n N = yk.shape[1]\r\n sdf = mtm_cross_spectrum(yk, yk, eigvals[:, None], sides=sides)\r\n L = sdf.shape[-1]\r\n var_est = np.sum(sdf, axis=-1) / N\r\n bband_sup = (1-eigvals)*var_est\r\n\r\n # The process is to iteratively switch solving for the following\r\n # two expressions:\r\n # (1) Adaptive Multitaper SDF:\r\n # S^{mt}(f) = [ sum |d_k(f)|^2 S_k(f) ]/ sum |d_k(f)|^2\r\n #\r\n # (2) Weights\r\n # d_k(f) = [sqrt(lam_k) S^{mt}(f)] / [lam_k S^{mt}(f) + E{B_k(f)}]\r\n #\r\n # Where lam_k are the eigenvalues corresponding to the DPSS tapers,\r\n # and the expected value of the broadband bias function\r\n # E{B_k(f)} is replaced by its full-band integration\r\n # (1/2pi) int_{-pi}^{pi} E{B_k(f)} = sig^2(1-lam_k)\r\n\r\n # start with an estimate from incomplete data--the first 2 tapers\r\n sdf_iter = mtm_cross_spectrum(yk[:2], yk[:2], eigvals[:2, None],\r\n sides=sides)\r\n err = np.zeros((K, L))\r\n # for numerical considerations, don't bother doing adaptive\r\n # weighting after 150 dB down\r\n min_pwr = sdf_iter.max() * 10 ** (-150/20.)\r\n default_weights = np.where(sdf_iter < min_pwr)[0]\r\n adaptiv_weights = np.where(sdf_iter >= min_pwr)[0]\r\n\r\n w_def = rt_eig[:,None] * sdf_iter[default_weights]\r\n w_def /= eigvals[:, None] * sdf_iter[default_weights] + bband_sup[:,None]\r\n\r\n d_sdfs = np.abs(yk[:,adaptiv_weights])**2\r\n if L < N:\r\n d_sdfs *= 2\r\n sdf_iter = sdf_iter[adaptiv_weights]\r\n yk = yk[:,adaptiv_weights]\r\n for n in range(max_iter):\r\n d_k = rt_eig[:,None] * sdf_iter[None, :]\r\n d_k /= eigvals[:, None]*sdf_iter[None, :] + bband_sup[:,None]\r\n # Test for convergence -- this is overly conservative, since\r\n # iteration only stops when all frequencies have converged.\r\n # A better approach is to iterate separately for each freq, but\r\n # that is a nonvectorized algorithm.\r\n #sdf_iter = mtm_cross_spectrum(yk, yk, d_k, sides=sides)\r\n sdf_iter = np.sum( d_k**2 * d_sdfs, axis=0 )\r\n sdf_iter /= np.sum( d_k**2, axis=0 )\r\n # Compute the cost function from eq 5.4 in Thomson 1982\r\n cfn = eigvals[:,None] * (sdf_iter[None,:] - d_sdfs)\r\n cfn /= (eigvals[:,None] * sdf_iter[None,:] + bband_sup[:,None])**2\r\n cfn = np.sum(cfn, axis=0)\r\n # there seem to be some pathological freqs sometimes ..\r\n # this 
should be a good heuristic\r\n if np.percentile(cfn**2, 95) < 1e-12:\r\n break\r\n else: # If you have reached maximum number of iterations\r\n # Issue a warning and return non-converged weights:\r\n e_s = 'Breaking due to iterative meltdown in '\r\n e_s += 'nitime.utils.adaptive_weights.'\r\n warnings.warn(e_s, RuntimeWarning)\r\n weights = np.zeros( (K,L) )\r\n weights[:,adaptiv_weights] = d_k\r\n weights[:,default_weights] = w_def\r\n nu = 2 * (weights ** 2).sum(axis=-2)\r\n return weights, nu", "def _function(self):\n\n\n\n def calculate_weights():\n \"\"\"\n calculate a weight inversely proportional to the expected to duration of the two steps in the\n script\n\n Returns: weights as a dictionary for the two steps\n\n \"\"\"\n weights = {}\n\n\n # estimate run time of step 1 (fast sweep)\n f_range = sweeper_script.settings['stop'] - sweeper_script.settings['start']\n N_samples = sweeper_script.settings['samplecount']\n df = f_range / N_samples\n\n t = N_samples / df\n\n weights['quick scan'] = t\n\n # estimate run time of step 2 (high res sweep)\n df = self.settings['high_res_df']\n N_samples = self.settings['high_res_N']\n\n t = N_samples / df\n\n weights['high res scan'] = t\n\n\n total_time = sum([v for k, v in weights.iteritems()])\n\n weights = {k: v/total_time for k, v in weights.iteritems()}\n\n print('weights',weights)\n\n return weights\n\n def run_scan(name):\n self.current_subscript = name\n sweeper_script.start()\n while self.current_subscript is name:\n time.sleep(0.1)\n\n def calc_new_range():\n\n\n df = self.settings['high_res_df']\n N = self.settings['high_res_N']\n\n r = sweeper_script.data[-1]['r']\n freq = sweeper_script.data[-1]['frequency']\n freq = freq[np.isfinite(r)]\n r = r[np.isfinite(r)]\n\n fo = freq[np.argmax(r)]\n\n f_start, f_end = fo - N/2 *df, fo + N/2 *df\n\n\n # make sure that we convert back to native python types (numpy file types don't pass the Parameter validation)\n return float(f_start), float(f_end), int(N)\n\n\n sweeper_script = self.scripts['zi sweep']\n #save initial settings, so that we can rest at the end of the script\n initial_settings = deepcopy(sweeper_script.settings)\n self.weights = calculate_weights()\n\n # take the signal from the subscript and route it to a function that takes care of it\n sweeper_script.updateProgress.connect(self._receive_signal)\n\n print('====== start quick scan ============')\n\n run_scan('quick scan')\n\n print('====== calculate new scan range ====')\n f_start, f_stop, N = calc_new_range()\n\n print('f_start, f_stop, N', f_start, f_stop, N)\n\n print('====== update sweeper ==============')\n sweeper_script.update({\n 'start' : f_start,\n 'stop' : f_stop,\n 'samplecount' : N\n })\n\n print('====== start high res scan =========')\n # print(sweeper_script.sweeper.finished())\n # print(sweeper_script.sweeper.progress())\n\n run_scan('high res scan')\n\n sweeper_script.updateProgress.disconnect()\n self.data = sweeper_script.data[-1]\n\n self._recording = False\n\n if self.settings['save']:\n self.save()\n\n # set the sweeper script back to initial settings\n sweeper_script.update(initial_settings)\n # make sure that progess is set 1o 100 because we check that in the old_gui\n self.updateProgress.emit(100)", "def run(self):\n # Cache paremeters and arrays\n allWeights = self.allWeights\n spenn, spene, spenu = self.shared.penn, self.shared.pene, self.shared.penu\n ind = self.istart\n nstat = allWeights.shape[0]\n npar = self.penn.shape[1]\n combineHorizontal = self.combineHorizontal\n \n # Choose my estimator\n 
if 'mean' in self.wtype:\n estimator = self.computeMean\n elif 'median' in self.wtype:\n estimator = self.computeMedian\n else:\n assert False, 'unsupported weight type. must be mean or median'\n \n # Loop over my portion of GPS stations\n for jj in range(nstat):\n # Extract weights\n #plt.semilogy(spenn[ind,:], 'b')\n #plt.semilogy(spene[ind,:], 'r')\n #plt.savefig('figures/penalties_%03d.png' % ind)\n #plt.clf()\n weights = np.tile(np.expand_dims(allWeights[jj,:], axis=1), (1,npar))\n # Compute weighted penalty\n if combineHorizontal:\n wgtPen = estimator(weights, spenn+spene, 0.5*(self.npenalty+self.epenalty))\n self.penn[ind,:] = wgtPen\n self.pene[ind,:] = wgtPen\n else:\n self.penn[ind,:] = estimator(weights, spenn, self.npenalty)\n self.pene[ind,:] = estimator(weights, spene, self.epenalty)\n self.penu[ind,:] = estimator(weights, spenu, self.upenalty)\n ind += 1\n\n return", "def weights_treatment_parameters(init_dict, GRID):\n GRID = np.linspace(0.01, 0.99, num=99, endpoint=True)\n\n coeffs_untreated = init_dict[\"UNTREATED\"][\"params\"]\n coeffs_treated = init_dict[\"TREATED\"][\"params\"]\n cov = construct_covariance_matrix(init_dict)\n x = simulate_covariates(init_dict)\n\n # We take the specified distribution for the cost shifters from the paper.\n cost_mean, cost_sd = -0.0026, np.sqrt(0.270)\n v_mean, v_sd = 0.00, np.sqrt(cov[2, 2])\n\n eval_points = norm.ppf(GRID, loc=v_mean, scale=v_sd)\n\n ate_weights = np.tile(1.0, 99)\n tut_weights = norm.cdf(eval_points, loc=cost_mean, scale=cost_sd)\n\n tt_weights = 1 - tut_weights\n\n def tut_integrand(point):\n eval_point = norm.ppf(point, loc=v_mean, scale=v_sd)\n return norm.cdf(eval_point, loc=cost_mean, scale=cost_sd)\n\n def tt_integrand(point):\n eval_point = norm.ppf(point, loc=v_mean, scale=v_sd)\n return norm.cdf(eval_point, loc=cost_mean, scale=cost_sd)\n\n # Scaling so that the weights integrate to one.\n tut_scaling = quad(tut_integrand, 0.01, 0.99)[0]\n tut_weights /= tut_scaling\n\n tt_scaling = quad(tt_integrand, 0.01, 0.99)[0]\n tt_weights /= tt_scaling\n\n mte = mte_information(coeffs_treated, coeffs_untreated, cov, GRID, x, init_dict)\n\n return ate_weights, tt_weights, tut_weights, mte", "def run(self):\n\n # Initializing required variables for the algorithm\n start = time.time()\n i = 0\n w = self.w\n k = 99.99\n\n #Running the algorithm till tolerance is reached\n while k > self.tolerance: \n\n i = i + 1\n\n if self.verbose:\n print(\"Iteration \" + str(i),end=\"\\r\")\n\n # Following is a gradient descent algorithm\n # You can find the update step at the following\n # link, https://raw.githubusercontent.com/mnk400/linearRegression/master/img/EUC.png\n t_0 = w.T.dot(w)\n t_1 = self.x.T.dot(self.x).dot(w)\n gradient = (2/t_0) * (t_1) - (2/t_0**2) * np.transpose(w).dot(t_1) * (w)\n w_new = w - (1/self.DATAPOINTS) * 0.3 * gradient\n w_new[-1] = -1\n l = len(self.x[0]) - 1 \n y_plt = np.dot(self.x[:,0:l],w_new[:-1]) \n\n #tolerance is checked against the root mean square of change in weights\n k = np.dot(np.transpose(w - w_new),(w - w_new))\n\n #Updating the weights \n w = w_new\n\n if self.verbose:\n print(\"Tolerance Reached\")\n timetaken = round(time.time() - start, 3)\n print(\"Ran for \" + str(timetaken) + \" seconds\" + \" in \" + str(i) + \" iterations.\")\n\n return w_new, y_plt", "def calculate_weighted_results():\n pass", "def weighted_ps(self, mfactor=1.1):\n self.weightedpower=[]\n #ksum=np.sum(self.psdata[self.klist)\n Nk=int(len(self.klist)/mfactor)\n for i in range(self.Nsubs):\n 
nsum=np.sum(self.psdata[i][1][0:Nk])\n total=np.sum(np.array([self.psdata[i][1][j]*self.powerspectra[i][j] for j in range(Nk)]))\n self.weightedpower.append(total/nsum)\n\n # also find correlation\n self.corr=[]\n for i in range(self.Nsubs):\n self.corr.append(self.ds[i]*self.weightedpower[i])\n\n self.corr_mean=np.mean(self.corr)\n self.corr_sigma=np.sqrt(np.var(self.corr))", "def weighted_current(h,nk=400,fun=None):\n if fun is None:\n delta = 0.01\n def fun(e): return (-np.tanh(e/delta) + 1.0)/2.0\n jgs = np.zeros(h.intra.shape[0]) # current array\n hkgen = h.get_hk_gen() # generator\n fj = current_operator(h) # current operator\n ks = np.linspace(0.0,1.0,nk,endpoint=False) # k-points\n for k in ks: # loop\n hk = hkgen(k) # Hamiltonian\n (es,ws) = lg.eigh(hk) # diagonalize\n ws = ws.transpose() # transpose\n jk = fj(k) # get the generator\n for (e,w) in zip(es,ws): # loop\n weight = fun(e) # weight\n print(weight)\n d = np.conjugate(w)*ket_Aw(jk,w) # current density\n jgs += d.real*weight # add contribution\n# jgs += (np.abs(w)**2*weight).real # add contribution\n jgs /= nk # normalize\n print(\"Total current\",np.sum(jgs))\n np.savetxt(\"CURRENT1D.OUT\",np.matrix([range(len(jgs)),jgs]).T)", "def _buildWeights(self):\r\n # Compute the spatial tree\r\n kd = spatial.cKDTree(self.XYin)\r\n \r\n # Perform query on all of the points in the grid\r\n dist,self.ind=kd.query(self.XYout,distance_upper_bound=self.maxdist,k=self.NNear)\r\n \r\n self.Nc = np.size(self.ind,axis=0)\r\n print '%d interpolation points.'%self.Nc\r\n # Now loop through and get the weights for each point\r\n self.W = np.zeros((self.NNear,self.Nc))\r\n\r\n # Print percentages\r\n p0=0\r\n pstep=5\r\n for ii in range(0,self.Nc):\r\n \r\n if self.verbose:\r\n pfinish = float(ii)/float(self.Nc)*100.0\r\n if pfinish> p0:\r\n print '%3.1f %% complete...'%pfinish\r\n p0+=pstep\r\n \r\n W = self.getWeights(dist[ii,:],self.XYin[self.ind[ii,:],0],self.XYin[self.ind[ii,:],1])\r\n self.W[:,ii] = W.T", "def test_tpr_fwer_alex(self, syn_genomic_data, syn_labels, syn_labels_0based, syn_labels_cat, syn_fm, syn_idx, rep, syn_true_pvalues):\n\n window_length = 35\n best_params_montaez['n_snps'] = n_total_snps\n\n def combi_compute_pvalues(d, x, fm, l,filter_window_size,pf,ps,k):\n\n idx, pvalues, raw_weights = combi_method(d, x,fm, l,filter_window_size,pf,ps,k)\n pvalues_filled = np.ones(n_total_snps)\n pvalues_filled[idx] = pvalues\n raw_weights = postprocess_weights_without_avg(raw_weights, p_svm)\n\t\t\t\n # Map the raw weights to look like p-values between 0 and 1 (reverse order)\t\t\n # Figure out how 'wide' range is\n leftSpan = np.max(raw_weights) - np.min(raw_weights)\n\n # Convert the left range into a 0-1 range (float)\n valueScaled = (raw_weights - np.min(raw_weights)) / leftSpan\n\n # Reverse order\n raw_weights = 1 - valueScaled\t\n\t\t\t\n del d, l\n return pvalues_filled, raw_weights\n\n def challenger_compute_pvalues(d, x, l_0b, l, idx):\n is_only_zeros = False\n with tensorflow.Session().as_default():\n\n model = create_montaez_dense_model(best_params_montaez)\n y_integers = np.argmax(l_0b[idx.train], axis=1)\n class_weights = class_weight.compute_class_weight('balanced', np.unique(y_integers), y_integers)\n d_class_weights = dict(enumerate(class_weights))\n model.fit(x=x[idx.train], y=l_0b[idx.train], validation_data=(x[idx.test], l_0b[idx.test]), epochs=best_params_montaez['epochs'],class_weight=d_class_weights, callbacks=[ReduceLROnPlateau(monitor='val_loss', factor=best_params_montaez['factor'], 
patience=best_params_montaez['patience'], mode='min'),])\n\n model = iutils.keras.graph.model_wo_softmax(model)\n analyzer = innvestigate.analyzer.LRPAlpha1Beta0(model)\n weights = analyzer.analyze(x).sum(0)\n\n if np.max(abs(weights)) < 0.005:\n fig, axes = plt.subplots(1)\n is_only_zeros = True\n axes.plot(np.absolute(weights).sum(axis=1))\n fig.savefig(os.path.join(IMG_DIR, 'test.png'))\n\n top_indices_sorted, _ = postprocess_weights(weights, top_k, window_length, p_svm, p_pnorm_filter)\n rawlrp_scores_now = postprocess_weights_without_avg(weights,p_svm)\n\t\t\t\t\n # Map the raw weights to look like p-values between 0 and 1 (reverse order)\t\t\n # Figure out how 'wide' range is\n leftSpan = np.max(rawlrp_scores_now) - np.min(rawlrp_scores_now)\n\n # Convert the left range into a 0-1 range (float)\n valueScaled = (rawlrp_scores_now - np.min(rawlrp_scores_now)) / leftSpan\n\n # Reverse order\n rawlrp_scores_now = 1 - valueScaled\t\t\t\t\t\n\t\t\t\t\n pvalues = chi_square(d[:, top_indices_sorted], l)\n pvalues_filled = np.ones(n_total_snps)\n pvalues_filled[top_indices_sorted] = pvalues\n del d, x, l\n\n return pvalues_filled, is_only_zeros, rawlrp_scores_now\n\n fm_2d = syn_fm(\"2d\")\n fm_3d = syn_fm(\"3d\")\n\n clf = LinearSVC(penalty='l2', loss='hinge', C=0.0022, dual=True, tol=1e-3, verbose=0, class_weight='balanced')\n\n bla = Parallel(n_jobs=-1, require='sharedmem')(delayed(combi_compute_pvalues)(clf, syn_genomic_data[str(i)][:], fm_2d[str(i)][:], syn_labels[str(i)], 35, 2, 2, 30) for i in tqdm(range(rep)))\n raw_svmweights_per_run_combi = np.array(list(np.array(bla)[:, 1]))\n pvalues_per_run_combi = np.array(list(np.array(bla)[:, 0]))\n\n pvalues_per_run_rpvt = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(chi_square)(syn_genomic_data[str(i)][:], syn_labels[str(i)]) for i in tqdm(range(rep))))\n\n # len(thresholds) * len(window_sizes) * 10020\n abl = Parallel(n_jobs=-1, require='sharedmem')(delayed(challenger_compute_pvalues)(syn_genomic_data[str(i)][:], fm_3d[str(i)][:], syn_labels_cat[str(i)], syn_labels[str(i)], syn_idx[str(i)]) for i in tqdm(range(rep)))\n\n # Collect results\n pvalues_per_run_dense = np.array(list(np.array(abl)[:, 0]))\n rawlrp_scores_per_run_dense = np.array(list(np.array(abl)[:, 2]))\n \n # INNvestigate bugfix\n zeros_index = np.array(list(np.array(abl)[:, 1])) \n pvalues_per_run_combi = pvalues_per_run_combi[np.logical_not(zeros_index)]\n raw_svmweights_per_run_combi = raw_svmweights_per_run_combi[np.logical_not(zeros_index)]\n pvalues_per_run_dense = pvalues_per_run_dense[np.logical_not(zeros_index)]\n rawlrp_scores_per_run_dense = rawlrp_scores_per_run_dense[np.logical_not(zeros_index)]\n pvalues_per_run_rpvt = pvalues_per_run_rpvt[np.logical_not(zeros_index)]\n true_pvalues = syn_true_pvalues[np.logical_not(zeros_index)]\n\n # COMBI\n res_combi = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(pvalues_per_run_combi, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n tpr_combi, _, fwer_combi, precision_combi = res_combi.T\n\n # SVM weights\n res_rawsvm = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(raw_svmweights_per_run_combi, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n tpr_rawsvm, _, fwer_rawsvm, precision_rawsvm = res_rawsvm.T\n\n # RPVT\n res_rpvt = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(pvalues_per_run_rpvt, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n\t\t\n tpr_rpvt, _, fwer_rpvt, precision_rpvt = 
res_rpvt.T\n\n # Plot\n fig, axes = plt.subplots(1,2)\n\n fig.set_size_inches(15, 9)\n ax1, ax2 = axes\n\n ax1.set_ylim(0, 0.7)\n ax1.set_xlim(0, 0.3)\n ax1.set_ylabel('True positive rate', fontsize=14)\n ax1.set_xlabel('Family-wise error rate', fontsize=14)\n ax2.set_ylabel('Precision', fontsize=14)\n ax2.set_xlabel('True positive rate', fontsize=14)\n\n # CURVES must stop somewhere\n tpr_rpvt_new = tpr_rpvt[tpr_rpvt < 1]\n fwer_rpvt = fwer_rpvt[tpr_rpvt < 1]\n precision_rpvt = precision_rpvt[tpr_rpvt < 1]\n tpr_rpvt = tpr_rpvt_new\n\n tpr_combi_new = tpr_combi[tpr_combi < 1]\n fwer_combi = fwer_combi[tpr_combi < 1]\n precision_combi = precision_combi[tpr_combi < 1]\t\n tpr_combi = tpr_combi_new\n\n tpr_rawsvm_new = tpr_rawsvm[tpr_rawsvm < 1]\n fwer_rawsvm = fwer_rawsvm[tpr_rawsvm < 1]\n precision_rawsvm = precision_rawsvm[tpr_rawsvm < 1]\n tpr_rawsvm = tpr_rawsvm_new\n\t\t\n # RPVT\n ax1.plot(fwer_rpvt, tpr_rpvt, label='RPVT', color='lightsteelblue', linewidth=2)\n ax2.plot(tpr_rpvt, precision_rpvt, color='lightsteelblue', label='RPVT', linewidth=2)\n\n # COMBI \n ax1.plot(fwer_combi, tpr_combi, color='darkblue', label='COMBI', linewidth=2)\n ax2.plot(tpr_combi, precision_combi, color='darkblue', label='COMBI', linewidth=2)\n \n # raw SVM weights\n ax1.plot(fwer_rawsvm, tpr_rawsvm, linestyle='--', color='darkblue', label='SVM weights', linewidth=2)\n ax2.plot(tpr_rawsvm, precision_rawsvm, linestyle='--', color='darkblue', label='SVM weights', linewidth=2)\n\n # DeepCOMBI + LRP scores\n res_dense = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(pvalues_per_run_dense, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n res_rawlrp_dense = np.array(Parallel(n_jobs=-1, require='sharedmem')(delayed(compute_metrics)(rawlrp_scores_per_run_dense, true_pvalues, threshold) for threshold in tqdm(thresholds)))\n\n tpr_dense, _, fwer_dense, precision_dense = res_dense.T\n tpr_rawlrp_dense, _, fwer_rawlrp_dense, precision_rawlrp_dense = res_rawlrp_dense.T\n\n assert fwer_combi.max() <= 1 and fwer_combi.min() >= 0\n tpr_dense_new = tpr_dense[tpr_dense < 1]\n fwer_dense = fwer_dense[tpr_dense < 1]\n precision_dense = precision_dense[tpr_dense < 1]\n tpr_dense = tpr_dense_new\n\n tpr_rawlrp_dense_new = tpr_rawlrp_dense[tpr_rawlrp_dense < 1]\n fwer_rawlrp_dense = fwer_rawlrp_dense[tpr_rawlrp_dense < 1]\n precision_rawlrp_dense = precision_rawlrp_dense[tpr_rawlrp_dense < 1]\n tpr_rawlrp_dense = tpr_rawlrp_dense_new\n\n # DeepCOMBI\n ax1.plot(fwer_dense, tpr_dense, color='fuchsia', label='DeepCOMBI', linewidth=3)\n ax2.plot(tpr_dense, precision_dense, color='fuchsia', label='DeepCOMBI', linewidth=3)\n\n # LRP scores\n ax1.plot(fwer_rawlrp_dense, tpr_rawlrp_dense, color='fuchsia', linestyle='--', label='LRP scores', linewidth=2)\n ax2.plot(tpr_rawlrp_dense, precision_rawlrp_dense, color='fuchsia', linestyle='--', label='LRP scores', linewidth=2)\n\n ax1.legend(fontsize=14,loc= 'lower right')\n ax2.legend(fontsize=14, loc= 'lower right')\n fig.savefig(os.path.join(IMG_DIR, 'tpr_fwer_montaez_final1000_NAR.png'), bbox_inches='tight', dpi=300)\n print(np.sum(zeros_index))\n pdb.set_trace()\n\n # CURVES must stop somewhere\n #combi_fp = combi_fp[combi_fp < 80]\n #combi_tp = combi_tp[:len(combi_fp)]\n #deepcombi_fp = deepcombi_fp[deepcombi_fp < 80]\n #deepcombi_tp = deepcombi_tp[:len(deepcombi_fp)]", "def time_path_iteration(params=params, S=3, T=50, weight=0.3, tol=1e-12, maxiter=100):\n ss_output = get_SS()\n b_ss = ss_output['b_ss']\n b_init = np.array([0, 0.8 * b_ss[0], 
1.1 * b_ss[1]]) # t=0\n\n # Guess transition path, finishes at steady_state\n Kguess = np.linspace(b_init.sum(), ss_output['K_ss'], T)\n\n s = 1\n K_dynamic = Kguess\n b_current = np.zeros((S,T)) # initialize array to store savings decisions\n b_current[:,0] = b_init\n\n # Update b_path until convergence\n its = 0\n ee_diff = 7.0\n while ee_diff > tol and its < maxiter:\n its += 1\n w_dynamic = find_w(L=params['labor_supply'].sum(), K=K_dynamic)\n r_dynamic = find_r(L=params['labor_supply'].sum(), K=K_dynamic)\n for t in range(T-2):\n\n #solve for b32, savings decision of middle-aged in first period\n ee_param = (w_dynamic, r_dynamic, params['labor_supply'], b_current[:,t], s, t)\n b_current[s+1,t+1] = opt.root(ee_err_1, 0, args=ee_param).x\n\n # solve for b22, b33, savings decision of young gen in middle/old generations\n ee_param = (w_dynamic, r_dynamic, params['labor_supply'], b_init, s, t)\n b_current[s,t+1], b_current[s+1, t+2]= opt.root(ee_err_23, [0,0], args=ee_param).x\n # fill in table\n b_current[s,T-1] = b_current[s,T-2]\n\n # Check for convergence\n K_prime = b_current.sum(axis=0)\n ee_diff = (K_prime - K_dynamic).max()\n\n# rc_diff = production(K_prime, L=params['labor_supply'].sum())\n# - Ct = (1 + r_dynamic) * ()\n# - np.roll(K_prime, len(K_prime)-1)\n# - (1 - delta) * K_prime\n\n print('Iteration number: ', its, 'Current EE difference: ', ee_diff)\n # update new capital path\n K_dynamic = weight * K_prime + (1-weight) * K_dynamic\n\n fig, ax = plt.subplots(1,1,figsize=(8,6))\n plt.plot(range(T), Kguess, 'r--',lw=0.7, label='Kguess')\n plt.plot(range(T), K_dynamic , label='Capital Path Solution')\n plt.title('Transition Path of Aggregate Capital')\n plt.xlabel('Time period')\n plt.ylabel('Aggregate Capital')\n plt.legend()\n\n fig, ax = plt.subplots(1,1,figsize=(8,6))\n plt.plot(range(T), r_dynamic, 'g-o',label='Interest rate Path Solution')\n plt.title('Transition Path of Aggregate Interest rate')\n plt.xlabel('Time period')\n plt.ylabel('Interest Rate')\n plt.legend()\n\n fig, ax = plt.subplots(1,1,figsize=(8,6))\n plt.plot(range(T), w_dynamic, 'k-o',label='Wage Path Solution')\n plt.title('Transition Path of Wages')\n plt.xlabel('Time period')\n plt.ylabel('Wages')\n plt.legend()\n\n return K_dynamic", "def calc_std_nDCG_AP_corpus_smoothing(p):\n \n# nDCG_MAP_res = base_path +\"\\\\nDCG_MAP_res\\\\\"\n measures_res = linux_base_path+ \"/measures_res\"+setup+\"/\"\n k_val = 50\n NDCG_AP_all_claims_all_param_values = read_pickle(measures_res+\"NDCG_AP_prec_at_k_all_claims_all_param_values_top_k_docs_\"+str(k_val)+\"_at_\"+str(p)) #key:clm,alpha_f,beta_f,k_val,lambda_f val nDCG_score,AP_score\n each_params_AVGnDCG_MAP_dict = read_pickle(measures_res+\"each_params_AVGnDCG_MAP_prec_at_k_dict_top_k_docs_\"+str(k_val)+\"_at_\"+str(p)) #key:alpha_f,beta_f,k_val,lambda_f\n nDCG_MAP_std = {} #key is a configuration quadruplet, value is the std of the measures\n \n \n \n# for k_val in top_k_docs_values:\n for alpha in range(0,11,1): #change just for test!\n for beta in range(0,10,1):\n for lambda_int in range(0,11,1):\n lambda_f = turn_to_float([lambda_int])\n (alpha_f,beta_f) = turn_to_float([alpha,beta])\n curr_AP_var = 0\n curr_nDCG_var = 0\n curr_prec_at_5_var = 0\n curr_prec_at_10_var = 0\n for clm in claim_list:\n curr_nDCG_var += (NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][0] - each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][0])**2\n curr_AP_var += (NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][1] - 
each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][1])**2\n curr_prec_at_5_var += (NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][2] - each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][2])**2\n curr_prec_at_10_var +=(NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][3] - each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][3])**2\n curr_nDCG_std = float(float(math.sqrt(curr_nDCG_var))/float(len(claim_list)))\n curr_AP_std = float(float(math.sqrt(curr_AP_var))/float(len(claim_list)))\n curr_prec_at_5_std = float(float(math.sqrt(curr_prec_at_5_var))/float(len(claim_list)))\n curr_prec_at_10_std =float(float(math.sqrt(curr_prec_at_10_var))/float(len(claim_list)))\n nDCG_MAP_std[alpha_f,beta_f,k_val,lambda_f] = (curr_nDCG_std,curr_AP_std,curr_prec_at_5_std,curr_prec_at_10_std)\n save_pickle(measures_res+\"nDCG_MAP_prec_at_k_std_for_each_configuration_k_top_docs_\"+str(k_val)+\"_at_\"+str(p), nDCG_MAP_std)", "def optimal_weights(self,n_points,er,cov):\n \n target_rs = np.linspace(er.min(), er.max(), n_points)\n weights = [self.minimize_vol(target_return, er, cov) for target_return in target_rs]\n \n return weights", "def OutOfSampleExtension(self, P, k = 3):\n \n n_obs = P.shape[0]\n knn = []\n for obs in range(n_obs):\n x_obs = P[obs]\n # get nearest neighbors\n dist = pairwise_distances(np.vstack([self.X, x_obs]))\n knn_aux = np.argsort(dist[self.n:,:], axis = 1)[:, 1 : k + 1]\n knn.append(knn_aux[0]) \n knn = np.array(knn)\n # compute weights\n \n W = [] # Initialize nxn weight matrix\n for i in range(n_obs):\n x_i = P[i]\n G = [] # Local covariance matrix\n for j in range(k):\n x_j = self.X[knn[i][j]]\n G_aux = []\n for k_2 in range(k):\n x_k = self.X[knn[i][k_2]]\n gjk = np.dot((x_i - x_j), (x_i - x_k))\n G_aux.append(gjk)\n G.append(G_aux)\n G = np.array(G)\n G = G + self.reg*np.eye(*G.shape) # Regularization for G\n w = np.linalg.solve(G, np.ones((k))) # Calculate weights for x_i\n w = w / w.sum() # Normalize weights; sum(w)=1\n \n if self.verbose and i % 3 == 0:\n print('[INFO] Weights calculated for {} observations'.format(i + 1))\n \n W.append(w)\n \n W = np.array(W)\n y = []\n for i in range(n_obs):\n y_aux = W[i] @ self.Y[knn[i]]\n y.append(y_aux)\n y = np.array(y)\n return y", "def run(self):\n is_spsa = True\n is_steep_descent = False\n is_rprop = False\n\n k = 0\n theta = self.theta0\n\n while True:\n k = k + 1\n\n self.iter = k\n print(f'starting iter {k} ...')\n\n if self.constraints is not None:\n theta = self.constraints(theta)\n\n print('current param:')\n for name, value in utils.true_param(theta).items():\n print(f' {name}: {value[\"value\"]}')\n\n c_k = self.c / (k ** self.gamma)\n a_k = self.a / ((k + self.A) ** self.alpha)\n\n # print(f' ck: {c_k:0.5f}')\n # print(f' ak: {a_k:0.5f}')\n\n # Run the engine match here to get the gradient\n print('Run engine match ...')\n gradient = self.approximate_gradient(theta, c_k, k)\n\n # For SPSA we update with a small step (theta = theta - a_k * gradient)\n if is_spsa:\n theta = utils.linear_combinaison(1.0, theta, -a_k, gradient)\n logging.info(f'{__file__} > theta from spsa: {theta}')\n # print(f'new param after application of gradient:')\n # for n, v in theta.items():\n # print(f' {n}: {int(v[\"value\"] * v[\"factor\"])}')\n\n # For steepest descent we update via a constant small step in the gradient direction\n elif is_steep_descent:\n mu = -0.01 / max(1.0, utils.norm2(gradient))\n theta = utils.linear_combinaison(1.0, theta, mu, gradient)\n\n # 
For RPROP, we update with information about the sign of the gradients\n elif is_rprop:\n theta = utils.linear_combinaison(1.0, theta, -0.01, self.rprop(theta, gradient))\n\n # Apply parameter limits\n theta = utils.apply_limits(theta)\n logging.info(f'{__file__} > theta with limits: {theta}')\n # print(f'new param after application of limits:')\n # for n, v in theta.items():\n # print(f' {n}: {int(v[\"value\"] * v[\"factor\"])}')\n\n # We then move to the point which gives the best average of goal\n (avg_goal, avg_theta) = self.average_best_evals(30)\n logging.info(f'{__file__} > avg_theta from average_best_evals: {avg_theta}')\n\n theta = utils.linear_combinaison(0.98, theta, 0.02, avg_theta)\n logging.info(f'{__file__} > theta with avg_theta: {theta}')\n # print(f'new param after application of best average param:')\n # for n, v in theta.items():\n # print(f' {n}: {int(v[\"value\"] * v[\"factor\"])}')\n\n # Apply parameter limits\n theta = utils.apply_limits(theta) # This is the best param.\n logging.info(f'{__file__} > best param: {theta}')\n # print(f'new param after application of limits:')\n # for n, v in theta.items():\n # print(f' {n}: {int(v[\"value\"] * v[\"factor\"])}')\n\n # Log best param values\n for kv, vv in theta.items():\n logging.info(f'<best> iter: {k}, param: {kv}, value: {int(vv[\"value\"]*vv[\"factor\"])}')\n print('best param:')\n for n, v in theta.items():\n print(f' {n}: {int(v[\"value\"] * v[\"factor\"])}')\n\n mean_all_goal, _ = self.average_evaluations(30)\n print(f'mean all goal: {mean_all_goal}')\n\n mean_best_goal, _ = self.average_best_evals(30)\n print(f'mean best goal: {mean_best_goal}')\n\n # Save data in csv for plotting.\n plot_data = {}\n plot_data.update({'iter': k})\n plot_data.update({'meanbestgoal': mean_best_goal})\n plot_data.update({'meanallgoal': mean_all_goal})\n plot_theta = utils.true_param(theta)\n for name, value in plot_theta.items():\n plot_data.update({name: value[\"value\"]})\n\n with open(self.plot_data_file, 'a') as f:\n cnt = 0\n for name, value in plot_data.items():\n cnt += 1\n if cnt == len(plot_data):\n f.write(f'{value}\\n')\n else:\n f.write(f'{value},')\n\n print(f'done iter {k} / {self.max_iter}')\n logging.info(f'{__file__} > done iter {k} / {self.max_iter}')\n print('=========================================')\n\n # Stopping rule 1: Average goal and iteration meet the\n # stop_all_mean_goal and stop_min_iter criteria.\n if k >= self.stop_min_iter and mean_all_goal <= self.stop_all_mean_goal:\n print('Stop opimization due to good average all goal!')\n break\n\n # Stopping rule 2: Average best goal and iteration meet the\n # stop_best_mean_goal and stop_min_iter criteria.\n if k >= self.stop_min_iter and mean_best_goal <= self.stop_best_mean_goal:\n print('Stop opimization due to good average best goal!')\n break\n\n # Stopping rule 3: Max iteration is reached.\n if k >= self.max_iter:\n print('Stop opimization due to max iteration!')\n break\n\n return utils.true_param(theta)", "def compute_weights(self):\n # Do the leave-one-out experiments\n loocv = np.zeros((self.M, self.nump))\n for i in range(self.M):\n for j in range(self.nump):\n loocv[i, j] = self.surrogate_list[i][j].eval(self.x[j, :])\n\n # Compute the model characteristics\n corr_coeff = np.ones(self.M)\n for i in range(self.M):\n corr_coeff[i] = np.corrcoef(np.vstack(\n (loocv[i, :], self.get_fx().flatten())))[0, 1]\n\n root_mean_sq_err = np.ones(self.M)\n for i in range(self.M):\n root_mean_sq_err[i] = 1.0 / math.sqrt(\n 
self._mean_squared_error(self.get_fx().flatten(), loocv[i, :]))\n\n mean_abs_err = np.ones(self.M)\n for i in range(self.M):\n mean_abs_err[i] = 1.0 / self._mean_abs_err(\n self.get_fx().flatten(), loocv[i, :])\n\n # Make sure no correlations are negative\n corr_coeff[np.where(corr_coeff < 0.0)] = 0.0\n if np.max(corr_coeff) == 0.0:\n corr_coeff += 1.0\n\n # Normalize the test statistics\n corr_coeff /= np.sum(corr_coeff)\n root_mean_sq_err /= np.sum(root_mean_sq_err)\n mean_abs_err /= np.sum(mean_abs_err)\n\n # Create mass functions based on the model characteristics\n m1 = self._prob_to_mass(corr_coeff)\n m2 = self._prob_to_mass(root_mean_sq_err)\n m3 = self._prob_to_mass(mean_abs_err)\n\n # Compute pignistic probabilities from Dempster-Shafer theory\n pignistic = m1.combine_conjunctive([m2, m3]).to_dict()\n self.weights = np.ones(self.M)\n for i in range(self.M):\n self.weights[i] = pignistic.get(str(i+1))", "def _fit(self):\n loss = 1e10\n weights = self._init_weights\n while loss > self._converge_epsilon:\n d_F = 2 * (self._input.t() * self._input *\n weights - self._input.t() * self._label)\n dd_F = 2 * self._input.t() * self._input\n weights = weights - dd_F.inv() * d_F\n loss = self._mse(weights)\n print('Error : {}'.format(loss))\n return weights", "def process_weights(w, df, s, n):\n T, N = w.shape\n weight_mat = pd.DataFrame(index=df.index, columns=df.columns)\n idx = weight_mat.index\n\n def process(series):\n # True if less than 50% of obs. are constant\n return series.value_counts(dropna=False).max() < 0.5 * s\n\n previousValidWeights = None\n for t in range(n, T, s):\n t_s = min(T, t + s)\n weight_vect = np.zeros([N, 1])\n # check if the past data are constant\n # if asset i is constant set w_i = 0\n sub_X = df.iloc[t:t_s, :].copy()\n assets = sub_X.apply(process).values\n weight_vect[assets, 0] = w[t][assets]\n # Verify if sum(w) = 0\n if weight_vect.min()<0:\n weight_vect=weight_vect - weight_vect.min()\n\n if weight_vect.sum() == 0.:\n if previousValidWeights is None:\n weight_mat.iloc[t: t_s] = np.ones([t_s - t, N]) / N\n else :\n weight_mat.iloc[t: t_s] = previousValidWeights\n elif weight_vect.sum() != 1.:\n weight_mat.iloc[t: t_s] = np.ones([t_s - t, 1]) @ weight_vect.T / weight_vect.sum()\n previousValidWeights = np.ones([t_s - t, 1]) @ weight_vect.T / weight_vect.sum()\n else:\n weight_mat.iloc[t: t_s] = np.ones([t_s - t, 1]) @ weight_vect.T\n previousValidWeights = np.ones([t_s - t, 1]) @ weight_vect.T\n\n if abs(weight_mat.iloc[t:t_s].sum().sum()) <= 1e-6:\n print('null prediction, investigate')\n\n return weight_mat", "def optimize(self, return_teacher_params_bool = False):\n\n gen_batches = self.DATASET.data_stream(self.BATCH_SIZE)\n \n num_complete_batches, leftover = divmod(self.DATASET.num_example['train'], self.BATCH_SIZE)\n\n # number of minibatches per epoch\n num_mini_batches_per_epochs = num_complete_batches + bool(leftover)\n\n # number of total iterations\n num_total_iters = self.NUM_EPOCHS * num_mini_batches_per_epochs\n\n # number of time that the sparisty levels get updated\n num_sparsity_updates = num_total_iters // self.MASK_UPDATE_FREQ \n \n mask_update_limit = num_total_iters - self.MASK_UPDATE_FREQ\n \n if self.SAVE_BOOL == True:\n # save the transferred results in the desinated directory.\n\n trans_model_dir = self.unique_model_dir\n\n# while os.path.exists(trans_model_dir):\n# trans_model_dir = trans_model_dir + '_0'\n \n if not os.path.exists(trans_model_dir):\n os.makedirs(trans_model_dir)\n\n np.save(trans_model_dir + '/param_dict.npy', 
self.param_dict) \n \n \n\n nt_trans_params_all_sparsities_all_runs = []\n nt_trans_masks_all_sparsities_all_runs = []\n nt_trans_vali_all_sparsities_all_runs = []\n teacher_params_all_sparsities_all_runs = []\n \n \n num_sparisty_levels = len(self.NN_DENSITY_LEVEL_LIST) \n num_runs = len(range(self.INIT_RUN_INDEX, self.INIT_RUN_INDEX + self.NUM_RUNS ))\n all_density_all_run_num_total_iters = num_sparisty_levels * num_runs * num_total_iters\n \n \n for nn_density_level in self.NN_DENSITY_LEVEL_LIST: \n \n \n nt_trans_params_all_runs = []\n nt_trans_masks_all_runs = []\n nt_trans_vali_all_runs = []\n teacher_params_all_runs = []\n\n\n for run_index in range(self.INIT_RUN_INDEX, self.INIT_RUN_INDEX + self.NUM_RUNS ):\n\n # do logging\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\n # a string that summarizes the current ntt experiment\n model_summary_str = self.model_str + '_density_' + str(round(nn_density_level, 2) ) + '_run_' + str(run_index)\n\n if self.SAVE_BOOL == True:\n model_dir_density_run = trans_model_dir + '/' + 'density_' + str(round(nn_density_level, 2) ) + '/' + 'run_' + str(run_index) + '/'\n\n os.makedirs(model_dir_density_run)\n \n logging.basicConfig(filename = model_dir_density_run + \"/\" + model_summary_str + \"_log.log\", format='%(asctime)s %(message)s', filemode='w', level=logging.DEBUG)\n\n else: \n logging.basicConfig(filename = model_summary_str + \"_log.log\" , format='%(asctime)s %(message)s', filemode='w', level=logging.DEBUG)\n \n \n # for different run indices, randomly draw teacher net's parameters\n _, teacher_net_params = self.init_fun(random.PRNGKey(run_index), tuple(self.batch_input_shape))\n \n # the prediction of the teacher net evaluated on validation samples\n vali_teacher_prediction = self.apply_fn(teacher_net_params, self.vali_samples)\n\n vali_teacher_ntk_mat = self.emp_ntk_fn(self.vali_inputs_1, self.vali_inputs_2, teacher_net_params) \n\n # the initial binary mask\n \n if self.PRUNE_METHOD == 'magnitude': \n masks = get_masks_from_jax_params(teacher_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n elif self.PRUNE_METHOD == 'logit_snip':\n logger.info(\"Use logit snip method to get the initial mask\")\n num_examples_snip = 128\n\n# gen_batches_logit_snip = self.DATASET.data_stream(num_examples_snip)\n \n snip_input = self.DATASET.dataset['train']['input'][:num_examples_snip, :]\n \n if self.GLOBAL_PRUNE_BOOL == False:\n logger.warning(\"layerwise sparse net initialized with logit_snip\") \n masks = get_logit_snip_masks(teacher_net_params, nn_density_level, self.apply_fn, snip_input, self.batch_input_shape, GlOBAL_PRUNE_BOOL = self.GLOBAL_PRUNE_BOOL) \n else:\n raise NotImplementedError(\"not implemented\")\n \n\n # the initial student parameters\n masked_student_net_params = get_sparse_params_filtered_by_masks(teacher_net_params, masks)\n\n # instantiate the optimizer triple \n opt_init, opt_update, get_params = self.OPTIMIZER_WITH_PARAMS\n\n opt_state = opt_init(teacher_net_params) \n\n # one step of NTK transfer\n @jit\n def nt_transfer_step(i, opt_state, x, masks):\n\n # parameters in the current optimizer state\n student_net_params = get_params(opt_state)\n\n # gradients that flow through the binary masks\n masked_g = grad(self.nt_transfer_loss)(student_net_params, masks, teacher_net_params, x, nn_density_level)\n\n return opt_update(i, masked_g, opt_state)\n\n # a list of validation loss\n vali_loss_list = []\n\n # calculate the initial validation loss. 
\n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level)\n\n vali_loss_list.append(vali_loss)\n\n logger.info(\"Before transfer: trans dist %.3f | ntk dist %.3f | targ dist %.3f | l2 pentalty %.3f | nn density %.2f\", vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level)\n itercount = itertools.count()\n\n t = time.time()\n\n # loop through iterations\n for num_iter in range(1, num_total_iters + 1): \n \n # a batch of input data\n batch_xs, _ = next(gen_batches) \n\n # reshape the input to a proper format (2d array for MLP and 3d for CNN)\n batch_xs = batch_xs.reshape(self.batch_input_shape) \n\n # update the optimizer state\n opt_state = nt_transfer_step(next(itercount), opt_state, batch_xs, masks )\n\n\n if num_iter % 100 == 0:\n elapsed_time = time.time() - t\n \n if (num_iter <= 500) and (run_index == self.INIT_RUN_INDEX) and (nn_density_level == self.NN_DENSITY_LEVEL_LIST[0]): \n # estimate the program end time.\n remaining_iter_num = all_density_all_run_num_total_iters - num_iter\n remaining_seconds = elapsed_time * ( remaining_iter_num / 100 )\n expected_end_time = str(datetime.now() + timedelta(seconds = remaining_seconds))\n\n # get parameters from the current optimizer state\n student_net_params = get_params(opt_state) \n\n # filter the paramters by masks\n masked_student_net_params = get_sparse_params_filtered_by_masks(student_net_params , masks)\n \n # validation loss\n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level) \n\n vali_loss_list.append(vali_loss)\n\n logger.info('run: %02d/%02d | iter %04d/%04d | trans. dist %.3f | ntk dist %.3f | targ. dist %.3f | l2 %.3f | nn density %.2f | time %.2f [s] | expected finish time %s', run_index, self.NUM_RUNS + self.INIT_RUN_INDEX - 1, num_iter, num_total_iters, vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level, elapsed_time, expected_end_time)\n t = time.time()\n\n\n if (num_iter % self.MASK_UPDATE_FREQ == 0) and (num_iter < mask_update_limit):\n # get parameters from the current optimizer state\n student_net_params = get_params(opt_state) \n \n # update masks\n masks = get_masks_from_jax_params(student_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n \n# if self.PRUNE_METHOD == 'logit_snip':\n# logit_snip_batch_xs, _ = next(gen_batches_logit_snip)\n# masks = get_logit_snip_masks(student_net_params, nn_density_level, self.apply_fn, snip_input, self.batch_input_shape, GlOBAL_PRUNE_BOOL = self.GLOBAL_PRUNE_BOOL) \n# else:\n# masks = get_masks_from_jax_params(student_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n\n\n \n elapsed_time = time.time() - t\n \n student_net_params = get_params(opt_state) \n \n # filter the paramters by masks\n masked_student_net_params = get_sparse_params_filtered_by_masks(student_net_params , masks)\n \n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level) \n\n vali_loss_list.append(vali_loss)\n \n logger.info('run: %02d/%02d | iter %04d/%04d | trans. dist %.3f | ntk dist %.3f | targ. 
dist %.3f | l2 %.3f | nn density %.2f | time %.2f [s]', run_index, self.NUM_RUNS + self.INIT_RUN_INDEX - 1, num_iter, num_total_iters, vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level, elapsed_time )\n \n vali_loss_array = np.array(vali_loss_list)\n\n nt_trans_params_all_runs.append(masked_student_net_params)\n nt_trans_masks_all_runs.append(masks)\n nt_trans_vali_all_runs.append(vali_loss_array)\n teacher_params_all_runs.append(teacher_net_params )\n\n if self.SAVE_BOOL == True:\n\n model_summary_str = self.model_str + '_density_' + str(round(nn_density_level, 2) ) + '_run_' + str(run_index)\n\n teacher_param_fileName = model_dir_density_run + 'teacher_params_' + model_summary_str\n np.save(teacher_param_fileName, teacher_net_params)\n\n student_param_fileName = model_dir_density_run + 'transferred_params_' + model_summary_str\n np.save(student_param_fileName, masked_student_net_params)\n\n mask_fileName = model_dir_density_run + 'transferred_masks_' + model_summary_str\n np.save(mask_fileName, masks)\n\n loss_array_fileName = model_dir_density_run + 'loss_array_' + model_summary_str\n np.save(loss_array_fileName, vali_loss_array)\n \n\n nt_trans_params_all_sparsities_all_runs.append( nt_trans_params_all_runs )\n nt_trans_masks_all_sparsities_all_runs.append( nt_trans_masks_all_runs )\n nt_trans_vali_all_sparsities_all_runs.append( nt_trans_vali_all_runs )\n teacher_params_all_sparsities_all_runs.append( teacher_params_all_runs )\n \n if return_teacher_params_bool:\n return nt_trans_params_all_sparsities_all_runs, nt_trans_masks_all_sparsities_all_runs, nt_trans_vali_all_sparsities_all_runs, teacher_params_all_sparsities_all_runs\n\n else:\n return nt_trans_params_all_sparsities_all_runs, nt_trans_masks_all_sparsities_all_runs, nt_trans_vali_all_sparsities_all_runs", "def multi_taper_psd(\r\n s, Fs=2 * np.pi, NW=None, BW=None, adaptive=False,\r\n jackknife=True, low_bias=True, sides='default', NFFT=None\r\n ):\r\n # have last axis be time series for now\r\n N = s.shape[-1]\r\n M = int(np.product(s.shape[:-1]))\r\n\r\n if BW is not None:\r\n # BW wins in a contest (since it was the original implementation)\r\n norm_BW = np.round(BW * N / Fs)\r\n NW = norm_BW / 2.0\r\n elif NW is None:\r\n # default NW\r\n NW = 4\r\n # (else BW is None and NW is not None) ... all set\r\n Kmax = int(2 * NW)\r\n\r\n # if the time series is a complex vector, a one sided PSD is invalid:\r\n if (sides == 'default' and np.iscomplexobj(s)) or sides == 'twosided':\r\n sides = 'twosided'\r\n elif sides in ('default', 'onesided'):\r\n sides = 'onesided'\r\n\r\n # Find the direct spectral estimators S_k(f) for k tapered signals..\r\n # don't normalize the periodograms by 1/N as normal.. 
since the taper\r\n # windows are orthonormal, they effectively scale the signal by 1/N\r\n spectra, eigvals = tapered_spectra(\r\n s, (NW, Kmax), NFFT=NFFT, low_bias=low_bias\r\n )\r\n NFFT = spectra.shape[-1]\r\n K = len(eigvals)\r\n # collapse spectra's shape back down to 3 dimensions\r\n spectra.shape = (M, K, NFFT)\r\n\r\n last_freq = NFFT / 2 + 1 if sides == 'onesided' else NFFT\r\n\r\n # degrees of freedom at each timeseries, at each freq\r\n nu = np.empty((M, last_freq))\r\n if adaptive:\r\n weights = np.empty((M, K, last_freq))\r\n for i in range(M):\r\n weights[i], nu[i] = utils.adaptive_weights(\r\n spectra[i], eigvals, sides=sides\r\n )\r\n else:\r\n # let the weights simply be the square-root of the eigenvalues.\r\n # repeat these values across all n_chan channels of data\r\n weights = np.tile(np.sqrt(eigvals), M).reshape(M, K, 1)\r\n nu.fill(2 * K)\r\n\r\n if jackknife:\r\n jk_var = np.empty_like(nu)\r\n for i in range(M):\r\n jk_var[i] = utils.jackknifed_sdf_variance(\r\n spectra[i], eigvals, sides=sides, adaptive=adaptive\r\n )\r\n\r\n # Compute the unbiased spectral estimator for S(f) as the sum of\r\n # the S_k(f) weighted by the function w_k(f)**2, all divided by the\r\n # sum of the w_k(f)**2 over k\r\n\r\n # 1st, roll the tapers axis forward\r\n spectra = np.rollaxis(spectra, 1, start=0)\r\n weights = np.rollaxis(weights, 1, start=0)\r\n sdf_est = mtm_cross_spectrum(\r\n spectra, spectra, weights, sides=sides\r\n )\r\n sdf_est /= Fs\r\n \r\n if sides == 'onesided':\r\n freqs = np.linspace(0, Fs / 2, NFFT / 2 + 1)\r\n else:\r\n freqs = np.linspace(0, Fs, NFFT, endpoint=False)\r\n\r\n out_shape = s.shape[:-1] + (len(freqs),)\r\n sdf_est.shape = out_shape\r\n if jackknife:\r\n jk_var.shape = out_shape\r\n return freqs, sdf_est, jk_var\r\n else:\r\n nu.shape = out_shape\r\n return freqs, sdf_est, nu", "def _fit_spectral_embedding(self, K, vect_len_ts):\n MAX_ITER = 500\n ABSTOL = 1e-2\n RELTOL = 1e-4\n \n self.vect_len_ts = vect_len_ts\n \n # Saving state\n h = {}\n h['objval'] = np.zeros(MAX_ITER)\n h['r_norm'] = np.zeros(MAX_ITER)\n h['s_norm'] = np.zeros(MAX_ITER)\n h['eps_pri'] = np.zeros(MAX_ITER)\n h['eps_dual'] = np.zeros(MAX_ITER)\n \n if self.verbose:\n print('{:6} | {:7} | {:7} | {:7} | {:7} | {:12}\\n'.\n format('iter','r norm', 'eps pri', 's norm', 'eps dual', 'objective'))\n \n # block differentiation matrix\n D = self._generate_block_differentiation_matrix(vect_len_ts)\n \n # vect containing respective lengths of Y and W\n vect_size_tv = vect_len_ts - 1\n \n #number of samples\n nsamples = vect_len_ts.sum()\n \n # kappa: proximal parameter\n kappa = self.reg_param/self.admm_rho\n \n # constants for stopping tolerance\n c_prim = np.sqrt(self.n_clusters*vect_len_ts.sum())\n c_dual = np.sqrt(self.n_clusters*vect_size_tv.sum())\n \n print('obj = {:10.2f}'.format(self._objective_function(K)))\n\n \n for k in range(MAX_ITER):\n \n #pdb.set_trace()\n Wold = np.copy(self.W)\n #Hold = np.copy(self.H)\n Yold = np.copy(self.Y)\n rho = self.admm_rho\n def Hstep_cost_function(H):\n \"\"\"\n cost function used by pymanopt to solve the stiefel manifold problem\n ..math: \\min_H - trace(H^\\top*K*H)/T + \\frac{\\rho}{2} \\|H^\\top D - W + Y \\|_F^2\n ..math: s.t. 
H^\\top H = I\n ..math: T (number of samples)\n \"\"\" \n U = Wold - Yold\n #cost = -np.trace(H.T@K@H) + (self.admm_rho/2)*(norm(H.T@D - Wold + self.Y, 'fro')**2) \n cost = -np.trace(H.T@K@H)/nsamples + (rho/2)*np.trace((H.T@D - U)@(H.T@D-U).T) \n return cost\n \n def egrad(H):\n U = Wold - Yold\n grad_e = -2*(K@H)/nsamples + rho*D@((H.T@D - U).T)\n return grad_e\n \n # ================ H-Update =============\n manifold = Stiefel(vect_len_ts.sum(), self.n_clusters)\n problem = Problem(manifold=manifold, cost=Hstep_cost_function, verbosity = 0)\n #solver = SteepestDescent(maxtime=float('inf'),mingradnorm=1e-8, \n # minstepsize=1e-16, maxiter = self.max_iter) #instantiation pymanopt\n #solver = TrustRegions()\n solver = ConjugateGradient()\n self.H = np.asarray(solver.solve(problem))\n #print('norm variation of H = {}'.format(norm(Hold-self.H, 'fro')))\n #print('H : variation on obj = {}'.format(-np.trace(Hold.T@K@Hold)/nsamples + np.trace(self.H.T@[email protected])/nsamples))\n \n #================= W - update =============\n H_top_times_D = (self.H.T)@D\n target_ell = H_top_times_D + self.Y\n for t in range(self.W.shape[1]):\n self.W[:,t] = self._soft_shrinkage(target_ell[:,t], kappa)\n #print(norm(self.W[:,t] - target_ell[:,t]))\n #print(norm(self.W[:,t] - Wold[:,t]))\n \n #print('norm variation of W = {}'.format(norm(Wold-self.W, 'fro')))\n #print('W: variation on obj = {}'.format(self._columwise_norm(Wold, 1) - self._columwise_norm(self.W, 1)))\n \n \n #================ Y -Updates =============\n self.Y = np.copy(self.Y) + (H_top_times_D - self.W)\n #print('norm variation of Y = {}'.format(norm(Yold-self.Y, 'fro')))\n #print('obj = {:10.2f}'.format(self._objective_function(K)))\n # ============ history ====================\n h['objval'][k] = self._objective_function(K)\n h['r_norm'][k] = norm(H_top_times_D - self.W, 'fro')\n h['s_norm'][k] = self.admm_rho*norm(D@(Wold - self.W).transpose(), 'fro')\n norm_Htop_D = norm(H_top_times_D, 'fro')\n norm_D_Ytop = self.admm_rho*norm([email protected](), 'fro') \n h['eps_pri'][k] = c_prim*ABSTOL + RELTOL*np.max([norm(self.W, 'fro'), norm_Htop_D])\n h['eps_dual'][k] = c_dual*ABSTOL + RELTOL*norm_D_Ytop\n \n # verbose\n if self.verbose:\n print('{:6} | {:5.3f} | {:5.3f} | {:5.3f} | {:5.3} | {:10.2f}\\n'.format(k, h['r_norm'][k], h['eps_pri'][k],\n h['s_norm'][k], h['eps_dual'][k], h['objval'][k]))\n \n # check convergence\n if (h['r_norm'][k] < h['eps_pri'][k]) and (h['s_norm'][k] < h['eps_dual'][k]):\n break\n \n self.history = h\n return self", "def getSSVEP(data,sr,window,ssvep_freq,goodtrials,goodchans):\n\n SSVEP = dict();\n\n #some renaming\n startsamp = window[0]\n endsamp = window[1]\n epochlength = data.shape[0]\n nchan = data.shape[1]\n ntrial = data.shape[2]\n\n # average erp.\n erp = np.mean(data[:, :, goodtrials], axis=2) \n\n # FFT the ERPs\n # remove the mean\n erpmean = np.tile(np.mean(erp, axis=0), [epochlength, 1])\n erp = erp - erpmean\n\n #take fft over prescribed window\n erpf = fft(erp[startsamp:endsamp, :], axis=0)/(endsamp-startsamp) # raw fft \n binwidth = int((endsamp-startsamp)/sr)\n u,s,vh = linalg.svd(erpf[(ssvep_freq-1)*binwidth:(ssvep_freq+1)*binwidth+1,:])\n snr = 2 * (np.abs(u[1,:]**2))/(np.abs(u[0,:])**2 + np.abs(u[2,:])**2)\n\n snrflagsignal = 1\n if np.max(snr) < 1:\n print('Warning NO SSVEP detected at stimulus frequency')\n snrflagsignal = 0\n \n weights = np.zeros((nchan,1),dtype=complex)\n\n\t# This is an optimal set of weights to estimate 30 hz signal. 
\n weights[:,0] = np.matrix.transpose(vh[0,:])\n\n\t# lets test it on the same interval using weighted electrode vs. original\n erpproject = np.matmul(erpf, weights)\n\n # multiply the weights for all timepoints\n weights_long = np.tile(weights * np.diag(s)[0,0], [1,1000])\n channel_power = np.transpose(erpf) * weights_long\n\n # Now use the weights to loop through indivial trials \n trialestimate = np.zeros((endsamp-startsamp,ntrial),dtype=complex)\n trial_bychan = np.zeros((endsamp-startsamp,nchan, ntrial), dtype = complex)\n trial_fft = np.zeros((endsamp-startsamp,nchan, ntrial), dtype = complex)\n trial_data = np.zeros((endsamp-startsamp,nchan, ntrial))\n\n for trial in goodtrials: \n trialdata = np.squeeze(data[startsamp:endsamp,:,trial])\n trialfft = fft(trialdata,axis=0)\n trialproject = np.matmul(trialfft,weights_long)\n trialfft_weighted = trialfft * weights_long.T\n trial_bychan[:,:,trial] = trialfft_weighted\n trial_fft[:, :, trial] = trialfft\n trialestimate[:,trial] = trialproject[:,0] #new coefficients\n trial_data[:,:,trial] = trialdata\n\n\n SSVEP['goodtrials'] = goodtrials\n SSVEP['goodchannels'] = goodchans\n SSVEP['sr'] = sr;\n SSVEP['ssvep_freq'] = ssvep_freq\n SSVEP['samplerange'] = window\n SSVEP['erp_fft'] = erpf\n SSVEP['svdspectrum'] = u\n SSVEP['svdchan'] = vh[0:2,:]\n SSVEP['snr'] = snr\n SSVEP['snrflag'] = snrflagsignal\n SSVEP['projectspectrum'] = erpproject\n SSVEP['singletrial'] = trialestimate\n SSVEP['weights'] = weights\n SSVEP['power_sub'] = channel_power\n SSVEP['singular'] = np.diag(s)\n SSVEP['trial_bychan'] = trial_bychan\n SSVEP['trialfft'] = trial_fft\n SSVEP['trialdata'] = trial_data\n return SSVEP", "def train(self):\n\n for i in range(self.c):\n mu_i = self.estimate_mu(i).T\n cov_i = self.estimate_sigma(i, mu_i)\n inv_sigma_i = np.linalg.inv(cov_i)\n P_i = self.estimate_P(i)\n Wi = -1 / 2 * inv_sigma_i\n wi = inv_sigma_i @ mu_i\n wi0 = -1 / 2 * mu_i.T @ inv_sigma_i @ mu_i - 1 / 2 * np.log(np.linalg.det(cov_i)) + np.log(P_i)\n self.weights.append([Wi, wi, wi0])\n return self.weights", "def runWattsStrogatz(dim, size, nei, p):\n s = z.Optimize()\n g = ig.Graph.Watts_Strogatz(dim, size, nei, p, loops=True, multiple=False)\n while g.is_dag():\n g = ig.Graph.Watts_Strogatz(dim, size, nei, p, loops=True, multiple=False)\n\n return MFAS_set_cover(s,g), u.get_feedback_arc_set(g)", "def weighted_transmissionData(self, port='Substrate'):\n\n base = self.sims[0].conf['General']['results_dir']\n self.log.info('Computing spectrally weighted transmission data for group at %s' % base)\n abs_vals = np.zeros(self.num_sims)\n ref_vals = np.zeros(self.num_sims)\n trans_vals = np.zeros(self.num_sims)\n freqs = np.zeros(self.num_sims)\n wvlgths = np.zeros(self.num_sims)\n spectra = np.zeros(self.num_sims)\n # Get solar power from chosen spectrum\n path = self.sims[0].conf['Simulation']['input_power_wv']\n wv_vec, p_vec = np.loadtxt(path, usecols=(0, 2), unpack=True, delimiter=',')\n # Get interpolating function for power\n p_wv = interpolate.interp1d(wv_vec, p_vec, kind='linear',\n bounds_error=False, fill_value='extrapolate')\n # Assuming the leaves contain frequency values, sum over all of them\n for i, sim in enumerate(self.sims):\n ref, trans, absorb = sim.data['transmission_data'][port]\n freq = sim.conf['Simulation']['params']['frequency']\n wvlgth = consts.c / freq\n wvlgth_nm = wvlgth * 1e9\n freqs[i] = freq\n wvlgths[i] = wvlgth\n sun_pow = p_wv(wvlgth_nm)\n spectra[i] = sun_pow * wvlgth_nm\n abs_vals[i] = sun_pow * absorb * wvlgth_nm\n ref_vals[i] = sun_pow 
* ref * wvlgth_nm\n trans_vals[i] = sun_pow * trans * wvlgth_nm\n # Now integrate all the weighted spectra and divide by the power of\n # the spectra\n wvlgths = wvlgths[::-1]\n abs_vals = abs_vals[::-1]\n ref_vals = ref_vals[::-1]\n trans_vals = trans_vals[::-1]\n spectra = spectra[::-1]\n power = intg.trapz(spectra, x=wvlgths * 1e9)\n wght_ref = intg.trapz(ref_vals, x=wvlgths * 1e9) / power\n wght_abs = intg.trapz(abs_vals, x=wvlgths * 1e9) / power\n wght_trans = intg.trapz(trans_vals, x=wvlgths) / power\n out = os.path.join(base, 'weighted_transmission_data.dat')\n with open(out, 'w') as outf:\n outf.write('# Reflection, Transmission, Absorbtion\\n')\n outf.write('%f,%f,%f' % (wght_ref, wght_trans, wght_abs))\n return wght_ref, wght_trans, wght_abs", "def _generate_weighted_matrices(self):\n self.degree_weighted_matrices = dict()\n mes = []\n args = []\n for metaedge, matrix in self.adj_matrices.items():\n mes.append(metaedge)\n args.append({'matrix': matrix, 'w': self.w, 'degree_fwd': self.out_degree[metaedge],\n 'degree_rev': self.in_degree[metaedge]})\n res = parallel_process(array=args, function=mt.weight_by_degree, use_kwargs=True, n_jobs=self.n_jobs,\n front_num=0)\n for metaedge, matrix in zip(mes, res):\n self.degree_weighted_matrices[metaedge] = matrix", "def SkoptPaperStats(maxIters, numRuns):\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n assert comm.Get_size() == numRuns, \"Please ensure there is one process running per run i.e \" + str(numRuns) + \" processes.\"\n \n # Define the problem bounds.\n skoptBounds = [(10, 1300), (40, 230), (0, 90), (0, 90)]\n\n # Use the same seed list as previously.\n seedList = [572505, 357073, 584216, 604873, 854690, 573165, 298975, 650770, 243921, 191168]\n\n # The target for each algorithm. This was determined by using the values in the literature, so there is clearly some deviation either due to the detuning or computation.\n globalFoM = 1.033\n\n if rank == 0:\n timeList = []\n iterationList = []\n\n # Define which solver will be used.\n optimiser = skopt.Optimizer(skoptBounds, base_estimator = \"GP\", n_initial_points = int(np.ceil(maxIters/10)), random_state = seedList[rank])\n\n # Start timing.\n startTime = time.time()\n timeElapsed = None\n iterationSuccess = None\n\n # Start optimisation.\n for iteration in range(maxIters):\n\n # Make one suggestion.\n nextParams = optimiser.ask()\n\n # Check what FoM this gives. Go negative as this is a minimisation routine.\n fEval = FitnessSkopt(nextParams)\n\n # Update best FoM.\n if abs(fEval) >= globalFoM:\n # The algorithm has managed to surpass or equal the paper value.\n iterationSuccess = iteration\n timeElapsed = time.time() - startTime\n \n if rank == 0:\n iterationList.append(iterationSuccess)\n timeList.append(timeElapsed)\n\n break\n \n # Tell the optimiser about the result.\n optimiser.tell(nextParams, fEval)\n\n # Run complete. Send results to main process. 
Tags are unique identifiers.\n if rank != 0:\n comm.send(timeElapsed, dest = 0, tag = 1)\n comm.send(iterationSuccess, dest = 0, tag = 2)\n\n # Wait for all the processes to end.\n comm.Barrier()\n\n if rank == 0:\n # Aggregate the data.\n for process in range(comm.Get_size() - 1):\n # Get the data.\n individualTime = None\n individualTime = comm.recv(individualTime, source = process + 1, tag = 1)\n\n individualIter = None\n individualIter = comm.recv(individualIter, source = process + 1, tag = 2)\n\n if individualIter is not None:\n # Both values must therefore be non-null.\n iterationList.append(individualIter)\n timeList.append(individualTime)\n\n avgRuntime = np.average(timeList)\n avgIters = np.average(iterationList)\n try:\n\n fastestTime = np.min(timeList)\n\n except ValueError:\n \n # List is empty.\n fastestTime = float('NaN')\n\n numSuccess = len(iterationList)\n successRate = numSuccess/numRuns\n\n print(\"Bayesian optimisation paper testing complete! Here are the stats:\")\n print(\"Number of successful runs: \" + str(numSuccess) + \" (Success rate of \" + str(successRate) + \")\")\n print(\"Average iterations required for success: \" + str(avgIters))\n print(\"Average time required for success: \" + str(avgRuntime))\n print(\"Fastest convergence time: \" + str(fastestTime))\n print(\"------------------------------------------------------------------------------------------------------------------\")\n \n return", "def fit(self, w):\n w_former = w\n w_next = w\n w_t = w\n w_t_100 = w\n w_diff = 10000\n i = 0\n #tim_beg = t.time()\n # use two part to calculate the a(w,w0):calculate the gradient using regular or SDG, batch = 10\n # calculate the gradient and update the w,w0\n while i < 10000 and np.abs(w_diff) > 0.00001:\n loss_func = self.lost\n grads = self.gradient(loss_func)\n # calculate the y_pred(eta)\n w_next = w_former - grads(w_former) / (10000)\n k =self.lost(w_next) - self.lost(w_former)\n m = np.dot(w_next-w_former, grads(w_former).T)\n if i != 0 and i % 100 == 0:\n w_t = w_t_100\n w_t_100 = w_next\n w_diff = 1 / len(w) * (np.sum(np.abs(w_t_100 - w_t)))\n i_loss = self.lost(w_next)\n print(\"Iteration < %d > with loss < %f >\" % (i, i_loss))\n #self.los_plt.append(i_loss)\n #tim = t.time() - tim_beg\n #self.tim.append(tim)\n i += 1\n w_former = w_next\n #plt.plot(self.tim, self.los_plt)\n #plt.xlabel(\"time\")\n #plt.ylabel('loss')\n #plt.show()\n if i >= 10000:\n print(\"~Optimization stops because finishing iteration~\")\n if np.abs(w_diff) <= 0.00001:\n print(\"~Optimization stops because of difference between weights are less than 0.00001~\")\n self.w_result = w_next", "def train(self):\n \n for demo_traj in self._demo_trajs:\n\n interpolate = interp1d(self._phase._z, demo_traj, kind='cubic')\n\n #strech the trajectory to fit 0 to 1\n stretched_demo = interpolate(self._phase._z)[None,:]\n\n #compute the weights of the trajectory using the basis function\n w_demo_traj = np.dot(np.linalg.inv(np.dot(self._Phi, self._Phi.T) + 1e-12*np.eye(self._n_bfs) ), np.dot(self._Phi, stretched_demo.T)).T # weights for each trajectory\n \n #append the weights to the list\n self._W.append(w_demo_traj.copy())\n\n self._W = np.asarray(self._W).squeeze()\n \n # mean of weights\n self._mean_W = np.mean(self._W, axis=0)\n \n # covariance of weights\n # w1 = np.array(map(lambda x: x - self._mean_W.T, self._W))\n # self._sigma_W = np.dot(w1.T, w1)/self._W.shape[0]\n\n self._sigma_W = np.cov(self._W.T)" ]
[ "0.649906", "0.6344058", "0.6340718", "0.6255578", "0.62282306", "0.62278086", "0.61570895", "0.610795", "0.61020076", "0.60339296", "0.6010726", "0.60035574", "0.59758174", "0.59527", "0.5924075", "0.59101695", "0.5897155", "0.5866766", "0.586247", "0.58438164", "0.5808632", "0.5807483", "0.58038926", "0.5766321", "0.5761529", "0.57566905", "0.5738819", "0.5737908", "0.5695596", "0.5682688" ]
0.6735738
0
Perform an inverse iteration to find the eigenvector corresponding to the given eigenvalue in a symmetric tridiagonal system.
def _tridi_inverse_iteration(d, e, w, x0=None, rtol=1e-6):
    eig_diag = d - w
    if x0 is None:
        x0 = np.random.randn(len(d))
    x_prev = np.zeros_like(x0)
    norm_x = np.linalg.norm(x0)
    # the eigenvector is unique up to sign change, so iterate
    # until || |x^(n)| - |x^(n-1)| ||^2 < rtol
    x0 /= norm_x
    while np.linalg.norm(np.abs(x0) - np.abs(x_prev)) > rtol:
        x_prev = x0.copy()
        _tridisolve(eig_diag, e, x0)
        norm_x = np.linalg.norm(x0)
        x0 /= norm_x
    return x0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eigen_vector_i(self, i):\n return self._eig_vec[:,i]", "def tridi_inverse_iteration(d, e, w, x0=None, rtol=1e-8):\r\n eig_diag = d - w\r\n if x0 is None:\r\n x0 = np.random.randn(len(d))\r\n x_prev = np.zeros_like(x0)\r\n norm_x = np.linalg.norm(x0)\r\n # the eigenvector is unique up to sign change, so iterate\r\n # until || |x^(n)| - |x^(n-1)| ||^2 < rtol\r\n x0 /= norm_x\r\n while np.linalg.norm(np.abs(x0) - np.abs(x_prev)) > rtol:\r\n x_prev = x0.copy()\r\n tridisolve(eig_diag, e, x0)\r\n norm_x = np.linalg.norm(x0)\r\n x0 /= norm_x\r\n return x0", "def eigen_decomp(matrix):\n w = None\n v = None\n ### YOUR CODE HERE\n w,v=np.linalg.eig(matrix)\n ### END YOUR CODE\n return w, v", "def test_inverse_eigenvectors_non_interacting(self, size):\n t_nn = 1.2\n idx = np.arange(size)\n g0_inv_full = np.zeros((size, size), dtype=complex)\n g0_inv_full[idx[:-1], idx[1:]] = g0_inv_full[idx[1:], idx[:-1]] = t_nn\n for g0 in self.g0_loc_inv:\n g0_inv_full[idx, idx] = g0\n rv, h, rv_inv = gt.matrix.decompose_gf(g0_inv_full)\n assert_allclose(rv.dot(rv_inv), np.identity(*h.shape), atol=1e-14)", "def eig_vals_vects(self, matrix=None, attribute=False, attribute_matrix=False):\n if matrix is None:\n matrix = self.get_matrix(attribute=attribute_matrix)\n eigval, eigvect = np.linalg.eig(matrix)\n si = np.argsort(np.imag(eigval))\n eigvect = np.array(eigvect)\n eigvect = eigvect.T[si]\n eigval = eigval[si]\n\n if attribute:\n self.eigvect = eigvect\n self.eigval = eigval\n\n # print 'np.shape(eigvect) = ', np.shape(eigvect)\n # sys.exit()\n return eigval, eigvect", "def eigsolve(self,**kwargs):\n return eigsolve(self,**kwargs)", "def get_eig(self, tolerance=None):\r\n E, V = scipy.linalg.eig(self.data)\r\n E = pd.Series(E.real, name=\"EIG\")\r\n V = pd.DataFrame(V.real)\r\n if tolerance is not None:\r\n E[E/E.max() < tolerance] = 0\r\n return E, V", "def analytical_eig(A):\n n = len(A)\n h = 1/float(n)\n d = 2/float(h)**2\n a = -1/float(h)**2\n eigenval = np.empty(n)\n for j in range(1,n+1):\n eigenval[j-1] = d + 2*a*np.cos((j*np.pi)/(float(n)+1)) # Analytic solution\n \n return eigenval", "def call_eigenValues(self, input_value):\n eigValues, eigVectors = self.getEigen(input_value)\n return tf.reverse(eigVectors, [-1]), tf.reverse(eigValues, [-1])", "def inverse(self, y):\n device = y.device\n return t.einsum('ij,k,kj->ik', y, 1. 
/ t.sqrt(self.eig).to(device), self.rot.to(device))", "def eigen_vector_i_all(self):\n return self._eig_vec", "def eig_vals_vects_hermitian(matrix, sort='imag'):\n # if len(matrix) < 10:\n # print '\\nFinding eigvals, matrix = ', matrix\n eigval, eigvect = np.linalg.eig(matrix)\n # use imaginary part to get ascending order of eigvals\n if sort == 'imag':\n si = np.argsort(np.imag(eigval))\n elif sort == 'real':\n si = np.argsort(np.real(eigval))\n else:\n si = np.arange(len(eigval))\n\n eigvect = np.array(eigvect)\n eigvect_out = eigvect.T[si]\n eigval_out = eigval[si]\n if len(eigval_out) < 10:\n print 'eigvals return as =', eigval_out\n return eigval_out, eigvect_out", "def eig_vals_vects(matrix, sort='imag', not_hermitian=True, verbose=False):\n # if len(matrix) < 10:\n # print '\\nFinding eigvals, matrix = ', matrix\n\n # check if hermitian:\n if not_hermitian:\n eigval, eigvect = np.linalg.eig(matrix)\n else:\n if (matrix == matrix.conj().T).all():\n if verbose:\n print 'Shortcut eigvect/vals since matrix is hermitian...'\n eigval, eigvect = np.linalg.eigh(matrix)\n else:\n if verbose:\n print 'matrix is not hermitian...'\n eigval, eigvect = np.linalg.eig(matrix)\n\n # use imaginary part to get ascending order of eigvals\n if sort == 'imag':\n si = np.argsort(np.imag(eigval))\n elif sort == 'real':\n si = np.argsort(np.real(eigval))\n else:\n si = np.arange(len(eigval))\n\n eigvect = np.array(eigvect)\n eigvect_out = eigvect.T[si]\n eigval_out = eigval[si]\n\n # if len(eigval_out) < 10:\n # print 'eigvals return as =', eigval_out\n\n return eigval_out, eigvect_out", "def GetEigenvector(self, i):\n return _hypre.HypreAME_GetEigenvector(self, i)", "def posdef_inv_eig(tensor, identity, damping):\n eigenvalues, eigenvectors = tf.self_adjoint_eig(tensor + damping * identity)\n return tf.matmul(eigenvectors / eigenvalues, eigenvectors, transpose_b=True)", "def solve_for_eigenvectors(matrix, num, mode=\"general\"):\n\n # Construct a sparse matrix\n if mode == \"general\":\n return linalg.eigs(matrix, num)\n\n if mode == \"symmetric\":\n return linalg.eigsh(matrix, num)", "def eigen_decomposition(self):\n w, V = linalg.eigh(self.K)\n c = w[::-1]\n if isinstance(self.num_xi, float):\n percent_energy = np.cumsum(c) / np.sum(c)\n self.num_xi = np.arange(c.shape[0])[percent_energy < self.num_xi][-1] # num_xi changes\n self.Lambda = w[::-1][:self.num_xi]\n self.V = V[:, ::-1][:, :self.num_xi]", "def eigensolve(self, epsilon=0.85):\n raise NotImplementedError(\"eigensolve Incomplete\")", "def eigen(M):\n values, vectors = np.linalg.eig(M)\n return values, vectors", "def eig(self,manifold_num):\n num_sites = len(self.energies[manifold_num])\n ham = self.manifold_hamiltonian(manifold_num).toarray()\n eigvals, eigvecs = eigh(ham)\n # Force degenerate eigenvectors to be orthogonal\n if self.qr_flag:\n eigvecs, r = np.linalg.qr(eigvecs,mode='reduced')\n if self.check_eigenvectors:\n HV = ham.dot(eigvecs)\n D = eigvecs.T.dot(HV)\n if np.allclose(D,np.diag(eigvals),rtol=1E-11,atol=1E-11):\n pass\n else:\n # warnings.warn('Eigenvalues altered by QR factorization, max absolute change in diagonal matrix of {}'.format(np.max(D-np.diag(eigvals))))\n warnings.warn('Using eigenvectors to diagonalize hamiltonian does not result in the expected diagonal matrix to tolerance, largest deviation is {}'.format(np.max(np.abs(D - np.diag(eigvals)))))\n \n sort_indices = eigvals.argsort()\n eigvals.sort()\n eigvecs = eigvecs[:,sort_indices]\n if self.qr_flag:\n r = r[:,sort_indices]\n self.r_mats.append(r)\n # I choose to 
pick the phase of my eigenvectors such that the state which has the\n # largest overlap has a positive overlap. For sufficiently small d, and alpha close\n # to 1, this will be the overlap between the same excited and ground states.\n for i in range(eigvals.size):\n max_index = np.argmax(np.abs(eigvecs[:,i]))\n if eigvecs[max_index,i] < 0:\n eigvecs[:,i] *= -1\n\n return eigvals, eigvecs", "def eigensystem(mat):\n e, v = numpy.linalg.eig(mat)\n\n # `eig` returns complex results but we know all of the\n # eigenstates have real energy.\n e = numpy.real(e)\n\n items = zip(e, v.T)\n items = sorted(items, key = operator.itemgetter(0))\n e, v = zip(*items)\n\n return (e, v)", "def _solve_eigen(self, X, y):\n self.means_, self.neighbor_means_ = _class_means_and_neighbor_means(\n X, y, self.within_between_ratio, self.nearest_neighbor_ratio)\n \n Sw = _class_cov(X, y) # within class cov\n Sb = _local_pairwise_cov(self.means_, self.neighbor_means_)\n \n evals, evecs = linalg.eigh(Sb, Sw)\n evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors\n self.scalings_ = np.asarray(evecs)", "def MakeEigenVectors( self ): \n sqrt2 = np.sqrt(2)\n Isqrt2 = 1.0 / sqrt2\n EVectors = np.asarray( [ [ Isqrt2 , Isqrt2 , 0 ] ,\n [ Isqrt2 ,-Isqrt2 , 0 ] , \n [ 0 , 0 , 1 ] ] )\n for i in range( self.NQ ):\n for j in range( self.Nbranches ):\n self.EigenVectors[ i , j , 0 , : ] = EVectors[ j , : ]", "def RDInvert(eigVals, eigVecs):\n diagInv = np.diag(eigVals**-1)\n eigVecsTranspose = np.transpose(eigVecs)\n cInv = np.dot(np.dot(eigVecs,diagInv),eigVecsTranspose)\n return cInv", "def inverse(self):\n self.check_square()\n\n\n N = self.rows\n\n inverse = make_matrix(N, N)\n\n # Solve on a per-column basis using Ax = b formalism\n for j in range(N):\n b = make_matrix(N, 1)\n b[j, 0] = 1\n\n x = self.solve_linear_system(b)\n\n for i in range(N):\n inverse[i, j] = x[i, 0]\n\n return inverse", "def get_eigvect(self, attribute=False, basis=None):\n # First attempt to return, then attempt to load, then calculate if unavailable\n if self.eigvect is not None:\n eigvect = self.eigvect\n else:\n\n # Try to load eigval and eigvect\n print 'Attempting to load eigvect...'\n eigvect = self.load_eigvect(attribute=attribute)\n if eigvect is None:\n print 'mlat.get_eigvect: Could not load eigvect, calculating...'\n # calculate eigval and eigvect\n # Define matrix first to avoid attributing matrix to self\n if self.matrix is None:\n matrix = self.calc_matrix(attribute=False, basis=basis)\n else:\n matrix = self.matrix\n eigval, eigvect = self.eig_vals_vects(matrix=matrix, attribute=attribute)\n else:\n print 'loaded!'\n\n return eigvect", "def test_eigen_multiple_neighborhoods(self):\n # vectorized version\n t0 = time.time()\n extract_vect = EigenValueVectorizeFeatureExtractor()\n eigvals_vect = extract_vect.extract(self.point_cloud, self.neigh, None, None, None)\n print('Timing Vectorize : {}'.format((time.time() - t0)))\n eigvals_vect = np.vstack(eigvals_vect[:3]).T\n\n # serial version\n eigvals = []\n t0 = time.time()\n for n in self.neigh:\n extract = EigenValueSerial()\n eigvals.append(extract.extract(self.point_cloud, n, None, None, None))\n print('Timing Serial : {}'.format((time.time() - t0)))\n eigvals = np.array(eigvals)\n\n np.testing.assert_allclose(eigvals_vect, eigvals)", "def eig(C):\r\n\r\n # class eig(object):\r\n # def __call__(self, C):\r\n\r\n # Householder transformation of a symmetric matrix V into tridiagonal form.\r\n # -> n : dimension\r\n # -> V : symmetric nxn-matrix\r\n # <- V : orthogonal transformation 
matrix:\r\n # tridiag matrix == V * V_in * V^t\r\n # <- d : diagonal\r\n # <- e[0..n-1] : off diagonal (elements 1..n-1)\r\n\r\n # Symmetric tridiagonal QL algorithm, iterative\r\n # Computes the eigensystem from a tridiagonal matrix in roughtly 3N^3 operations\r\n # -> n : Dimension.\r\n # -> d : Diagonale of tridiagonal matrix.\r\n # -> e[1..n-1] : off-diagonal, output from Householder\r\n # -> V : matrix output von Householder\r\n # <- d : eigenvalues\r\n # <- e : garbage?\r\n # <- V : basis of eigenvectors, according to d\r\n\r\n\r\n # tred2(N, B, diagD, offdiag); B=C on input\r\n # tql2(N, diagD, offdiag, B);\r\n\r\n # private void tred2 (int n, double V[][], double d[], double e[]) {\r\n def tred2 (n, V, d, e):\r\n # This is derived from the Algol procedures tred2 by\r\n # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for\r\n # Auto. Comp., Vol.ii-Linear Algebra, and the corresponding\r\n # Fortran subroutine in EISPACK.\r\n\r\n num_opt = False # factor 1.5 in 30-D\r\n\r\n for j in range(n):\r\n d[j] = V[n-1][j] # d is output argument\r\n\r\n # Householder reduction to tridiagonal form.\r\n\r\n for i in range(n-1,0,-1):\r\n # Scale to avoid under/overflow.\r\n h = 0.0\r\n if not num_opt:\r\n scale = 0.0\r\n for k in range(i):\r\n scale = scale + abs(d[k])\r\n else:\r\n scale = sum(abs(d[0:i]))\r\n\r\n if scale == 0.0:\r\n e[i] = d[i-1]\r\n for j in range(i):\r\n d[j] = V[i-1][j]\r\n V[i][j] = 0.0\r\n V[j][i] = 0.0\r\n else:\r\n\r\n # Generate Householder vector.\r\n if not num_opt:\r\n for k in range(i):\r\n d[k] /= scale\r\n h += d[k] * d[k]\r\n else:\r\n d[:i] /= scale\r\n h = np.dot(d[:i],d[:i])\r\n\r\n f = d[i-1]\r\n g = h**0.5\r\n\r\n if f > 0:\r\n g = -g\r\n\r\n e[i] = scale * g\r\n h = h - f * g\r\n d[i-1] = f - g\r\n if not num_opt:\r\n for j in range(i):\r\n e[j] = 0.0\r\n else:\r\n e[:i] = 0.0\r\n\r\n # Apply similarity transformation to remaining columns.\r\n\r\n for j in range(i):\r\n f = d[j]\r\n V[j][i] = f\r\n g = e[j] + V[j][j] * f\r\n if not num_opt:\r\n for k in range(j+1, i):\r\n g += V[k][j] * d[k]\r\n e[k] += V[k][j] * f\r\n e[j] = g\r\n else:\r\n e[j+1:i] += V.T[j][j+1:i] * f\r\n e[j] = g + np.dot(V.T[j][j+1:i],d[j+1:i])\r\n\r\n f = 0.0\r\n if not num_opt:\r\n for j in range(i):\r\n e[j] /= h\r\n f += e[j] * d[j]\r\n else:\r\n e[:i] /= h\r\n f += np.dot(e[:i],d[:i])\r\n\r\n hh = f / (h + h)\r\n if not num_opt:\r\n for j in range(i):\r\n e[j] -= hh * d[j]\r\n else:\r\n e[:i] -= hh * d[:i]\r\n\r\n for j in range(i):\r\n f = d[j]\r\n g = e[j]\r\n if not num_opt:\r\n for k in range(j, i):\r\n V[k][j] -= (f * e[k] + g * d[k])\r\n else:\r\n V.T[j][j:i] -= (f * e[j:i] + g * d[j:i])\r\n\r\n d[j] = V[i-1][j]\r\n V[i][j] = 0.0\r\n\r\n d[i] = h\r\n # end for i--\r\n\r\n # Accumulate transformations.\r\n\r\n for i in range(n-1):\r\n V[n-1][i] = V[i][i]\r\n V[i][i] = 1.0\r\n h = d[i+1]\r\n if h != 0.0:\r\n if not num_opt:\r\n for k in range(i+1):\r\n d[k] = V[k][i+1] / h\r\n else:\r\n d[:i+1] = V.T[i+1][:i+1] / h\r\n\r\n for j in range(i+1):\r\n if not num_opt:\r\n g = 0.0\r\n for k in range(i+1):\r\n g += V[k][i+1] * V[k][j]\r\n for k in range(i+1):\r\n V[k][j] -= g * d[k]\r\n else:\r\n g = np.dot(V.T[i+1][0:i+1], V.T[j][0:i+1])\r\n V.T[j][:i+1] -= g * d[:i+1]\r\n\r\n if not num_opt:\r\n for k in range(i+1):\r\n V[k][i+1] = 0.0\r\n else:\r\n V.T[i+1][:i+1] = 0.0\r\n\r\n\r\n if not num_opt:\r\n for j in range(n):\r\n d[j] = V[n-1][j]\r\n V[n-1][j] = 0.0\r\n else:\r\n d[:n] = V[n-1][:n]\r\n V[n-1][:n] = 0.0\r\n\r\n V[n-1][n-1] = 1.0\r\n e[0] = 0.0\r\n\r\n\r\n # Symmetric 
tridiagonal QL algorithm, taken from JAMA package.\r\n # private void tql2 (int n, double d[], double e[], double V[][]) {\r\n # needs roughly 3N^3 operations\r\n def tql2 (n, d, e, V):\r\n\r\n # This is derived from the Algol procedures tql2, by\r\n # Bowdler, Martin, Reinsch, and Wilkinson, Handbook for\r\n # Auto. Comp., Vol.ii-Linear Algebra, and the corresponding\r\n # Fortran subroutine in EISPACK.\r\n\r\n num_opt = False # using vectors from numpy makes it faster\r\n\r\n if not num_opt:\r\n for i in range(1,n): # (int i = 1; i < n; i++):\r\n e[i-1] = e[i]\r\n else:\r\n e[0:n-1] = e[1:n]\r\n e[n-1] = 0.0\r\n\r\n f = 0.0\r\n tst1 = 0.0\r\n eps = 2.0**-52.0\r\n for l in range(n): # (int l = 0; l < n; l++) {\r\n\r\n # Find small subdiagonal element\r\n\r\n tst1 = max(tst1, abs(d[l]) + abs(e[l]))\r\n m = l\r\n while m < n:\r\n if abs(e[m]) <= eps*tst1:\r\n break\r\n m += 1\r\n\r\n # If m == l, d[l] is an eigenvalue,\r\n # otherwise, iterate.\r\n\r\n if m > l:\r\n iiter = 0\r\n while 1: # do {\r\n iiter += 1 # (Could check iteration count here.)\r\n\r\n # Compute implicit shift\r\n\r\n g = d[l]\r\n p = (d[l+1] - g) / (2.0 * e[l])\r\n r = (p**2 + 1)**0.5 # hypot(p,1.0)\r\n if p < 0:\r\n r = -r\r\n\r\n d[l] = e[l] / (p + r)\r\n d[l+1] = e[l] * (p + r)\r\n dl1 = d[l+1]\r\n h = g - d[l]\r\n if not num_opt:\r\n for i in range(l+2, n):\r\n d[i] -= h\r\n else:\r\n d[l+2:n] -= h\r\n\r\n f = f + h\r\n\r\n # Implicit QL transformation.\r\n\r\n p = d[m]\r\n c = 1.0\r\n c2 = c\r\n c3 = c\r\n el1 = e[l+1]\r\n s = 0.0\r\n s2 = 0.0\r\n\r\n # hh = V.T[0].copy() # only with num_opt\r\n for i in range(m-1, l-1, -1): # (int i = m-1; i >= l; i--) {\r\n c3 = c2\r\n c2 = c\r\n s2 = s\r\n g = c * e[i]\r\n h = c * p\r\n r = (p**2 + e[i]**2)**0.5 # hypot(p,e[i])\r\n e[i+1] = s * r\r\n s = e[i] / r\r\n c = p / r\r\n p = c * d[i] - s * g\r\n d[i+1] = h + s * (c * g + s * d[i])\r\n\r\n # Accumulate transformation.\r\n\r\n if not num_opt: # overall factor 3 in 30-D\r\n for k in range(n): # (int k = 0; k < n; k++) {\r\n h = V[k][i+1]\r\n V[k][i+1] = s * V[k][i] + c * h\r\n V[k][i] = c * V[k][i] - s * h\r\n else: # about 20% faster in 10-D\r\n hh = V.T[i+1].copy()\r\n # hh[:] = V.T[i+1][:]\r\n V.T[i+1] = s * V.T[i] + c * hh\r\n V.T[i] = c * V.T[i] - s * hh\r\n # V.T[i] *= c\r\n # V.T[i] -= s * hh\r\n\r\n p = -s * s2 * c3 * el1 * e[l] / dl1\r\n e[l] = s * p\r\n d[l] = c * p\r\n\r\n # Check for convergence.\r\n if abs(e[l]) <= eps*tst1:\r\n break\r\n # } while (Math.abs(e[l]) > eps*tst1);\r\n\r\n d[l] = d[l] + f\r\n e[l] = 0.0\r\n\r\n\r\n # Sort eigenvalues and corresponding vectors.\r\n if 11 < 3:\r\n for i in range(n-1): # (int i = 0; i < n-1; i++) {\r\n k = i\r\n p = d[i]\r\n for j in range(i+1, n): # (int j = i+1; j < n; j++) {\r\n if d[j] < p: # NH find smallest k>i\r\n k = j\r\n p = d[j]\r\n\r\n if k != i:\r\n d[k] = d[i] # swap k and i\r\n d[i] = p\r\n for j in range(n): # (int j = 0; j < n; j++) {\r\n p = V[j][i]\r\n V[j][i] = V[j][k]\r\n V[j][k] = p\r\n # tql2\r\n\r\n N = len(C[0])\r\n if 11 < 3:\r\n V = np.array([x[:] for x in C]) # copy each \"row\"\r\n N = V[0].size\r\n d = np.zeros(N)\r\n e = np.zeros(N)\r\n else:\r\n V = [[x[i] for i in xrange(N)] for x in C] # copy each \"row\"\r\n d = N * [0.]\r\n e = N * [0.]\r\n\r\n tred2(N, V, d, e)\r\n tql2(N, d, e, V)\r\n return (array(d), array(V))", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 
2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n temp = []\n temp.append(1/self.g[0][0])\n inverse.append(temp)\n else:\n identity_matrix = identity(self.h)\n det_term = 1/self.determinant()\n trace_term = self.trace()\n # implement intermediate scaling step locally\n # trace_x_I = trace_term * identity_matrix\n trace_x_I = []\n for i in range(len(self.g)):\n temp_row = []\n for j in range(len(self.g[i])):\n temp_row.append(trace_term * identity_matrix[i][j])\n trace_x_I.append(temp_row)\n # implement sub-traction locally\n # sub_term = trace_x_I - self.g\n sub_term = []\n for i in range(len(trace_x_I)):\n temp_row = []\n for j in range(len(trace_x_I[i])):\n temp_row.append(trace_x_I[i][j] - self.g[i][j])\n sub_term.append(temp_row)\n # implement final scaling step locally\n # inverse = det_term * sub_term\n inverse = []\n for i in range(len(sub_term)):\n temp_row = []\n for j in range(len(sub_term[i])):\n temp_row.append(det_term * sub_term[i][j])\n inverse.append(temp_row)\n return Matrix(inverse)\n # TODO - your code here", "def eigenvalue_decomposition (a_t_a_matrix ):\r\n # get eigenvalues and -vectors from ATA matrix\r\n eigenvalues = np.zeros (a_t_a_matrix.shape[0] )\r\n eigenvectors = np.zeros ((a_t_a_matrix.shape[0], a_t_a_matrix.shape[0] ))\r\n evals, evecs = np.linalg.eig (a_t_a_matrix )\r\n\r\n # sort them\r\n indices = np.argsort (-evals ) # reverse sort: greatest numbers first\r\n for loop_count, index in enumerate(indices ):\r\n eigenvalues[loop_count] = evals[index]\r\n eigenvectors[:, loop_count] = evecs[:, index]\r\n\r\n # get the normal vector, normalize it and if it's turned to the ground, turn it around\r\n normal_vector = normalize_vector (eigenvectors[:, -1] ) # the last (smallest) vector is the normal vector\r\n if (normal_vector[2] < 0):\r\n normal_vector = normal_vector * -1\r\n\r\n return normal_vector, eigenvalues[-1]" ]
[ "0.67798924", "0.6552502", "0.6531488", "0.6446713", "0.6446481", "0.6435577", "0.6334942", "0.633258", "0.62987125", "0.62801445", "0.62752336", "0.62600726", "0.6252588", "0.62425536", "0.6231015", "0.62025726", "0.62013495", "0.61938316", "0.6188946", "0.615885", "0.61471164", "0.6144845", "0.6129409", "0.6120966", "0.61128676", "0.6099537", "0.6077619", "0.6059575", "0.6057324", "0.60252" ]
0.6665403
1
Place the initial supply of the resource into the capacity_list at the start of the game.
def initialize_supply(self):
    unit_count = 0
    for i in range(self.start_allocation[0] - 1, self.start_allocation[1]):
        for j in range(len(self.capacity_list[i][1])):
            self.capacity_list[i][1][j] = 1
            unit_count += 1
    self.total_supply -= unit_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, capacity, initial):\n\t\tself.capacity = capacity\n\t\tself.amount = initial", "def __init__(self, capacity, fillValue = None):\n \n self._items = list() \n self._fillValue = fillValue\n self._DEFAULT_CAPACITY = capacity\n self._logicalSize = 0 #as required by exercise 1\n \n \n for count in xrange(capacity):\n self._items.append(self._fillValue)", "def __init__(self, capacity, units=0):\n self.capacity = capacity\n self.units = units", "def _create_capacity(self, m, comp, prod_name):\n name = comp.name\n cap_res = comp.get_capacity_var() # name of resource that defines capacity\n r = m.resource_index_map[comp][cap_res] # production index of the governing resource\n # production is always lower than capacity\n ## NOTE get_capacity returns (data, meta) and data is dict\n ## TODO does this work with, e.g., ARMA-based capacities?\n ### -> \"time\" is stored on \"m\" and could be used to correctly evaluate the capacity\n cap = comp.get_capacity(None, None, None, None)[0][cap_res] # value of capacity limit (units of governing resource)\n rule = partial(self._capacity_rule, prod_name, r, cap)\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, '{c}_{r}_capacity_constr'.format(c=name, r=cap_res), constr)\n # minimum production\n print('DEBUGG dispatchable?', comp.name, comp.is_dispatchable())\n if comp.is_dispatchable() == 'fixed':\n minimum = cap\n var = getattr(m, prod_name)\n values = var.get_values()\n for k in values:\n values[k] = cap\n var.set_values(values)\n else:\n minimum = 0 # -> for now just use 0, but fix this! XXX\n print('DEBUGG ... min:', minimum)\n rule = partial(self._min_prod_rule, prod_name, r, cap, minimum)\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, '{c}_{r}_minprod_constr'.format(c=name, r=cap_res), constr)", "def reserve(self):\n assert self.is_available() is True, \"this slot is not available\"", "def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n for count in range(capacity):\r\n self._items.append(fillValue)", "def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n self._logicalSize = 0\r\n # Track the capacity and fill value for adjustments later\r\n self._capacity = capacity\r\n self._fillValue = fillValue\r\n for count in range(capacity):\r\n self._items.append(fillValue)", "def fill_tank(self):\r\n self.fuel_level = self.fuel_capacity", "def knapsack(items, capacity):\r\n pass", "def __init__(self, capacity):\n self.experiences = RingBuf(capacity)", "def set_capacity(self, cap):\n return self.get_interaction().set_capacity(cap)", "def __init__(self):\n self.capacity = 1000\n self.data = [None]*self.capcity", "def buy_resource(self, num_res):\n check_pur = self.poss_purchases()\n if num_res in check_pur.keys():\n update_res = num_res\n for bin in self.capacity_list:\n for i, res in enumerate(bin[1]):\n if res == 1 and update_res > 0:\n bin[1][i] = 0\n update_res -= 1\n return (num_res, check_pur[num_res])\n else:\n return 'not enough resources'", "def new_capacity_rule(mod, g, p):\n return 0", "def calculate_supply(self):\r\n \r\n for cell in self.cells:\r\n cell.supply = min(self.max_volume,\r\n self.wave_speed / self.free_speed *\r\n (self.cell_length * self.jam_density -\r\n cell.volume)) /self.interval\r\n self.supply = self.cells[0].supply", "def _grow(self):\n self.capacity *= self.factor\n temp = [None] * self.capacity\n for i in range(self.size):\n temp[i] = self.store[i]\n self.store = temp", "def fill_tank(self):\r\n self.fuel_level = self.fuel_capacity\r\n print(\"Fuel tank is 
full\")", "def __init__(self, capacity=10):\n\n self._board = [None] * capacity # list of 10 None elements\n self._n = 0 # number of actual entries", "def fill_tank(self):\n self.fuel_level = self.fuel_capacity\n print(\"Fuel tank is full.\")", "def fill_tank(self):\n self.fuel_level = self.fuel_capacity\n print(\"Fuel tank is full.\")", "def __init__(self, capacity: int):\n self._pax_with_carry_on = PaxStack()\n self._pax_without_carry_on = PaxStack()\n self._capacity = capacity\n self._current_pax = 0", "def fill_up(self):\n self.fuel = self.gas_tank_size", "def capacity(self, capacity):\n\n self._capacity = capacity", "def new_capacity_rule(mod, prj, prd):\n return 0", "def __init__(self, start_floor, capacity, min_floor, max_floor):\r\n\r\n self.floor = start_floor\r\n self.passengers = []\r\n self.min_floor = min_floor\r\n self.max_floor = max_floor\r\n self.capacity = capacity\r\n\r\n self._update_destinations()", "def __init__(self, capacity):\n self.capacity = capacity #this is example for list implementation\n self.head = [None] * capacity #this is example for list implementation\n self.num_items = 0 #this is example for list implementation", "def capacitygroup_group():", "def __init__(self, capacity, operation, neutral_element):\n assert capacity > 0 and capacity & (capacity - 1) == 0, \"capacity must be positive and a power of 2.\"\n self._capacity = capacity\n self._value = [neutral_element for _ in range(2 * capacity)]\n self._operation = operation", "def __init__(self, capacity, operation, neutral_element):\n assert capacity > 0 and capacity & (capacity - 1) == 0, \"capacity must be positive and a power of 2.\"\n self._capacity = capacity\n self._value = [neutral_element for _ in range(2 * capacity)]\n self._operation = operation", "def __init__(self, capacity, operation, neutral_element):\n assert capacity > 0 and capacity & (capacity - 1) == 0, \"capacity must be positive and a power of 2.\"\n self._capacity = capacity\n self._value = [neutral_element for _ in range(2 * capacity)]\n self._operation = operation" ]
[ "0.6896693", "0.64290607", "0.64275193", "0.63781327", "0.62530994", "0.6205055", "0.6196022", "0.6167869", "0.61573017", "0.6094401", "0.607095", "0.6011403", "0.59734416", "0.593883", "0.591504", "0.58808136", "0.586092", "0.5837047", "0.581497", "0.581497", "0.5810729", "0.5797786", "0.57810116", "0.5771038", "0.57634187", "0.5728486", "0.5716848", "0.57167333", "0.57167333", "0.57167333" ]
0.77604085
0
Checks the purchase and reduces the supply on the board; returns a tuple of the number of units and the total cost.
def buy_resource(self, num_res):
    check_pur = self.poss_purchases()
    if num_res in check_pur.keys():
        update_res = num_res
        for bin in self.capacity_list:
            for i, res in enumerate(bin[1]):
                if res == 1 and update_res > 0:
                    bin[1][i] = 0
                    update_res -= 1
        return (num_res, check_pur[num_res])
    else:
        return 'not enough resources'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_total_supply() -> int:\n return total_supply", "def test_check_cost():", "def get_expected_cost(self):", "def cost(inventory):\n return inventory.reduce(convert.get_cost)", "def test_product_buy(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 5)\n self.assertEqual(result_buy, 175)", "def cost(self) -> float:", "def get_prodcost(_craftable, tab):\n # print(f'{tab}{_craftable.name} ({_craftable.value}):')\n if _craftable.prod_cost == 0:\n prodcost = 0\n\n tab = tab + ' '\n\n # If there are Craftable Items as required materials ....\n if len(_craftable.craftables_list) > 0:\n for craft, amount in _craftable.craftables_list:\n # partial_cost = round(float(craftables[craft].value) * amount, 2)\n for loop in range(amount):\n partial_cost = get_prodcost(craftables[craft], tab)\n # If the prod cost of the item is more expensive than the buy cost\n if partial_cost >= float(craftables[craft].value):\n partial_cost = float(craftables[craft].value)\n craftables[craft].buy_or_craft = 'buy'\n # print(f'{tab}Buy item {craft}')\n else:\n craftables[craft].buy_or_craft = 'craft'\n # print(f'{tab}Craft item {craft}')\n prodcost = prodcost + partial_cost\n\n # If the Total PROD Cost (prod_cost + res_totalcost) is cheaper the the market value:\n prodcost = prodcost + _craftable.res_totalcost\n if prodcost < float(_craftable.value):\n for resource, amount in _craftable.resources_list:\n partial_cost = round(float(resources[resource].unit_price) * amount, 2)\n # print(f'{tab}{resource} Cost: {partial_cost}')\n\n # print(f'{tab}Total Prod C ost: {round(prodcost, 2)}')\n if prodcost < float(_craftable.value):\n # print(f'{tab}Craft item {_craftable.name}')\n _craftable.buy_or_craft = 'craft'\n else:\n # print(f'{tab}Buy item {_craftable.name}')\n _craftable.buy_or_craft = 'buy'\n return prodcost\n else:\n return _craftable.prod_cost", "def test_excess_quantity(self):\n excess = self._uncertain_demand.excess_stock\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n variance = [(item - avg_order) for item in self._data_set.values()]\n stdev = pow(sum([pow(j, 2) for j in variance]) / len(self._data_set), 0.5)\n cal_safety = lambda x, y, z: x * y * (z ** 0.5)\n safety_stock = cal_safety(float(self._z_value), float(stdev), float(self._lead_time))\n cal_reorder_level = lambda x, y, z: ((x ** 0.5) * y) + z\n reorder = cal_reorder_level(float(self._lead_time), avg_order, float(safety_stock))\n cal_excess = lambda x, y, z: round(x - (y + (y - z)), 0) if x > y + (y - z) else 0\n test_excess = cal_excess(self._quantity_on_hand, reorder, safety_stock)\n self.assertEqual(int(excess), int(test_excess))", "def complete_purchase(self, customer_credit=0):\r\n \r\n #take the products first, then tell customer how many tickets to take\r\n #requires IChat interface to be passed to tell customers how many tickets to take\r\n \r\n #switch to list view in the collection window\r\n print(\"YES\")\r\n self._slow_click(target=self._images.get_trade(\"list_view_collection_window\"))\r\n print(\"NO\")\r\n \r\n running_total = self.search_for_products()\r\n running_total -= customer_credit\r\n \r\n print(\"running total is \" + str(running_total))\r\n if running_total == 0 or not running_total:\r\n self.cancel_trade()\r\n return False\r\n \r\n total_tickets_notice = 'Please take %i tickets.' 
% running_total\r\n self.Ichat.type_msg(total_tickets_notice)\r\n \r\n #wait for the customer to get the tickets, then click confirm\r\n if not self.preconfirm_scan_purchase(running_total): \r\n self.cancel_trade()\r\n \r\n self.go_to_confirmation()\r\n print(\"starting confirmation scan\")\r\n #run a final confirmation scan to check the products and tickets taken\r\n products_bought = self.confirmation_scan(tickets_to_give=running_total, credit=customer_credit)\r\n \r\n self.Ichat.close_current_chat()\r\n \r\n if products_bought:\r\n self._slow_click(target=self._images.get_trade(\"confirm_button\", \"confirm\"))\r\n wait(Pattern(self._images.get_ok_button()), 600)\r\n self._slow_click(target=self._images.get_ok_button())\r\n products_bought[\"total_tickets\"] = running_total\r\n \r\n return products_bought\r\n \r\n else:\r\n self.cancel_trade()\r\n return False", "def check_required_change(drink, amount):\n if (drink == \"espresso\" and amount > MENU[drink][\"cost\"]) or (drink == \"latte\" and amount > MENU[drink][\"cost\"])\\\n or (drink == \"cappuccino\" and amount > MENU[drink][\"cost\"]):\n return amount - MENU[drink][\"cost\"]\n else:\n return 0.00", "def cost(self):\n abs_cost = sum(f['price'] * f['qty'] for f in self.fills)\n return -abs_cost if self.is_ask() else abs_cost", "def calc_qty(self, cr, uid, production_id, context=None):\n prod = self.pool.get('mrp.production').browse(cr, uid,production_id\n , context=context)\n done = 0.0\n for wo in prod.workcenter_lines:\n for mrej in wo.moves_rejection:\n done += mrej.s_rejected_qty or 0.0\n for move in prod.move_created_ids2:\n if move.product_id == prod.product_id:\n #ignore scrapped and extra consumed\n if (not move.scrapped) or (not move.extra_consumed):\n done += move.product_qty\n if (prod.product_qty - done) <= 0:\n raise osv.except_osv(_('Warning!'), _('Click on \"Force To Close\" button to generate remain scrap order.'))\n return (prod.product_qty - done) or prod.product_qty", "def calculate_purchase_return(self,\n supply: int,\n connector_balance: int,\n connector_weight: int,\n deposit_amount: int) -> int:\n pass", "def find_purchase_qty_in_duration(self,from_date,to_date,location,product_id):\n # query=\"\"\"\n # select sum(product_uom_qty) from stock_move mv \n # Inner join stock_location sl on sl.id = mv.location_id and sl.usage='supplier'\n # and mv.location_dest_id in (%s) where state='done' and product_id = %s and date between '%s 00:00:00' and '%s 23:59:59'\n # \"\"\"\n query = \"\"\"select sum(product_uom_qty) as total,product_uom from stock_move mv \n Inner join stock_location sl on sl.id = mv.location_id and sl.usage='supplier' \n and mv.location_dest_id in (%s) where state='done' and product_id = %s and \n date between '%s 00:00:00' and '%s 23:59:59' group by product_uom\"\"\"%(\n ','.join(str(x) for x in location), product_id.id,from_date,to_date)\n self._cr.execute(query)\n result = self._cr.fetchall()\n uom_rec = self.env['product.uom']\n purchase_qty = 0\n for r in result:\n factor_inv = uom_rec.browse(r[1]).factor_inv\n purchase_qty += r[0] * factor_inv\n # Return Qty\n return_query = \"\"\"select sum(product_uom_qty) as total,product_uom \n from stock_move mv Inner join stock_location sl on sl.id = \n mv.location_dest_id and sl.usage='supplier' and mv.location_id in (\n %s) where state='done' and product_id = %s and date between '%s \n 00:00:00' and '%s 23:59:59' group by product_uom\"\"\" % (\n ','.join(str(x) for x in location), product_id.id, from_date,\n to_date)\n self._cr.execute(return_query)\n 
return_result = self._cr.fetchall()\n purchase_return_qty = 0\n for re in return_result:\n factor_inv = uom_rec.browse(re[1]).factor_inv\n purchase_return_qty += re[0] * factor_inv\n purchase_qty -= purchase_return_qty\n return purchase_qty", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def shopping_in_store(goods_list) -> tuple:\n result = 0\n for product in goods_list:\n if stock[product] > 0:\n result += prices[product]\n stock[product] -= 1\n if stock[product] == 0:\n print('Note! Some of your needed products are out of stock. Please come later or pre-order them.')\n return f'Price is: {result}', '\\n'.join(f'{k}: {v}' for k, v in stock.items())", "def line_cost(self):\r\n return self.qty * self.unit_cost", "def calc_energy_and_price(self) -> (float, float):\n\n cost_sum = 0\n energy_sum = 0\n for pump_id in self.pumps:\n pump_energy, pump_cost = self.pumps[pump_id].calculate_energy_and_cost()\n cost_sum += pump_cost\n energy_sum += pump_energy\n\n pump_id.append_index = 0\n\n assert energy_sum >= 0, \"The pumping energy cant be negative!\"\n assert cost_sum >= 0, \"The pumping cost cant be negative!\"\n return energy_sum, cost_sum", "def CalcCostForTurn(self):\r\n costsThisTurn = 0\r\n \r\n inventoryStorageCost = self.currentStock * STORAGE_COST_PER_UNIT\r\n backorderPenaltyCost = self.currentOrders * BACKORDER_PENALTY_COST_PER_UNIT\r\n \r\n costsThisTurn = inventoryStorageCost + backorderPenaltyCost\r\n \r\n return costsThisTurn", "def calculate_total_cost(state):\n pass", "def CalcBeerToDeliver(self):\r\n deliveryQuantity = 0\r\n \r\n #If we can fill the customer's order, we must do it.\r\n if self.currentStock >= self.currentOrders:\r\n deliveryQuantity = self.currentOrders\r\n self.currentStock -= deliveryQuantity\r\n self.currentOrders -= deliveryQuantity\r\n #If the current stock cannot cover the order, we must fill as much as we can, and back-order the rest.\r\n elif self.currentStock >= 0 and self.currentStock < self.currentOrders:\r\n deliveryQuantity = self.currentStock\r\n self.currentStock = 0\r\n self.currentOrders -= deliveryQuantity\r\n\r\n return deliveryQuantity", "def check_costs(self):\r\n if self.cost > self.owner.player.char_ob.currency:\r\n self.add_error(\r\n \"celebration_tier\",\r\n \"You cannot afford to pay the cost of %s.\" % self.cost,\r\n )", "def unitcost(self):\n cost = self.tablecost\n\n for component, quantity in self.components.items():\n cost += component.unitcost * quantity\n\n return cost", "def _product_cost_for_average_update(self, cr, uid, move):\n if move.picking_id.purchase_id:\n \tprice=move.purchase_line_id.price_unit_total\n \tcurrency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id\n \treturn {'cost': price,\n 'currency': currency_id\n }\n \n return super(stock_partial_picking, self)._product_cost_for_average_update(cr, uid, move)", "def getCost(catalog, search):\n department = mp.get(catalog['department'], search)\n artworks = me.getValue(department)\n total_cost = 0\n total_weight = 0\n\n for artwork in lt.iterator(artworks):\n cost = costArtwork(artwork)\n total_cost += cost\n artwork['TransCost (USD)'] = cost\n if artwork['Weight (kg)'] != '':\n total_weight += float(artwork['Weight (kg)'])\n\n return round(total_cost, 2), round(total_weight, 2), artworks", "def calculate_reserves(self):\n # TODO: Add back cash dividends and deduct exchange costs\n console.print(\"Still has to be build.\")", "def 
get_buy_and_sell_costs(self, orders):\n\n buy_cost = sell_cost = 0\n for _, order in orders.items():\n if order[\"descr\"][\"type\"] == \"buy\":\n buy_cost += float(order[\"cost\"])\n else:\n sell_cost += float(order[\"cost\"])\n\n return buy_cost, sell_cost", "def make_change(cost_of_item, amount_paid, denominations):\n\n\tdef data_quality_check(denominations, **kwargs):\n\t\tcomplete = True\t\n\t\t\n\t\tfor key, value in kwargs.items():\n\t\t\tif not isinstance(value, int):\n\t\t\t\tcomplete = False\n\t\t\t\tprint('The {} is not an integer'.format(key))\n\t\t\telse:\n\t\t\t\tif value < 1:\n\t\t\t\t\tcomplete = False\n\t\t\t\t\tprint('The {} is less than $1'.format(key))\n\t\t\n\t\tdenom = list(set(denominations)) # get out any duplicates\n\t\tif not all(isinstance(x, int) for x in denom):\n\t\t\tcomplete = False\n\t\t\tprint('At least one of the elements in the list is not an integer')\n\t\tif any(i < 1 for i in denom):\n\t\t\tcomplete = False\n\t\t\tprint('At least one of the elements in the list is less than 1')\n\t\tif complete:\n\t\t\tdenominations = sorted(denom, reverse=True) # sort list in reverse order\n\t\treturn complete, denominations\n\n\n\tdef amount_of_change(cost_of_item, amount_paid):\n\t\tprint('Cost of the item is ${}'.format(cost_of_item))\n\t\tprint('Amount paid is ${}'.format(amount_paid))\n\t\treturn amount_paid - cost_of_item\n\n\n\tdef denomination_of_bills(change, denominations):\n\t\tbills_to_return = []\n\t\twhile change > 0:\n\t\t\t# set start = 0. Then change it to the count value every time a bill is chosen\n\t\t\t# so that the next loop does not start at the beginning of the list. This shortens\n\t\t\t# each succesive loop.\n\t\t\tstart = 0\n\t\t\tfor count, bill in enumerate(denominations, start):\n\t\t\t\tif bill <= change:\n\t\t\t\t\tbills_to_return.append(bill)\n\t\t\t\t\tchange -= bill\n\t\t\t\t\tstart = count\n\t\t\t\t\tbreak\n\t\treturn bills_to_return\n\t\n\t##### Start the code #####\n\t## put items into a dictionary so that I can experiment with passing kwargs.\n\t\n\tinfo_dict = {'cost of item': cost_of_item, 'amount paid' : amount_paid}\n\tcomplete, denominations = data_quality_check(denominations, **info_dict)\n\tif not complete:\n\t\treturn\n\n\tchange = amount_of_change(cost_of_item, amount_paid)\n\tif change < 0:\n\t\tprint(\"You haven't paid enough for the item.\")\n\telif change == 0:\n\t\tprint('You paid the exact amount and get no change')\n\telse:\n\t\tprint('The change is ${}'.format(change))\n\t\tbills = denomination_of_bills(change, denominations)\n\t\tif len(bills) == 1:\n\t\t\tprint('The denomination of the change is ${}'.format(''.join(str(x) for x in bills)))\n\t\telse:\n\t\t\tprint('The denominations of the change are ${}'.format(', $'.join(str(x) for x in bills)))", "def calculate_profit(self):", "def calculatePurchaseReturnSolidity(S,R,F,E):\n\n _supply = uint256(S)\n _reserveBalance = uint256(R)\n _reserveRatio = uint256(F)\n _depositAmount = uint256(E)\n\n baseN = uint256(_depositAmount + _reserveBalance);\n\n if _reserveRatio == 100:\n amount = uint256(_supply * baseN) / _reserveBalance\n if amount < _supply: \n raise Exception(\"Error, amount < supply\")\n return amount - _supply\n \n resD = FIXED_ONE\n resN = power(baseN, _reserveBalance, _reserveRatio, 100);\n\n\n result = (_supply * resN / resD) - _supply\n if verbose:\n print(\" supply[%d] * resN[%d] / resD[%d] - supply[%d] = %d \" %\n (_supply, resN, resD, _supply, result))\n\n #Potential fix, reduce the result by the error occurred through rounding\n #if result < 
minUnit(S):\n # throw(\"Overflow\")\n #return uint256(result- minUnit(S))\n return result" ]
[ "0.6272567", "0.61072993", "0.6053516", "0.60232884", "0.5960245", "0.59333795", "0.5925038", "0.5916389", "0.586347", "0.58519727", "0.58213115", "0.5808379", "0.57883084", "0.5784606", "0.57784134", "0.5758463", "0.5739185", "0.5714941", "0.5692327", "0.5655765", "0.5642398", "0.5639779", "0.56396717", "0.5622661", "0.5618654", "0.56171006", "0.56032795", "0.5583376", "0.55672985", "0.5564996" ]
0.62564915
1
print list of resource currently on the board
def show_board(self):
    print(self.capacity_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printBoard(self):", "def api_print_board(self):\n print(self.board)", "def show_board(self):\n\n for s in self.board[1:-1]:\n print(''.join(x.symbol for x in s[1:-1]))", "def _print_board(board):\r\n pass", "def print_board(self):\n print(f'{self.name} BOARD:\\n')\n print(' A B C D E F G H I J ')\n print(' -------------------')\n row_number = 0\n for row in self.board:\n print('%d|%s ' % (row_number, ' '.join(row)))\n row_number += 1\n print(f'\\nLIVES REMAINING: {self.lives}\\n')", "def print(self):\n for row in self.board:\n print(row)", "def print_list(self):\r\n pass", "def show_board(board):\n for line in board:\n print(line)", "def print_board(self):\n print(self.board)", "def print_board(self):\n print('Board:')\n print('\\n'.join([''.join(['{:4}'.format(item) for item in row]) for row in self.board]))", "def show_board(board) -> None:\n for line in board:\n print('|'.join(line))", "def print_board(self):\n print(\n self.BOARD_TEMPLATE.format(\n *[self.COUNTER_REPRESENTATION[counter] for counter in self.board])\n )", "def print_board(self):\n self.board.print()", "def printall():\n print listAll()", "def show_board(self):\n print(self.game_board)", "def render_board(self):\n print \"\"\n for row in self._board:\n print row", "def print_board(self):\n print(\" 1 2 3 4 5 6 7\")\n for row in range(self.playable_row_range[0], self.playable_row_range[1]):\n for col in range(self.playable_column_range[0], self.playable_column_range[1]):\n print(\"[{piece}]\".format(piece=self.board[row][col]), end=\" \")\n print('\\n', end=\"\")\n print(\"\\n\")", "def printBoard(self):\n if self.side == self.WHITE or self.side == None:\n for r in [8,7,6,5,4,3,2,1]:\n for c in 'abcdefgh':\n p = self.getPiece(c,r) # print a8 first\n if p == None:\n print \" \",\n else:\n print self.getPieceName(p.type),\n print \"\"\n else:\n for r in [1,2,3,4,5,6,7,8]:\n for c in 'hgfedcba':\n p = self.getPiece(c,r) # print h1 first\n if p == None:\n print \" \",\n else:\n print self.getPieceName(p.type),\n print \"\"", "def printBoard(self):\n if self.side == self.WHITE or self.side == None:\n for r in [8,7,6,5,4,3,2,1]:\n for c in 'abcdefgh':\n p = self.getPiece(c,r) # print a8 first\n if p == None:\n print \" \",\n else:\n print self.getPieceName(p.type),\n print \"\"\n else:\n for r in [1,2,3,4,5,6,7,8]:\n for c in 'hgfedcba':\n p = self.getPiece(c,r) # print h1 first\n if p == None:\n print \" \",\n else:\n print self.getPieceName(p.type),\n print \"\"\n\n for r in [8,7,6,5,4,3,2,1]:\n for c in 'abcdefgh':\n p = self.getPiece(c,r)\n #if p != None and p.header.frame_id == \"chess_board\":\n # print \"Warning, frame is chess_board:\", c+str(r)", "def print_board(self):\n print(*self._board, sep=\"\\n\")", "def print_board(self):\n for row in self.board:\n for col in row:\n print(col, end=\"\")\n print()", "def get_board(self):\n for i in range(20):\n for j in range(20):\n print(self._board[i][j], end='|')\n print()", "def display_board(self):\n print(self.game_board)", "def print_board(self):\n board = \"\"\n for i in range(3):#need to change this in the future\n for j in range(3):#need to change this in the future\n board += self.board[i][j]\n if j != 2:#need to change this in the future\n board += \" | \"\n board += \"\\n\"\n return board", "def display(self):\n for row in self._board_area:\n print(row, end=\"\\n\")", "def breakdown_resources(self):\n print('Resources breakdown:')\n headers = ['Faction', 'Power', 'Leech', 'Coins', 'Ore', 'Knowledge', 'QIC', 'Power Tokens']\n rows = []\n for faction, stats in 
self.faction_stats.items():\n rows.append([\n faction,\n stats.power,\n stats.leech,\n stats.coins,\n stats.ore,\n stats.knowledge,\n stats.qic,\n stats.pt,\n ])\n print(tabulate(rows, headers=headers))", "def display_board():\n print(board[0], '|', board[1], '|', board[2])\n print(board[3], '|', board[4], '|', board[5])\n print(board[6], '|', board[7], '|', board[8])", "def printBoard(self):\n\t\tkey = [' ', 'X', 'O']\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[0][0]] + ' | ' + key[self.state[0][1]] + ' | ' + key[self.state[0][2]])\n\t\tprint(' | |')\n\t\tprint('-----------')\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[1][0]] + ' | ' + key[self.state[1][1]] + ' | ' + key[self.state[1][2]])\n\t\tprint(' | |')\n\t\tprint('-----------')\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[2][0]] + ' | ' + key[self.state[2][1]] + ' | ' + key[self.state[2][2]])\n\t\tprint(' | |')", "def show(self):\r\n for card in self.cards_list:\r\n print(card)", "def print_board(board):\n\n colors = {\n '*': None,\n '2': 'red',\n '4': 'green',\n '8': 'yellow',\n '16': 'blue',\n '32': 'magenta',\n '64': 'cyan',\n '128': 'grey',\n '256': 'white',\n '512': 'green',\n '1024': 'red',\n '2048': 'blue',\n '4096': 'magenta'\n };\n header = \"Use the arrows keys to play 2048! Press q to quit\";\n print(header);\n N = len(board);\n vertical_edge = \"\";\n for i in range(N + 2):\n vertical_edge += \"-\\t\";\n print(vertical_edge);\n for y in range(N):\n row = \"\";\n for x in board[y]:\n\n # Handling installation fail (no colors printed)\n if termcolor is not None:\n row += termcolor.colored(x, colors[x]);\n else:\n row += x\n\n row += \"\\t\";\n print(\"|\\t\" + row + \"|\");\n if y is not N - 1: print(\"\")\n print(vertical_edge);\n\n if GUI_runnable:\n gui.update_grid(board)\n gui.update()" ]
[ "0.68844163", "0.68560594", "0.6730749", "0.6727542", "0.67088443", "0.66923815", "0.66582954", "0.665567", "0.66136783", "0.6547921", "0.6542215", "0.6513973", "0.6429851", "0.6402211", "0.63863915", "0.635641", "0.63459575", "0.634288", "0.6329027", "0.6325588", "0.63046014", "0.6301112", "0.6293808", "0.62917304", "0.62642586", "0.6212882", "0.6210578", "0.6206719", "0.61863583", "0.6164251" ]
0.7192558
0
Returns PIL.Image objects for all the images in directory. If directory is not specified, uses current directory. Returns a 2tuple containing a list with a PIL.Image object for each image file in root_directory, and a list with a string filename for each image file in root_directory
def get_images(directory=None):
    if directory == None:
        directory = os.getcwd()  # Use working directory if unspecified

    image_list = []  # Initialize aggregaotrs
    file_list = []

    directory_list = os.listdir(directory)  # Get list of files
    for entry in directory_list:
        absolute_filename = os.path.join(directory, entry)
        try:
            image = PIL.Image.open(absolute_filename)
            file_list += [entry]
            image_list += [image]
        except IOError:
            pass  # do nothing with errors tying to open non-images
    return image_list, file_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_images(directory=None): #import from mask.py\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def get_images(directory=None):\r\n \r\n if directory == None:\r\n directory = os.getcwd() # Use working directory if unspecified\r\n \r\n image_list = [] # Initialize aggregaotrs\r\n file_list = []\r\n \r\n directory_list = os.listdir(directory) # Get list of files\r\n for entry in directory_list:\r\n if len(file_list)<2:\r\n absolute_filename = os.path.join(directory, entry)\r\n try:\r\n image = PIL.Image.open(absolute_filename)\r\n file_list += [entry]\r\n image_list += [image]\r\n except IOError:\r\n pass # do nothing with errors tying to open non-images\r\n return image_list, file_list", "def load_pic_in_directory(directory):\n return [Image.open(os.path.join(directory, img)) for img in os.listdir(directory)]", "def return_images(directory):\r\n allfiles = os.listdir(directory)\r\n image_list = [im for im in allfiles if '.jpg' in str(im)]\r\n image_list = [directory + im for im in image_list]\r\n return image_list", "def get_existing_images(directory):\n validate_directory(directory)\n directory += '/'\n try:\n return listdir(directory)\n except:\n mkdir(directory)\n return []", "def _getImagesFromDirectory(self, directoryPath):\n files = [f for f in listdir(directoryPath)\n if isfile(join(directoryPath, f))]\n for filePath in files:\n self._imageDictionary[filePath] = image.load(\n self._formatPath(directoryPath, filePath))", "def open_images_in(directory):\n\n files = [\n filename\n for filename in os.listdir(directory)\n if \"_\" in filename and not filename.startswith(\"joined\")\n ]\n tiles = []\n if len(files) > 0:\n i = 0\n for file in files:\n pos = get_image_column_row(file)\n im = Image.open(os.path.join(directory, file))\n\n position_xy = [0, 0]\n count = 0\n for a, b in zip(pos, im.size):\n position_xy[count] = a * b\n count = count + 1\n tiles.append(\n Tile(\n image=im,\n position=pos,\n number=i + 1,\n coords=position_xy,\n filename=file,\n )\n )\n i = i + 1\n return tiles", "def get_images_of_folder(folder):\n\n Settings.dev_print(\"getting images of folder: {}\".format(folder.get_title()))\n if not folder: return []\n imgs = []\n files = []\n valid_images = [\".jpg\",\".gif\",\".png\",\".tga\",\".jpeg\"]\n for f in os.listdir(folder.get_path()):\n ext = os.path.splitext(f)[1]\n if ext.lower() not in valid_images:\n continue\n file = File()\n setattr(file, \"path\", os.path.join(folder.get_path(),f))\n files.append(file)\n Settings.maybe_print(\"image path: {}\".format(os.path.join(folder.get_path(),f)))\n return files", "def parse_dir_imgs(root_pth):\n def visit(imgpths, pth, names):\n # Appends detected image filenames to a list.\n imgpths.extend([os.path.join(pth, name) for name in names\n if os.path.splitext(name)[1].lower() in img_exts])\n # Walk down directory tree and get the image file paths\n imgpaths = []\n for dp, foo, names in os.walk(root_pth):\n visit(imgpaths, dp, names)\n # Make lowercased list of imagefilenames\n imgnames = [os.path.split(pth)[1].lower() for pth in imgpaths]\n return imgnames, 
imgpaths", "def list_images(img_dir) -> Iterable[str]:\n extensions = (\".png\", \".jpg\", \".jpeg\", \".tif\", \".tiff\")\n\n paths = Path(img_dir).glob(\"**/*\")\n paths = filter(lambda p: p.is_file() and p.suffix.lower() in extensions, paths)\n return (str(p) for p in paths)", "def getImages(path):\n files = list()\n\n for f in listdir(path):\n file = join(path, f)\n if isfile(file):\n files.append(getImage(file))\n\n return files", "def make_image_list(directory):\r\n\tonly_files = [file for file in listdir(directory) if isfile(join(directory, file))]\r\n\treturn only_files", "def readImages(image_dir):\n extensions = ['bmp', 'pbm', 'pgm', 'ppm', 'sr', 'ras', 'jpeg',\n 'jpg', 'jpe', 'jp2', 'tiff', 'tif', 'png']\n\n search_paths = [os.path.join(image_dir, '*.' + ext) for ext in extensions]\n image_files = sorted(sum(map(glob, search_paths), []))\n images = [cv2.imread(f, cv2.IMREAD_UNCHANGED | cv2.IMREAD_COLOR) for f in image_files]\n\n bad_read = any([img is None for img in images])\n if bad_read:\n raise RuntimeError(\n \"Reading one or more files in {} failed - aborting.\"\n .format(image_dir))\n\n return images", "def load_images_from_folder(folder):\n images = []\n for filename in os.listdir(folder):\n img = Image.open(os.path.join(folder,filename))\n images.append(img)\n return images", "def search_images(\n current_dir: str,\n exts={\"jpg\", \"png\", \"jpeg\", \"gif\"}\n) -> typing.Iterable[typing.Tuple[str, str]]:\n for root, _, files in os.walk(current_dir):\n for file_name in files:\n ext = file_name.rsplit('.', 1)[-1].lower()\n if ext in exts:\n yield os.path.join(root, file_name), file_name", "def getAllImages(self):\n\n images = list(self._images)\n for s in self._subdirs:\n images += s.getAllImages()\n return images", "def loadimages(root):\n imgs = []\n\n def add_json_files(path, ):\n for imgpath in glob.glob(path + \"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('png', \"json\")))\n for imgpath in glob.glob(path + \"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('jpg', \"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path)\n if os.path.isdir(os.path.join(path, o))]\n if len(folders) > 0:\n for path_entry in folders:\n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs", "def _locate_images(self):\r\n extensions = '|'.join(self.valid_extensions)\r\n extension_re = re.compile('.+\\.(%s)$' % extensions, re.IGNORECASE)\r\n files = sorted(os.listdir(self.path))\r\n\r\n images = []\r\n for root, dirs, files in os.walk(self.path, followlinks=self.config['follow_links']):\r\n for filename in sorted(files):\r\n if not filename.startswith('.') and extension_re.match(filename):\r\n images.append(Image(path=os.path.join(root, filename), config=self.config))\r\n if not self.config['recursive']:\r\n break\r\n\r\n if not images:\r\n raise SourceImagesNotFoundError(self.path)\r\n\r\n images = sorted(images, reverse=self.config['algorithm_ordering'][0] != '-')\r\n\r\n return images", "def get_lists_in_dir(dir_path):\n image_list = []\n\n for filename in glob.glob(dir_path + '/*.jpg'):\n image_list.append(filename)\n return image_list", "def get_images(path):\n\n # Cast path to absolute path\n absolute = abspath(path)\n\n img_lis = [] 
# Holds images in a folder\n file_lis = get_files(absolute)\n\n # Now get the images within file list\n img_lis = [f for f in file_lis if is_filetype(f)]\n\n return img_lis", "def get_images(self, file_path: str) -> Iterable[Image]:\n return []", "def get_all_image_paths(self):\n image_paths, image_labels = [], []\n for directory_name, subdirectory_list, file_list in os.walk(self.root_directory):\n for file_name in file_list:\n if file_name.endswith(('.jpg',)):\n image_paths.append(os.path.join(directory_name, file_name))\n # Translates labels to 0-26 as recommended in the exercise description\n image_labels.append(ord(directory_name[-1]) - 97)\n return image_paths, image_labels", "def _get_images(image_path):\n logger.debug(\"Getting images: '%s'\", image_path)\n if not os.path.isdir(image_path):\n logger.debug(\"Folder does not exist\")\n return None\n files = [os.path.join(image_path, f)\n for f in os.listdir(image_path) if f.lower().endswith((\".png\", \".jpg\"))]\n logger.debug(\"Image files: %s\", files)\n return files", "def create_image_lists(image_dir):\n if not gfile.Exists(image_dir):\n print(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n result = {}\n sub_dirs = [x[0] for x in os.walk(image_dir)]\n # The root directory comes first, so skip it.\n is_root_dir = True\n for sub_dir in sub_dirs:\n print('in sub loop')\n extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']\n file_list = []\n dir_name = os.path.basename(image_dir)\n print(\"Looking for images in '\" + image_dir + \"'\")\n for extension in extensions:\n file_glob = os.path.join(image_dir, dir_name, '*.' + extension)\n file_list.extend(glob.glob(file_glob))\n if not file_list:\n print('No files found')\n continue\n if len(file_list) < 20:\n print('WARNING: Folder has less than 20 images, which may cause issues.')\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\n testing_images = []\n for file_name in file_list:\n base_name = os.path.basename(file_name)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put an image in, the data set creator has a way of\n # grouping photos that are close variations of each other. For example\n # this is used in the plant disease data set to group multiple pictures of\n # the same leaf.\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n testing_images.append(base_name)\n return testing_images", "def get_image_list(source_dir):\n\n dir_list = os.path.os.listdir(source_dir)\n# print(dir_list)\n image_list = []\n os.chdir(source_dir)\n for file in dir_list:\n print(\"Inspecting.... 
: {}\".format(file))\n\n try:\n if Image.open(file).format:\n image_list.append(file)\n print(\"{} : is an image\".format(file))\n except Exception as e:\n print(\"{} : failed the imageness test.i \\n {}\".format(file, e))\n continue\n\n# print(image_list)\n return image_list", "def list_images(path=['.']):\n for image_dir in set(path):\n if not os.path.isdir(image_dir):\n continue\n for filename in os.listdir(image_dir):\n bname, ext = os.path.splitext(filename)\n if ext.lower() not in VALID_IMAGE_EXTS:\n continue\n\n filepath = os.path.join(image_dir, filename)\n yield strutils.decode(filepath)", "def get_img_files(images, db):\n img_dir = db.source\n if img_dir == None:\n raise ValueError('Cannot locate file without a base path. This method looks for it at \\\n db.source, which is not set. This should be set by the loader during DB construction!')\n img_dir = path.join(img_dir, 'img') \n locs = db.get_img_locs(images)\n titles = db.get_location_titles()\n returnval = []\n for image in images:\n loc = locs[image]\n if loc is None:\n raise ValueError('The image %s could not be found' % image)\n returnval.append(path.join(img_dir, titles[loc], str(image) + '.jpg'))\n return returnval", "def loadimages(root):\n imgs = []\n\n def add_json_files(path,):\n for imgpath in glob.glob(path+\"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('png',\"json\")))\n for imgpath in glob.glob(path+\"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('jpg',\"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path) \n if os.path.isdir(os.path.join(path,o))]\n if len(folders)>0:\n for path_entry in folders: \n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs", "def getimagelist(folder):\n imagefolder = Path(folder) \n imagelist = imagefolder.glob(\"**/*.png\") \n return list(imagelist)", "def readImages(image_dir):\n images = {}\n extensions = ['bmp', 'pbm', 'pgm', 'ppm', 'sr', 'ras', 'jpeg',\n 'jpg', 'jpe', 'jp2', 'tiff', 'tif', 'png']\n\n search_paths = [os.path.join(image_dir, '*.' + ext) for ext in extensions]\n image_files = sorted(reduce(list.__add__, map(glob, search_paths)))\n for f in image_files:\n images[f[f.rfind(\"/\") + 1:f.rfind(\".\")]] = cv2.imread(f, cv2.IMREAD_UNCHANGED | cv2.IMREAD_COLOR)\n\n return images" ]
[ "0.8136434", "0.80238104", "0.76039886", "0.745085", "0.74299073", "0.73820263", "0.72008425", "0.7091337", "0.7078133", "0.6998026", "0.6991185", "0.6929813", "0.6901793", "0.68704295", "0.6862504", "0.6842621", "0.67554075", "0.67550236", "0.674418", "0.67396843", "0.66955125", "0.6692539", "0.6665976", "0.6664041", "0.66340667", "0.6628912", "0.66196966", "0.6572327", "0.65365046", "0.65321606" ]
0.81507176
0
Assumes model was saved on GPU. Will load based off of cur_dev.
def load(self, path, cur_dev):
    if cur_dev == 'cpu':
        self.load_state_dict(torch.load(path, map_location=torch.device('cpu')))
    else:
        self.load_state_dict(torch.load(path))
        self.to(torch.device("cuda"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(self):\n try:\n if self.model.is_cuda:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \"save_point.pth\")))\n else:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \\\n \"save_point.pth\"), map_location=\"cpu\"))\n except:\n sys.exit(\"Unable to load previous model\")", "def load_model(self, gpus=1):\r\n\t\t\r\n\t\tif self.model != None:\r\n\t\t\treturn\r\n\r\n\t\t## build the model on the CPU if parallelism is targeted\r\n\t\tif isinstance(gpus, Sequence):\r\n\t\t\tif len(gpus) != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus[0])\r\n\t\t\t\tmultigpu = False\r\n\t\telse:\r\n\t\t\tif gpus != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus)\r\n\t\t\t\tmultigpu = False\r\n\r\n\r\n\t\tif self.__prop__(\"Resume\"):\r\n\t\t\tself.model = keras.models.load_model(\r\n\t\t\t\tself.__prop__(\"SnapshotDirectory\") + self.__prop__(\"Prefix\") + self.__prop__(\"Resume\") + '.h5\"')\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\telse: \r\n\t\t\t\r\n\t\t\twith tensorflow.device(device):\r\n\t\t\t\tif self.__prop__(\"Prefix\").startswith(\"i3PosNet_VGG16\"):\r\n\t\t\t\t\tself.model = i3PosNetVGG(\r\n\t\t\t\t\t\tinput_shape=self.__prop__(\"InputShape\"), \r\n\t\t\t\t\t\tout_number=self.__prop__(\"TargetShape\"),\r\n\t\t\t\t\t\tlayer_count=self.__prop__(\"LayerCount\"), \r\n\t\t\t\t\t\tfc_layer_count=self.__prop__(\"FCLayerCount\"), \r\n\t\t\t\t\t\tfc_reg=self.__prop__(\"FCReg\"), \r\n\t\t\t\t\t\tconv_reg=self.__prop__(\"ConvReg\"), \r\n\t\t\t\t\t\tshrinking=self.__prop__(\"Shrinking\"),\r\n\t\t\t\t\t\tpadding=self.__prop__(\"Padding\"))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.model = i3PosNet(image_shape, out = self.__prop__(\"TargetShape\"))\r\n\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\t\t\t\r\n\t\t\t# clear model\r\n\t\t\t# try:\r\n\t\t\t\t# del self.model\r\n\t\t\t# except:\r\n\t\t\t\t# pass\r\n\r\n\t\t\tif self.__prop__(\"Optimizer\") == \"SGD\":\r\n\t\t\t\toptimizer = SGD(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\tmomentum= self.__prop__(\"Momentum\"),\r\n\t\t\t\t\tnesterov=True)\r\n\t\t\telif self.__prop__(\"Optimizer\") == \"Adam\":\r\n\t\t\t\toptimizer = Adam(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\t# use defaults for these for now (b1 = 0.9, b2 = 0.999, e = 1e-8\r\n\t\t\t\t\tbeta_1=self.__prop__(\"Beta1\"),\r\n\t\t\t\t\tbeta_2=self.__prop__(\"Beta2\"),\r\n\t\t\t\t\tepsilon=self.__prop__(\"Epsilon\")\r\n\t\t\t\t\t)\r\n\t\t\t\r\n\t\t\tself.model.compile(loss='mean_squared_error', optimizer=optimizer)", "def load(self):\r\n # self.model.load_state_dict(torch.load(os.path.join(self.ckpt_dir, 'best_model_state_dict.pt')))\r\n if torch.cuda.is_available():\r\n self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_INN.pt'))\r\n else:\r\n self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_INN.pt'), map_location=torch.device('cpu'))", "def load_model(model, model_index, device=\"cpu\"):\n with open(\"trained_local_model\"+str(model_index), \"rb\") as f_:\n model.load_state_dict(torch.load(f_))\n 
model.to(device)\n return model", "def load_model(self):\n if self.model_file is None:\n print(\"argument 'model_file' is error.\")\n return None\n\n if self.use_gpu:\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n else:\n self.device = torch.device(\"cpu\")\n\n if os.path.exists(self.model_file):\n self.Gmodel = nn.DataParallel(G()).to(self.device)\n print(self.Gmodel)\n print(\"Lode model file: %s\" % self.model_file)\n ckpoint = torch.load(self.model_file, map_location=self.device)\n self.load_resl = ckpoint[\"resl\"]\n resl = self.start_resl\n while resl < self.load_resl:\n self.Gmodel.module.grow_network()\n self.Gmodel.to(self.device)\n resl *= 2\n\n self.Gmodel.load_state_dict(ckpoint[\"G\"])\n print(\"Load model done...\")\n\n else:\n print(\"Load model fail...\")\n return None\n\n return True", "def load(self):\n utils.get_previous_weights_from_gdrive(self.config.model_folder)\n last_used_model = utils.get_latest_model_name(self.config.model_folder)\n self.model = load_model(last_used_model)\n self.model.summary()", "def load_model(self):\n saved_path = self.config.path_tmp / self.model.model_name\n if saved_path.exists():\n self.model.load_weights(str(saved_path / 'model.vec'))", "def load_model(self, save=False):\n if not os.path.exists(self.saved_model):\n print('No saved model in {}'.format(self.saved_model))\n\n model_dict = torch.load(self.saved_model, map_location={'cuda:0': 'cpu'})\n\n save_to = self.model_dir\n\n if self.args.config:\n normal_i = 1\n j = 0\n\n for i in range(0, self.model.num_internal_blocks):\n fc_weight = model_dict['mod_list.' + str(j) + '.weight'].transpose(0, 1)\n fc_bias = model_dict['mod_list.' + str(j) + '.bias']\n\n bn_weight = model_dict['mod_list.' + str(j+1) + '.weight']\n bn_bias = model_dict['mod_list.' + str(j+1) + '.bias']\n bn_mean = model_dict['mod_list.' + str(j+1) + '.running_mean']\n bn_var = model_dict['mod_list.' + str(j+1) + '.running_var']\n\n if save:\n blk_path = os.path.join(save_to, 'blk' + str(i + 1))\n utils.ensure_dir(blk_path)\n\n with open(os.path.join(blk_path, 'lin_weight.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n fc_weight = Binarize(fc_weight)\n for i in range(fc_weight.shape[0]):\n lwriter.writerow(fc_weight[i].tolist())\n\n with open(os.path.join(blk_path, 'lin_bias.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n lwriter.writerow(fc_bias.tolist())\n\n with open(os.path.join(blk_path, 'bn_weight.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n lwriter.writerow(bn_weight.tolist())\n\n with open(os.path.join(blk_path, 'bn_bias.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n lwriter.writerow(bn_bias.tolist())\n\n with open(os.path.join(blk_path, 'bn_mean.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n lwriter.writerow(bn_mean.tolist())\n\n with open(os.path.join(blk_path, 'bn_var.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n lwriter.writerow(bn_var.tolist())\n\n j += 3\n\n fc_out_w = Binarize(model_dict['mod_list.' + str(j) + '.weight'].transpose(0, 1))\n fc_out_b = model_dict['mod_list.' 
+ str(j) + '.bias']\n if save:\n dir_out = os.path.join(save_to, 'out_blk')\n utils.ensure_dir(dir_out)\n\n with open(os.path.join(dir_out, 'lin_weight.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n fc_out_w = Binarize(fc_out_w)\n for i in range(fc_out_w.shape[0]):\n lwriter.writerow(fc_out_w[i].tolist())\n\n with open(os.path.join(dir_out, 'lin_bias.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n lwriter.writerow(fc_out_b.tolist())\n\n return\n\n\n # this already assumes inputs are binary\n # so we do not load the batch normalization and binarization layers\n\n for i in range(1, self.model.num_internal_blocks + 1):\n fc_weight = model_dict['fc' + str(i) + '.weight'].transpose(0, 1)\n fc_bias = model_dict['fc' + str(i) + '.bias']\n\n bn_weight = model_dict['bn' + str(i) + '.weight']\n bn_bias = model_dict['bn' + str(i) + '.bias']\n bn_mean = model_dict['bn' + str(i) + '.running_mean']\n bn_var = model_dict['bn' + str(i) + '.running_var']\n\n if save:\n blk_path = os.path.join(save_to, 'blk' + str(i))\n utils.ensure_dir(blk_path)\n\n with open(os.path.join(blk_path, 'lin_weight.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n fc_weight = Binarize(fc_weight)\n for i in range(fc_weight.shape[0]):\n lwriter.writerow(fc_weight[i].tolist())\n\n with open(os.path.join(blk_path, 'lin_bias.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n lwriter.writerow(fc_bias.tolist())\n\n with open(os.path.join(blk_path, 'bn_weight.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n lwriter.writerow(bn_weight.tolist())\n\n with open(os.path.join(blk_path, 'bn_bias.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n lwriter.writerow(bn_bias.tolist())\n\n with open(os.path.join(blk_path, 'bn_mean.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n lwriter.writerow(bn_mean.tolist())\n\n with open(os.path.join(blk_path, 'bn_var.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n lwriter.writerow(bn_var.tolist())\n\n fc_out_w = Binarize(model_dict['fc5.weight'].transpose(0, 1))\n fc_out_b = model_dict['fc5.bias']\n\n if save:\n dir_out = os.path.join(save_to, 'out_blk')\n utils.ensure_dir(dir_out)\n\n with open(os.path.join(dir_out, 'lin_weight.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n fc_out_w = Binarize(fc_out_w)\n for i in range(fc_out_w.shape[0]):\n lwriter.writerow(fc_out_w[i].tolist())\n\n with open(os.path.join(dir_out, 'lin_bias.csv'), 'wb') as csvfile:\n lwriter = csv.writer(csvfile, delimiter=',')\n lwriter.writerow(fc_out_b.tolist())", "def _load_model_from_trained_params(self):\n self.ent_emb = tf.constant(self.trained_model_params[0])\n self.rel_emb = tf.constant(self.trained_model_params[1])", "def checkpoint_load(checkpoint_path, gpu):\n model_info = torch.load(checkpoint_path)\n model = models.vgg19(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n \n model.class_to_idx = model_info['class_to_idx']\n\n model = classifier(model)\n model.load_state_dict(model_info[\"model_state_dict\"])\n return model, model.class_to_idx", "def setup(self):\n print(\"setup\")\n \n self.modelToUse = 1\n if self.train:\n print(\"train\")\n else:\n print(\"no train\")\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.envSize = 17\n \n #init model\n if self.train or not os.path.isfile(\"my-saved-model.pt\"):\n 
self.logger.info(\"Setting up model from scratch.\")\n if self.modelToUse == 0:\n self.policy_net = Model_global_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_global_view(self.envSize, self.envSize, 6).to(device)\n elif self.modelToUse == 1:\n self.policy_net = Model_local_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_local_view(self.envSize, self.envSize, 6).to(device)\n else:\n self.policy_net = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model.load_state_dict(self.policy_net.state_dict())\n self.model.eval()\n else:\n self.logger.info(\"Loading model from saved state.\")\n with open(\"my-saved-model.pt\", \"rb\") as file:\n if self.modelToUse == 0:\n self.model = Model_global_view(self.envSize, self.envSize, 6)\n elif self.modelToUse == 1:\n self.model = Model_local_view(self.envSize, self.envSize, 6)\n else:\n self.model = Model_combined_view(self.envSize, self.envSize, 6)\n if torch.cuda.is_available():\n self.model.load_state_dict(torch.load(file))\n self.model.to(device)\n else:\n self.model.load_state_dict(torch.load(file, map_location=device))", "def _load_best_model(self) -> None:\n self.trainer.resume()", "def load(self):\n print(\"==> Loading model from\", self.model_dir)\n self.model = tf.keras.models.load_model(self.model_dir)", "def load_best_model(self) -> None:\n self.resume()", "def load_device():", "def load_model(self) -> None:\n\n try:\n model_class = MODEL_TYPES[self.model_type]\n except KeyError:\n raise KeyError(f\"model type: {self.model_type} not supported\")\n\n if (\n os.path.exists(self.resources_path)\n and len(os.listdir(self.resources_path)) > 0\n ):\n model_name_or_path = self.resources_path\n else:\n model_name_or_path = self.model_name\n\n if self.model_type == \"stable_diffusion\":\n self.model = model_class.from_pretrained(\n model_name_or_path,\n use_auth_token=self.auth_token,\n )\n else:\n self.model = model_class.from_pretrained(model_name_or_path)\n\n self.model.to(self.device)", "def _load_model(self):\n # Initial memory value for recurrence.\n self.prev_mem = tf.zeros((self.train_batch_size, self.memory_dim))\n\n # choose RNN/GRU/LSTM cell\n with tf.variable_scope(\"forward\"):\n self.forward_cell = self.get_cell()\n with tf.variable_scope(\"backward\"):\n self.backward_cell = self.get_cell()\n\n # embedding model\n if not self.attention:\n with tf.variable_scope(\"forward\"):\n self.dec_outputs_fwd, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(\n self.enc_inp_fwd, self.dec_inp, self.forward_cell,\n self.vocab_size, self.vocab_size, self.seq_length)\n with tf.variable_scope(\"forward\", reuse=True):\n self.dec_outputs_fwd_tst, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(\n self.enc_inp_fwd, self.dec_inp, self.forward_cell,\n self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)\n\n with tf.variable_scope(\"backward\"):\n self.dec_outputs_bwd, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(\n self.enc_inp_bwd, self.dec_inp, self.backward_cell,\n self.vocab_size, self.vocab_size, self.seq_length)\n\n with tf.variable_scope(\"backward\", reuse=True):\n self.dec_outputs_bwd_tst, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(\n self.enc_inp_bwd, self.dec_inp, self.backward_cell,\n self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)\n else:\n with tf.variable_scope(\"forward\"):\n self.dec_outputs_fwd, _ = tf.nn.seq2seq.embedding_attention_seq2seq(\n self.enc_inp_fwd, self.dec_inp, 
self.forward_cell,\n self.vocab_size, self.vocab_size, self.seq_length)\n with tf.variable_scope(\"forward\", reuse=True):\n self.dec_outputs_fwd_tst, _ = tf.nn.seq2seq.embedding_attention_seq2seq(\n self.enc_inp_fwd, self.dec_inp, self.forward_cell,\n self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)\n\n with tf.variable_scope(\"backward\"):\n self.dec_outputs_bwd, _ = tf.nn.seq2seq.embedding_attention_seq2seq(\n self.enc_inp_bwd, self.dec_inp, self.backward_cell,\n self.vocab_size, self.vocab_size, self.seq_length)\n\n with tf.variable_scope(\"backward\", reuse=True):\n self.dec_outputs_bwd_tst, _ = tf.nn.seq2seq.embedding_attention_seq2seq(\n self.enc_inp_bwd, self.dec_inp, self.backward_cell,\n self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)", "def load_model():\n logger.info('load_model called')\n return 1", "def loadModel(self):\n self.model.load_state_dict(torch.load(os.path.join(self.model_save_dir, '{}_trained.pt'.format(self.model_name)), map_location=torch.device(device)))\n return self.model", "def load_model(self):\n Thread(target=self.__load_model).start()", "def __load_model(self):\n loaded = load(self.__file_name)\n self.__model = loaded['model']\n self.__meta_data = loaded['metadata']\n self.__is_ready = True", "def _load_model(self):\n self.model = tf.keras.experimental.load_from_saved_model(\n self.m_cfg['load_model'], custom_objects=self.custom_objects)\n\n ref = 1 if self.m_cfg['configs']['recursive'] else self.levels\n self.opt = [self._inst_optimizer() for _ in range(ref)]\n self.loss = Losses(self.m_cfg['configs']['loss']).value\n\n l_groups = np.split(np.array(self.model.layers), ref)\n self.vars = list(map(\n lambda g: list(chain(*map(lambda e: e.variables, g))), l_groups))", "def mgcLoadMod(self, **kwargs):\n\n def_vals = {\"ld_model\" : False,\n \"model_name\" : 'best_model.hdf5'\n } # default parameters value\n\n for k, v in def_vals.items():\n kwargs.setdefault(k, v)\n \n self.ld_model = kwargs['ld_model']\n self.model_name = kwargs['model_name']\n \n import numpy as np \n if self.ld_model == True:\n print('best epoch')\n # load model weights\n model = self.model\n\n # Load checkpoint best model\n filepath=\"output/model/\" + self.model_name\n\n # load model weights\n model.load_weights(filepath, by_name=False) #model.load_weights\n #model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\"accuracy\"])\n\n else:\n print('last epoch')\n model = self.model\n\n self.y_pred_prob = model.predict(self.x_test, batch_size=self.batch_size, verbose=0)\n self.y_pred = np.argmax(self.y_pred_prob, axis = 1)\n self.y_true = np.argmax(self.y_test, 1)\n \n return model", "def load_full_model(self, session, model_dir):\n self.full_saver.restore(session, model_dir)", "def train_on_one_batch(self):\n save_model_path = './savedModel/cnn-model'\n self.build_graph(save_model_path)\n\n with tf.device('/gpu:0'):\n tf.reset_default_graph()\n with tf.Session(graph=tf.get_default_graph()) as sess: #config=tf.ConfigProto(log_device_placement=True)\n try:\n graph = self.__load_graph(sess, save_model_path)\n self.__train_and_report(sess, graph, range(1, 2), save_model_path)\n\n except Exception as e:\n logger.error(\"Something is missing from the previous saved graph, remove it and regenerate graph\")\n shutil.rmtree(\"./savedModel\")\n exit()", "def load_model(model, device, model_path):\n if os.path.exists(model_path):\n print(\"Reading model from \", model_path)\n checkpoint = torch.load(model_path, 
map_location=torch.device(device))\n model.load_state_dict(checkpoint['state_dict'])\n return model\n else:\n raise RuntimeError('Model does not exist!')", "def load_model_file(device_index):\n print(\"\\nStart loading model...\")\n\n return kdp_wrapper.isi_load_nef(device_index, MODEL_FILE, ISI_APP_ID)", "def load_model(self):\n # Load the model\n print('Loading model:', self.model_path)\n t0 = time.time()\n model = load_model(self.model_path)\n t1 = time.time()\n print('Loaded in:', t1 - t0)\n return model", "def load_model(self):\n pass", "def init_game_setting(self):\n np.random.seed(1) \n self.s_prev = np.zeros((80, 80, 1))\n print('loading trained model from {}'.format(self.model_path))\n self.sess = tf.InteractiveSession(graph=self.model)\n self.saver.restore(self.sess, self.model_path)" ]
[ "0.6873723", "0.6652313", "0.66168183", "0.6520783", "0.64138526", "0.6399735", "0.6352811", "0.6347352", "0.63034415", "0.62348884", "0.6229719", "0.62251854", "0.62218446", "0.6182424", "0.6153874", "0.6088584", "0.6076757", "0.6065099", "0.6064757", "0.60603446", "0.6056862", "0.6045546", "0.60418767", "0.60401195", "0.60294217", "0.602937", "0.6028613", "0.6025664", "0.6015614", "0.59959364" ]
0.7106072
0
Initializes a Status, but is actually unused. Instead you should
def __init__(self: "Status") -> None:
    raise NotImplementedError(
        "Please instantiate one of the `Status` "
        "subclasses:\n"
        "\n\t- `Failed`"
        "\n\t- `NotStarted`"
        "\n\t- `InProgress(progress)`"
        "\n\t- `Succeeded`"
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(__self__, *,\n status: Optional[str] = None):\n if status is None:\n status = 'disabled'\n if status is not None:\n pulumi.set(__self__, \"status\", status)", "def __init__(__self__, *,\n status: Optional[str] = None,\n type: Optional[str] = None):\n if status is None:\n status = 'disabled'\n if status is not None:\n pulumi.set(__self__, \"status\", status)\n if type is None:\n type = 'Notary'\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def __init__(self, status=None, data=None):\n if not isinstance(status, OperStatus):\n raise TypeError(status)\n self.status = status\n self.data = data", "def __init__(self, status=None):\n if status is None:\n status = self.STATUS_NONE\n self.status = status\n self.messages = []\n self.last_command = None\n self.message_ids = []", "def __init__(__self__, *,\n update_status: str):\n pulumi.set(__self__, \"update_status\", update_status)", "def status(self, status: dict):\n pass", "def __init__(self, p_code = 1, p_name = 'Nenhum'):\n\n if not isinstance(p_code, int):\n raise exception.Exception('Erro durante a instanciação da classe \"classes.Status\": O parâmetro \"p_code\" deve ser do tipo \"int\".')\n\n if not isinstance(p_name, str):\n raise exception.Exception('Erro durante a instanciação da classe \"classes.Status\": O parâmetro \"p_name\" deve ser do tipo \"str\".')\n\n self.code = p_code\n self.name = p_name", "def __init__(self):\n self._status = GoalPursuitReadiness.Status.NOT_READY", "def __init__(__self__, *,\n exit_code: Optional[pulumi.Input[int]] = None,\n status: Optional[pulumi.Input['GoogleRpcStatusArgs']] = None):\n if exit_code is not None:\n pulumi.set(__self__, \"exit_code\", exit_code)\n if status is not None:\n pulumi.set(__self__, \"status\", status)", "def __init__(self, *args, **kwds):\n DiagnosticStatus.__init__(self, *args, **kwds)", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def __init__(self,\r\n level=None,\r\n status=None,\r\n status_code=None,\r\n status_code_description=None,\r\n timestamp=None):\r\n\r\n # Initialize members of the class\r\n self.level = level\r\n self.status = status\r\n self.status_code = status_code\r\n self.status_code_description = status_code_description\r\n self.timestamp = APIHelper.RFC3339DateTime(timestamp) if timestamp else None", "def __init__(self):\n self.messages = {\n \"updated\": \"Incident status successfully updated\",\n \"read\": \"Incident(s) successfully retrieved\"\n }\n\n self.status_types = {\n \"DRAFT\": \"draft\",\n \"RESOLVED\": \"resolved\",\n \"REJECTED\": \"rejected\",\n \"UNDER_INVESTIGATION\": \"under investigation\",\n }", "def __init__(self, status):\n self.period_s = 0.1\n self.distance_threshold = 5.0\n self.time_below_threshold_s = 0.2\n self.time_above_threshold_s = 2.0\n self.ir_sensor = InfraredSensor(INPUT_4)\n self.ir_sensor.mode = InfraredSensor.MODE_IR_PROX\n self.last_below_threshold_time_s = None\n self.last_above_threshold_time_s = None\n self.status = status\n self.exit = threading.Event()\n super(CollisionWatch, self).__init__()", "def SetStatus(self, status):\r\n self.status = status", "def __init__(self, status, metadata=None, **kwargs):\n\n if isinstance(status, Status):\n self.status = status\n 
elif isinstance(status, dict):\n self.status = Status(**status)\n self.status = Status(**status)\n\n if metadata is None:\n self.metadata = MetaData()\n elif isinstance(metadata, MetaData):\n self.metadata = metadata\n elif isinstance(metadata, dict):\n self.metadata = MetaData.from_dict(metadata)\n\n for k, v in kwargs.items():\n # set attributes for whatever else you want using keyword args\n setattr(self, k.lower(), v)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(LIMITS_STATUS, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.sysid is None:\n self.sysid = 0\n if self.compid is None:\n self.compid = 0\n if self.limits_state is None:\n self.limits_state = 0\n if self.last_trigger is None:\n self.last_trigger = 0\n if self.last_action is None:\n self.last_action = 0\n if self.last_recovery is None:\n self.last_recovery = 0\n if self.last_clear is None:\n self.last_clear = 0\n if self.breach_count is None:\n self.breach_count = 0\n if self.mods_enabled is None:\n self.mods_enabled = 0\n if self.mods_required is None:\n self.mods_required = 0\n if self.mods_triggered is None:\n self.mods_triggered = 0\n else:\n self.sysid = 0\n self.compid = 0\n self.limits_state = 0\n self.last_trigger = 0\n self.last_action = 0\n self.last_recovery = 0\n self.last_clear = 0\n self.breach_count = 0\n self.mods_enabled = 0\n self.mods_required = 0\n self.mods_triggered = 0", "def __init__(self, label=None, status_counts=None):\n self.swagger_types = {\n 'label': str,\n 'status_counts': StatusCounts\n }\n\n self.attribute_map = {\n 'label': 'label',\n 'status_counts': 'statusCounts'\n }\n\n self._label = label\n self._status_counts = status_counts", "def status(self, status: str):\n\n self._status = status", "def status(self, status: str):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status" ]
[ "0.7733864", "0.7512126", "0.75076115", "0.740955", "0.7241169", "0.7056067", "0.70380515", "0.7006886", "0.6964561", "0.69371766", "0.69135594", "0.69135594", "0.69135594", "0.69135594", "0.69135594", "0.69135594", "0.69135594", "0.69052196", "0.68986386", "0.6813976", "0.680187", "0.6794902", "0.677587", "0.6749192", "0.6724038", "0.6724038", "0.671785", "0.671785", "0.671785", "0.671785" ]
0.81595194
0
Determines if two status are equal. Two statues are considered equal if they are of the same Status type; if both `Status`es are `InProgress`, then their progress values must compare equal also.
def __eq__(self: "Status", other: "Status") -> bool:  # type: ignore
    self_type = type(self)
    other_type = type(other)
    if self_type is InProgress and other_type is InProgress:
        return self.progress == other.progress  # type: ignore
    else:
        return self_type == other_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n if not isinstance(other, ResultStatus):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, PrecisEngineTaskStatusProgress):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, ServiceStatus):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other: Union[int, Status]):\n if isinstance(other, int):\n return self.code == other\n\n return isinstance(other, self.__class__) and self.code == other.code", "def test_equals_with_different_issue_statuses(self):\n measurement_1 = Measurement(self.metric(), issue_status=[{\"issue_id\": \"issue_id\"}])\n measurement_2 = Measurement(self.metric())\n self.assertFalse(measurement_1.equals(measurement_2))", "def __eq__(self, other):\n if not isinstance(other, ServerPolicyStatus):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, NodeStatus):\n return False\n\n return self.__dict__ == other.__dict__", "def __lt__(self: \"Status\", other: \"Status\") -> bool:\n self_type = type(self)\n other_type = type(other)\n both_not_in_progress = not self.in_progress and not other.in_progress\n\n if both_not_in_progress and self_type is other_type:\n return False\n elif self_type is Failed:\n return True\n elif self_type is NotStarted and other_type in (InProgress, Succeeded):\n return True\n elif self_type is InProgress and other_type is InProgress:\n return self.progress < other.progress # type: ignore\n elif self_type is InProgress and other_type is Succeeded:\n return True\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, DestinyPublicActivityStatus):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, AssetManagedDeviceStatus):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, V1alpha1ApplicationStatus):\n return False\n\n return self.to_dict() == other.to_dict()", "def __eq__(self, other):\n return (self.get_status() == other.get_status() and \n self.get_nickname() == other.get_nickname() and \n self._email == other._email)", "def __eq__(self, other):\n if not isinstance(other, StatusFlowDirectToVo):\n return False\n\n return self.__dict__ == other.__dict__", "def __equals__(self, to_compare):\n try:\n # Try to compare - this likely fails when it is compared to a non\n # RcStatus object\n return \\\n (self.was_available_once == to_compare.was_available_once) and \\\n (self.is_available == to_compare.is_available) and \\\n (self.signal_strength_percent == to_compare.signal_strength_percent)\n\n except AttributeError:\n return False", "def _is_equal_same_type(self, other):\n # approximate_online_count\n if self.approximate_online_count != other.approximate_online_count:\n return False\n \n # approximate_user_count\n if self.approximate_user_count != other.approximate_user_count:\n return False\n \n # description\n if self.description != other.description:\n return False\n \n # discovery_splash_hash\n if self.discovery_splash_hash != other.discovery_splash_hash:\n return False\n \n # discovery_splash_type\n if self.discovery_splash_type != other.discovery_splash_type:\n return False\n \n # emojis\n if self.emojis != other.emojis:\n return False\n \n # features\n if self.features != other.features:\n return False\n \n # icon_hash\n if self.icon_hash != other.icon_hash:\n return 
False\n \n # icon_type\n if self.icon_type != other.icon_type:\n return False\n \n # id\n if self.id != other.id:\n return False\n \n # invite_splash_hash\n if self.invite_splash_hash != other.invite_splash_hash:\n return False\n \n # invite_splash_type\n if self.invite_splash_type != other.invite_splash_type:\n return False\n \n # stickers\n if self.stickers != other.stickers:\n return False\n \n # name\n if self.name != other.name:\n return False\n \n return True", "def __equals__(self, to_compare):\n try:\n # Try to compare - this likely fails when it is compared to a non\n # ActuatorOutputStatus object\n return \\\n (self.active == to_compare.active) and \\\n (self.actuator == to_compare.actuator)\n\n except AttributeError:\n return False", "def __eq__(self, other):\n if not isinstance(other, TeamStatsStat):\n return False\n\n return self.__dict__ == other.__dict__", "def is_equal(self, a, b):\n return a == b", "def __eq__(self, other):\r\n return self.label == other.label and self.positive_state == other.positive_state", "def __ne__(self, other):\n if not isinstance(other, ServiceStatus):\n return True\n\n return self.to_dict() != other.to_dict()", "def __eq__(self, other):\n if not isinstance(other, BeneficiariesQueryRegisterStatus):\n return False\n\n return self.__dict__ == other.__dict__", "def check_equal(self, v1, v2, screen_name):\n result = (v1 == v2)\n if(result):\n logging.info(\"success: check_equal: {}: {}, {}\".format(\n screen_name, v1, v2)\n )\n else:\n self.take_screen(\"check_equal_{}\".format(screen_name))\n logging.error(\"failed: check_equal: {}: {}, {}\".format(\n screen_name, v1, v2)\n )\n return result", "def __eq__(self, other):\n if not isinstance(other, ListAgentStatusRequestBody):\n return False\n\n return self.__dict__ == other.__dict__", "def cmp_status(self, cur_status, prev_status, prev_prev_status, field_name_tuple):\n ignore_keys = ['update_date']\n # consider status as new if last record was at least one day ago\n header_field_name, update_time_field_name, activity_list_field_name = field_name_tuple\n time_diff = dateutil.parser.parse(cur_status[header_field_name][update_time_field_name]) - dateutil.parser.parse(prev_prev_status[header_field_name][update_time_field_name])\n if time_diff >= timedelta(days=1):\n return False\n\n # if last record is more recent, consider status as new only if any non-ignored field is different\n cur_status = {k:v for k,v in cur_status[activity_list_field_name][0].items() if k not in ignore_keys}\n prev_status = {k:v for k,v in prev_status[activity_list_field_name][0].items() if k not in ignore_keys}\n return cur_status == prev_status", "def __eq__(self, other):\n if type(self) is not type(other):\n return NotImplemented\n \n # avatar\n if self.avatar != other.avatar:\n return False\n \n # boosts_since\n if self.boosts_since != other.boosts_since:\n return False\n \n # flags\n if self.flags != other.flags:\n return False\n \n # joined_at\n if self.joined_at != other.joined_at:\n return False\n \n # nick\n if self.nick != other.nick:\n return False\n \n # pending\n if self.pending != other.pending:\n return False\n \n # role_ids\n if self.role_ids != other.role_ids:\n return False\n \n # timed_out_until\n if self.timed_out_until != other.timed_out_until:\n return False\n \n return True", "def __eq__(self, other: Any) -> bool:\n if not isinstance(other, DetectionResult):\n return False\n\n return self.to_pb2().__eq__(other.to_pb2())", "def __eq__(left, right):\n if not is_FiniteStateMachine(right):\n raise TypeError('Only 
instances of FiniteStateMachine '\n 'can be compared.')\n if len(left._states_) != len(right._states_):\n return False\n colors_equal = True\n for state in left.iter_states():\n try:\n right_state = right.state(state.label())\n except LookupError:\n return False\n\n # we handle colors separately\n if not state.fully_equal(right_state, compare_color=False):\n return False\n if state.color != right_state.color:\n colors_equal = False\n\n left_transitions = state.transitions\n right_transitions = right.state(state).transitions\n if len(left_transitions) != len(right_transitions):\n return False\n for t in left_transitions:\n if t not in right_transitions:\n return False\n\n # handle colors\n if colors_equal:\n return True\n if left.is_monochromatic() and right.is_monochromatic():\n return True\n return False", "def _is_equal_same_type(self, other):\n # id\n self_id = self.id\n other_id = other.id\n if (self_id and other_id) and (self_id != other_id):\n return False\n \n # bot\n if self.bot != other.bot:\n return False\n \n # description\n if self.description != other.description:\n return False\n \n # icon_hash\n if self.icon_hash != other.icon_hash:\n return False\n \n # icon_type\n if self.icon_type != other.icon_type:\n return False\n \n # name\n if self.name != other.name:\n return False\n \n return True", "def __eq__(left, right):\n if not is_FSMTransition(right):\n raise TypeError('Only instances of FSMTransition ' \\\n 'can be compared.')\n return left.from_state == right.from_state \\\n and left.to_state == right.to_state \\\n and left.word_in == right.word_in \\\n and left.word_out == right.word_out", "def __ne__(self, other):\n if not isinstance(other, V1alpha1ApplicationStatus):\n return True\n\n return self.to_dict() != other.to_dict()" ]
[ "0.73409915", "0.71478164", "0.68326336", "0.68325627", "0.6828575", "0.6799611", "0.67651767", "0.6650099", "0.663792", "0.66026837", "0.6585347", "0.6560694", "0.6519736", "0.64620346", "0.6334076", "0.62699586", "0.6169474", "0.61421174", "0.6139374", "0.6112142", "0.60678226", "0.60598695", "0.60597366", "0.6044216", "0.6022671", "0.60156375", "0.5945098", "0.5942111", "0.59340495", "0.59291387" ]
0.81159604
0
Determines if one `Status` is less than another one.
def __lt__(self: "Status", other: "Status") -> bool: self_type = type(self) other_type = type(other) both_not_in_progress = not self.in_progress and not other.in_progress if both_not_in_progress and self_type is other_type: return False elif self_type is Failed: return True elif self_type is NotStarted and other_type in (InProgress, Succeeded): return True elif self_type is InProgress and other_type is InProgress: return self.progress < other.progress # type: ignore elif self_type is InProgress and other_type is Succeeded: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __lt__(self, other):\n status = self.get_status()\n Ostatus = other.get_status()\n \n if status == Ostatus:\n return self.get_nickname() < other.get_nickname()\n \n if status == \"online\":\n return True\n elif status == \"away\" and Ostatus != \"online\":\n return True\n elif status == \"busy\" and Ostatus not in [\"online\", \"away\"]:\n return True\n else:\n return False", "def less(value, other):\n return value > other", "def __lt__(self, other):\n return self.percentage > other.percentage", "def __lt__(self, other):\n return self.percentage > other.percentage", "def __lt__(self, other):\n return self.percentage > other.percentage", "def less_equal(value, other):\n return value >= other", "def __lt__(self, other):\n return self._value < other.value_in_unit(self.unit)", "def __lt__(self, other):\n return self <= other and not self >= other", "def __lt__(self, other):\n return self <= other and self != other", "def __lt__(self, other):\n return self <= other and self != other", "def __lt__(self, rhs):\n return self.balance < rhs.balance", "def lt(self, x, y):\n return self.le(x,y) and x != y", "def __lt__(self, other):\n return self._priority < other._priority", "def test_less_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::lt\"},\n )", "def __gt__(self, other):\n return self >= other and self != other", "def __gt__(self, other):\n return self >= other and self != other", "def __ge__(self: _TT, other: _TT) -> bool:\n if type(self) != type(other):\n raise TypeError(\"Types do not match\")\n return self.value >= other.value", "def compare_to(self, other) -> int:\n if self.id == other.id:\n return 0\n if self.status != other.status:\n return -1 if self.status < other.status else 1\n if self.last_played != other.last_played:\n return -1 if self.last_played < other.last_played else 1\n return -1 if self.id < other.id else 1", "def __lt__(self, other):\n return self.__le__(other) and self.__ne__(other)", "def statusCompare (x, y):\n xs = db.status.get(x, 'order')\n ys = db.status.get(y, 'order')\n c = float(xs) - float(ys)\n if c >= 0.0: \n return int(c)\n else:\n return -int(abs(c))", "def __gt__(self, other):\n\t\ttry:\n\t\t\treturn self.val > other.val\n\t\texcept:\n\t\t\treturn self.val > other", "def __gt__(self, other):\n return other < self", "def __gt__(self, other):\n return self._ordinals > other.ordinal()", "def __lt__(self, other):\n return self.priority < other.priority", "def __lt__(self, other):\n return self.priority < other.priority", "def __lt__(self, other):\n return self.priority < other.priority", "def _lt(self, node_a, node_b):\n node_a = self.__getitem__(node_a)\n node_b = self.__getitem__(node_b)\n if 'val' in node_a and 'val' in node_b:\n return node_a['val'] > node_b['val']\n else:\n return False", "def __le__(self: _TT, other: _TT) -> bool:\n if type(self) != type(other):\n raise TypeError(\"Types do not match\")\n return self.value <= other.value", "def __gt__(self, other):\n return self.to_seconds() > other.to_seconds()", "def __gt__(self, other):\n return self.to_seconds() > other.to_seconds()" ]
[ "0.6996155", "0.67470247", "0.6650936", "0.6650936", "0.6650936", "0.66351974", "0.66143465", "0.6600206", "0.6576387", "0.6576387", "0.65719974", "0.6560425", "0.6551801", "0.6539531", "0.6516321", "0.6516321", "0.6514021", "0.65093094", "0.6497051", "0.6491132", "0.6490221", "0.6489479", "0.6484056", "0.64791584", "0.64791584", "0.64791584", "0.64755034", "0.6464477", "0.6464287", "0.6464287" ]
0.748035
0
Determines if this `Status` is in progress or not. This is different from a comparison to an `InProgress` because said comparison would require both `Status`es to have the same `progress` value (if they are both indeed `InProgress`), while this method returns true for an `InProgress` with any progress value.
def in_progress(self: "Status") -> bool: return isinstance(self, InProgress)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self: \"Status\", other: \"Status\") -> bool: # type: ignore\n self_type = type(self)\n other_type = type(other)\n\n if self_type is InProgress and other_type is InProgress:\n return self.progress == other.progress # type: ignore\n else:\n return self_type == other_type", "def is_completed(self):\r\n return self.progress == 'Complete'", "def hasProgress(self) -> bool:\n ...", "def hasProgress(self) -> bool:\n ...", "def hasProgress(self) -> bool:\n ...", "def hasProgress(self) -> bool:\n ...", "def __lt__(self: \"Status\", other: \"Status\") -> bool:\n self_type = type(self)\n other_type = type(other)\n both_not_in_progress = not self.in_progress and not other.in_progress\n\n if both_not_in_progress and self_type is other_type:\n return False\n elif self_type is Failed:\n return True\n elif self_type is NotStarted and other_type in (InProgress, Succeeded):\n return True\n elif self_type is InProgress and other_type is InProgress:\n return self.progress < other.progress # type: ignore\n elif self_type is InProgress and other_type is Succeeded:\n return True\n else:\n return False", "def isStatus(self):\n return self.type == \"MPI_Status\"", "def is_complete(self) -> bool:\n return self.status in [\"SUCCESS\", \"ERROR\", \"CANCELLED\"]", "def __eq__(self, other):\n if not isinstance(other, PrecisEngineTaskStatusProgress):\n return False\n\n return self.__dict__ == other.__dict__", "def status(self):\n if self.num_steps >= self.timeout:\n return Status.TIMEOUT\n\n return Status.IN_PROGRESS", "def _check_completed(self):\n current_rung_df = self.sieve_board.loc[\n self.sieve_board['status'].isin(\n [StatusType.WAITTING, StatusType.RUNNING])\n ]\n if current_rung_df.empty:\n return True\n else:\n return False", "def check_status(self, status):\n #assert( isinstance(status, self.STATES) )\n # print self.current_state\n if isinstance(status, list):\n return np.any( [self.current_state == ss for ss in status] )\n elif isinstance(status, int):\n return self.current_state == status\n else:\n raise Exception(\"Unknown state type\")", "def is_pending(self):\n status = self.get_status()\n return status[\"status\"] == 3", "def finished(self):\n if len(self.progress) > 0:\n return self.progress[-1].status in [TestStatus.completed, TestStatus.canceled]\n return False", "def isValidationInProgress(self):\n validation = self.getLatestValidValidation()\n today = date.today()\n if validation and validation.getDownTo():\n validfrom = validation.getDownFrom().asdatetime().date()\n validto = validation.getDownTo().asdatetime().date()\n if validfrom <= today <= validto:\n return True\n return False", "def check_status(self):\n if not self.completed and not self.failed:\n if not self.is_job:\n self._remove_children_dependency()\n self.status = 'COMPLETED'\n self.completed = True\n else:\n completed = True\n failed = False\n for job_instance in self.instances:\n if job_instance.failed:\n failed = True\n break\n elif not job_instance.completed:\n completed = False\n\n if failed:\n self.status = 'FAILED'\n self.failed = True\n self.completed = False\n return False\n\n if completed:\n # The job node just finished, remove this dependency\n self.status = 'COMPLETED'\n self._remove_children_dependency()\n self.completed = True\n\n return not self.failed", "def is_pending(self):\n if self.status == \"PENDING\":\n return True\n else:\n return False", "def status(self):\n assert(self.__complete)\n return self.__status", "def is_running(self):\n # do we have a job ID to work with?\n if self.jobid == None:\n 
return False\n else:\n q_status = self.queue.get_status(self.jobid)\n\n if q_status == self.queue.state[\"active\"]:\n self.meta[\"status\"] = 'PENDING'\n return True\n else:\n return False", "def is_completed(self):\n if not self.is_started:\n return False\n elif self._tasks_in_progress:\n return False\n return True", "def active(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s/state\" % (\n self.sessionid, self.name))\n if resp.body == b'1':\n return True\n return False", "def is_complete(self):\n status = self.get_status()\n return (status is self._STATUS_COMPLETED)", "def status(self):\n with self.__lock:\n assert(self.__complete)\n return self.__status", "def is_current(self):\n return self.CURRENT == self.status", "def isInProgressPath(packagePath):\n return packagePath.endswith(PackageUtil.inProgressExt)", "def _has_state(self, submissionId, states):\n status = self.status(submissionId)\n self._log.debug('Current state of job {}: {}'.format(submissionId, status))\n return status in states", "def __repr__(self: \"InProgress\") -> str:\n return f\"InProgress({self.progress})\"", "def is_active(self):\n return not self.pending", "def __equals__(self, to_compare):\n try:\n # Try to compare - this likely fails when it is compared to a non\n # RcStatus object\n return \\\n (self.was_available_once == to_compare.was_available_once) and \\\n (self.is_available == to_compare.is_available) and \\\n (self.signal_strength_percent == to_compare.signal_strength_percent)\n\n except AttributeError:\n return False" ]
[ "0.6593676", "0.6345074", "0.6344414", "0.6344414", "0.6344414", "0.6344414", "0.6319323", "0.62467813", "0.5986506", "0.5957657", "0.5918512", "0.58473325", "0.5845322", "0.5829266", "0.58214915", "0.58043075", "0.5737248", "0.5733746", "0.5689461", "0.5687277", "0.5666091", "0.56322616", "0.5607027", "0.56027234", "0.5598899", "0.5598599", "0.5538476", "0.5526809", "0.55167335", "0.5516439" ]
0.78564006
0
Returns the string representation of this `NotStarted` `Status`.
def __repr__(self: "NotStarted") -> str: return "NotStarted()"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_status_as_string(self):\n if self.downloaded == 0:\n return \"[Starting... ]\"\n return \"[%s, %s, %s]\" % self.get_status()", "def get_status_string(self, instance):\n return instance.get_status_string()", "def __str__(self):\n\n return self.status_text", "def __str__(self):\n sb = ''\n sb += '\\nInterfaceStatus [ ' + self.interface_name + ' ]\\n'\n sb += '\\tLinkState : ' + str(self.InterfaceState.enumval(self.link)) + '\\n'\n sb += '\\tLineProtoState : ' + str(self.InterfaceState.enumval(self.lineproto)) + '\\n'\n return sb", "def __str__(self):\n struct_repr = \", \".join([\n \"was_available_once: \" + str(self.was_available_once),\n \"is_available: \" + str(self.is_available),\n \"signal_strength_percent: \" + str(self.signal_strength_percent)\n ])\n\n return f\"RcStatus: [{struct_repr}]\"", "def __str__(self):\n return self.status_text", "def __str__(self):\n return self.status_text", "def __str__(self):\n return self.status_text", "def to_string(self):\n return \"{name} {status} ({tags}) {mtime}\".format(\n name=self.name, status=self.status, tags=\",\".join(self.tags),\n mtime=self.mtime\n )", "def __str__(self):\n struct_repr = \", \".join([\n \"active: \" + str(self.active),\n \"actuator: \" + str(self.actuator)\n ])\n\n return f\"ActuatorOutputStatus: [{struct_repr}]\"", "def __repr__(self: \"InProgress\") -> str:\n return f\"InProgress({self.progress})\"", "def _get_status(self):\n return u'%s' % (self.get_status_display())", "def __repr__(self):\r\n return 'SubtaskStatus<%r>' % (self.to_dict(),)", "def __repr__(self):\n return '{}({}{}{})'.format(\n self.__class__.__qualname__,\n 'running ' if self.enabled else 'stopped ',\n 'from {}, '.format(self.start_time) if self.start_time is not None else '',\n 'timeout={}'.format(self.timeout)\n )", "def __str__(self):\n # return status text.\n return self.status_text", "def status_message(self) -> str:\n return pulumi.get(self, \"status_message\")", "def __repr__(self) -> str:\n return f\"<TestStatus {self.test_id}: {self.status}>\"", "def status_message(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status_message\")", "def __str__(self):\n if self.done:\n state = 'completed'\n id1 = str(self.id)\n elif self.written:\n state = 'written'\n id1 = str(self.id)\n else:\n state = 'not written'\n id1 = 'NA'\n return \"Job: {name} ID: {id}, state: {state}\".format(\n name=self.name, id=id1, state=state)", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def get_status_line(self):\n return f'{str(self.protocol)} {int(self.status)} {self.status.phrase}'", "def __str__(self):\n struct_repr = \", \".join([\n \"type: \" + str(self.type),\n \"text: \" + str(self.text)\n ])\n\n return f\"StatusText: [{struct_repr}]\"", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")" ]
[ "0.77509296", "0.72206014", "0.6962406", "0.69475144", "0.69259286", "0.692106", "0.692106", "0.692106", "0.6907531", "0.69062734", "0.68426645", "0.68326247", "0.6829954", "0.68250954", "0.68156147", "0.6750187", "0.6736905", "0.6733693", "0.6731404", "0.6716653", "0.6716653", "0.6716653", "0.6716653", "0.6716653", "0.6716653", "0.67045325", "0.6702554", "0.66910344", "0.66910344", "0.66910344" ]
0.78173107
0
Creates a new `InProgress` status. If the given `progress` is not within `0` to `100`, it is clamped to that range.
def __init__(self: "InProgress", progress: int = 0) -> None: self.progress = max(0, min(progress, 100))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def progress(self, progress: float):\n if progress is None:\n raise ValueError(\"Invalid value for `progress`, must not be `None`\") # noqa: E501\n \n self._progress = progress", "def push_progress(self, status, object_id, progress):\n pass", "def update_progress(self, progress, message):\n assert 0 <= progress < 100\n self._progress = int(progress)\n self.logger.info(\n \"status: STARTED %d%% %s\", self._progress, message or \"\"\n )\n self._callback('on_progress_update', self._progress, message)\n return self.update_response(\n self.encoder.encode_started(self._progress, message)\n )", "def progress(self, progress: float):\n if progress is None:\n raise ValueError(\"Invalid value for `progress`, must not be `None`\") # noqa: E501\n\n self._progress = progress", "def __repr__(self: \"InProgress\") -> str:\n return f\"InProgress({self.progress})\"", "def set_started(self, progress=None, message=None):\n if progress is not None:\n assert 0 <= progress < 100\n self._progress = int(progress)\n self.logger.info(\n \"status: STARTED %d%% %s\", self._progress, message or \"\"\n )\n self._callback('on_started', self._progress, message)\n return self.update_response(\n self.encoder.encode_started(self._progress, message)\n )", "def progress(self, progress):\n\n self._progress = progress", "def progress(self, progress):\n\n self._progress = progress", "def set_progress(self, progress: float):", "def in_progress(self: \"Status\") -> bool:\n return isinstance(self, InProgress)", "def set_paused(self, progress=None):\n if progress is not None:\n assert 0 <= progress < 100\n self._progress = int(progress)\n self.logger.info(\"status: PAUSED %d%%\", self._progress)\n self._callback('on_paused', self._progress)\n return self.update_response(self.encoder.encode_paused(self._progress))", "def mark_as_in_progress(self):\n\n in_prog = self.todo_scroll_cell.get()\n if in_prog is None:\n self.master.show_error_popup('No Item', 'There is no item in the list to mark as in progress')\n return\n self.todo_scroll_cell.remove_selected_item()\n self.in_progress_scroll_cell.add_item(in_prog)", "def start_progress_bar(self, of=None):\n if of is None:\n if self.n_batches_last is None:\n of = 0\n else:\n of = self.n_batches_last\n if self.progress is None:\n self.progress = Progress(\n TextColumn(\"Epoch {task.fields[epoch]}\"),\n SpinnerColumn(),\n TextColumn(\"Batch {task.fields[batch]:3} / {task.fields[of]:3}\"),\n BarColumn(),\n TimeElapsedColumn(table_column=Column(header=\"Elapsed\")),\n TextColumn(\"/\"),\n TimeRemainingColumn(table_column=Column(header=\"Remaining\")),\n TextColumn(\"|\"),\n TextColumn(\"[red]Loss: {task.fields[batch_loss]:1.3f}[red],\"),\n TextColumn(\"[red][b]Avg.: {task.fields[running_mean]:1.3f}[/b][/red]\"),\n auto_refresh=False,\n transient=True,\n )\n fields = {\n \"epoch\": 1,\n \"running_mean\": np.nan,\n \"batch_loss\": np.nan,\n \"of\": of,\n \"batch\": 0,\n }\n self.task = self.progress.add_task(\n f\"Epoch {self.i_epoch + 1}\", total=of, **fields\n )\n self.progress.update(\n self.task, completed=0, total=of, epoch=self.i_epoch + 1, mode=\"training\"\n )", "def set_progress(self, progress: int) -> None:\n self.update(progress % len(self.frames)) # prevent IndexError if progress >= len(frames)", "def _setProgress(self, progress):\n # print \"Progress set %.2f --------------------------------\" % progress\n\n if progress > 100.0:\n progress = 100.0\n\n self._progress = progress\n self._progressChangedNotifier.notify(self)", "def current_progress(self, current_progress):\n\n 
self._current_progress = current_progress", "def _prepare(self, progress: BaseProgressMonitor):\n self._started = True\n self._total_bytes = None\n self._downloaded_bytes = 0\n self._progress = progress\n if self.show_progress_bar:\n self._tqdm = tqdm(total=None, unit=\"bytes\", dynamic_ncols=True, file=sys.stdout)\n else:\n self._tqdm = None", "def _convert_progress(self, progress):\n is_done = int(progress['exercises_success']) == int(progress['exercises_total'])\n score = 0.0 if (not is_done and self._should_be_done) \\\n else round(self.weight * float(progress['points_reached']) / float(progress['points_total']), 1)\n\n return {\n 'is_done': is_done,\n 'score': score\n }", "def _create_progress_entry(self, student=None, task_state=PROGRESS):\r\n progress = {'attempted': 3,\r\n 'succeeded': 2,\r\n 'total': 5,\r\n 'action_name': 'rescored',\r\n }\r\n return self._create_entry(task_state=task_state, task_output=progress, student=student)", "def run(self, item=None, progress=None):\n # these lines can be removed\n assert isinstance(progress, dl.Progress)\n progress.update(status='inProgress', progress=0)", "def take_snapshot(self, progress: Union[int, float],\n now: Optional[Union[int, float]] = None):\n if not self._progresses or progress - self._progresses[-1] > .001:\n # we only record the time and corresponding progress if the\n # progress has been advanced by 0.1%\n if now is None:\n now = time.time()\n self._progresses.append(progress)\n self._times.append(now)", "def create_and_start_job(self, token, status, desc, progress, est_complete, context=None):\n return self._client.call_method(\n 'UserAndJobState.create_and_start_job',\n [token, status, desc, progress, est_complete], self._service_ver, context)", "def setProgress(self, progress):\n\t\tself.config.PROGRESS = [progress]", "def progress_bar(progress):\n bar_length = 50\n block = int(round(bar_length * progress))\n text = 'Progress: [{0}] {1}'.format('#' * block + '-' * (bar_length - block),\n progress * 100)\n # Print progress after removing the previous progress\n sys.stdout.write('\\r' + text)\n sys.stdout.flush()", "def _setProgress(self):\n\n self.progress = (self.iteration, self.iterationCount)", "def update_task_progress():\r\n current_time = datetime.now(UTC)\r\n progress = {\r\n 'action_name': action_name,\r\n 'attempted': num_attempted,\r\n 'succeeded': num_succeeded,\r\n 'failed': num_failed,\r\n 'total': num_total,\r\n 'duration_ms': int((current_time - start_time).total_seconds() * 1000),\r\n 'step': curr_step,\r\n }\r\n _get_current_task().update_state(state=PROGRESS, meta=progress)\r\n\r\n return progress", "def progress(count, total, status=''):\n bar_len = 60\n filled_len = int(round(bar_len * count / float(total)))\n\n percents = round(100.0 * count / float(total), 1)\n bar = '=' * filled_len + '-' * (bar_len - filled_len)\n \n if count >= total: \n sys.stdout.write('[%s] %s%s ...%s%s\\r' % (bar, percents, '%', status, '\\n'))\n sys.stdout.flush()\n else:\n sys.stdout.write('[%s] %s%s ...%s\\r' % (bar, percents, '%', status))\n sys.stdout.flush()", "def progress_status(self):\n from tqdm import tqdm\n pbar_a = tqdm(total=len(self.jobs), position=0)\n pbar_a.set_description('Submitted jobs ...')\n pbar_b = tqdm(total=self.n_submit_script, position=1)\n pbar_b.set_description('Running jobs ...')\n pbar_c = tqdm(total=self.n_submit_script, position=2)\n pbar_c.set_description('Completed jobs ...')\n pbar_d = tqdm(total=self.n_submit_script, position=3)\n pbar_d.set_description('Failed? 
jobs ...')\n while self.n_completed < self.n_submit_script:\n pbar_a.n = self.n_submitted\n pbar_b.n = self.n_running\n pbar_c.n = self.n_completed\n pbar_d.n = self.n_failed + self.n_unknown\n pbar_a.refresh()\n pbar_b.refresh()\n pbar_c.refresh()\n pbar_d.refresh()\n sleep(5)\n self.update_status()", "def set_progress(self, progress: Dict[str, str], is_add: bool):\n if is_add:\n self.progress.append(progress)\n else:\n self.progress[-1] = progress", "async def status(self, ctx, project_name: str) -> discord.Message:\n if not ctx.projects.find_project(project_name):\n await ctx.send(\"This project doesn't exist.\")\n return\n progress_bar = ctx.projects.project_progress_bar(project_name)\n if not progress_bar:\n progress_bar = self.empty_progress_bar\n await ctx.send(progress_bar)" ]
[ "0.63577783", "0.6356068", "0.6276104", "0.625529", "0.6140855", "0.5771439", "0.5700525", "0.5700525", "0.5654446", "0.56454754", "0.5644193", "0.56381303", "0.56319237", "0.5591338", "0.55786866", "0.555182", "0.5487537", "0.54759014", "0.5439351", "0.54385674", "0.5426031", "0.5422642", "0.541002", "0.5381402", "0.5356902", "0.53198856", "0.52874035", "0.52622545", "0.5259578", "0.5234087" ]
0.7329669
0
Returns the string representation of this `InProgress` `Status`.
def __repr__(self: "InProgress") -> str: return f"InProgress({self.progress})"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_status_as_string(self):\n if self.downloaded == 0:\n return \"[Starting... ]\"\n return \"[%s, %s, %s]\" % self.get_status()", "def __str__(self):\n sb = ''\n sb += '\\nInterfaceStatus [ ' + self.interface_name + ' ]\\n'\n sb += '\\tLinkState : ' + str(self.InterfaceState.enumval(self.link)) + '\\n'\n sb += '\\tLineProtoState : ' + str(self.InterfaceState.enumval(self.lineproto)) + '\\n'\n return sb", "def __str__(self):\n if self.done:\n state = 'completed'\n id1 = str(self.id)\n elif self.written:\n state = 'written'\n id1 = str(self.id)\n else:\n state = 'not written'\n id1 = 'NA'\n return \"Job: {name} ID: {id}, state: {state}\".format(\n name=self.name, id=id1, state=state)", "def get_status_string(self, instance):\n return instance.get_status_string()", "def __repr__(self):\n if self.status == STATUS_CLOSED:\n status = \"CLOSED\"\n elif self.status == STATUS_OPEN:\n status = \"OPEN\"\n else:\n status = \"UNKNOWN\"\n return f\"<{self.__class__.__name__} [{self.key}] status={status} failures={self.failures} checkin={self.checkin}, jitter={self._last_jitter}>\"", "def __repr__(self):\r\n return 'SubtaskStatus<%r>' % (self.to_dict(),)", "def __str__(self):\n struct_repr = \", \".join([\n \"was_available_once: \" + str(self.was_available_once),\n \"is_available: \" + str(self.is_available),\n \"signal_strength_percent: \" + str(self.signal_strength_percent)\n ])\n\n return f\"RcStatus: [{struct_repr}]\"", "def __str__(self):\n struct_repr = \", \".join([\n \"active: \" + str(self.active),\n \"actuator: \" + str(self.actuator)\n ])\n\n return f\"ActuatorOutputStatus: [{struct_repr}]\"", "def __str__(self):\n\n return self.status_text", "def __str__(self):\n return self.status_text", "def __str__(self):\n return self.status_text", "def __str__(self):\n return self.status_text", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def status(self) -> str:\n return self._status", "def __str__(self):\n # return status text.\n return self.status_text", "def to_string(self):\n return \"{name} {status} ({tags}) {mtime}\".format(\n name=self.name, status=self.status, tags=\",\".join(self.tags),\n mtime=self.mtime\n )", "def _get_status(self):\n return u'%s' % (self.get_status_display())", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")" ]
[ "0.7800333", "0.7232849", "0.7208602", "0.70328575", "0.680751", "0.68061364", "0.6797123", "0.6787951", "0.6785813", "0.6751872", "0.6751872", "0.6751872", "0.6749815", "0.6749815", "0.6749815", "0.6749815", "0.6749815", "0.6749815", "0.67199665", "0.6706302", "0.670496", "0.66850007", "0.66850007", "0.66850007", "0.66850007", "0.66850007", "0.66850007", "0.66850007", "0.66850007", "0.66850007" ]
0.8420762
0
Returns the string representation of this `Succeeded` `Status`.
def __repr__(self: "Succeeded") -> str: return "Succeeded()"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def success_message(cls):\n return f'Successfully performed \"{cls.display_name.lower()}\"'", "def get_status_as_string(self):\n if self.downloaded == 0:\n return \"[Starting... ]\"\n return \"[%s, %s, %s]\" % self.get_status()", "def status_message(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status_message\")", "def get_success_message(cls, args, results):\n return cls.success_message", "def __str__(self) -> str:\n return f\"Scrape <Success: {str(not self.failed)}>\"", "def __str__(self):\n\n return self.status_text", "def __str__(self):\n # return status text.\n return self.status_text", "def __repr__(self) -> str:\n return f\"Scrape <Success: {str(not self.failed)}>\"", "def get_status_string(self, instance):\n return instance.get_status_string()", "def __str__(self):\n return self.status_text", "def __str__(self):\n return self.status_text", "def __str__(self):\n return self.status_text", "def __repr__(self):\n return '<AntagObjective #%r | Name %r | Key %r| Type #%r | Succeeded %r>' % (self.id, self.mindname,\n self.mindkey, self.objective_type,\n self.objective_succeeded)", "def status_message(self) -> str:\n return pulumi.get(self, \"status_message\")", "def detailed_status_message(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status_message\")", "def detailed_status_message(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status_message\")", "def __str__(self):\n struct_repr = \", \".join([\n \"active: \" + str(self.active),\n \"actuator: \" + str(self.actuator)\n ])\n\n return f\"ActuatorOutputStatus: [{struct_repr}]\"", "def status(self):\n return str(self.verificationRun.status())", "def provisioning_status_message(self) -> str:\n return pulumi.get(self, \"provisioning_status_message\")", "def provisioning_status_message(self) -> str:\n return pulumi.get(self, \"provisioning_status_message\")", "def provisioning_status_message(self) -> str:\n return pulumi.get(self, \"provisioning_status_message\")", "def provisioning_status_message(self) -> str:\n return pulumi.get(self, \"provisioning_status_message\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")" ]
[ "0.6830981", "0.67693746", "0.6734282", "0.66141194", "0.65933067", "0.6506182", "0.6493938", "0.64815587", "0.6469697", "0.64494514", "0.64494514", "0.64494514", "0.6377693", "0.6371968", "0.6348982", "0.6348982", "0.63412035", "0.634012", "0.63318545", "0.63318545", "0.63318545", "0.63318545", "0.6323982", "0.6323982", "0.6323982", "0.6323982", "0.6323982", "0.6323982", "0.6323982", "0.6323982" ]
0.8132395
0
Disconnect the Google account of the currently logged-in user.
def gdisconnect(): # Only disconnect the connected user. access_token = login_session.get('access_token') if access_token is None: response = make_response( json.dumps('Current user not connected.'), 401) response.headers['Content-Type'] = 'application/json' return response url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token h = httplib2.Http() result = h.request(url, 'GET')[0] if result['status'] == '200': response = make_response(json.dumps('Successfully disconnected.'), 200) response.headers['Content-Type'] = 'application/json' return response else: response = make_response( json.dumps('Failed to revoke token for given user.'), 400) response.headers['Content-Type'] = 'application/json' return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gdisconnect():\r\n # only disconnect a connected user\r\n credentials = login_session.get('credentials')\r\n if credentials is None:\r\n response = make_response(json.dumps(\r\n 'Current user not connected.'), 401)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n # Execute HTTP GET request to revoke current token\r\n access_token = credentials.access_token\r\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\r\n h = httplib2.Http()\r\n result = h.request(url, 'GET')[0]\r\n if result['status'] == '200':\r\n del login_session['access_token']\r\n del login_session['gplus_id']\r\n del login_session['username']\r\n del login_session['email']\r\n del login_session['picture']\r\n response = make_response(json.dumps('Successfully disconnected.'), 200)\r\n response.headers['Content-Type'] = 'application/json'\r\n return response\r\n else:\r\n response = make_response(json.dumps(\r\n 'Failed to revoke token for given user.', 400))\r\n response.headers['Content-Type'] = 'application/json'\r\n return response", "def gdisconnect():\n\n # Only disconnect a connected user.\n access_token = login_session.get('access_token')\n if access_token is None:\n response = make_response(json.dumps('Current user not connected.'),\n 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n if login_session['provider'] == 'facebook':\n fbdisconnect()\n del login_session['facebook_id']\n\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n if result['status'] == '200':\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n response = make_response(\n json.dumps('Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response", "def gdisconnect():\n\taccess_token = session.get('access_token')\n\tuname = session.get('username')\n\n\tif not access_token:\n\t\tresponse = make_response(\n\t\t\tjson.dumps('Current user not connected.'), 401)\n\t\tresponse.headers['Content-Type'] = 'application/json'\n\t\treturn response\n\n\turl = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n\th = httplib2.Http()\n\tresult = h.request(url, 'GET')[0]\n\n\tif result['status'] != '200':\n\t\t# For whatever reason, the given token was invalid.\n\t\tresponse = make_response(\n\t\t\tjson.dumps('Failed to revoke token for given user.'), 400)\n\t\tresponse.headers['Content-Type'] = 'application/json'\n\t\t[session.pop(k, None) for k, _ in session.items()]\n\t\treturn response\n\t#Clearing out session data\n\t[session.pop(k, None) for k, _ in session.items()]\n\treturn redirect(request.referrer)", "def disconnect():\n\n # Only disconnect a connected user.\n credentials = session.get('credentials')\n if credentials is None:\n response = make_response(json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Execute HTTP GET request to revoke current token.\n access_token = credentials.access_token\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n\n if result['status'] == '200':\n # Reset the user's session.\n del session['credentials']\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] 
= 'application/json'\n return response\n else:\n # For whatever reason, the given token was invalid.\n response = make_response(\n json.dumps('Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response", "def gdisconnect():\n\n # Only disconnect a connected user.\n access_token = login_session.get('access_token')\n if access_token is None:\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n\n if result['status'] == '200':\n # Reset the user's sesson.\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n\n return redirect(url_for('showSports'))\n else:\n # For whatever reason, the given token was invalid.\n response = make_response(\n json.dumps('Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response", "def gdisconnect():\r\n access_token = login_session.get('access_token')\r\n if access_token is None:\r\n print('Access Token is None')\r\n flash('Current user not connected.')\r\n return redirect(url_for('showCategories'))\r\n # print('Got access token for the user: {}'.\r\n # format(login_session['username']))\r\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' %\\\r\n login_session['access_token']\r\n h = httplib2.Http()\r\n result = h.request(url, 'GET')[0]\r\n # print('Access token revoke result:{}'.format(result))\r\n if result['status'] == '200':\r\n del login_session['access_token']\r\n del login_session['gplus_id']\r\n del login_session['username']\r\n del login_session['email']\r\n del login_session['picture']\r\n flash('Successfully logged out')\r\n return redirect(url_for('showCategories'))\r\n else:\r\n flash('Failed to revoke token for given user.')\r\n return redirect(url_for('showCategories'))", "def gdisconnect():\n\n access_token = login_session.get('access_token')\n if access_token is None:\n response = make_response(json.dumps(\n 'Current user not connected.'\n ), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s'\\\n % login_session['access_token']\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n # del login_session['username']\n # del login_session['email']\n # del login_session['picture']\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n response = make_response(json.dumps(\n 'Failed to revoke token for given user.', 400\n )\n )\n response.headers['Content-Type'] = 'application/json'\n return response", "def gdisconnect():\n # Only disconnect a connected user.\n access_token = login_session.get('access_token')\n if access_token is None:\n print 'Access Token is None'\n response = make_response(json.dumps('Current user not \\\n connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n print 'In gdisconnect access token is %s', access_token\n print 'User name is: '\n print login_session['username']\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' \\\n % 
login_session['access_token']\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n print 'result is '\n print result\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n response = make_response(json.dumps\n ('Failed to revoke token for given user.',\n 400))\n response.headers['Content-Type'] = 'application/json'\n return response", "def logout_with_google():\n access_token = login_session.get('access_token')\n if access_token is None:\n print 'Access Token is None'\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n print 'In gdisconnect access token is %s', access_token\n print 'User name is: '\n print login_session['username']\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' \\\n % login_session['access_token']\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n print 'result is '\n print result\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['user_id']\n del login_session['email']\n del login_session['picture']\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return redirect(url_for('login'))\n else:\n response = make_response(\n json.dumps('Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response", "def disconnect(self, login_session):\n\n # Only disconnect a connected user.\n credentials = login_session.get('credentials')\n\n if 'gplus_id' in login_session:\n del login_session['gplus_id']\n if 'credentials' in login_session:\n del login_session['credentials']\n\n if credentials is None:\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n access_token = credentials.access_token\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n if result['status'] != '200':\n # For whatever reason, the given token was invalid.\n response = make_response(\n json.dumps('Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n return \"You have been logged out.\"", "def gdisconnect():\n\n if login_session['access_token'] is None:\n response = make_response(json.dumps('User does not login'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n url = (\n \"https://accounts.google.com/\"\n \"o/oauth2/revoke?token=%s\") % login_session['access_token']\n h = httplib2.Http()\n response = h.request(url, 'GET')\n result = response[0]\n content = response[1]\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n return redirect(url_for('home'))\n else:\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n response = make_response(json.dumps('Fail to logout', 400))\n response.headers['Content-Type'] = 'application/json'\n return 
response", "def gdisconnect():\n access_token = login_session['access_token']\n print 'In gdisconnect access token is %s', access_token\n print 'User name is: '\n print login_session['username']\n if access_token is None:\n print 'Access Token is None'\n response = make_response(\n json.dumps('Current user not connected.'),\n 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' \\\n % login_session['access_token']\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n print 'result is '\n print result\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n flash('Successfully disconnected.', 'alert-success')\n return redirect(url_for('showStyles'))\n else:\n response = make_response(json.dumps(\n 'Failed to revoke token for given user.',\n 400))\n response.headers['Content-Type'] = 'application/json'\n return response", "def disconnect():\n if 'provider' in login_session:\n if login_session['provider'] == 'google':\n gdisconnect()\n del login_session['gplus_id']\n del login_session['access_token']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n del login_session['user_id']\n del login_session['provider']\n flash(\"You have successfully been logged out.\")\n return redirect(url_for('showCategories'))\n else:\n flash(\"You were not logged in\")\n return redirect(url_for('showCategories'))", "def gdisconnect():\n # Verify that the nonce received is valid.\n if request.args.get('state') != login_session['state']:\n response = make_response(\n json.dumps({'error': 'Invalid state parameter'}), 401\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n # Only disconnect a connected user.\n access_token = login_session.get('access_token')\n if access_token is None:\n response = make_response(\n json.dumps({'error': 'Current user not connected.'}), 404\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Execute HTTP GET request to revoke current token.\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n\n if result['status'] == '200':\n # Reset the user's session\n del login_session['provider']\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n # Our response will include a new nonce.\n state = get_new_state()\n login_session['state'] = state\n response = make_response(\n json.dumps({'success': 'User disconnected', 'nonce': login_session['state']}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n elif result['status'] == '400':\n del login_session['provider']\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n # Our response will include a new nonce.\n state = get_new_state()\n login_session['state'] = state\n response = make_response(\n json.dumps({'success': 'User was already disconnected', 'nonce': login_session['state']}), 200\n )\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n response = make_response(\n json.dumps(\"Error: \"+result['status']), 401\n )\n response.headers['Content-Type'] = 
'application/json'\n return response", "def gdisconnect():\n try:\n access_token = login_session['credentials']\n except KeyError:\n flash('Failed to get access token')\n return redirect(url_for('home'))\n print(\"User's name was {}.\".format(login_session['name']))\n if access_token is None:\n print('Access Token is None')\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n del login_session['credentials']\n del login_session['user_id']\n del login_session['name']\n del login_session['email']\n print('Successfully logged out.')\n flash('Successfully logged out.')\n return redirect(url_for('home'))", "def disconnect():\n\n if 'provider' in login_session:\n if login_session['provider'] == 'google':\n gdisconnect()\n del login_session['gplus_id']\n if login_session['provider'] == 'facebook':\n fbdisconnect()\n del login_session['facebook_id']\n del login_session['access_token']\n del login_session['username']\n del login_session['email']\n del login_session['picture']\n del login_session['user_id']\n del login_session['provider']\n flash(\"You have successfully been logged out.\")\n return redirect(url_for('showCategories'))\n else:\n flash(\"You were not logged in\")\n return redirect(url_for('showCategories'))", "def disconnect():\n\n\tglob.tokens.deleteToken(glob.tokens.getTokenFromUserID(999))", "def _disconnect(remote, *args, **kwargs):\n if not current_user.is_authenticated:\n return current_app.login_manager.unauthorized()\n\n account = RemoteAccount.get(\n user_id=current_user.get_id(), client_id=remote.consumer_key\n )\n if account:\n external_id = account.extra_data.get(\"external_id\")\n\n if external_id:\n oauth_unlink_external_id(dict(id=external_id, method=\"cern_openid\"))\n\n with db.session.begin_nested():\n account.delete()\n\n disconnect_identity(g.identity)", "def fbdisconnect():\n\n facebook_id = login_session['facebook_id']\n # The access token must me included to successfully logout\n access_token = login_session['access_token']\n # Only disconnect a connected user.\n if access_token is None:\n response = make_response(json.dumps('Current user not connected.'),\n 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n if login_session['provider'] == 'google':\n gdisconnect()\n del login_session['gplus_id']\n\n url = 'https://graph.facebook.com/%s/permissions?access_token=%s' % (\n facebook_id, access_token)\n h = httplib2.Http()\n result = h.request(url, 'DELETE')[1]\n return \"you have been logged out\"", "def disconnect(self):\n\n self.connection.logout()", "def disconnect(self):\n r = requests.post(f'{self.SERVER_ADDR}/api/disconnect', headers={'Authorization': 'Token ' + self.token})\n r.raise_for_status()", "def ws_disconnect(message):\n language = message.channel_session['knocker']\n grLangUser = Group('knocker-{0}-{1}'.format(language, \n message.user.id))\n grLangUser.discard(message.reply_channel)", "def log_out_user(self):\n flask_login.logout_user()", "def logout(self):\n self._client.clear_credentials()", "async def disconnect(self):\n await self._client.disconnect()", "async def disconnect(self):\n if not self._session:\n await self._create_session()\n await self._session.post(self._network.SERVER_ADDR + '/api/disconnect')", "def disconnect_user(room: PublicChatRoom, user) -> bool:\n return room.disconnect_user(user)", "def logOut(self):\n self.client.logout()", "async def logout(self):\n try:\n user = 
self.request.session.get(\"user\")\n chat = self.request.session.get(\"chat\")\n active_sockets = self.request.app.active_sockets\n active_sockets.get_chat(chat).del_user(user)\n\n self.request.session.pop(\"user\")\n self.request.user = None\n self.request.chat = None\n\n return {\n \"Type\": \"account\",\n \"Command\": \"logout\",\n \"Status\": \"success\"\n }\n except KeyError:\n return {\"Type\": \"account\", \"Command\": \"logout\", \"Status\": \"error\"}", "def logout(self):\n self.session.disconnect()" ]
[ "0.7913717", "0.7829547", "0.7714489", "0.7667014", "0.76639616", "0.76466906", "0.75815266", "0.7579596", "0.75685894", "0.74723876", "0.7466163", "0.7343223", "0.72729737", "0.7220345", "0.6879733", "0.65540224", "0.65148944", "0.6474869", "0.64209366", "0.628367", "0.61803967", "0.6126193", "0.6100739", "0.6098037", "0.6097515", "0.6050643", "0.60253775", "0.59723157", "0.5955368", "0.5954961" ]
0.7893552
1
Shortcut to get the flash scope from the view context.
def _flash(self): return self.response.context[CONTEXT_VAR]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current():\n return getattr(_request_store, 'context', None)", "def get_scope(self, ):\n return self.attrs.get(self.AttributeNames.SCOPE, None)", "def scope(self):\n return self._scope", "def scope(self):\n return self._scope", "def get_scope_read(self, view: str) -> str:\n return getattr(self, f\"get_scope_read_{view}\", self.get_scope_read_default)()", "def current_context():\n return _current.get()", "def get_flash():\n key = settings.FLASH_COOKIE_NAME\n data = getattr(local, 'flash_message', None)\n if data is None:\n if key in local.request.cookies:\n data = local.request.cookies[key]\n local.flash_message = None\n if data:\n return pickle.loads(b64decode(data))\n return u''", "def __getitem__(self, name):\n return self.current_scope[name]", "def getScope(self):\n return self.graph.get(\"__scope\", '')", "def scope(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"scope\")", "def _getScopeName(self):\r\n return self.name # + \"@b%d\" % self.blscope_ids[-1]\r", "def scope(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"scope\")", "def scope(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"scope\")", "def get_scope(self):\n raise NotImplementedError()", "def scope(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"scope\")", "def flash_message(self, request):\n return FlashMessagesElement()", "def flash_message(self, request):\n return FlashMessagesElement()", "def get_scope(self, namec: syntax.Construct):\n if namec.construct != syntax.VAR_NAME:\n raise Exception(f\"Invalid arg {namec.construct} {namec.args[0]}\")\n name = namec.args[0]\n if self.sscope is not None and name in self.sscope.dict:\n return self.sscope\n if self.mscope is not None and name in self.mscope.dict:\n return self.mscope\n # at this point, it cannot be a method\n if self.lscope is not None and name in self.lscope.dict:\n return self.lscope\n if self.gscope is not None and name in self.gscope.dict:\n return self.gscope\n return None", "def scope(self):\n return 'global' if self.parent is None else 'local'", "def window(self):\n\tif getattr(self.android.settings, 'LV_AVOID_FOCUSED_COMMAND',\n\t\t\t\tself.android.internal.device.google_experience):\n\t\treturn window.previous(self)\n\n def fallback_window_command():\n try:\n w=self.android.internal.transport.view_server_query( 'FOCUSED\\n' )[0]\n except:\n w=\"\"\n return w\n\n try:\n # can't use GET_FOCUS command in secure builds, so fall back to FOCUSED command\n if self.android.device.is_secure_build():\n raise Exception()\n\t w=self.android.internal.transport.view_server_query('GET_FOCUS\\n')[0].split()[1]\n except:\n w = fallback_window_command()\n\n\tself.android.log.verbose(android.ui.TAG, \"Current window: '%s'\" % w)\n\treturn w", "def _get_extension_scope(cls, extension) -> ExtensionScope:\n return getattr(extension, '_scope', ExtensionScope.UNKNOWN)", "def dpp_scope_active():\n return _dpp_scope_active", "def scope(self) -> Optional[str]:\n return pulumi.get(self, \"scope\")", "def scope(self) -> Optional[str]:\n return pulumi.get(self, \"scope\")", "def scope(self) -> Optional[str]:\n return pulumi.get(self, \"scope\")", "def scope(self) -> Optional[str]:\n return pulumi.get(self, \"scope\")", "def scope(self) -> Optional[str]:\n return pulumi.get(self, \"scope\")", "def scope(self) -> str:\n return pulumi.get(self, \"scope\")", "def local_context(self):\n return self._local_context", "def scope_context(self):\n if self._course_id:\n return 'COURSE'\n elif self._partner_id:\n return 'PARTNER'\n elif 
self._group_id:\n return 'GROUP'\n else:\n return None" ]
[ "0.5630328", "0.55336535", "0.54515636", "0.54515636", "0.535993", "0.5351943", "0.53239036", "0.5320285", "0.5200118", "0.5198629", "0.5180058", "0.5162203", "0.5162203", "0.5159747", "0.51287633", "0.509993", "0.509993", "0.50981593", "0.509058", "0.50878936", "0.50770384", "0.5064166", "0.5053079", "0.5053079", "0.5053079", "0.5053079", "0.5053079", "0.50514215", "0.50408936", "0.50328374" ]
0.70502704
0
Flash scope shouldn't be stored in the session if there's no flash.
def test_session_state_for_unused_flash(self): self.response = self.client.get(reverse(views.render_template)) self.assertFalse(_SESSION_KEY in self.client.session)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_session_state_for_used_flash(self):\n self.response = self.client.get(reverse(views.set_flash_var))\n self.response = self.client.get(reverse(views.render_template))\n self.assertTrue(_SESSION_KEY in self.client.session)\n\n # Flash scope should be removed from the session\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse(_SESSION_KEY in self.client.session)", "def _flash(self):\n return self.response.context[CONTEXT_VAR]", "def get_flash():\n key = settings.FLASH_COOKIE_NAME\n data = getattr(local, 'flash_message', None)\n if data is None:\n if key in local.request.cookies:\n data = local.request.cookies[key]\n local.flash_message = None\n if data:\n return pickle.loads(b64decode(data))\n return u''", "def test_keep_lifecycle(self):\n self.response = self.client.get(reverse(views.set_flash_var))\n self.assertEqual('Message', self._flash()['message'])\n\n self.response = self.client.get(reverse(views.keep_var))\n self.assertEqual('Message', self._flash()['message'])\n\n # Flash value won't be removed now because it was explicitely kept\n self.response = self.client.get(reverse(views.render_template))\n self.assertEqual('Message', self._flash()['message'])\n\n # Flash value will be removed when this request hits the app\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse('message' in self._flash())", "def flash(self):\n\t\traise NotImplementedError", "def test_default_lifecycle(self):\n self.response = self.client.get(reverse(views.set_flash_var))\n self.assertEqual('Message', self._flash()['message'])\n\n self.response = self.client.get(reverse(views.render_template))\n self.assertEqual('Message', self._flash()['message'])\n\n # Flash value will be removed when this request hits the app\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse('message' in self._flash())", "def test_discard_lifecycle(self):\n self.response = self.client.get(reverse(views.discard_var))\n self.assertEqual('Message', self._flash()['message'])\n\n # Flash value will be removed when this request hits the app\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse('message' in self._flash())", "def set_flash(data):\n local.flash_message = b64encode(pickle.dumps(data))", "def setFlashCookie(self, flashCookie):\n pass", "def setFlashCookieByName(self, name):\n pass", "def test_multiple_variables_lifecycle(self):\n self.response = self.client.get(reverse(views.set_flash_var))\n self.assertEqual('Message', self._flash()['message'])\n\n self.response = self.client.get(reverse(views.set_another_flash_var))\n self.assertEqual('Message', self._flash()['message'])\n self.assertEqual('Another message', self._flash()['anotherMessage'])\n\n # 'message' will be removed when this request hits the app\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse('message' in self._flash())\n self.assertEqual('Another message', self._flash()['anotherMessage'])\n\n # 'anotherMessage' will be removed when this request hits the app\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse('message' in self._flash())\n self.assertFalse('anotherMessage' in self._flash())", "def before_request():\n g.user = None\n if 'user' in session:\n g.user = session['user']", "def flash_message(self, request):\n return FlashMessagesElement()", "def flash_message(self, request):\n return FlashMessagesElement()", "def check_session(wrapped):\n\n def check(request, 
*arg, **kwargs):\n collection = request.GET.get('collection', None)\n journal = request.GET.get('journal', None)\n document = request.GET.get('document', None)\n range_start = request.GET.get('range_start', None)\n under_development = request.GET.get('under_development', None)\n range_end = request.GET.get('range_end', None)\n py_range = request.GET.get('py_range', None)\n sa_scope = sorted([v for k, v in request.GET.items() if k == 'sa_scope'])\n la_scope = sorted([v for k, v in request.GET.items() if k == 'la_scope'])\n locale = request.GET.get('_LOCALE_', request.locale_name)\n\n if journal == 'clean' and 'journal' in request.session:\n del request.session['journal']\n document = None\n journal = None\n if 'document' in request.session:\n del request.session['document']\n document = None\n\n if document == 'clean' and 'document' in request.session:\n del request.session['document']\n document = None\n\n session_under_development = request.session.get('under_development', None)\n session_collection = request.session.get('collection', None)\n session_journal = request.session.get('journal', None)\n session_document = request.session.get('document', None)\n session_range_start = request.session.get('range_start', None)\n session_range_end = request.session.get('range_end', None)\n session_py_range = request.session.get('py_range', None)\n session_sa_scope = sorted(request.session.get('sa_scope', []))\n session_la_scope = sorted(request.session.get('la_scope', []))\n session_locale = request.session.get('_LOCALE_', None)\n\n if collection and collection != session_collection:\n request.session['collection'] = collection\n if 'journal' in request.session:\n del request.session['journal']\n elif not session_collection:\n request.session['collection'] = 'scl'\n\n if under_development and under_development != session_under_development:\n request.session['under_development'] = under_development\n\n if journal and journal != session_journal:\n request.session['journal'] = journal\n\n if document and document != session_document:\n request.session['document'] = document\n request.session['journal'] = document[1:10]\n\n if range_start and range_start != session_range_start:\n request.session['range_start'] = range_start\n\n if range_end and range_end != session_range_end:\n request.session['range_end'] = range_end\n\n if py_range and py_range != session_py_range:\n request.session['py_range'] = py_range\n\n if sa_scope and sorted(sa_scope) != sorted(session_sa_scope):\n request.session['sa_scope'] = sorted(sa_scope)\n\n if la_scope and sorted(la_scope) != sorted(session_la_scope):\n request.session['la_scope'] = sorted(la_scope)\n\n if locale and locale != session_locale:\n request.session['_LOCALE_'] = locale\n\n return wrapped(request, *arg, **kwargs)\n\n check.__doc__ = wrapped.__doc__\n\n return check", "def test_replace_flash_scope(self):\n request = lambda: self.client.get(reverse(views.replace_flash))\n self.assertRaises(TypeError, request)", "def before_request():\n\n session.permanent = True\n app.permanent_session_lifetime = timedelta(minutes=5)\n session.modified = True\n global_buffer.user = current_user", "def current_session(self, session):\n if self._session is None:\n self._session = session\n else:\n if session is None or self._session.session_id != session.session_id:\n self._session.active = False\n self._session = session", "def __after__(self, action, environ):\n websession['user'] = c.user\n websession['messages'] = c.messages\n websession.save()", "def setFlash(self,txt):\n 
self.flash = txt", "def before_request():\n g.user = None\n if 'user_id' in session:\n g.user = User.query.get(session['user_id'])", "def test_now_lifecycle(self):\n self.response = self.client.get(reverse(views.set_now_var))\n self.assertEqual('Message', self._flash()['message'])\n\n # Flash value will be removed when this request hits the app\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse('message' in self._flash())", "def save_cat():\n\n #if cat is already there then flash cat already saved to favs\n #else commit c", "def restore_session_to_last_valid_values():\n\n session['space_id'] = session.get(\n 'last_valid_space_id',\n environ.get('CONTENTFUL_SPACE_ID')\n )\n session['delivery_token'] = session.get(\n 'last_valid_delivery_token',\n environ.get('CONTENTFUL_DELIVERY_TOKEN')\n )\n session['preview_token'] = session.get(\n 'last_valid_preview_token',\n environ.get('CONTENTFUL_PREVIEW_TOKEN')\n )", "def _load_local():\n local = session.get('local')\n\n if local is None:\n g.local = None\n else:\n g.local = local", "def test_get_session_missing(self):\n study_id = self.storage.create_study(sample_study_spec())\n self.assertIsNone(self.storage.get_session(study_id, 'missing'))\n\n session = sample_session(study_id)\n self.storage.create_session(session)\n self.assertIsNone(self.storage.get_session('missing', session.id))", "def isScopeActive(self, name):", "def on_before_render(self, request):\n \n cookie_name = request.get_action_parameter(\"session_cookie_name\",\n \"gyro-session-uuid\")\n uuid = request.get_cookie(cookie_name)\n \n session = None\n \n if uuid:\n session = self.storage.get_session(uuid)\n else:\n uuid = generate_uuid()\n \n request.session_uuid = uuid\n \n if session is not None:\n request.session = session\n else:\n def set_session(r):\n if not r:\n r = {}\n \n request.session = r\n \n return plugin.run_hook(\"on_new_session\", request).add_callback(\n set_session)", "def on_after_render(self, request):\n self.write_session_cookie(request)\n return self.storage.set_session(request.session_uuid, request.session)", "def allowForSessionCookies(self):\n if not self.__loaded:\n self.__load()\n \n return self.__exceptionsAllowForSession" ]
[ "0.71783197", "0.6143732", "0.6092475", "0.6072942", "0.5673149", "0.5556315", "0.5392896", "0.5371768", "0.5344594", "0.5288238", "0.5274396", "0.52371675", "0.5140583", "0.5140583", "0.51296693", "0.5043332", "0.500041", "0.49937376", "0.49333295", "0.4916233", "0.4902425", "0.48898542", "0.4856482", "0.48484755", "0.48341474", "0.4760712", "0.4749516", "0.47428188", "0.47352716", "0.47274148" ]
0.6303686
1
Flash scope should be removed from the session if there's no flash.
def test_session_state_for_used_flash(self): self.response = self.client.get(reverse(views.set_flash_var)) self.response = self.client.get(reverse(views.render_template)) self.assertTrue(_SESSION_KEY in self.client.session) # Flash scope should be removed from the session self.response = self.client.get(reverse(views.render_template)) self.assertFalse(_SESSION_KEY in self.client.session)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_keep_lifecycle(self):\n self.response = self.client.get(reverse(views.set_flash_var))\n self.assertEqual('Message', self._flash()['message'])\n\n self.response = self.client.get(reverse(views.keep_var))\n self.assertEqual('Message', self._flash()['message'])\n\n # Flash value won't be removed now because it was explicitely kept\n self.response = self.client.get(reverse(views.render_template))\n self.assertEqual('Message', self._flash()['message'])\n\n # Flash value will be removed when this request hits the app\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse('message' in self._flash())", "def test_session_state_for_unused_flash(self):\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse(_SESSION_KEY in self.client.session)", "def test_discard_lifecycle(self):\n self.response = self.client.get(reverse(views.discard_var))\n self.assertEqual('Message', self._flash()['message'])\n\n # Flash value will be removed when this request hits the app\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse('message' in self._flash())", "def logout():\n if session:\n session.pop('user')\n flash('You were successfully logged out')\n return redirect('/')\n else:\n return redirect('/')", "def dropsession():\n session.pop('user', None)\n return redirect(url_for('login'))", "def leaveScope(self, name):", "def get_flash():\n key = settings.FLASH_COOKIE_NAME\n data = getattr(local, 'flash_message', None)\n if data is None:\n if key in local.request.cookies:\n data = local.request.cookies[key]\n local.flash_message = None\n if data:\n return pickle.loads(b64decode(data))\n return u''", "def _flash(self):\n return self.response.context[CONTEXT_VAR]", "def logout():\n session.pop('logged_in', None)\n session.pop('fname', None)\n session.pop('patron', None)\n flash('You were logged out')\n return redirect('/')", "def user_logout():\n\n session.pop('logged_in', None)\n flash('You are now logged out')\n\n return redirect('/')", "def logout():\n session.pop('user_id', None)\n flash('Your were logged out')\n return redirect(url_for('login'))", "def deleteScope():\n global currScope\n scopeStack.pop()\n currScope = scopeStack[-1]", "def remove_session(self, session):\n if session in self.sessions:\n self.sessions.remove(session)\n else:\n print(\"Sorry, you can't remove that session.\")", "def logout():\n\n session.clear()\n flash('See you next time!')\n return redirect('/')", "def logout():\n if \"username\" in session:\n session.pop(\"username\", None)\n flash(\"You have been logged out.\")\n return redirect(url_for(\"index\"))", "def logout():\n if 'access_token' in login_session:\n del login_session['access_token']\n del login_session['email']\n flash(\"you are now logout\")\n return redirect(url_for('catelog'))", "def logout():\n \n del session[\"logged_in\"]\n flash(\"See you later! 
;)\")\n return redirect('/')", "def logout():\n\n if session.get('user_id'):\n del session['user_id']\n flash('You are now logged out.')\n return redirect('/login')", "def logout_user():\n\n print \"Logging out.\"\n session.clear()\n flash(\"You are now logged out.\")\n\n return redirect('/')", "def remove(self, session: \"pwncat.manager.Session\"):", "def logout():\n session.pop('logged_in', None)\n flash('You were logged out', 'success')\n return redirect(url_for('show_entries'))", "def remove_session(self) -> None:\n pass", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def flash(self):\n\t\traise NotImplementedError", "def sessionid_unload(self):\n req = self._cw\n is_success = False\n form_session_id = req.form.get(\"sessionid\", \"\")\n sessionid = req.session.sessionid\n if (req._headers_in.getRawHeaders(\n 'x-requested-with') == ['XMLHttpRequest']):\n if form_session_id == sessionid:\n if sessionid in req.session.repo._expired_sessionids:\n self._cw.session.repo._expired_sessionids[sessionid] = False\n is_success = True\n return {\"unloaded\": repr(is_success)}", "def logout():\n session.pop('logged_in', None)\n flash('You were logged out')\n return redirect(url_for('get_devices'))", "def logout():\n try:\n if session[\"user\"]:\n flash(\"You have logged out successfully\", category=\"success\")\n session.pop(\"user\")\n except KeyError:\n flash(\"You are not logged in\", category=\"error\")\n try:\n if session[\"admin\"]:\n session.pop(\"admin\")\n except KeyError:\n # user is not an admin\n pass\n finally:\n return redirect(url_for(\"get_terms\"))", "def logout():\n session.pop('username', None)\n session.pop('user_id', None)\n flash (\"You are logged out\")\n return redirect(url_for('index'))", "def logout(self):\n if 'user' in session:\n del session['user']\n session.save()\n return render('logout.html')" ]
[ "0.60079455", "0.59813166", "0.5645087", "0.5635631", "0.55518097", "0.5528965", "0.5521264", "0.548529", "0.5476425", "0.5468991", "0.5454142", "0.5405645", "0.5397564", "0.5381994", "0.53716916", "0.5347582", "0.5346075", "0.5346033", "0.5322238", "0.53199506", "0.5278886", "0.5258934", "0.52543354", "0.52543354", "0.5228524", "0.5204031", "0.5198175", "0.5192648", "0.5190192", "0.5185166" ]
0.6868987
0
Lowlevel processing of prosodic strings.
def _process_prosody(sonority): assert 9 not in sonority[1:-1] assert sonority[0] == sonority[-1] == 9 # create the output values psequence = [] first = True # stores whether first syllable is currently being processed for i in range(1, len(sonority) - 1): # get a segment with context a, b, c = sonority[i - 1], sonority[i], sonority[i + 1] if b == 7: # a vowel if first: psequence.append('X') first = False elif c == 9: # last psequence.append('Z') else: psequence.append('Y') elif b == 8: # a tone psequence.append('T') elif a >= b >= c or c == 8: # descending if c == 9: # word final position psequence.append('Z' if b == 7 else 'N') # vowel or consonant else: if first: first = False psequence.append('A') else: psequence.append('L') elif b < c or a > b <= c or a < b <= c: # ascending # check for syllable first if a == 9: psequence.append('A') elif a >= b: if c == 9: psequence.append('N') else: if psequence[-1] != 'A': psequence = psequence[:-1] + [psequence[-1].replace('L', 'M')] + ['B'] else: psequence.append('C') else: psequence.append('C') elif a < b > c: # consonant peak if first: psequence.append('X') first = False else: psequence.append('Y') else: raise ValueError( "Conversion to prosodic string failed due to a condition which was not " "defined in the convertion, for details compare the numerical string " "{0} with the profile string {1}".format(sonority, psequence)) return psequence
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocess(string):\r\n # string = [strQ2B(ch) for ch in string.strip()]\r\n # return ''.join(string)\r\n return string", "def test_process_string():\n decode = StringProcessor()\n assert decode.process_string(\"ab\") == \"\"\n decode.output = \"\"\n\n assert decode.process_string(\"ab*\") == \"b\"\n decode.output = \"\"\n\n assert decode.process_string(\"ab^\") == \"ba\"\n decode.output = \"\"\n\n assert decode.process_string(\"^\") == \"\"", "def process(self, s: str) -> str:\n raise NotImplementedError(\"must be implemented by subclasses\")", "def test_text_preprocesing(self):\n input_text = \"ABCabc123!@#\"\n processed_text = (\n preprocessing.preprocess_text(input_text, remove_punctuation=True, to_lower=True)\n .numpy()\n .decode(\"utf-8\")\n )\n\n # Converting to lower case\n assert processed_text.lower() == processed_text\n\n # Removing punctuation\n assert (\n processed_text.translate(str.maketrans(\"\", \"\", string.punctuation)) == processed_text\n )\n\n # Overall\n assert processed_text.replace(\"\\x00\", \"\") == input_text.lower().translate(\n str.maketrans(\"\", \"\", string.punctuation)\n )\n assert processed_text.replace(\"\\x00\", \"\") == \"abcabc123\"", "def prepost_string(config):\n PREPROCESS = \"\"\n POSTPROCESS = \"\"\n\n preprocess_suffices = {\"sortmerna\": \"\", \"trimming\": \"\", \"phixfilt\": \"\",\n \"fastuniq\": \"\"}\n\n # SortMeRNA rRNA filtering\n if config[\"preprocessing\"][\"sortmerna\"]:\n PREPROCESS += \".sortmerna\"\n preprocess_suffices[\"trimming\"] = \".sortmerna\"\n\n # Trimming\n if config[\"preprocessing\"][\"trimmomatic\"]:\n PREPROCESS += \".trimmomatic\"\n preprocess_suffices[\"phixfilt\"] = preprocess_suffices[\n \"trimming\"] + \".trimmomatic\"\n elif config[\"preprocessing\"][\"cutadapt\"]:\n PREPROCESS += \".cutadapt\"\n preprocess_suffices[\"phixfilt\"] = preprocess_suffices[\n \"trimming\"] + \".cutadapt\"\n else:\n preprocess_suffices[\"phixfilt\"] = preprocess_suffices[\"trimming\"]\n\n # Filtering\n if config[\"preprocessing\"][\"phix_filter\"]:\n preprocess_suffices[\"fastuniq\"] = preprocess_suffices[\n \"phixfilt\"] + \".phixfilt\"\n PREPROCESS += \".phixfilt\"\n else:\n preprocess_suffices[\"fastuniq\"] = preprocess_suffices[\"phixfilt\"]\n\n # Deduplication\n if config[\"preprocessing\"][\"fastuniq\"]:\n PREPROCESS += \".fastuniq\"\n\n if PREPROCESS != \"\":\n config[\"run_preprocessing\"] = True\n else:\n config[\"run_preprocessing\"] = False\n\n if config[\"remove_duplicates\"]:\n POSTPROCESS += \".markdup\"\n\n return PREPROCESS, POSTPROCESS, preprocess_suffices, config", "def preprocess(self, text):\r\n return text", "def process_string(string: str) -> str:\n\n return string if string else Presenter.DEFAULT", "def small_preprocess(data):\r\n \r\n # Remove new line characters\r\n data = [re.sub('\\s+', ' ', sent) for sent in data]\r\n # Remove distracting single quotes\r\n data = [re.sub(\"\\'\", \"\", sent) for sent in data]\r\n\r\n return data", "def parse_string(self, data):\n pass", "def pre_process_string_data(item: dict):\r\n try:\r\n result_item = {key: item[key] for key in KEYS + ['_id']}\r\n for prop in result_item:\r\n if type(result_item[prop]) is str and prop != '_id':\r\n result_item[prop] = re.sub(' +', ' ', item[prop])\r\n result_item[prop] = re.sub('\\n', ' ', item[prop])\r\n result_item[prop] = item[prop].strip().strip('\"').strip(\"'\").lower().strip()\r\n return result_item\r\n except KeyError:\r\n logging.warning(\"Wrong formed entity with id %s\", item['_id'])\r\n return None", "def 
preprocess(sent):\n return sent", "def get_processed_string(self, input_string):\n if input_string[:6] == '[sic]\"':\n return input_string[6: -1]\n else:\n return input_string.format(**self)", "def _sanitize_string(self, string):\n # get the type of a unicode string\n unicode_type = type(Pyasciigraph._u('t'))\n input_type = type(string)\n if input_type is str:\n if sys.version < '3':\n info = unicode(string)\n else:\n info = string\n elif input_type is unicode_type:\n info = string\n elif input_type is int or input_type is float:\n if sys.version < '3':\n info = unicode(string)\n else:\n info = str(string)\n else:\n info = str(string)\n return info", "def PPString(inp, mol, i, n, outFile):\n alchemy = re.compile('^\\w*2\\w*_\\d\\d\\d$')\n ppstr = re.sub('\\*', '', mol.string[i])\n if ppstr:\n PPStr = ppstr\n pp_root, pp_ext = os.path.split(ppstr)\n else:\n if inp.setting['pp_type'] == 'geodecker':\n element = mol.type_list[i].title()\n if 'd_shell' in inp.setting:\n if type(inp.setting['d_shell']) is not list:\n inp.setting['d_shell'] = [inp.setting['d_shell']]\n if qtk.n2ve(mol.type_list[i].title()) > 10:\n shell = '-d'\n elif 'd_shell' in inp.setting \\\n and element in inp.setting['d_shell']:\n shell = '-d'\n else:\n element = qtk.element[mol.type_list[i].title()]\n if element.group < 3 and mol.Z[i] > 1:\n if mol.Z[i] != 3:\n shell = '-sp'\n else:\n shell = '-s'\n else:\n shell = ''\n pp_xc_dict = {\n 'lda': 'pz',\n 'pbe0': 'pbe',\n 'b3lyp': 'blyp',\n }\n pp_xc = inp.setting['pp_theory'].lower()\n if pp_xc in pp_xc_dict:\n pp_xc = pp_xc_dict[pp_xc]\n PPStr = ''.join([c for c in mol.type_list[i] if not c.isdigit()])\\\n + '.' + pp_xc + shell + '-hgh.UPF'\n elif inp.setting['pp_type'] == 'cpmd':\n PPStr = PPName(inp, mol, i, n)\n xc = inp.setting['pp_theory'].lower()\n if not mol.string[i]:\n if inp.setting['pp_type'] == 'geodecker':\n PPCheck(pp_xc, mol.type_list[i].title(), PPStr)\n elif inp.setting['pp_type'] == 'cpmd':\n saved_pp = PPCheck_cpmd(pp_xc, mol.type_list[i].title(), PPStr)\n new_pp1 = saved_pp + '.UPF'\n conv_pp = sp.Popen(\"%s %s\" % \\\n (qtk.setting.espresso_cpmd2upf_exe, saved_pp),\n shell=True)\n conv_pp.wait()\n new_pp1_file = os.path.split(new_pp1)[1]\n new_pp1_trg = os.path.join(qtk.setting.espresso_pp, new_pp1_file)\n if not os.path.exists(new_pp1_trg):\n shutil.copy(new_pp1, qtk.setting.espresso_pp)\n PPStr = PPStr + '.UPF'\n\n elif alchemy.match(mol.string[i]):\n cpmd_pp = alchemyPP(xc, PPStr)\n new_pp1 = cpmd_pp + '.UPF'\n if not os.path.exists(new_pp1):\n qtk.report('espresso', \"rewrite Goedecker's PP to UPF\")\n conv_pp = sp.Popen(\"%s %s\" % \\\n (qtk.setting.espresso_cpmd2upf_exe, cpmd_pp),\n shell=True)\n conv_pp.wait()\n if conv_pp.returncode != 0:\n # dirty fix for espresso alchemy conversion routine\n qtk.warning('conversion failed..., trying path end points')\n root, _ = os.path.splitext(PPStr)\n element_str = re.sub('_.*', '', root)\n element1 = re.sub('2.*', '', element_str)\n element2 = re.sub('.*2', '', element_str)\n fraction = float(re.sub('.*_', '', root))/100\n if fraction == 0.0:\n strpp = element1 + \"_q\" + str(qtk.n2ve(element1)) +\\\n \"_\" + xc + '.psp'\n elif fraction == 1.0:\n strpp = element2 + \"_q\" + str(qtk.n2ve(element2)) +\\\n \"_\" + xc + '.psp'\n else:\n qtk.exit(\"PP conversion failed for intermediate lambda\")\n strpp = os.path.join(qtk.setting.cpmd_pp, strpp)\n conv_pp = sp.Popen(\"%s %s\" % \\\n (qtk.setting.espresso_cpmd2upf_exe, strpp),\n shell=True)\n conv_pp.wait()\n os.rename(strpp + '.UPF', new_pp1)\n new_pp1_file = 
os.path.split(new_pp1)[1]\n new_pp1_trg = os.path.join(qtk.setting.espresso_pp, new_pp1_file)\n if not os.path.exists(new_pp1_trg):\n shutil.copy(new_pp1, qtk.setting.espresso_pp)\n PPStr = PPStr + '.UPF'\n\n return PPStr", "def _pinyin(self, rest):\n # Fix if sentence contains some english '.tr yacin太牛了'\n rest = filter(lambda x: not self.isascii(x), rest.decode('utf8'))\n def reduce_reading((char, readings)):\n \"\"\"If a character has multiple cjklib readings, use the fine-tuning\n dict from pinyin toolkit and CEDICT as a backup.\"\"\"\n if len(readings) == 1:\n return readings[0]\n else:\n try:\n return self.pinyin_toolkit_lookup[char]\n except KeyError:\n return self._dict_reading_lookup(char)\n\n readings = [self.char_lookup.getReadingForCharacter(x, 'Pinyin') for x in rest]\n res = u' '.join(map(reduce_reading, zip(rest, readings)))\n return res.encode('utf8')", "def _process_strings(line,\n lang_nlp,\n get_lemmas,\n get_pos,\n remove_stopwords,\n replace_stopwords,\n get_maps):\n\n # strip, replace special tokens\n orig_line = line\n line = line.strip()\n line = re.sub(r'&apos;', '\\'', line.strip())\n line = re.sub(r'&quot;', '\\\"', line.strip())\n # Tokenize etc.\n line_nlp = lang_nlp(line)\n spacy_tokens = [elem.text for elem in line_nlp]\n spacy_tokens_lower = [elem.text.lower() for elem in line_nlp]\n spacy_lemmas = None\n spacy_pos = None\n if get_lemmas:\n spacy_lemmas = list()\n for elem in line_nlp:\n if elem.lemma_ == '-PRON-' or elem.lemma_.isdigit():\n spacy_lemmas.append(elem.lower_)\n else:\n spacy_lemmas.append(elem.lemma_.lower().strip())\n if get_pos:\n spacy_pos = [elem.pos_ for elem in line_nlp]\n\n # Generate a mapping between whitespace tokens and SpaCy tokens\n ws_tokens = orig_line.strip().split()\n ws_tokens_lower = orig_line.strip().lower().split()\n ws_to_spacy_map = dict()\n spacy_to_ws_map = dict()\n if get_maps:\n ws_loc = 0\n ws_tok = ws_tokens[ws_loc]\n\n for spacy_loc, spacy_tok in enumerate(spacy_tokens):\n while True:\n # Map whitespace tokens to be identical to spacy tokens\n ws_tok = re.sub(r'&apos;', '\\'', ws_tok)\n ws_tok = re.sub(r'&quot;', '\\\"', ws_tok)\n\n if spacy_tok == ws_tok or spacy_tok in ws_tok:\n # Terminate\n if ws_loc >= len(ws_tokens):\n break\n\n # Extend maps\n if not ws_to_spacy_map.get(ws_loc, None):\n ws_to_spacy_map[ws_loc] = list()\n ws_to_spacy_map[ws_loc].append(spacy_loc)\n if not spacy_to_ws_map.get(spacy_loc, None):\n spacy_to_ws_map[spacy_loc] = list()\n spacy_to_ws_map[spacy_loc].append(ws_loc)\n\n # Move pointer\n if spacy_tok == ws_tok:\n ws_loc += 1\n if ws_loc < len(ws_tokens):\n ws_tok = ws_tokens[ws_loc]\n else:\n ws_tok = ws_tok[len(spacy_tok):]\n break\n else:\n ws_loc += 1\n\n # Assert full coverage of whitespace and SpaCy token sequences by the mapping\n ws_covered = sorted(list(ws_to_spacy_map.keys()))\n spacy_covered = sorted(list(set(list([val for val_list in ws_to_spacy_map.values() for val in val_list]))))\n assert ws_covered == [n for n in range(len(ws_tokens))], \\\n 'WS-SpaCy mapping does not cover all whitespace tokens: {}; number of tokens: {}'\\\n .format(ws_covered, len(ws_tokens))\n assert spacy_covered == [n for n in range(len(spacy_tokens))], \\\n 'WS-SpaCy mapping does not cover all SpaCy tokens: {}; number of tokens: {}' \\\n .format(spacy_covered, len(spacy_tokens))\n\n if remove_stopwords:\n # Filter out stopwords\n nsw_spacy_tokens_lower = list()\n nsw_spacy_lemmas = list()\n for tok_id, tok in enumerate(spacy_tokens_lower):\n if tok not in STOP_WORDS:\n 
nsw_spacy_tokens_lower.append(tok)\n if get_lemmas:\n nsw_spacy_lemmas.append(spacy_lemmas[tok_id])\n else:\n if replace_stopwords:\n nsw_spacy_tokens_lower.append('<STPWRD>')\n if get_lemmas:\n nsw_spacy_lemmas.append('<STPWRD>')\n\n spacy_tokens_lower = nsw_spacy_tokens_lower\n if get_lemmas:\n spacy_lemmas = nsw_spacy_lemmas\n\n return line_nlp, spacy_tokens_lower, spacy_lemmas, spacy_pos, ws_tokens, ws_tokens_lower, ws_to_spacy_map, \\\n spacy_to_ws_map", "def precious(*patterns: Any) -> Any: # type: ignore\n strings: List[str] = []\n for pattern in each_string(*patterns):\n if not isinstance(pattern, AnnotatedStr):\n pattern = AnnotatedStr(pattern)\n pattern.precious = True\n strings.append(pattern)\n if len(patterns) == 1 and isinstance(patterns[0], str):\n assert len(strings) == 1\n return strings[0]\n return strings", "def parseString(self, s):\n pass", "def __format_input_translator(str_to_process):\n return re.sub(r'\\([^)]*\\)', '', str_to_process).replace(' ', '').split('/')", "def value(self, p_str, p_str_1=None): # real signature unknown; restored from __doc__ with multiple overloads\n return \"\"", "def bpe_postprocess(string) -> str:\n return string.replace(\"@@ \", \"\")", "def preprocess_text(text: str) -> Tuple[List[str], Dict]:\n raise NotImplementedError", "def first_part_pid(self,text,pid):\n\n len_max=4\n key_list=pid.keys()\n while 1:\n num=min(len_max,len(text))\n if len_max==0:\n sys.exit('error pid dico not complete or invalid input :'+str([text[:min(3,len(text))]])+'\\\n \\n Complete proc_info.py')\n \n if text[:num].lower() in key_list:\n tag=text[:num].lower()\n text=text[num:]\n return text, pid[tag]\n else:\n len_max+=-1", "def provableInPeano(inString):\n proofString = \"\"\n while True:\n if isPeanoProof(proofString, inString)==\"yes\":\n return \"yes\"\n proofString = utils.nextASCII(proofString)", "def process(self, string, parser):\n first_letter = string[0].lower()\n # Set state depending on is first char is a vowel or consonant.\n if (first_letter in VOWELS):\n parser.state = Vowel()\n elif (first_letter in CONSONANTS):\n parser.state = Consonant()\n else:\n # Strip non alpha-characters\n string = string[1:]\n\n # Return string for for processing.\n return string", "def beautify(self, string):\n\n\t\tif not string:\n\t\t\treturn string\n\n\t\t# string may differ because of escaped characters\n\t\tstring, phrases = self.parse(string)\n\n\t\tif not phrases:\n\t\t\treturn string\n\n\t\tif not self.positional and not self.always:\n\t\t\traise errors.ArgumentError(\"Found phrases, but no styles \"\n\t\t\t\t\t\t\t\t\t \"were supplied!\")\n\n\t\treturn self.stringify(string, phrases)", "def cleaning(string, EOS=False):\n\n # before cleaning up, first identify end of the sentences (EOS)\n if EOS:\n pLu = '[{}]'.format(\"\".join([chr(i) for i in range(sys.maxunicode) if chr(i).isupper()]))\n EOS = re.compile(r'([a-z]+|[ş|ı])(\\. 
)((' + pLu + '[a-z]?)|([0-9]+))')\n string = EOS.sub(r'\\1#\\3', string)\n\n # period at the end of the sentences are being replaced with hastag (#)\n string = string.lower()\n mapping = {}\n mapping['99_807'] = 231\n mapping['105_770'] = 105\n mapping['117_770'] = 117\n mapping['105_775'] = 105\n mapping['117_776'] = 252\n mapping['115_807'] = 351\n mapping['103_774'] = 287\n mapping['97_770'] = 97\n mapping['111_776'] = 246\n mapping['97_785'] = 97\n Alist = {97, 99, 103, 105, 111, 115, 117}\n solv_prob = []\n flag = False\n for i, c in enumerate(string):\n if flag:\n flag = False\n continue # pass this character\n if not ord(c) in Alist:\n solv_prob.append(c) # no need to check this character\n else:\n if i == len(string) - 1:\n continue\n cn = string[i + 1] # next character\n key = '{}_{}'.format(ord(c), ord(cn)) # creating string with their ordinal\n if key in mapping.keys(): # cheking if this is to be mapped\n solv_prob.append(chr(mapping[key])) # append the mapped character to the list\n flag = True # raising flag to pass next character\n continue\n else:\n solv_prob.append(c)\n\n data = ''.join(solv_prob)\n data = data.replace('iğdır', 'ığdır')\n data = data.replace('irak', 'ırak')\n # Data= [d if len(d) > 0 else '#' for d in data.splitlines()] # removing empty lines\n return data", "def input_parser(input_string: str) -> str: \n if is_int(input_string):\n return input_string\n #he is int, give back plz.\n else:\n try:\n modified_input: str = input_string.strip()\n\n evaluatable_pairs: str = regex_splitter(modified_input)\n\n while not (is_int(evaluatable_pairs)):\n evaluatable_pairs = regex_splitter(evaluatable_pairs)\n\n return (evaluatable_pairs)\n\n except:\n raise Exception(\"Invalid Input\")", "def polite_string(a_string):\n if is_py3() and hasattr(a_string, 'decode'):\n try:\n return a_string.decode('utf-8')\n except UnicodeDecodeError:\n return a_string\n\n return a_string", "def test_probabilistic_parsers():" ]
[ "0.63097847", "0.6143286", "0.5945406", "0.58660376", "0.5793575", "0.5683354", "0.5629431", "0.5610261", "0.5553382", "0.5538807", "0.5524956", "0.55078727", "0.54349685", "0.54175425", "0.5411978", "0.5410777", "0.5368676", "0.5366849", "0.536479", "0.53452194", "0.53289926", "0.5302493", "0.5300831", "0.52888846", "0.5265524", "0.52582896", "0.5255919", "0.5247426", "0.523986", "0.52372503" ]
0.62622064
1
Convert a sequence in supposed IPA to the B(road)IPA of CLTS. Notes The mapping is not guaranteed to work as well as the more elaborate mapping with `pyclts`.
def bipa(sequence): return [_token2clts(segment)[0] for segment in sequence]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def translate_sequence(rna_sequence, genetic_code):\n #Crate an empty list to store AA sequence:\n AA_list = []\n # Convert all rna_sequence to upper case:\n rna_sequence=rna_sequence.upper()\n # Convert all rna_sequence into a list:\n rna_list = list(rna_sequence)\n # This conditon will run if rna_sequence is at least 3 bases long, and only once it find start codon ,\n #and stop once it finds stop codon.\n while True:\n if len(rna_list) > 2:\n codon=''.join(rna_list[0:3])\n #Delete first 3 bases since its alread added as codon, thus no longer needed.\n del rna_list[0:3]\n else:\n break\n #Using genetic code dictionary to find AA for each corresponding codon:\n AA=genetic_code[codon]\n #Break loop once it finds stop codon\n if AA=='*':\n break\n #Add add translatable AA to the AA_list:\n AA_list.append(AA)\n return ''.join(AA_list)", "def back_translate(aln_file, seqdict):\n aln = SeqIO.parse(aln_file.name, 'fasta')\n bt_seq = []\n for prot_seq in aln:\n codon = 0\n bt = ''\n nuc = seqdict[prot_seq.id]\n for aa in prot_seq:\n if aa == '-':\n bt += '---'\n else:\n bt += nuc[codon*3:(codon*3)+3]\n codon += 1\n bt_seq.append(bt)\n return bt_seq", "def back_translate(seq):\n\n base_nucleotide_list = []\n for i in seq:\n res = __get_key(i,CodonTable)\n base_nucleotide_list.append(res)\n return ''.join(base_nucleotide_list)", "def transform_seq(seq):\n # TODO add character checking based on ASCII code\n return \"\".join(\"\" if aa in msa_characters else aa for aa in seq)", "def revcomp(self, seq):\n tab = self.maketrans(b'ACNGT', b'TGNCA')\n return seq.translate(tab)[::-1]", "def convert_to_one_letter_code_sing(seq):\n conversion = {\n \"GLY\": \"G\", \"PRO\": \"P\", \"VAL\": \"V\", \"ALA\": \"A\", \"LEU\": \"L\",\n \"ILE\": \"I\", \"MET\": \"M\", \"CYS\": \"C\", \"PHE\": \"F\", \"TYR\": \"Y\",\n \"TRP\": \"W\", \"HIS\": \"H\", \"ARG\": \"R\", \"LYS\": \"K\", \"GLN\": \"Q\",\n \"THR\": \"T\", \"ASP\": \"D\", \"ASN\": \"N\", \"SER\": \"S\", \"GLU\": \"E\"\n }\n n_seq = conversion[seq]\n return n_seq", "def translate(rna):\n RNA_CODON_TABLE = {\"UUU\": \"F\", \"UUC\": \"F\", \"UUA\": \"L\", \"UUG\": \"L\",\n \"UCU\": \"S\", \"UCC\": \"S\", \"UCA\": \"S\", \"UCG\": \"S\",\n \"UAU\": \"Y\", \"UAC\": \"Y\", \"UAA\": \"*\", \"UAG\": \"*\",\n \"UGU\": \"C\", \"UGC\": \"C\", \"UGA\": \"*\", \"UGG\": \"W\",\n \"CUU\": \"L\", \"CUC\": \"L\", \"CUA\": \"L\", \"CUG\": \"L\",\n \"CCU\": \"P\", \"CCC\": \"P\", \"CCA\": \"P\", \"CCG\": \"P\",\n \"CAU\": \"H\", \"CAC\": \"H\", \"CAA\": \"Q\", \"CAG\": \"Q\",\n \"CGU\": \"R\", \"CGC\": \"R\", \"CGA\": \"R\", \"CGG\": \"R\",\n \"AUU\": \"I\", \"AUC\": \"I\", \"AUA\": \"I\", \"AUG\": \"M\",\n \"ACU\": \"T\", \"ACC\": \"T\", \"ACA\": \"T\", \"ACG\": \"T\",\n \"AAU\": \"N\", \"AAC\": \"N\", \"AAA\": \"K\", \"AAG\": \"K\",\n \"AGU\": \"S\", \"AGC\": \"S\", \"AGA\": \"R\", \"AGG\": \"R\",\n \"GUU\": \"V\", \"GUC\": \"V\", \"GUA\": \"V\", \"GUG\": \"V\",\n \"GCU\": \"A\", \"GCC\": \"A\", \"GCA\": \"A\", \"GCG\": \"A\",\n \"GAU\": \"D\", \"GAC\": \"D\", \"GAA\": \"E\", \"GAG\": \"E\",\n \"GGU\": \"G\", \"GGC\": \"G\", \"GGA\": \"G\", \"GGG\": \"G\"}\n str = ''\n list = [rna[i:i+3] for i in range(0,len(rna),3)]\n for x in list:\n #checks if x is in key of RNA_CODON_TABLE\n if x in RNA_CODON_TABLE:\n #appends only if the value for the given key is not *\n if RNA_CODON_TABLE[x] != '*':\n str = str + RNA_CODON_TABLE[x]\n #if only one char is extra(meaning apart form the 3 pair characters available in dictionary)\n #checks if the char is in following\n elif len(x) == 1 
and x in ['A','G','C','U']:\n str = str + x\n #if the char is of length 2 i.e, 2 words extra\n elif len(x) == 2 and x[0] in ['A','G','C','U'] and x[1] in ['A','G','C','U']:\n #Then appending the char to the actually converted string\n str = str + x[0]\n str = str + x[1]\n #if the char is not in the above characters then it is a unrecognised character.\n else:\n print(\"Unrecognised character:\",x)\n return str", "def binary_to_seq():\n bin_seq, dico_binary, comp_seq, file_comp = utf8_to_binary()\n \n #for each binary value associate the corresponding letter (key) \n #according to the dictionnary \n dna_seq = \"\"\n reading_binary = \"\"\n for value in bin_seq:\n reading_binary += value\n for letter, code in dico_binary.items():\n if code == reading_binary:\n dna_seq += letter\n reading_binary = \"\"\n break\n \n #print(dna_seq, bin_seq, comp_seq, file_comp)\n return dna_seq, bin_seq, comp_seq, file_comp", "def backtranslate(p_seq, n_seq):\r\n # Keep track of the new sequence. Also keep track of which codon we are\r\n # actually processing (gaps don't count)\r\n newseq = ''\r\n codon = 0\r\n for aa in p_seq:\r\n if aa == '-':\r\n newseq += '---'\r\n else:\r\n newseq += n_seq[codon*3:(codon*3) + 3]\r\n codon += 1\r\n return newseq", "def translate_sequence(sequence, genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}, start_pos = 0):\n #find first orf\n #first_orf_seq = find_first_orf(sequence)\n\n # ensure sequence is uppercase\n seq = sequence.upper()\n\n #translate the sequence\n protein = \"\"\n for i in range(0, len(seq) - (len(seq) % 3), 3):\n codon = seq[i:i + 3]\n if genetic_code[codon] == \"*\":\n break\n protein += genetic_code[codon]\n return protein", "def proteinTranslation(seq, geneticCode = STANDARD_GENETIC_CODE):\n\n seq = seq.replace('T','U') # Make sure we have RNA sequence\n proteinSeq = []\n \n i = 0\n while i+2 < len(seq):\n \n codon = seq[i:i+3]\n aminoAcid = geneticCode[codon]\n \n if aminoAcid is None: # Found stop codon\n break\n\n proteinSeq.append(aminoAcid)\n i += 3\n\n return proteinSeq", "def translate(self) -> Seq:\n AA = \"\".join(\n self.codons[self.sequence[i : i + 3]]\n for i in range(0, len(self.sequence), 3)\n if self.codons[self.sequence[i : i + 3]] != \"Stop\"\n )\n return Seq(AA, self.id)", "def translate(dna):\n rna = dna.replace('T', 'U')\n startIndex = dna.find('AUG') + 1\n aminoAcidsSeq = \"\"\n for i in range(startIndex, len(rna), 3):\n # codon = rna[i: i+3]\n aminoAcidsSeq += code[rna[i: i+3]]\n if aminoAcidsSeq[len(aminoAcidsSeq) - 1] == '*':\n aminoAcidsSeq = aminoAcidsSeq[:-1]\n break\n return aminoAcidsSeq", "def complement(seq):\n complement_dict = {'A': 'T', 'C': 'G', 'T': 'A', 'G': 'C'}\n seq_list = list(seq)\n seq_list = [complement_dict[base] for base in seq_list]\n return ''.join(seq_list)", "def to_rna(seq):\n 
seq=seq.replace('A','U')\n seq=seq.replace('T','A')\n seq=seq.replace('C',\"P\")\n seq=seq.replace('G','C')\n seq=seq.replace('P','G')\n return seq", "def Translate(self):\n dna_to_protein = {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',\n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W',\n }\n \n length = self.length\n reading = {}\n for i in range(3):\n reading['frame_'+str(i+1)] = tuple([dna_to_protein[self.sequence[index:index+3]] for index in range(i,length-2,3)])\n reverse_strand = Analyze_DNA_Sequence.Complementary(self,'5-3')\n for i in range(3):\n reading['frame_'+str(i+4)] = tuple([dna_to_protein[reverse_strand[index:index+3]] for index in range(i,length-2,3)])\n\n return reading", "def translate(codon):\n \n table = { \n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M', \n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T', \n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K', \n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R', \n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L', \n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P', \n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q', \n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R', \n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', \n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A', \n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E', \n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G', \n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', \n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L', \n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*', \n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W', \n } \n \n assert codon in table.keys(), \"Not a valid codon sequence.\"\n \n return table[codon]", "def translate(nuc):\n\tfrom Bio import Seq\n\ttry:\n\t\ttmp_aa = Seq.translate(nuc.replace('-','N')) #returns string when argument is a string, Bio.Seq otherwise\n\texcept:\n\t\tprint(\"translation failed\",nuc)\n\t\ttmp_aa = 'X'*len(nuc)//3\n\taa_seq = \"\"\n\tfor i,aa in enumerate(tmp_aa):\n\t\tif nuc[i*3:(i+1)*3]=='---':\n\t\t\taa_seq+='-'\n\t\telse:\n\t\t\taa_seq+=aa\n\treturn aa_seq", "def translate(args):\n from jcvi.utils.cbook import gene_name\n\n transl_tables = [str(x) for x in range(1, 25)]\n p = OptionParser(translate.__doc__)\n p.add_option(\n \"--ids\",\n default=False,\n action=\"store_true\",\n help=\"Create .ids file with the complete/partial/gaps label\",\n )\n p.add_option(\n \"--longest\",\n default=False,\n action=\"store_true\",\n help=\"Find the longest ORF from each input CDS\",\n )\n p.add_option(\n \"--table\",\n default=1,\n choices=transl_tables,\n help=\"Specify translation table to use\",\n )\n p.add_option(\n \"--strip_names\",\n default=False,\n action=\"store_true\",\n help=\"Strip alternative splicing (e.g. 
At5g06540.1 -> At5g06540)\",\n )\n p.add_option(\n \"--unique\",\n default=False,\n action=\"store_true\",\n help=\"Ensure the output FASTA contains unique identifiers\",\n )\n p.set_outfile()\n\n opts, args = p.parse_args(args)\n strip_names = opts.strip_names\n unique = opts.unique\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (cdsfasta,) = args\n if opts.longest:\n cdsfasta = longestorf([cdsfasta])\n\n f = Fasta(cdsfasta, lazy=True)\n outfile = opts.outfile\n fw = must_open(outfile, \"w\")\n\n if opts.ids:\n idsfile = cdsfasta.rsplit(\".\", 1)[0] + \".ids\"\n ids = open(idsfile, \"w\")\n else:\n ids = None\n\n five_prime_missing = three_prime_missing = 0\n contain_ns = complete = cannot_translate = total = 0\n\n seen = set()\n grand_total = 0\n for name, rec in f.iteritems_ordered():\n grand_total += 1\n\n if strip_names:\n name = gene_name(name)\n\n if unique and name in seen:\n continue\n\n cds = rec.seq\n cdslen = len(cds)\n peplen = cdslen // 3\n total += 1\n\n # Try all three frames\n pep = \"\"\n for i in range(3):\n newcds = cds[i : i + peplen * 3]\n newpep = newcds.translate(table=opts.table)\n if len(newpep.split(\"*\")[0]) > len(pep.split(\"*\")[0]):\n pep = newpep\n\n labels = []\n if \"*\" in pep.rstrip(\"*\"):\n logging.error(\"{0} cannot translate\".format(name))\n cannot_translate += 1\n labels.append(\"cannot_translate\")\n\n contains_start = pep.startswith(\"M\")\n contains_stop = pep.endswith(\"*\")\n contains_ns = \"X\" in pep\n start_ns = pep.startswith(\"X\")\n end_ns = pep.endswith(\"X\")\n\n if not contains_start:\n five_prime_missing += 1\n labels.append(\"five_prime_missing\")\n if not contains_stop:\n three_prime_missing += 1\n labels.append(\"three_prime_missing\")\n if contains_ns:\n contain_ns += 1\n labels.append(\"contain_ns\")\n if contains_start and contains_stop:\n complete += 1\n labels.append(\"complete\")\n if start_ns:\n labels.append(\"start_ns\")\n if end_ns:\n labels.append(\"end_ns\")\n\n if ids:\n print(\"\\t\".join((name, \",\".join(labels))), file=ids)\n\n peprec = SeqRecord(pep, id=name, description=rec.description)\n SeqIO.write([peprec], fw, \"fasta\")\n fw.flush()\n seen.add(name)\n\n print(\n \"Complete gene models: {0}\".format(percentage(complete, total)), file=sys.stderr\n )\n print(\n \"Missing 5`-end: {0}\".format(percentage(five_prime_missing, total)),\n file=sys.stderr,\n )\n print(\n \"Missing 3`-end: {0}\".format(percentage(three_prime_missing, total)),\n file=sys.stderr,\n )\n print(\"Contain Ns: {0}\".format(percentage(contain_ns, total)), file=sys.stderr)\n\n if cannot_translate:\n print(\n \"Cannot translate: {0}\".format(percentage(cannot_translate, total)),\n file=sys.stderr,\n )\n\n fw.close()\n\n logging.debug(\n \"Total records: {}, Unique records (strip_names={}): {}\".format(\n grand_total, strip_names, len(seen)\n )\n )\n\n return cdsfasta, outfile", "def _seq2vec(seq):\n vec = np.zeros(len(seq), dtype=int)\n for aai, aa in enumerate(seq):\n vec[aai] = AA2CODE[aa]\n return vec", "def convert2seq(self,seq_int):\n\t\treturn [self.aminoacids[i] for i in seq_int]", "def test_convert_input(self):\n m, seq = DNA.make_seq(\"ACGGT--A\").parse_out_gaps()\n aligned_seq = Aligned(m, seq)\n mapped_gap, new_seq = _convert_input(aligned_seq, None)\n self.assertIs(new_seq.moltype, DNA)\n self.assertIs(mapped_gap, m)\n self.assertIs(new_seq, seq)\n mapped_gap, new_seq = _convert_input(\"ACGGT--A\", DNA)\n self.assertEqual(str(mapped_gap), str(m))\n self.assertEqual(str(new_seq), str(seq))", "def 
translate_DNA(dnaseq):\n\n gen = aa_generator_DNA(dnaseq)\n seq = ''\n aa = next(gen, None)\n while aa:\n seq += aa\n aa = next(gen, None)\n return seq", "def translate(self, xs, max_length=100):\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n hxs = self.crnn(xs)\n ys = self.decoder.translate(hxs, max_length)\n return ys", "def back_translate(self):\n base = Bio.Alphabet._get_base_alphabet(self.alphabet)\n if not isinstance(base, Bio.Alphabet.ProteinAlphabet):\n raise ValueError(\"Nucleic acids cannot be back translated!\")\n\n # right now this just uses the most-prevalent codon for each AA\n # TODO: select codons with a weighted average using random.choice\n return Seq(\n \"\".join([CodonUsage.SynonymousCodons[seq3(AA).upper()][0] for AA in str(self)]),\n IUPAC.unambiguous_dna,\n )", "def mask_sequence(seq, gaps):\n seq = [i.upper() for i in seq]\n for gap in gaps:\n for i in range(gap[0] - 1, gap[1]):\n try:\n seq[i] = seq[i].lower()\n except:\n continue\n return ''.join(seq)", "def decode(seq, nt_to_bits=None):\r\n if nt_to_bits is None:\r\n nt_to_bits = DEFAULT_GOLAY_NT_TO_BITS\r\n received_bits = _seq_to_bits(seq, nt_to_bits)\r\n corrected_bits, num_errors = decode_bits(received_bits) # errors in # bits\r\n if corrected_bits is None:\r\n return None, num_errors\r\n else:\r\n # put match into nucleotide format\r\n return _bits_to_seq(corrected_bits, nt_to_bits), num_errors", "def bieos2ot(tag_sequence):\n new_sequence = []\n for t in tag_sequence:\n assert t == 'B' or t == 'I' or t == 'E' or t == 'O' or t == 'S'\n if t == 'O':\n new_sequence.append(t)\n else:\n new_sequence.append('T')\n assert len(new_sequence) == len(tag_sequence)\n return new_sequence", "def convert_1to3(seq,allow_stop=True):\n term_list = []\n if allow_stop ==True:\n no_stop_seq = str(seq).replace('*','')\n for i in no_stop_seq:\n res = __get_key(i,aa3_to1_dict)\n term_list.append(res)\n else:\n for i in seq:\n res = __get_key(i,aa3_to1_dict)\n term_list.append(res)\n\n return \"\".join(term_list)", "def clts(sequence):\n return [_token2clts(segment)[1] for segment in sequence]" ]
[ "0.5927967", "0.59073716", "0.57882273", "0.5535857", "0.54425794", "0.5429711", "0.5424817", "0.5406879", "0.5380455", "0.53228873", "0.5287982", "0.5282211", "0.52763784", "0.52703464", "0.5240813", "0.5222085", "0.5175288", "0.5172747", "0.5154597", "0.5141814", "0.5124684", "0.5123629", "0.5103571", "0.5096733", "0.50959367", "0.50928533", "0.50909686", "0.50898194", "0.5088451", "0.50782984" ]
0.75388896
0
Initializes standings object based on your league ID number and year Required arguments leagueId ID of ESPN league, get from standings page url seasonId Year
def __init__(self, leagueId, seasonId): self.league_id = leagueId self.season_id = seasonId self.league_data = {'leagueId': self.league_id, 'seasonId': self.season_id, 'view': 'official'} self.soup = self.make_soup(LeagueStandings.base_url) self.get_standings() self.get_stats() self.get_title()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, league):\n # Set basic attributes\n self.league = league\n league.season = self\n league.history.seasons.append(self)\n self.year = league.cosmos.year\n # Record name, league offices, and commissioner, since this could change later (i.e.,\n # we can't rely on accessing these attributes of the franchise itself)\n self.league_name = league.name\n self.league_offices = league.offices\n self.commissioner = league.commissioner\n self.umpires = league.umpires\n # These may be set by self.review()\n self.champion = None\n self.standings = None\n self.league_leaders = None\n # Prepare award attributes\n self.championship_trophy = None\n self.pennants = []\n self.division_titles = []\n # Attribute a TeamSeason object for each team, and attribute these objects to this one\n self.teams = []\n for team in league.teams:\n team.season = TeamSeason(team=team)\n self.teams.append(team.season)\n # Devise a league schedule\n self.schedule = LeagueSchedule(league)", "def standings_by_season(season):\n season = int(season) + 1\n scoreboard = nba_py.Scoreboard(month=7,\n day=1,\n year=season)\n east_standings = scoreboard.east_conf_standings_by_day()\n west_standings = scoreboard.west_conf_standings_by_day()\n\n return render_template(\"standings.html\",\n title=\"standings\",\n east_standings=enumerate(east_standings, 1),\n west_standings=enumerate(west_standings, 1),\n team=CITY_TO_TEAM)", "def initialize(self):\r\n if self.method == \"naive\":\r\n self.teams = TeamsNaive(\r\n self.data_path, self.season_to_play, self.season_data\r\n )\r\n self.gsim = GameNaive(False)\r\n if not self.playoffs_only:\r\n self.season = Season(self.season_calendar, self.teams_info, self.gsim)", "def __init__(self, player):\n self.player = player\n player.career.seasons.append(self)\n self.team = player.team\n self.league = self.team.league\n self.year = self.team.cosmos.year", "def __init__(self, manager):\n self.player = manager\n manager.career.seasons.append(self)\n self.team = manager.team\n self.league = self.team.league\n self.year = self.team.cosmos.year", "def __init__(self, team):\n # Set basic attributes\n self.team = team\n team.season = self\n team.history.seasons.append(self)\n self.league = team.league\n self.year = team.cosmos.year\n # Record city, nickname, and organization, since this could change later (i.e.,\n # we can't rely on accessing these attributes of the franchise itself)\n self.city = team.city\n self.nickname = team.nickname\n self.organization = team.organization\n self.owner = team.owner\n self.manager = team.manager\n self.scout = team.scout\n self.players = team.players\n # Prepare attributes\n self.games = []\n # Prepare award attributes\n self.championship = None\n self.pennant = None\n self.division_title = None\n self.wild_card_berth = None", "def season_games(year):\n\tLOG.debug('Getting season %d', year)\n\tdata = read_html(io=season_games_url(year),\n\t\t\t\t\t attrs={'id': 'games'},\n\t\t\t\t\t infer_types=False,\n\t\t\t\t\t header=0)\n\tif len(data) != 1:\n\t\traise CantFindTheRightTable\n\tdata = data.pop()\n\n\t# Cleaning.\n\tdel data[\"Unnamed: 3\"]\n\t# The code below issues \"UserWarning: \" So we catch UserWarnings.\n\twith warnings.catch_warnings():\n\t\twarnings.filterwarnings(action='ignore', category=UserWarning,\n\t\t\t\t\t\t\t\tmodule=r'pandas\\.core\\.frame',\n\t\t\t\t\t\t\t\tmessage=(r\"Boolean Series key will be reindexed\"\n\t\t\t\t\t\t\t\t\t\t r\" to match DataFrame index\\.\"))\n\t\t# These rows are mid-table header rows.\n\t\tdata = data[data.Week 
!= \"Week\"][data.Week != \"nan\"]\n\n\tdata['week'] = (data.Week\n\t\t\t\t\t.replace(\"WildCard\", \"wild-card\")\n\t\t\t\t\t.replace(\"Division\", \"divisional\")\n\t\t\t\t\t.replace(\"ConfChamp\", \"conference\")\n\t\t\t\t\t.replace(\"SuperBowl\", \"super-bowl\")\n\t\t\t\t\t.apply(\n\t\t\t\t\t\tlambda s: (int(s)\n\t\t\t\t\t\t\t\t if all(c in '1234567890' for c in s)\n\t\t\t\t\t\t\t\t else s)))\n\tdel data['Week']\n\n\tdata['season'] = year\n\tdata['game_date'] = pd.to_datetime(\n\t\tdata.Date\n\t\t.replace(r\"$\", r\", %d\" % year, regex=True)\n\t\t.replace(r\"^(January|February) (\\d+), \\d+$\", r\"\\1 \\2, %d\" % (year + 1),\n\t\t\t\t regex=True))\n\tdel data['Date']\n\n\tfor column in \"PtsW\", \"PtsL\", \"YdsW\", \"TOW\", \"YdsL\", \"TOL\":\n\t data[column] = data[column].apply(int)\n\n\tdata['WatL'] = data['Unnamed: 5'].apply(lambda x: x == '@')\n\tdel data['Unnamed: 5']\n\tdata['hometeam'] = (~data.WatL * data['Winner/tie'] +\n\t\t\t\t\t\tdata.WatL * data['Loser/tie'])\n\tdata['awayteam'] = (data.WatL * data['Winner/tie'] +\n\t\t\t\t\t\t~data.WatL * data['Loser/tie'])\n\tdata['winner'] = data['Winner/tie']\n\tfor column in 'Winner/tie', 'Loser/tie', \"WatL\":\n\t\tdel data[column]\n\tfor column in 'hometeam', 'awayteam', 'winner':\n\t\tdata[column] = data[column].apply(lambda s: s.split()[-1].lower())\n\n\treturn data", "def __init__(self, api_key, season, week):\n\n self._ak = api_key\n self._base_url = 'https://api.sportsdata.io/v3/nfl/'\n self.season = season\n self.week = week\n self._player_dict = filter_players(load_players_file(), position='QB')", "def get_standings(self):\n self.standings = self.soup.find('table', id='standingsTable')", "def scrape():\n league_year = Config.get_property(\"league_year\")\n\n # Create table\n season_data = client.season_schedule(league_year)\n season_data = br_enum_to_string(season_data)\n return season_data", "def __init__(self, league_id):\n self.league_id = league_id", "def get_team_standings(self, amount=5):\n url = f\"{self.BASE_URL}/en/results.html/{self.date.year}/team.html\"\n try:\n soup = set_soup(url)\n table = soup.find(\"table\", {\"class\": \"resultsarchive-table\"})\n if table is None:\n logger.info(f\"Team standings table not found for year {self.date.year}\")\n return\n rows = table.find_all(\"tr\")[1 : amount + 1]\n team_rows = [\n [cell.text.strip() for cell in row.find_all(\"td\")] for row in rows\n ]\n standings = [\n {\n \"team\": row[2],\n \"position\": int(row[1]),\n \"points\": float(row[-2]),\n }\n for row in team_rows\n ]\n return {\"teamStandings\": standings, \"teamUrl\": url}\n except Exception:\n logger.exception(f\"Error getting team standings {self.date.year}\")", "async def get_season(self, server: model.Server):\n api_url = ('https://eu.api.blizzard.com/sc2/'\n f'ladder/season/{server.id()}')\n payload = {'locale': 'en_US',\n 'access_token': await self.get_access_token()}\n data, status = await self._perform_api_request(api_url, params=payload)\n if status != 200:\n raise InvalidApiResponse(f'{status}: {api_url}')\n\n return model.Season(\n season_id=data.get('seasonId'),\n number=data.get('number'),\n year=data.get('year'),\n server=server,\n start=datetime.fromtimestamp(int(data.get('startDate'))),\n end=datetime.fromtimestamp(int(data.get('endDate')))\n )", "def get_seasons_information():\n\n #getting the guidebox_id variable from show_page.html\n guidebox_id = request.args.get(\"guidebox_id\")\n\n #make API to get season information, gets back list of season information\n seasons_results = 
guidebox_season_info(guidebox_id)\n\n for season in seasons_results:\n date = season[\"first_airdate\"]\n year = str(date)[0:4]\n season[\"first_airdate\"] = year\n\n return jsonify(seasons_results)", "def get_season_year(league_id):\n\n today = date.today()\n\n month = today.month\n year = today.year\n\n if league_id == \"10\":\n season_year = str(year)\n else:\n if month >= 10:\n # Defaulting to current season in October\n next_year = int(str(year)[-2:]) + 1\n season_year = str(year) + \"-\" + str(next_year)\n else:\n # Defaulting to the current or just completed season\n # from Jan. to Sept.\n next_year = int(str(year)[-2:])\n season_year = str(year - 1) + \"-\" + str(next_year)\n\n return season_year", "def make_standings_df(self):\n columns = ['DATE', 'TEAM', 'teamId', 'R', 'HR', 'RBI', 'SBN', 'OBP', \n 'K', 'QS', 'SV', 'ERA', 'WHIP', 'POINTS', 'CHANGE']\n trimmed_table = self.parse_soup(self.standings)\n self.df_standings = pd.DataFrame(trimmed_table, columns=columns) \n # load season standings csv from file\n try: # if it already exists\n df = pd.read_csv('2016_standings.csv', index_col=0)\n except OSError:\n df = pd.DataFrame(columns=columns) # if it doesn't already exist\n df = df.append(self.df_standings)\n df.to_csv('2016_standings.csv')", "def getseason(data):\n ## Season key is the most reliable\n season = data.get(\"season\")\n if season:\n ## Season key is an integer formatted \"YYS\" and is 2000-based (i.e.- 171 == 2017-Winter)\n season = str(season)\n year = int(f\"20{season[:2]}\")\n ## Anichart Season key is 1-indexed\n season = int(season[2]) - 1\n ## This should normally pass; if it consistently does not, we'll have to investigate why\n try: return SeasonCharts.buildseason(season,year)\n ## If something goes wrong, we'll try another method\n except: print(f\"Failed to parse season: {data['season']}\")\n ## Next, we'll iterate over rankings to try to determine the season/year\n ## There are multiple types of rankings based on season, year, and both combined,\n ## so we'll piece it together based on whatever we come across first\n season,year = None,None\n for ranking in data.get(\"rankings\",list()):\n ## Quicker exit (without just making this loop its own function)\n if season and year: continue\n ## We'll ignore stuff we've already gotten and assume that nothing in\n ## rankings contradicts eachother\n if not season:\n ## Defaults to None one way or another if it's not supplied\n season = ranking.get(\"season\")\n if not year: year = ranking.get(\"year\")\n ## Check if we made it\n if season and year:\n ## As above, this should always work out-of-the-box\n try: return SeasonCharts.buildseason(season,year)\n except: print(season,year)\n ## Welp, we're stumped...\n return None", "def test_get_standings(self):\n msg = \"Response status is not 200\"\n response = self.api.get_standings(self.season, self.nhl_season)\n self.assertEqual(response.status_code, 200, msg)", "def get_season_url(\n base_url: str, year: Optional[int] = None, season: Optional[str] = None\n) -> str:\n if year is None or season is None:\n return f\"{base_url}/season\"\n return f\"{base_url}/season/{year}/{season.lower()}\"", "def parse_seasons (self, id, response_data):\n raw_seasons = response_data['value']\n videos = raw_seasons['videos']\n\n # get art video key\n video = {}\n for key, video_candidate in videos.iteritems():\n if not self._is_size_key(key):\n video = video_candidate\n\n # get season index\n sorting = {}\n for idx, season_list_entry in video['seasonList'].iteritems():\n if 
self._is_size_key(key=idx) == False and idx != 'summary':\n sorting[int(season_list_entry[1])] = int(idx)\n\n seasons = {}\n\n for season in raw_seasons['seasons']:\n if self._is_size_key(key=season) == False:\n seasons.update(self._parse_season_entry(season=raw_seasons['seasons'][season], video=video, sorting=sorting))\n return seasons", "def parse(self, response):\n # gather season numbers and titles from the page\n season_numbers = response.css('select[name=\"sea\"] option::attr(value)').getall()\n season_titles = response.css('select[name=\"sea\"] option::text').getall()\n\n for season_number, season_title in zip(season_numbers, season_titles):\n # assemble URL for this season\n season_url = response.url + \"/?sea=\" + season_number\n\n # scrape the season page\n yield scrapy.Request(url=season_url,\n callback=self.parse_season,\n meta=dict(season_id=season_number,\n season_title=season_title.split(\" \")[0]))", "def getskaters(league, year):\n \n url = 'https://www.eliteprospects.com/league/' + league + '/stats/' + year + '?page='\n # print('Collects data from ' + 'https://www.eliteprospects.com/league/' + league + '/stats/' + year)\n \n print(\"Beginning scrape of \" + league + \" skater data from \" + year + \".\")\n \n # Return list with all plyers for season in link \n players = []\n \n page = (requests.get(url+str(1), timeout = 500))\n first_page_string = str(page)\n \n while first_page_string == '<Response [403]>':\n print(\"Just got a 403 Error before entering the page. Time to Sleep, then re-obtain the link.\")\n time.sleep(100)\n page = (requests.get(url+str(1), timeout = 500))\n first_page_string = str(page)\n print(\"Changed the string before entering the page. Let's try again\")\n \n if (str(first_page_string) == '<Response [404]>'):\n print(\"ERROR: \" + str(first_page_string) + \" on league: \" + league + \" in year: \" + year + \". Data doesn't exist for this league in this year.\")\n \n else:\n \n for i in range(1,99):\n page = requests.get(url+str(i), timeout = 500) \n page_string = str(page)\n \n while page_string == '<Response [403]>':\n print(\"Just got a 403 Error within the page. Time to Sleep, then re-obtain the link.\")\n time.sleep(100)\n page = requests.get(url+str(i), timeout = 500) \n page_string = str(page)\n print(\"Changed the string within the page. 
Let's try again\")\n \n soup = BeautifulSoup(page.content, \"html.parser\")\n\n # Get data for players table\n player_table = soup.find( \"table\", {\"class\":\"table table-striped table-sortable player-stats highlight-stats season\"})\n \n try:\n df_players = tableDataText(player_table)\n \n except AttributeError:\n print(\"BREAK: TABLE NONE ERROR: \" + str(requests.get(url+str(i), timeout = 500)) + \" On League: \" + league + \" In Year: \" + year)\n break\n \n if len(df_players)>0:\n\n if df_players['#'].count()>0:\n # Remove empty rows\n df_players = df_players[df_players['#']!=''].reset_index(drop=True)\n\n # Extract href links in table\n href_row = []\n for link in player_table.find_all('a'):\n href_row.append(link.attrs['href'])\n\n # Create data frame, rename and only keep links to players\n df_links = pd.DataFrame(href_row) \n df_links.rename(columns={ df_links.columns[0]:\"link\"}, inplace=True)\n df_links= df_links[df_links['link'].str.contains(\"/player/\")].reset_index(drop=True) \n\n # Add links to players\n df_players['link']=df_links['link'] \n\n players.append(df_players)\n\n # Wait 3 seconds before going to next\n #time.sleep(1)\n #print(\"Scraped page \" + str(i))\n \n else:\n #print(\"Scraped final page of: \" + league + \" In Year: \" + year)\n break\n\n \n if len(players)!=0:\n df_players = pd.concat(players).reset_index()\n\n df_players.columns = map(str.lower, df_players.columns)\n\n # Clean up dataset\n df_players['season'] = year\n df_players['league'] = league\n\n df_players = df_players.drop(['index','#'], axis=1).reset_index(drop=True)\n\n df_players['playername'] = df_players['player'].str.replace(r\"\\(.*\\)\",\"\")\n df_players['position'] = df_players['player'].str.extract('.*\\((.*)\\).*')\n df_players['position'] = np.where(pd.isna(df_players['position']), \"F\", df_players['position'])\n\n df_players['fw_def'] = df_players['position'].str.contains('LW|RW|C|F')\n df_players.loc[df_players['position'].str.contains('LW|RW|C'), 'fw_def'] = 'FW'\n df_players.loc[df_players['position'].str.contains('D'), 'fw_def'] = 'DEF'\n\n # Adjust columns; transform data\n team = df_players['team'].str.split(\"“\", n=1, expand=True)\n df_players['team'] = team[0]\n\n # drop player-column\n df_players = df_players.drop(columns = ['fw_def'], axis=1)\n print(\"Successfully scraped all \" + league + \" skater data from \" + year + \".\")\n\n return df_players\n \n else: print(\"LENGTH 0 ERROR: \" + str(requests.get(url+str(1), timeout = 500)) + \" On League: \" + league + \" In Year: \" + year)", "def list_chassis_per_season(start_year=1950, end_year=2020, file_changed=False):\n\n def _get_chassis_names(years):\n req = requests.get(WIKIPEDIA_F1_URL)\n soup = BeautifulSoup(req.content, 'html.parser')\n links = soup.find_all('a')\n\n for link in links:\n link_text = ['Formula', 'One', 'season', 'cars']\n if str(link.get('href')).split('_')[-4:] == link_text:\n link_season = str(WIKIPEDIA + link.get('href')).replace(' ', '')\n season = str(link.get('href')).replace(':', '_').split('_')[-5:-4][0]\n\n if int(season) in years:\n\n req = requests.get(link_season)\n soup = BeautifulSoup(req.content, 'html.parser')\n team_divs = soup.findAll('div', attrs={'class': 'mw-category-group'})\n teams_season_list = []\n\n for team_div in team_divs:\n team_links = team_div.findAll('a')\n for team_link in team_links:\n teams_season_list.append(team_link.get('title'))\n\n\n # Checking missing teams\n missing_teams = MISSING_CHASSIS.get(season)\n for missing_team in missing_teams:\n if 
missing_team not in teams_season_list:\n teams_season_list.append(missing_team)\n\n seasons_chassis[season] = teams_season_list\n\n save_json(seasons_chassis)\n\n return seasons_chassis\n\n seasons_chassis = load_json(F1_CHASSIS)\n\n # Check if we got the chassis names for all seasons listed\n update_list = []\n\n for year in range(start_year, end_year + 1):\n if str(year) not in seasons_chassis.keys():\n update_list.append(year)\n\n chassis = _get_chassis_names(update_list) if update_list else seasons_chassis\n\n return chassis", "def initialize_layers(self, years):\n min_year = min(years)\n max_year = max(years)\n ordered_years = list(range(min_year, max_year + 1))\n self.layers = [Layer(y) for y in ordered_years]", "def season_series(game_id, pref_team, other_team, last_season=False):\n\n # Init empty dictionaries and lists\n games_against = list()\n pref_toi = dict()\n pref_goals = dict()\n pref_assists = dict()\n pref_points = dict()\n pref_record = {\"wins\": 0, \"losses\": 0, \"ot\": 0}\n roster_player = True\n\n # If this is the first game of the season, we can set the 'last_season' flag to enable the\n # season series function to check last year's season series between the two teams.\n if not last_season:\n season_start = str(game_id)[0:4]\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={yesterday:%Y-%m-%d}\"\n # )\n schedule_url = (\n f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n else:\n season_start = int(str(game_id)[0:4]) - 1\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={season_end}-06-01\"\n # )\n schedule_url = (\n f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n\n schedule = api.nhl_api(schedule_url).json()\n dates = schedule[\"dates\"]\n\n # Loop through scheduled to get previously played games against\n for date in dates:\n game = date[\"games\"][0]\n game_type = game[\"gameType\"]\n game_id = game[\"gamePk\"]\n game_team_home = game[\"teams\"][\"home\"][\"team\"][\"name\"]\n game_team_away = game[\"teams\"][\"away\"][\"team\"][\"name\"]\n teams = [game_team_away, game_team_home]\n game_status = game[\"status\"][\"abstractGameState\"]\n if game_type == \"R\" and game_status == \"Final\" and other_team.team_name in teams:\n game_feed = f\"/game/{game_id}/feed/live\"\n games_against.append(game_feed)\n\n # If the two teams haven't played yet, just exit this function\n if not games_against:\n return None, None, None\n\n # Loop through newly created games_against list to get each stats\n for feed in games_against:\n game = api.nhl_api(feed).json()\n game_data = game[\"gameData\"]\n home_team_name = game_data[\"teams\"][\"home\"][\"name\"]\n pref_homeaway = \"home\" if home_team_name == pref_team.team_name else \"away\"\n other_homeaway = \"away\" if home_team_name == pref_team.team_name else \"home\"\n\n # Get season series\n end_period = 
game[\"liveData\"][\"linescore\"][\"currentPeriod\"]\n extra_time = True if end_period > 3 else False\n pref_score = game[\"liveData\"][\"linescore\"][\"teams\"][pref_homeaway][\"goals\"]\n other_score = game[\"liveData\"][\"linescore\"][\"teams\"][other_homeaway][\"goals\"]\n if pref_score > other_score:\n pref_record[\"wins\"] += 1\n elif other_score > pref_score and extra_time:\n pref_record[\"ot\"] += 1\n else:\n pref_record[\"losses\"] += 1\n\n season_series_str = f\"Series: {pref_record['wins']}-\" f\"{pref_record['losses']}-{pref_record['ot']}\"\n\n # Get stats leaders\n # pref_teamstats = game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"teamStats\"]\n pref_playerstats = game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"players\"]\n for id, player in pref_playerstats.items():\n try:\n # Calculate TOI\n player_toi_str = player[\"stats\"][\"skaterStats\"][\"timeOnIce\"]\n player_toi_minutes = int(player_toi_str.split(\":\")[0])\n player_toi_seconds = int(player_toi_str.split(\":\")[1])\n player_toi = (player_toi_minutes * 60) + player_toi_seconds\n pref_toi[id] = pref_toi.get(id, 0) + player_toi\n\n # Point Totals\n player_goal_str = player[\"stats\"][\"skaterStats\"][\"goals\"]\n pref_goals[id] = pref_goals.get(id, 0) + int(player_goal_str)\n player_assist_str = player[\"stats\"][\"skaterStats\"][\"assists\"]\n pref_assists[id] = pref_assists.get(id, 0) + int(player_assist_str)\n player_points = int(player_goal_str) + int(player_assist_str)\n pref_points[id] = pref_points.get(id, 0) + int(player_points)\n\n except KeyError:\n pass\n\n # Calculate Stats Leaders\n sorted_toi = sorted(pref_toi.values(), reverse=True)\n leader_toi = sorted_toi[0]\n\n sorted_points = sorted(pref_points.values(), reverse=True)\n leader_points = sorted_points[0]\n\n # Get TOI leader\n for id in pref_toi.keys():\n if pref_toi[id] == leader_toi:\n player_name = roster.player_attr_by_id(pref_team.roster, id, \"fullName\")\n if player_name is None:\n roster_player = False\n player_id_only = id.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n leader_toi_avg = leader_toi / len(games_against)\n m, s = divmod(leader_toi_avg, 60)\n toi_m = int(m)\n toi_s = int(s)\n toi_s = \"0{}\".format(toi_s) if toi_s < 10 else toi_s\n toi_avg = \"{}:{}\".format(toi_m, toi_s)\n player_short_name = f\"{player_name[0]}. 
{' '.join(player_name.split()[1:])}\"\n toi_leader_str = \"TOI Leader: {} with {} / game.\".format(player_short_name, toi_avg)\n\n # Handle tied points leaders\n point_leaders = list()\n for id in pref_points.keys():\n if pref_points[id] == leader_points:\n point_leaders.append(id)\n\n if leader_points == 0:\n points_leader_str = \"Points Leader: None (all players have 0 points).\"\n\n elif len(point_leaders) == 1:\n leader = point_leaders[0]\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n # If the player is no longer on the team, get their information (change string here?)\n if player_name is None:\n roster_player = False\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n if not roster_player:\n points_leader_str = (\n f\"Points Leader: {player_name} with {leader_points} points \"\n f\"({player_goals}G {player_assists}A) \"\n )\n else:\n points_leader_str = \"Points Leader: {} with {} ({}G {}A).\".format(\n player_name, leader_points, player_goals, player_assists\n )\n\n elif len(point_leaders) > 3:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. {' '.join(player_name.split()[1:])}\"\n point_leaders_with_attrs.append(player_short_name)\n\n point_leaders_joined = \", \".join(point_leaders_with_attrs[0:3])\n leftover_leaders = len(point_leaders) - 3\n points_leader_str = (\n f\"Points Leaders: {point_leaders_joined} & {leftover_leaders} others ({leader_points} each).\"\n )\n\n else:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. 
{' '.join(player_name.split()[1:])}\"\n player_str = f\"{player_short_name} ({player_goals}G {player_assists}A)\"\n point_leaders_with_attrs.append(player_str)\n\n point_leaders_joined = (\n f\", \".join(point_leaders_with_attrs[:-1]) + f\" & {point_leaders_with_attrs[-1]}\"\n )\n points_leader_str = \"Points Leaders: {} with {} each.\".format(point_leaders_joined, leader_points)\n\n return season_series_str, points_leader_str, toi_leader_str", "def __init__(self, data):\n self.game_id = data['game_id']\n data.pop('game_id', None)\n # dictionary of innings\n self.innings = []\n # loops through the innings\n for x in sorted(data):\n try:\n result = {\n 'inning': int(x),\n 'home': int(data[x]['home']),\n 'away': int(data[x]['away'])\n }\n # possible error when 9th innning home team has 'x'\n # becuase they did not bat\n except ValueError:\n result = {\n 'inning': int(x),\n 'home': data[x]['home'],\n 'away': int(data[x]['away'])\n }\n self.innings.append(result)", "def getTeamStat(self, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashteamstats?Conference=&'\\\r\n 'DateFrom=&DateTo=&Division=&GameScope=&GameSegment=&'\\\r\n 'LastNGames=0&LeagueID=00&Location=&MeasureType=Base&'\\\r\n 'Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&'\\\r\n 'PerMode=PerGame&Period=0&PlayerExperience=&PlayerPosition=&'\\\r\n 'PlusMinus=N&Rank=N&Season=' + season + '&SeasonSegment=&'\\\r\n 'SeasonType=Regular+Season&ShotClockRange=&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n team_df = df[[\"TEAM_ID\",\"TEAM_NAME\",\"GP\",\"W\",\"L\",\"W_PCT\",\"MIN\",\"FGM\",\r\n \"FGA\",\"FG_PCT\",\"FG3M\",\"FG3A\",\"FG3_PCT\",\"FTM\",\"FTA\",\"FT_PCT\",\r\n \"OREB\",\"DREB\",\"REB\",\"AST\",\"TOV\",\"STL\",\"BLK\",\"BLKA\",\"PF\",\r\n \"PFD\",\"PTS\",\"PLUS_MINUS\"]]\r\n \r\n return team_df", "def get_player_games(self, year, use_local=True):", "def test_get_league_leaders___skaters(self):\n msg = \"Response status is not 200\"\n response = self.api.get_league_leaders___skaters(self.season, self.nhl_season)\n self.assertEqual(response.status_code, 200, msg)", "async def standings(self, ctx: commands.Context, *, search: HockeyStandings = None) -> None:\n source = {\n \"all\": StandingsPages,\n \"conference\": ConferenceStandingsPages,\n \"western\": ConferenceStandingsPages,\n \"eastern\": ConferenceStandingsPages,\n \"division\": DivisionStandingsPages,\n \"massmutual\": DivisionStandingsPages,\n \"central\": DivisionStandingsPages,\n \"discover\": DivisionStandingsPages,\n \"scotia\": DivisionStandingsPages,\n \"north\": DivisionStandingsPages,\n \"massmutual\": DivisionStandingsPages,\n \"east\": DivisionStandingsPages,\n \"honda\": DivisionStandingsPages,\n \"west\": DivisionStandingsPages,\n }\n if search is None:\n search = \"division\"\n standings, page = await Standings.get_team_standings(search.lower(), session=self.session)\n for team in TEAMS:\n if \"Team\" in team:\n source[team.replace(\"Team \", \"\").lower()] = DivisionStandingsPages\n else:\n source[team] = TeamStandingsPages\n await BaseMenu(\n source=source[search](pages=standings),\n page_start=page,\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n ).start(ctx=ctx)" ]
[ "0.65982366", "0.65470594", "0.6177968", "0.60476357", "0.58978647", "0.5896803", "0.58631915", "0.5856052", "0.5768446", "0.57641816", "0.5709512", "0.565424", "0.5648739", "0.5625175", "0.5538045", "0.5525612", "0.5517583", "0.5513361", "0.54954636", "0.54488313", "0.54294205", "0.5426691", "0.54204714", "0.54050034", "0.53724", "0.53449357", "0.5337972", "0.5322651", "0.5298218", "0.5297228" ]
0.74785626
0
Gets HTML standings table
def get_standings(self): self.standings = self.soup.find('table', id='standingsTable')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def standings(self):\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain + '/standings.phtml', headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n table = soup.find('table', {'id': 'tablestandings'}).find_all('tr')\r\n clasificacion = list()\r\n [clasificacion.append('%s\\t%s\\t%s\\t%s\\t%s' % (\r\n tablas.find('td').text, tablas.find('div')['id'], tablas.a.text, tablas.find_all('td')[3].text,\r\n tablas.find_all('td')[4].text)) for tablas in table[1:]]\r\n return clasificacion", "def getHTMLTable(self):\n stocks = self.getTrackedStocks()\n\n html = \" \\\n <html> \\\n <head></head> \\\n <body> \\\n <table> \\\n <tr> \\\n <th>Ticker</th> \\\n <th>Company</th> \\\n <th>Price</th> \\\n <th>Daily Change</th> \\\n <th>Daily Percent Change</th> \\\n <th>Year High</th> \\\n <th>Year Low</th> \\\n </tr>\"\n\n for stock in stocks:\n html += \"<tr>\"\n\n html += \"<td>\"\n html += str(stock.target)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.company)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.curr)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.daily_change)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.daily_percent)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.year_high)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.year_low)\n html += \"</td>\"\n\n html += \"</tr>\"\n\n html += \"</table></body></html>\"\n\n return html", "def _html_table(self):\n return '</i>'.join(APtable._repr_html_(self).split('</i>')[1:])", "def _repr_html_(self):\n return html_table(self)", "def extract_main_table_from_html(html):\n soup = bs(html, 'html.parser')\n table = soup.find('table')\n return(table)", "def to_html_table(self):\n td = '<td>'\n nwtd = '<td nowrap=\"true\">'\n ftd = '<td class=\"format\">'\n ctd = '<td class=\"cen\">'\n etd = '</td>'\n \n if self.is_power_onoff():\n out = td + 'Power On/Off' + etd\n else:\n out = nwtd + '<strong>' + self['target'].ljust(20) + '</strong>' + etd\n\n if 'Date' in self:\n out += ctd + self['Date'] + etd\n else:\n out += td + etd\n\n if 'UTstart' in self:\n out += ctd + self['UTstart'] + etd\n else:\n out += td + etd\n\n if 'UTend' in self:\n out += ctd + self['UTend'] + etd\n else:\n out += td + etd\n\n if 'exposure' in self:\n out += ctd + self['exposure'] + etd\n else:\n out += td + etd\n\n if 'sample' in self:\n out += ctd + self['sample'] + etd\n else:\n out += td + etd\n\n if 'nframe' in self:\n out += ctd + self['nframe'] + etd\n else:\n out += td + etd\n \n if self.is_power_onoff():\n out += (td + etd)*3\n else:\n speed = self['speed']\n out += ctd + self['filters'].ljust(11) + etd + ctd + self['x_bin'] + 'x' + self['y_bin'] + etd + ctd + speed + etd \n \n if self.number_windows() > 0:\n out += ctd + self['x1_size'].rjust(4) + 'x' + self['y1_size'].ljust(4) + etd + td + self['x1_start'].ljust(3) + etd + td + self['y1_start'].ljust(4) + etd\n else:\n out += (td + etd)*3\n \n if self.number_windows() > 1:\n out += ctd + self['x2_size'].rjust(4) + 'x' + self['y2_size'].ljust(4) + etd + td + self['x2_start'].ljust(3) + etd + td + self['y2_start'].ljust(4) + etd\n else:\n out += (td + etd)*3\n\n if 'grating' in self:\n out += ctd + self['grating'] + etd\n else:\n out += td + etd\n\n if 'slit_width' in self:\n out += ctd + self['slit_width'] + etd\n else:\n out += td + etd\n\n if 'slit_angle' in self:\n out += ctd + 
self['slit_angle'] + etd\n else:\n out += td + etd\n \n if 'ID' in self:\n out += ctd + self['ID'] + etd\n else:\n out += td + etd\n\n if 'PI' in self:\n out += ctd + self['PI'] + etd\n else:\n out += td + etd\n \n if 'Comment' in self:\n out += nwtd + self['Comment'] + etd\n else:\n out += td + etd\n\n return out", "def open(self) -> str:\n self.html_table = self.html_table + \"\"\"<table class =\"table table-striped\">\\n\"\"\"\n return self.html_table", "def html(station):\r\n yield '<html><head><title>%s</title></head><body>'%station\r\n yield '<table><tr><td><h2>%s</h2><p><b>%s</b></p></td>'%\\\r\n (station,station.data.get('weather',''))\r\n yield '<td><img src=\"%s\"></td></tr>'%station.icon()\r\n yield '<tr><td><p>Currently: %s - '%\\\r\n datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n yield 'Time of observation: %s</p></td></tr>'%station.datetime()\r\n yield '</table><hr><table><tr><th><h3>URLs</h3></th><th><h3>Metrics</h3></th><th><h3>Info</h3></th></tr>'\r\n urls,metrics,info = [],[],[]\r\n for k,v in station.items():\r\n if not v: continue\r\n k = titler(k)\r\n tr = '<tr><th align=\"right\">%s</th><td>%s</td></tr>'\r\n if type(v) == type(0.):\r\n metrics.append(tr%(k,'%.3f'%v))\r\n elif v.startswith('http'):\r\n urls.append(tr%(k,'<a href=\"%s\">%s</a>'%(v,filter(None,v.split('/'))[-1])))\r\n else:\r\n info.append(tr%(k,v))\r\n r = ['<td valign=\"top\"><table>%s</table></td>'%''.join(locals()[x]) \\\r\n for x in ('urls','metrics','info')]\r\n yield '<tr>%s</tr></table>'%''.join(r)\r\n yield '</body></html>'", "def _repr_html_(self):\n\n html = [css(\"table\")]\n with self.connection as db:\n for n in db.execute(\"SELECT * FROM cache\"):\n html.append(\"<table class='climetlab'>\")\n html.append(\"<td><td colspan='2'>%s</td></tr>\" % (n[\"path\"],))\n\n for k in [x for x in n.keys() if x not in (\"path\", \"owner_data\")]:\n v = humanize.bytes(n[k]) if k == \"size\" else n[k]\n html.append(\"<td><td>%s</td><td>%s</td></tr>\" % (k, v))\n html.append(\"</table>\")\n html.append(\"<br>\")\n return \"\".join(html)", "def getHtml(self):\n if len(self.rows)<1:\n return ''\n if self.useTableSorter:\n if self.tableAttr:\n h = '<table %s>\\n' % self.tableAttr\n else:\n h = '<table class=\"tablesorter\">\\n'\n h += '<thead>\\n'\n h += self.rows[0]\n h += '\\n</thead><tbody>\\n'\n h += '\\n'.join(self.rows[1:])\n h += '\\n</tbody></table>\\n'\n else:\n h = '<table%s>\\n' % sep(self.tableAttr)\n h += '\\n'.join(self.rows)\n h += '\\n</table>\\n'\n return h", "def get_table(html) -> None:\n\tre_table_class = re.compile('.*2iSP.*') # familiar regex template (str w/ '2iSP')\n\ttable_class = html.find('div', {'class': re_table_class})\n\ttable_lst = re.findall('[А-Я|A-Z][^А-Я|A-Z]*', table_class.text) # regex for capitals\n\n\tfor param in table_lst:\n\t\tif 'Осадки' in param:\n\t\t\tweather_dict['precipation'] = re.search(r'\\d+', param).group()\n\t\telif 'Ветер' in param:\n\t\t\tweather_dict['wind'] = re.search(r'\\d+', param).group()\n\t\telif 'Давление' in param:\n\t\t\tweather_dict['pressure'] = re.search(r'\\d+', param).group()\n\t\telif 'Восход' in param:\n\t\t\tweather_dict['sunrise'] = ':'.join(re.findall(r'\\d+', param))\n\t\telif 'Закат' in param:\n\t\t\tweather_dict['sunset'] = ':'.join(re.findall(r'\\d+', param))", "def _repr_html_(self): # pragma: no cover\n return u\"\"\"<table>\n <tr><th>Station Name</th><th>Station Number</th><th>State</th></tr>\n <tr><td>{0:s}</td><td>{1:d}</td><td>{2:s}</td></tr>\n</table>\"\"\".format(self.description, self.station_number, self.state)", 
"def _create_table_html(self, table):\n if table != {} and table is not None:\n html_output = [['<hr>']]\n else:\n html_output = []\n\n for t in self._util_func.dict_key_list(table.keys()):\n html_output.append(table[t])\n\n return html_output", "def as_html(table): \n if isinstance(table,Table):\n html = \"<table width=\\\"\" + str(table.total_width()) + \"\\\"\" + table.html_attributes + \" ><colgroup>\\n\"\n if table.col_width_dict:\n for i in range(table.no_of_columns()):\n html += \"<col width=\\\"\" + str(table.col_width_percent(i)) + \"%\\\"/>\\n\"\n html += \"</colgroup><tbody>\\n\" \n row = \"<tr>\"\n for c in range(table.no_of_columns()):\n row += \"<th width=\\\"\"+str(table.col_width_percent(c))+\"%\\\">\" + table.cell(0,c) +\"</th>\"\n row += \"</tr>\\n\"\n html += row\n for r in range(1,table.no_of_rows()):\n row = \"<tr>\"\n for c in range(table.no_of_columns()):\n row += \"<td>\" + table.cell(r,c) + \"</td>\"\n row += \"</tr>\\n\"\n html += row\n return mark_safe(html)\n else:\n return table", "def html_data_table(self):\n return \"XXX\"", "def _html_repr(self):\n html = '<table id=%s>' % (self._id,)\n\n for row in range(self.rows):\n html += '<tr>'\n for col in range(self.columns):\n if row == 0 and self.header_row or col == 0 and self.header_column:\n tag = 'th'\n else:\n tag = 'td'\n html += '<%(tag)s id=%(id)s></%(tag)s>' % {\n 'tag': tag,\n 'id': self._get_cell_id(row, col),\n }\n html += '</tr>'\n html += '</table>'\n return html", "def _construct_html_table(self, df: Table) -> str:\n string = attach_tag_tr('\\n'.join(map(attach_tag_th, df.columns)))\n stringified_df = _stringify_table(df)\n\n for (i, row_elements) in stringified_df.iterrows():\n # Commented out code is a bit sleaker, but restrictive\n #string += '\\n' + attach_tag_tr('\\n'.join(map(attach_tag_td,\n # row_elements)))\n table_content: List = []\n for col, val in row_elements.iteritems():\n if col == 'cost':\n table_content.append(attach_tag_td_rjust(val))\n else:\n table_content.append(attach_tag_td(val))\n\n string += '\\n' + attach_tag_tr('\\n'.join(table_content))\n\n return attach_tag_table(\n attach_tag_caption(f'All Costs of {self.trip_title}')\n + '\\n'\n + attach_tag_tbody(string))", "def _gen_html(self):\n yield \"\\n<table border=%r summary='a table'>\\n\" % self.border\n header = self.header\n for row in self:\n yield \"<tr>\\n \"\n for el in row:\n if header:\n yield \"<th>%s</th> \" % el\n else:\n yield '<td bgcolor=\"%s\">%s</td> ' % \\\n (getattr(row, \"color\", self.color), el)\n yield \"\\n</tr>\\n\"\n header = False\n yield \"</table>\\n\"", "def open_body(self) -> str:\n self.html_table = self.html_table + \"\"\"<tbody class=\"table--size-md\">\\n\"\"\"\n return self.html_table", "def _get_markup(self):\n return make_soup(self.driver.find_element_by_id(\"contestDetailTable\").get_attribute(\"innerHTML\"))", "def show_html_tables(html_tables):\n\n for (it,t) in enumerate(html_tables):\n print(f\"Table {it}\")\n for (ir,r) in enumerate(t):\n print(f\" Row {ir}\")\n for (ic,c) in enumerate(r):\n print(f\" Col {ic}: {c}\")", "def standings():\n teams = Team.query.all()\n teams = list(reversed(sorted(teams, key=lambda team: team.points)))\n for team in teams:\n team.logo = url_for('static', filename='images/teams/{}'.format(team.logo_image))\n\n return render_template('standings/standings.html', teams=teams, title='Standings')", "def buildTrivialStatsTable(self, deltaSeriesCollection, klass=TRIVIAL_STATS_TABLE, style=''):\n tableWrapper = HTML().div()\n klass = '{} 
{}'.format(TABLE_SUMMARY, klass)\n table = tableWrapper.table(border='1', klass=klass, style=style)\n self.buildStatsTableHeader(table)\n tbody = table.tbody\n\n for i, deltaSeries in enumerate(deltaSeriesCollection, 1):\n row = tbody.tr\n row.td('{0:,}'.format(i), klass=TD_KEY)\n row.td(deltaSeries.beginProbeName, klass=TD_KEY)\n row.td(deltaSeries.endProbeName, klass=TD_KEY)\n row.td(DURATION_FORMAT.format(deltaSeries.getMin()))\n row.td(DURATION_FORMAT.format(deltaSeries.getMax()))\n row.td(DURATION_FORMAT.format(deltaSeries.getMedian()))\n row.td(DURATION_FORMAT.format(deltaSeries.getMean()))\n row.td(DURATION_FORMAT.format(deltaSeries.getPercentile(self.percentile1)))\n row.td(DURATION_FORMAT.format(deltaSeries.getPercentile(self.percentile2)))\n row.td(DURATION_FORMAT.format(deltaSeries.getStandardDeviation()))\n return tableWrapper", "def show_table():\n\n title_list = ('ID', 'Platform', 'Producer', 'Year', 'Elements')\n \n return table, title_list", "def create_html_layout(self):\n page = \"\"\"<!DOCTYPE html>\n <!doctype html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, shrink-to-fit=no\">\n </head>\n </html>\n <head>\n \t<meta charset=\"UTF-8\">\n </head>\n <body>\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-sm\">\n <h4>eda report: Exploratory data analysis</h4>\n </div>\n <div class=\"col-sm\">\n <h3>Inspecting dataframe of size: {size}\n </div>\n </div>\n </div>\n \t<table class=\"table table-hover\" style=\".table\">\n <thead>\n <tr style=\"font-size: 15px;\">\n <th width=\"5%\" align=\"left\" scope=\"col\">Variable Name</th>\n <th width=\"12%\" align=\"left\" scope=\"col\">Data Type</th>\n <th width=\"15%\" align=\"left\" scope=\"col\">Histogram</th>\n <th width=\"11%\" align=\"left\" scope=\"col\">Stats</th>\n <th width=\"7%\" align=\"left\" scope=\"col\">Missing NA</th>\n <th width=\"5%\" align=\"left\" scope=\"col\">Outliers</th>\n </tr>\n </thead>\n <tbody>\"\"\".format(size=self.df.size)\n\n end_page = \"\"\" \n </tbody>\n </table>\n </body>\n \"\"\"\n rows_html = []\n for i, column in enumerate(self.df.columns):\n Summary = ColumnSummary(data=self.df[column])\n datatype = Summary.data_type()\n missing = Summary.missing_values()\n stats = Summary.statistic_summary()\n outliers = Summary.outliers()\n Summary.create_histogram(i)\n html = f\"\"\"\n <tr>\n <td style=\"font-size: 15px;\" width=\"10%\" align=\"left\"> {column}</td>\n <td style=\"font-size: 15px;\"width=\"10%\" align=\"left\"> {datatype}</td>\n <td><img class=\"img-fluid\" src=\"hist_images/histogram{i}.png?{random.randint(0,\n 2e9)}\" style=\"width:800px\"> </td>\n <td style=\"font-size: 15px;\">mean: {stats.mean}<br>\n mode: {stats.mode}<br><br>\n min: {stats.min}<br>\n max: {stats.max}<br><br>\n lower-bound: {stats.lower}<br>\n upper-bound: {stats.upper}<b</td>\n <td style=\"font-size: 15px;\">{missing}</td>\n <td style=\"font-size: 15px;\">{outliers}</td>\n </tr>\n \"\"\"\n rows_html.append(html)\n\n merged_html = page + \"\".join(rows_html) + end_page\n return merged_html", "def playerStandings():\n conn = connect()\n c = conn.cursor()\n # Gets all the information from the view \"standings\".\n c.execute(\"SELECT * from standings;\")\n result = c.fetchall()\n conn.close()\n return result", "def _get_tabletype(cls) -> str:\n return 'HTML'", "def all_score_tables_html(results_dict):\n columns_classes = \"columns is-desktop is-centered\"\n\n half = \"is-half\"\n third = \"is-one-third\"\n\n row1 = 
html.Div(\n [\n single_entity_score_table_html(\n results_dict[\"PRO\"], \"Property\", half\n ),\n single_entity_score_table_html(\n results_dict[\"APL\"], \"Application\", half\n ),\n ],\n className=columns_classes,\n )\n\n row2 = html.Div(\n [\n single_entity_score_table_html(\n results_dict[\"CMT\"], \"Characterization\", half\n ),\n single_entity_score_table_html(\n results_dict[\"SMT\"], \"Synthesis\", half\n ),\n ],\n className=columns_classes,\n )\n\n row3 = html.Div(\n [\n single_entity_score_table_html(\n results_dict[\"DSC\"], \"Descriptor\", third\n ),\n single_entity_score_table_html(\n results_dict[\"SPL\"], \"Phase\", third\n ),\n single_entity_score_table_html(\n results_dict[\"MAT\"], \"Material\", third\n ),\n ],\n className=columns_classes,\n )\n return html.Div([row1, row2, row3])", "def close(self) -> str:\n self.html_table = self.html_table + \"\"\"</table>\\n\"\"\"\n return self.html_table", "def player_standings():\n DB = connect()\n c = DB.cursor()\n c.execute(\"SELECT * FROM current_standings\")\n DB.commit()\n standings = c.fetchall()\n DB.close()\n return standings" ]
[ "0.75585085", "0.69713396", "0.6739402", "0.641833", "0.6333908", "0.63318825", "0.62906903", "0.6253942", "0.6233505", "0.62233174", "0.6205858", "0.6203317", "0.61881346", "0.61482185", "0.6146875", "0.6112345", "0.6097109", "0.60647565", "0.6005432", "0.5987025", "0.598594", "0.5967541", "0.5964423", "0.5943357", "0.5931428", "0.5928484", "0.5896645", "0.58955413", "0.58881205", "0.5883448" ]
0.755721
1
Gets HTML cumulative stats table
def get_stats(self): self.stats = self.soup.find('table', id='statsTable')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _repr_html_(self):\n\n html = [css(\"table\")]\n with self.connection as db:\n for n in db.execute(\"SELECT * FROM cache\"):\n html.append(\"<table class='climetlab'>\")\n html.append(\"<td><td colspan='2'>%s</td></tr>\" % (n[\"path\"],))\n\n for k in [x for x in n.keys() if x not in (\"path\", \"owner_data\")]:\n v = humanize.bytes(n[k]) if k == \"size\" else n[k]\n html.append(\"<td><td>%s</td><td>%s</td></tr>\" % (k, v))\n html.append(\"</table>\")\n html.append(\"<br>\")\n return \"\".join(html)", "def getHTMLTable(self):\n stocks = self.getTrackedStocks()\n\n html = \" \\\n <html> \\\n <head></head> \\\n <body> \\\n <table> \\\n <tr> \\\n <th>Ticker</th> \\\n <th>Company</th> \\\n <th>Price</th> \\\n <th>Daily Change</th> \\\n <th>Daily Percent Change</th> \\\n <th>Year High</th> \\\n <th>Year Low</th> \\\n </tr>\"\n\n for stock in stocks:\n html += \"<tr>\"\n\n html += \"<td>\"\n html += str(stock.target)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.company)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.curr)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.daily_change)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.daily_percent)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.year_high)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.year_low)\n html += \"</td>\"\n\n html += \"</tr>\"\n\n html += \"</table></body></html>\"\n\n return html", "def fastqc_stats_table(self):\n \n headers = OrderedDict()\n headers['percent_duplicates'] = {\n 'title': '% Dups',\n 'description': '% Duplicate Reads',\n 'max': 100,\n 'min': 0,\n 'scale': 'RdYlGn-rev',\n 'format': '{:.1f}%'\n }\n headers['percent_gc'] = {\n 'title': '% GC',\n 'description': 'Average % GC Content',\n 'max': 80,\n 'min': 20,\n 'scale': 'PRGn',\n 'format': '{:.0f}%'\n }\n headers['avg_sequence_length'] = {\n 'title': 'Length',\n 'description': 'Average Sequence Length (bp)',\n 'min': 0,\n 'scale': 'RdYlGn',\n 'format': '{:.0f}'\n }\n headers['total_sequences'] = {\n 'title': 'M Seqs',\n 'description': 'Total Sequences (millions)',\n 'min': 0,\n 'scale': 'Blues',\n 'modify': lambda x: x / 1000000,\n 'shared_key': 'read_count'\n }\n self.general_stats_addcols(self.fastqc_stats, headers)", "def html_table(self,relpath=None):\n tbl = Table(('module','status'),\n module='FastQC test',status='Outcome')\n tbl.add_css_classes('fastqc_summary','summary')\n for name in self.modules:\n tbl.add_row(module=Link(name,self.link_to_module(name,\n relpath=relpath)),\n status=\"<span class='%s'>%s</span>\" % (\n self.status(name),\n self.status(name)))\n return tbl.html()", "def load_stats_table(url):\n\n req = requests.get(url)\n soup_page = BeautifulSoup(req.text, 'html.parser')\n stats_table = soup_page.table\n global_variables.page_number += 1\n return stats_table", "def buildStatsTable(self, category, timelineStats, benchmarkTlsMap):\n statsReport = str(self.buildStatsTitle(category, benchmarkTlsMap.keys(), len(timelineStats)))\n if len(timelineStats.deltaSeriesRepo) > 1:\n tabHeader = ''\n tabBody = ''\n tableCount = 0\n for eventName, deltaSeriesCollection in timelineStats.deltaSeriesRepo.iteritems():\n tabId = '{}_{}'.format(eventName, makeUniqueId())\n tabId = tabId.replace(' ', '_').replace('.', '_').replace(':', '_')\n tabHeader += TAB_HEADER_FMT.format(tabId, tabState(tableCount == 0), eventName)\n table = self._buildStatsTable(eventName, deltaSeriesCollection, benchmarkTlsMap)\n tabBody += TAB_BODY_FMT.format(tabId, tabContentState(tableCount 
== 0), table)\n tableCount += 1\n tabBody = TAB_BODY_PREFIX + tabBody + TAB_BODY_SUFFIX\n statsReport += TAB_CONTAINER_FMT.format(tabHeader, tabBody) + TAB_JS\n else:\n deltaSeriesCollection = timelineStats.getTscDeltaSeriesCollection()\n statsReport += self._buildStatsTable(TSC_EVENT_NAME, deltaSeriesCollection, benchmarkTlsMap)\n return statsReport", "def get_stats(evts_perigee) -> Table:\n rows = []\n\n for evt in reversed(evts_perigee):\n rows.append(evt.info)\n\n out = Table(rows=rows)\n return out", "def to_html(self) -> str:\n coverage_class = 'zero' if self.called == 0 else 'all'\n return '''<tr id=\"func-{}\" class=\"cov-health-{}\">\n <td><a href=\"#line-{}\">{}</a></td>\n <td>{}</td><td>{}%</td><td>{}%</td>\n </tr>\\n'''.format(\n self.name, coverage_class, self.linenum, self.pretty_name, self.called,\n self.returned, self.blocks\n )", "def html_data_table(self):\n return \"XXX\"", "def summarize_as_table(self):\n h = human_readable_size\n h_throughput = human_readable_throughput\n table = [\n ['Total Time (seconds)', '%.3f' % self.total_time,\n self.std_dev_total_time],\n ['Maximum Memory', h(self.max_memory), h(self.std_dev_max_memory)],\n ['Maximum CPU (percent)', '%.1f' % self.max_cpu,\n self.std_dev_max_cpu],\n ['Maximum Sent Throughput', h_throughput(self.max_sent_throughput),\n h_throughput(self.max_sent_throughput)],\n ['Maximum Recv Throughput', h_throughput(self.max_recv_throughput),\n h_throughput(self.max_recv_throughput)],\n ['Average Memory', h(self.average_memory),\n h(self.std_dev_average_memory)],\n ['Average CPU (percent)', '%.1f' % self.average_cpu,\n self.std_dev_average_cpu],\n ['Average Sent Throughput',\n h_throughput(self.average_sent_throughput),\n h_throughput(self.average_sent_throughput)],\n ['Average Recv Throughput',\n h_throughput(self.average_recv_throughput),\n h_throughput(self.average_recv_throughput)],\n ]\n return tabulate(\n table,\n headers=[\n 'Metric over %s run(s)' % (self.total_files),\n 'Mean',\n 'Standard Deviation'\n ],\n tablefmt=\"grid\"\n )", "def _html_table(self):\n return '</i>'.join(APtable._repr_html_(self).split('</i>')[1:])", "def create_html_layout(self):\n page = \"\"\"<!DOCTYPE html>\n <!doctype html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, shrink-to-fit=no\">\n </head>\n </html>\n <head>\n \t<meta charset=\"UTF-8\">\n </head>\n <body>\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-sm\">\n <h4>eda report: Exploratory data analysis</h4>\n </div>\n <div class=\"col-sm\">\n <h3>Inspecting dataframe of size: {size}\n </div>\n </div>\n </div>\n \t<table class=\"table table-hover\" style=\".table\">\n <thead>\n <tr style=\"font-size: 15px;\">\n <th width=\"5%\" align=\"left\" scope=\"col\">Variable Name</th>\n <th width=\"12%\" align=\"left\" scope=\"col\">Data Type</th>\n <th width=\"15%\" align=\"left\" scope=\"col\">Histogram</th>\n <th width=\"11%\" align=\"left\" scope=\"col\">Stats</th>\n <th width=\"7%\" align=\"left\" scope=\"col\">Missing NA</th>\n <th width=\"5%\" align=\"left\" scope=\"col\">Outliers</th>\n </tr>\n </thead>\n <tbody>\"\"\".format(size=self.df.size)\n\n end_page = \"\"\" \n </tbody>\n </table>\n </body>\n \"\"\"\n rows_html = []\n for i, column in enumerate(self.df.columns):\n Summary = ColumnSummary(data=self.df[column])\n datatype = Summary.data_type()\n missing = Summary.missing_values()\n stats = Summary.statistic_summary()\n outliers = Summary.outliers()\n Summary.create_histogram(i)\n 
html = f\"\"\"\n <tr>\n <td style=\"font-size: 15px;\" width=\"10%\" align=\"left\"> {column}</td>\n <td style=\"font-size: 15px;\"width=\"10%\" align=\"left\"> {datatype}</td>\n <td><img class=\"img-fluid\" src=\"hist_images/histogram{i}.png?{random.randint(0,\n 2e9)}\" style=\"width:800px\"> </td>\n <td style=\"font-size: 15px;\">mean: {stats.mean}<br>\n mode: {stats.mode}<br><br>\n min: {stats.min}<br>\n max: {stats.max}<br><br>\n lower-bound: {stats.lower}<br>\n upper-bound: {stats.upper}<b</td>\n <td style=\"font-size: 15px;\">{missing}</td>\n <td style=\"font-size: 15px;\">{outliers}</td>\n </tr>\n \"\"\"\n rows_html.append(html)\n\n merged_html = page + \"\".join(rows_html) + end_page\n return merged_html", "def statistics():\n return render_template('statistics.html'), 200", "async def stat_table(self, data):\n\n table = \"\"\n table += tabulate([data[\"stats\"][1]], data[\"stats\"][0], tablefmt=\"grid\") + \"\\n\"\n table += tabulate([data[\"resist\"][1]], data[\"resist\"][0], tablefmt=\"grid\") + \"\\n\"\n if data[\"inherits\"] and data[\"inherits\"][0]:\n table += tabulate([data[\"inherits\"][1]], data[\"inherits\"][0], tablefmt=\"grid\") + \"\\n\"\n \n skills = tabulate(data[\"skills\"][1], data[\"skills\"][0], tablefmt=\"grid\")\n if len(skills) > 2000:\n counter = 0\n split_skills = []\n skills = skills.split(\"\\n\")\n skills = [\"\\n\".join(skills[8*i:min(8*(i+1)+1, len(skills))])\n for i in range(int(len(skills) / 8))]\n else:\n skills = [skills]\n\n results = [\"```\\n\" + table[:-1] + \"\\n```\"]\n for skill in skills:\n results.append(\"```\\n\" + skill + \"\\n```\")\n return results", "def displayHTMLtable(acc_sent2, acc_wv03, acc, prec_sent2, prec_wv03, prec, recall_sent2, recall_wv03, recall):\n\n methods = ['Sent2 NBR', 'WV03 NBR', 'WV03 RF']\n accuracies = [\"{:.2%}\".format(acc_sent2), \"{:.2%}\".format(acc_wv03), \"{:.2%}\".format(acc)]\n precisions = [\"{:.2%}\".format(prec_sent2), \"{:.2%}\".format(prec_wv03), \"{:.2%}\".format(prec)]\n recalls = [\"{:.2%}\".format(recall_sent2), \"{:.2%}\".format(recall_wv03), \"{:.2%}\".format(recall)]\n\n data = methods + accuracies + precisions + recalls\n\n data = np.reshape(data, (4, 3)).T\n\n display(HTML(\n '<table style=\"width:100%;\"><th>Method</th><th>Accuracy</th><th>Precision</th><th>Recall</th><tr>{}</tr></table>'.format(\n '</tr><tr>'.join(\n '<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in data)\n )\n ))", "def get_html_cv_report(db, last_rpt_time):\n rval = \"\"\n if not db.table_exists(table=\"checkables\"):\n return rval\n\n rval += (\"<h2>%s</h2>\\n\" % cv_sublib.report_title())\n diml = [{'name': 'cos',\n 'pop': \"type = 'f' and ? < last_check\",\n 'samp': \"type = 'f' and checksum = 1 and ? < last_check\"},\n {'name': 'ttypes',\n 'pop': \"type = 'f' and ttypes is not null and ? < last_check\",\n 'samp': \"type = 'f' and ttypes is not null \" +\n \"and checksum = 1 and ? 
< last_check\"}]\n\n for dim in diml:\n d = Dimension.Dimension(name=dim['name'])\n rval += \"<pre>\\n\"\n rval += d.report()\n rval += \"</pre>\\n<br>\"\n\n # get the population and sample entries added since the last report\n rows = db.select(table=\"checkables\",\n fields=[\"count(path)\"],\n where=dim['pop'],\n data=(last_rpt_time,))\n (c_pop_size) = rows[0]\n\n rows = db.select(table=\"checkables\",\n fields=[\"count(path)\"],\n where=dim['samp'],\n data=(last_rpt_time,))\n (c_sample_size) = rows[0]\n\n # report the deltas\n rval += (\"Since the last report, \" +\n \"%d items were added to population, \" % (c_pop_size) +\n \"%d items were added to sample\" % (c_sample_size))\n rval += \"\\n\"\n\n return rval", "def totals():\n return make_simple_tsv_get_response(TOTALS_FILE, 'totals')", "def buildTrivialStatsTable(self, deltaSeriesCollection, klass=TRIVIAL_STATS_TABLE, style=''):\n tableWrapper = HTML().div()\n klass = '{} {}'.format(TABLE_SUMMARY, klass)\n table = tableWrapper.table(border='1', klass=klass, style=style)\n self.buildStatsTableHeader(table)\n tbody = table.tbody\n\n for i, deltaSeries in enumerate(deltaSeriesCollection, 1):\n row = tbody.tr\n row.td('{0:,}'.format(i), klass=TD_KEY)\n row.td(deltaSeries.beginProbeName, klass=TD_KEY)\n row.td(deltaSeries.endProbeName, klass=TD_KEY)\n row.td(DURATION_FORMAT.format(deltaSeries.getMin()))\n row.td(DURATION_FORMAT.format(deltaSeries.getMax()))\n row.td(DURATION_FORMAT.format(deltaSeries.getMedian()))\n row.td(DURATION_FORMAT.format(deltaSeries.getMean()))\n row.td(DURATION_FORMAT.format(deltaSeries.getPercentile(self.percentile1)))\n row.td(DURATION_FORMAT.format(deltaSeries.getPercentile(self.percentile2)))\n row.td(DURATION_FORMAT.format(deltaSeries.getStandardDeviation()))\n return tableWrapper", "def _repr_html_(self):\n return html_table(self)", "def _buildStatsTable(self, eventName, deltaSeriesCollection, benchmarkTlsMap):\n statsReport = ''\n if benchmarkTlsMap:\n benchmarkIndex = 0\n for benchmarkTls in benchmarkTlsMap.values():\n klass = TIME_POINT_STATS.format(benchmarkIndex)\n style = 'display: {}'.format('table' if benchmarkIndex == 0 else 'none')\n refDeltaSeriesCollection = benchmarkTls.deltaSeriesRepo.get(eventName, None)\n if refDeltaSeriesCollection:\n statsReport += str(\n self.buildDifferentialStatsTable(deltaSeriesCollection, refDeltaSeriesCollection, klass, style)\n )\n else:\n statsReport += str(self.buildTrivialStatsTable(deltaSeriesCollection, klass, style))\n benchmarkIndex += 1\n statsReport = self.benchmarkStatsContainerFmt.format(statsReport)\n else:\n statsReport += str(self.buildTrivialStatsTable(deltaSeriesCollection))\n return statsReport", "def cumulate(self):\n name = self.getName() + '_Cumulated'\n c_names = self.getColumnNames()\n # create the new table an add the first column to it\n t_cumulate = DataTable(name)\n time = self.getColumn(0)\n t_cumulate.addColumn(c_names[0],time)\n # for each other column, cumulate values \n for i in range(1,self.getNbColumns()):\n c = self.getColumn(i)\n N = [0]\n for j in range(1,len(c)):\n n = (c[j]+c[j-1])/2*(time[j]-time[j-1]) + N[-1]\n N.append(n)\n pass\n t_cumulate.addColumn(c_names[i],N)\n pass\n \n return t_cumulate", "def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n \n sharpe_ratio = create_sharpe_ratio(returns) #, periods=252*6.5*60) ??? 
\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n \n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)), \n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio), \n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)), \n (\"Drawdown Duration\", \"%d\" % dd_duration)]\n \n self.equity_curve.to_csv('equity.csv')\n \n return stats", "def getHtml(self):\n if len(self.rows)<1:\n return ''\n if self.useTableSorter:\n if self.tableAttr:\n h = '<table %s>\\n' % self.tableAttr\n else:\n h = '<table class=\"tablesorter\">\\n'\n h += '<thead>\\n'\n h += self.rows[0]\n h += '\\n</thead><tbody>\\n'\n h += '\\n'.join(self.rows[1:])\n h += '\\n</tbody></table>\\n'\n else:\n h = '<table%s>\\n' % sep(self.tableAttr)\n h += '\\n'.join(self.rows)\n h += '\\n</table>\\n'\n return h", "def get_html_tcc_report(db, last_rpt_time):\n rval = \"\"\n if not db.table_exists(table='tcc_data'):\n return rval\n\n rval = (\"<h2>%s</h2>\\n\" % tcc_sublib.report_title())\n\n checks = correct = error = 0\n\n rows = tcc_sublib.recent_records(last_rpt_time, db=db)\n for (t, l, h, c, e) in rows:\n checks += (h - l + 1)\n correct += c\n error += e\n\n rows = tcc_sublib.distinct_objects(db=db)\n t_check = len(rows)\n\n rows = tcc_sublib.distinct_objects(db=db,\n where=\"correct = 1\")\n t_correct = len(rows)\n c_obj_id_l = [x[0] for x in rows]\n\n t_error = 0\n erows = tcc_sublib.distinct_objects(db=db,\n where=\"correct <> 1\")\n for r in erows:\n if r[0] not in c_obj_id_l:\n t_error += 1\n\n rval += \"<pre>\\n\"\n rval += \" %s Checked Correct Errors\\n\" % (\" \" * 29)\n rval += (\" Since %-18s: %6d %6d %6d\\n\" %\n (util.ymdhms(last_rpt_time), checks, correct, error))\n rval += (\" Total: %s %6d %6d %6d\\n\" % (\" \" * 21,\n t_check,\n t_correct,\n t_error))\n rval += \"</pre>\\n\"\n\n return rval", "def to_html_table(self):\n td = '<td>'\n nwtd = '<td nowrap=\"true\">'\n ftd = '<td class=\"format\">'\n ctd = '<td class=\"cen\">'\n etd = '</td>'\n \n if self.is_power_onoff():\n out = td + 'Power On/Off' + etd\n else:\n out = nwtd + '<strong>' + self['target'].ljust(20) + '</strong>' + etd\n\n if 'Date' in self:\n out += ctd + self['Date'] + etd\n else:\n out += td + etd\n\n if 'UTstart' in self:\n out += ctd + self['UTstart'] + etd\n else:\n out += td + etd\n\n if 'UTend' in self:\n out += ctd + self['UTend'] + etd\n else:\n out += td + etd\n\n if 'exposure' in self:\n out += ctd + self['exposure'] + etd\n else:\n out += td + etd\n\n if 'sample' in self:\n out += ctd + self['sample'] + etd\n else:\n out += td + etd\n\n if 'nframe' in self:\n out += ctd + self['nframe'] + etd\n else:\n out += td + etd\n \n if self.is_power_onoff():\n out += (td + etd)*3\n else:\n speed = self['speed']\n out += ctd + self['filters'].ljust(11) + etd + ctd + self['x_bin'] + 'x' + self['y_bin'] + etd + ctd + speed + etd \n \n if self.number_windows() > 0:\n out += ctd + self['x1_size'].rjust(4) + 'x' + self['y1_size'].ljust(4) + etd + td + self['x1_start'].ljust(3) + etd + td + self['y1_start'].ljust(4) + etd\n else:\n out += (td + etd)*3\n \n if self.number_windows() > 1:\n out += ctd + self['x2_size'].rjust(4) + 'x' + self['y2_size'].ljust(4) + etd + td + self['x2_start'].ljust(3) + etd + td + self['y2_start'].ljust(4) + etd\n else:\n out += (td + etd)*3\n\n if 'grating' in self:\n out += ctd + self['grating'] + etd\n else:\n out += td + etd\n\n if 'slit_width' in self:\n out += ctd + self['slit_width'] + etd\n else:\n out += td + etd\n\n if 'slit_angle' in self:\n out += ctd + 
self['slit_angle'] + etd\n else:\n out += td + etd\n \n if 'ID' in self:\n out += ctd + self['ID'] + etd\n else:\n out += td + etd\n\n if 'PI' in self:\n out += ctd + self['PI'] + etd\n else:\n out += td + etd\n \n if 'Comment' in self:\n out += nwtd + self['Comment'] + etd\n else:\n out += td + etd\n\n return out", "def buildDifferentialStatsTable(self, deltaSeriesCollection, refDsc, klass, style):\n from xpedite.report.markup import getDeltaMarkup, getDeltaType\n klass = '{} {}'.format(TABLE_SUMMARY, klass)\n table = HTML().table(border='1', klass=klass, style=style)\n self.buildStatsTableHeader(table)\n tbody = table.tbody\n fmt = DURATION_FORMAT + ' ({1}' + DURATION_FORMAT_2 + ')'\n\n for i, deltaSeries in enumerate(deltaSeriesCollection, 1):\n row = tbody.tr\n row.td('{0:,}'.format(i), klass=TD_KEY)\n row.td(deltaSeries.beginProbeName, klass=TD_KEY)\n row.td(deltaSeries.endProbeName, klass=TD_KEY)\n\n delta = deltaSeries.getMin() - refDsc[i-1].getMin()\n row.td(fmt.format(deltaSeries.getMin(), getDeltaMarkup(delta), delta), klass=getDeltaType(delta))\n\n delta = deltaSeries.getMax() - refDsc[i-1].getMax()\n row.td(fmt.format(deltaSeries.getMax(), getDeltaMarkup(delta), delta), klass=getDeltaType(delta))\n\n delta = deltaSeries.getMedian() - refDsc[i-1].getMedian()\n row.td(fmt.format(deltaSeries.getMedian(), getDeltaMarkup(delta), delta), klass=getDeltaType(delta))\n\n delta = deltaSeries.getMean() - refDsc[i-1].getMean()\n row.td(fmt.format(deltaSeries.getMean(), getDeltaMarkup(delta), delta), klass=getDeltaType(delta))\n\n percentile1 = deltaSeries.getPercentile(self.percentile1)\n delta = percentile1 - refDsc[i-1].getPercentile(self.percentile1)\n row.td(fmt.format(percentile1, getDeltaMarkup(delta), delta), klass=getDeltaType(delta))\n\n percentile2 = deltaSeries.getPercentile(self.percentile2)\n delta = percentile2 - refDsc[i-1].getPercentile(self.percentile2)\n row.td(fmt.format(percentile2, getDeltaMarkup(delta), delta), klass=getDeltaType(delta))\n\n delta = deltaSeries.getStandardDeviation() - refDsc[i-1].getStandardDeviation()\n row.td(fmt.format(\n deltaSeries.getStandardDeviation(), getDeltaMarkup(delta), delta), klass=getDeltaType(delta))\n return table", "def summary_stats(self):\n capital_gains = self.df['values'].iloc[-1].sum() - self.tc.starting_cash\n total_return = capital_gains / self.tc.starting_cash\n days_invested = (self.df.index[-1] - self.df.index[0]).days\n annualized_returns = (total_return + 1) ** (365 / days_invested) - 1\n annualized_volatility = self.df['returns'].std() * (252 ** 0.5)\n sharpe = annualized_returns / annualized_volatility\n num_trades = self.trades.shape[0]\n stats = pd.Series(\n data=[capital_gains, total_return, annualized_returns, annualized_volatility, sharpe, num_trades],\n index=['Capital Gains', 'Total Return', 'Annualized Return', 'Annualized Volatility', 'Sharpe Ratio',\n 'Number of Trades']\n )\n return stats", "def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n\n sharpe_ratio = create_sharpe_ratio(returns)\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n if len(dd_duration) == 1:\n dd_duration = dd_duration[0]\n\n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)),\n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio),\n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)),\n (\"Drawdown Duration\", \"%s\" % dd_duration)]\n\n 
self.equity_curve.to_csv('equity.csv')\n self.positions.to_csv('positions.csv')\n self.prices.to_csv('prices.csv')\n\n return stats", "def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n \n sharpe_ratio = create_sharpe_ratio(returns, periods=252*6.5*60)\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n \n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)), \n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio), \n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)), \n (\"Drawdown Duration\", \"%d\" % dd_duration)]\n \n self.equity_curve.to_csv('equity.csv')\n \n return stats", "def to_html(self) -> str:\n if self.coverage < 0:\n coverage_str = '1e308'\n coverage_class = 'na'\n elif self.coverage == 0:\n coverage_str = '0'\n coverage_class = 'zero'\n else:\n coverage_str = str(self.coverage)\n coverage_class = 'all'\n\n sorted_branches = sorted(self.branches.values(), key=lambda s: s.id_)\n branches_html = ''.join(b.to_html() for b in sorted_branches)\n\n return '<tr id=\"line-{2}\" class=\"cov-health-{0}\"><td>{4}</td><td>{1}</td><td>{2}</td><td>{3}</td></tr>\\n'.format(\n coverage_class, coverage_str, self.linenum, escape(self.source),\n branches_html\n )" ]
[ "0.6796021", "0.63996667", "0.6175595", "0.61329174", "0.6100673", "0.6094698", "0.6058827", "0.60511935", "0.60229", "0.5993624", "0.5987919", "0.59840375", "0.59793216", "0.595735", "0.5949419", "0.5934658", "0.5930337", "0.59065324", "0.5865028", "0.58387583", "0.58330786", "0.5827222", "0.58049685", "0.5787476", "0.5784251", "0.5775809", "0.5761053", "0.57569414", "0.57469076", "0.5738022" ]
0.6549288
1
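The trailing fields above are easier to use once each row is handled as a structured record. The snippet below is a minimal sketch, assuming the rows are stored as JSON lines under the field names shown in this dump (query, document, negatives, negative_scores, document_score); the file name, the `margin` threshold, and the helper names are illustrative assumptions, not part of the source.

```python
import json
from typing import Dict, List


def load_records(path: str) -> List[Dict]:
    """Read one record per line from a JSON-lines file (path is illustrative)."""
    records = []
    with open(path, "r", encoding="utf-8") as handle:
        for line in handle:
            line = line.strip()
            if line:
                records.append(json.loads(line))
    return records


def hard_negatives(record: Dict, margin: float = 0.05) -> List[str]:
    """Return negatives whose retrieval score comes within `margin`
    of the positive document's score -- the 'hard' negatives."""
    doc_score = float(record["document_score"])
    pairs = zip(record["negatives"], record["negative_scores"])
    return [text for text, score in pairs if doc_score - float(score) <= margin]


# Illustrative usage: build (query, positive, hard negatives) triples.
# for rec in load_records("mask_retrieval.jsonl"):
#     triple = (rec["query"], rec["document"], hard_negatives(rec))
```

With the scores recorded above, a margin of 0.05 would keep only the highest-scoring negatives (those closest to the positive document's 0.6549288), which is the usual way such score lists are used to pick hard negatives for triplet-style training; the exact threshold is a tunable choice, not something specified by this record.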