| query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | metadata (dict) | negatives (listlengths 30-30) | negative_scores (listlengths 30-30) | document_score (stringlengths 4-10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
By default, Keras will create placeholders for the model's target, which will be fed with the target data during training. If instead you would like to use your own target tensors (in turn, Keras will not expect external Numpy data for these targets at training time), you can specify them via the target_tensors argument. It can be a single tensor (for a single-output model), a list of tensors, or a dict mapping output names to target tensors. | def target_tensors(self):
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def targets(self) -> Tensor:\n data_normalized = self._dataset_values.clone()\n data_normalized -= self._row_min\n data_normalized /= self._row_range\n _, outputs_normalized_transformed = self._transform(\n data_normalized[:, self._input_column_indices],\n data_normalized[:, self._output_column_indices],\n )\n assert outputs_normalized_transformed.ndim == 1 or (\n outputs_normalized_transformed.ndim == 2\n and outputs_normalized_transformed.size(1) == 1\n ), \".targets requires exactly one output column (to match TorchVision datasets).\"\n return outputs_normalized_transformed.view(-1).clone()",
"def eval_batch(self, outputs, target):\n raise NotImplementedError",
"def output_targets(self, input_targets):\n pass",
"def targets(self) -> Optional[jnp.ndarray]:\n pass",
"def add_model(self, input_data, target_data=None):\n # Consider signal matrix as an image with channels.\n height = self.config.num_steps\n width = self.config.time_stamps\n channels = self.config.channels\n batch_size = self.config.batch_size\n\n # input_data: (-1, height, width, channels)\n input_data = tf.reshape(input_data, [-1, channels, height, width])\n input_data = tf.transpose(input_data, perm=[0, 2, 3, 1])\n\n # module-1\n # conv/ReLU-1\n x = layer_conv_relu(input_data, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x_1 = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-2\n # conv/ReLU-1\n x = layer_conv_relu(x_1, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x_2 = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-3\n # conv/ReLU-1\n x = layer_conv_relu(x_2, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x_3 = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-4\n # conv/ReLU-1\n x = layer_conv_relu(x_3, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x_4 = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-5\n # conv/ReLU-1\n x = layer_conv_relu(x_4, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-6\n # conv/ReLU-1\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # concatenate\n x = concatenate([x, x_4], axis=-1)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-7\n # conv/ReLU-1\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # concatenate\n x = concatenate([x, x_3], axis=-1)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-8\n # conv/ReLU-1\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # concatenate\n x = concatenate([x, x_2], axis=-1)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/ReLU-3\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n\n # module-9\n # conv/ReLU-1\n x = layer_conv_relu(x, self.config.n1, self.config.f1, 
self.config.f3, self.config.regularizer)\n # concatenate\n x = concatenate([x, x_1], axis=-1)\n # conv/ReLU-2\n x = layer_conv_relu(x, self.config.n1, self.config.f1, self.config.f3, self.config.regularizer)\n # conv/tanh-3\n x = Convolution2D(self.config.n1, (self.config.f1, self.config.f3),\n activation='relu', padding='same',\n kernel_regularizer=self.config.regularizer,\n bias_regularizer=self.config.regularizer,\n data_format='channels_last',\n )(x)\n x = Activation('tanh')(x)\n\n # output x: (-1, height, width, channels)\n output = Convolution2D(channels, (self.config.f2, self.config.f2),\n activation='linear',\n padding='same',\n name='output',\n kernel_regularizer=self.config.regularizer,\n bias_regularizer=self.config.regularizer,\n data_format='channels_last'\n )(x)\n\n prediction = tf.transpose(output, perm=[0, 3, 1, 2])\n prediction = tf.reshape(prediction, [batch_size, height, width])\n return prediction",
"def model_inputs():\n # TODO: Implement Function\n inputs = tf.placeholder(tf.int32, shape=[None,None], name= \"input\")\n targets = tf.placeholder(tf.int32, shape=[None,None], name= \"targets\")\n lrate = tf.placeholder(tf.float32, name= \"learning_rate\")\n keep_prob = tf.placeholder(tf.float32, name= \"keep_prob\")\n target_seq_lenth = tf.placeholder(tf.int32, shape=[None], name= \"target_sequence_length\")\n max_target_len = tf.reduce_max(target_seq_lenth, name= 'max_target_len')\n source_seq_length = tf.placeholder(tf.int32, shape=[None], name= \"source_sequence_length\")\n return (inputs, targets, lrate, keep_prob, target_seq_lenth, max_target_len, source_seq_length)",
"def create_model(args, pretrained_embedding: np.ndarray, model_name='deep_q_network', trainable=True):\n\n state = Input(shape=(args.n_features,))\n model = None\n\n n, m = pretrained_embedding.shape\n print('shape', pretrained_embedding.shape)\n embedded = Embedding(n, m, embeddings_initializer=keras.initializers.constant(pretrained_embedding))(state)\n\n if model_name == \"deep_q_network\":\n print(\"Building \" + model_name + \" ...\")\n\n # First convolutional layer\n\n x = Dense(args.hidden_size, activation=K.relu)(embedded)\n x = Dropout(args.dropout)(x)\n x = Flatten()(x)\n y_pred = Dense(args.n_actions, trainable=trainable)(x)\n\n model = Model(inputs=state, outputs=y_pred)\n\n elif model_name == \"deep_q_network_double\":\n print(\"Building \" + model_name + \" ...\")\n\n x = Dense(args.hidden_size, activation=K.relu)(embedded)\n x = Dropout(args.dropout)(x)\n x = Flatten()(x)\n y_pred = Dense(args.n_actions, trainable=trainable)(x)\n\n model = Model(input=state, output=y_pred)\n\n elif model_name == \"deep_q_network_duel\":\n print(\"Building \" + model_name + \" ...\")\n\n x = Dense(args.hidden_size, activation=K.relu)(embedded)\n x = Dropout(args.dropout)(x)\n x = Flatten()(x)\n\n y_pred = Dense(args.n_actions, trainable=trainable)(x)\n\n # value output\n x_val = Dense(args.hidden_size, trainable=trainable)(x)\n # x_val = Activation('relu')(x_val)\n y_val = Dense(1, trainable=trainable)(x_val)\n\n # advantage output\n x_advantage = Dense(args.hidden_size, trainable=trainable)(x)\n # x_advantage = Activation('relu')(x_advantage)\n y_advantage = Dense(args.n_actions, trainable=trainable)(x_advantage)\n # mean advantage\n y_advantage_mean = Lambda(lambda x: K.mean(x, axis=1, keepdims=True))(y_advantage)\n\n y_q = Lambda(lambda x: x[0] + x[1] - x[2])([y_val, y_advantage, y_advantage_mean])\n\n model = Model(input=state, output=y_q)\n\n else:\n print(\"Model not supported\")\n exit(1)\n\n return model",
"def output_targets(self, input_targets):\n return input_targets",
"def check_loss_and_target_compatibility(targets, loss_fns, output_shapes):\n key_loss_fns = {\n losses.mean_squared_error, losses.binary_crossentropy,\n losses.categorical_crossentropy\n }\n key_loss_classes = (losses.MeanSquaredError, losses.BinaryCrossentropy,\n losses.CategoricalCrossentropy)\n for y, loss, shape in zip(targets, loss_fns, output_shapes):\n if y is None or loss is None or tensor_util.is_tf_type(y):\n continue\n if losses.is_categorical_crossentropy(loss):\n if y.shape[-1] == 1:\n raise ValueError('You are passing a target array of shape ' +\n str(y.shape) +\n ' while using as loss `categorical_crossentropy`. '\n '`categorical_crossentropy` expects '\n 'targets to be binary matrices (1s and 0s) '\n 'of shape (samples, classes). '\n 'If your targets are integer classes, '\n 'you can convert them to the expected format via:\\n'\n '```\\n'\n 'from keras.utils import to_categorical\\n'\n 'y_binary = to_categorical(y_int)\\n'\n '```\\n'\n '\\n'\n 'Alternatively, you can use the loss function '\n '`sparse_categorical_crossentropy` instead, '\n 'which does expect integer targets.')\n\n is_loss_wrapper = isinstance(loss, losses.LossFunctionWrapper)\n if (isinstance(loss, key_loss_classes) or (is_loss_wrapper and\n (loss.fn in key_loss_fns))):\n for target_dim, out_dim in zip(y.shape[1:], shape[1:]):\n if out_dim is not None and target_dim != out_dim:\n loss_name = loss.name\n if loss_name is None:\n loss_type = loss.fn if is_loss_wrapper else type(loss)\n loss_name = loss_type.__name__\n raise ValueError('A target array with shape ' + str(y.shape) +\n ' was passed for an output of shape ' + str(shape) +\n ' while using as loss `' + loss_name + '`. '\n 'This loss expects targets to have the same shape '\n 'as the output.')",
"def output_to_target(output, width, height):\n if isinstance(output, torch.Tensor):\n output = output.cpu().numpy()\n\n targets = []\n for i, o in enumerate(output):\n if o is not None:\n for pred in o:\n box = pred[:4]\n w = (box[2] - box[0]) / width\n h = (box[3] - box[1]) / height\n x = box[0] / width + w / 2\n y = box[1] / height + h / 2\n conf = pred[4]\n cls = int(pred[5])\n\n targets.append([i, cls, x, y, w, h, conf])\n\n return np.array(targets)",
"def output_to_target(output, width, height):\n if isinstance(output, torch.Tensor):\n output = output.cpu().numpy()\n\n targets = []\n for i, o in enumerate(output):\n if o is not None:\n for pred in o:\n box = pred[:4]\n w = (box[2] - box[0]) / width\n h = (box[3] - box[1]) / height\n x = box[0] / width + w / 2\n y = box[1] / height + h / 2\n conf = pred[4]\n cls = int(pred[5])\n\n targets.append([i, cls, x, y, w, h, conf])\n\n return np.array(targets)",
"def align_targets(predictions, targets):\n if (getattr(predictions, 'broadcastable', None) == (False, True) and\n getattr(targets, 'ndim', None) == 1):\n targets = as_theano_expression(targets).dimshuffle(0, 'x')\n return predictions, targets",
"def targets_placeholder(self):",
"def fit(self, features, targets):\n self.model_features = features\n self.model_targets= targets",
"def loss_fn(self, targets, outputs, model):",
"def predict_target(\n model: Model,\n *,\n # exactly one of them is None\n head: Union[None, int, str] = None,\n relation: Union[None, int, str] = None,\n tail: Union[None, int, str] = None,\n #\n triples_factory: Optional[TriplesFactory] = None,\n targets: Union[None, torch.LongTensor, Sequence[Union[int, str]]] = None,\n mode: Optional[InductiveMode] = None,\n) -> Predictions:\n # note: the models' predict method takes care of setting the model to evaluation mode\n\n # get input & target\n target, batch, other_col_ids = _get_input_batch(factory=triples_factory, head=head, relation=relation, tail=tail)\n\n # get label-to-id mapping and prediction targets\n labels, ids, targets = _get_targets(\n ids=targets, triples_factory=triples_factory, device=model.device, entity=relation is not None\n )\n\n # get scores\n scores = model.predict(batch, full_batch=False, mode=mode, ids=targets, target=target).squeeze(dim=0)\n if ids is None:\n ids = range(len(scores))\n\n # note: maybe we want to expose these scores, too?\n if target == LABEL_RELATION and model.use_inverse_triples:\n ids_t = torch.as_tensor(ids)\n non_inv_mask = ~model.relation_inverter.is_inverse(ids_t)\n ids = ids_t[non_inv_mask].tolist()\n scores = scores[non_inv_mask]\n\n # create raw dataframe\n data = {f\"{target}_id\": ids, \"score\": scores.tolist()}\n if labels is not None:\n data[f\"{target}_label\"] = labels\n df = pandas.DataFrame(data=data).sort_values(\"score\", ascending=False)\n return TargetPredictions(df=df, factory=triples_factory, target=target, other_columns_fixed_ids=other_col_ids)",
"def output(x_tensor, num_outputs):\n # TODO: Implement Function\n y = tf.layers.dense(x_tensor,num_outputs)\n return y",
"def forward(self, inputs: Tensor, targets: Tensor, **kwargs) -> Tensor:\n return NotImplemented",
"def output(x_tensor, num_outputs):\n shape = x_tensor.get_shape().as_list()\n weight = tf.Variable(tf.truncated_normal([shape[-1], num_outputs], stddev=0.1))\n bias = tf.Variable(tf.zeros(num_outputs))\n return tf.add(tf.matmul(x_tensor, weight), bias)",
"def add_prediction_op(self, outputs):\n dropout_rate = self.dropout_placeholder\n U = tf.get_variable(\"OutputWeights\", shape = (self.config.hidden_size, self.config.n_classes), initializer = tf.contrib.layers.xavier_initializer())\n b_2 = tf.get_variable(\"OutputBias\", shape = (self.config.n_classes), initializer = tf.zeros_initializer())\n\n outputs = tf.nn.dropout(outputs, dropout_rate) \n\n outputs = tf.reshape(outputs, [-1, self.config.hidden_size]) \n preds = tf.add(tf.matmul(outputs, U), b_2)\n preds = tf.reshape(preds, [self.config.batch_size, -1, self.config.n_classes])\n #preds = tf.Print(preds, [preds], summarize = self.config.n_classes)\n return preds",
"def model(x, hidden_layer_sizes, weights, biases):\n n_samples = x.get_shape().as_list()[0]\n\n # Calculate the activation of the first hidden layer\n expanded_weights = tf.tile(tf.expand_dims(tf.transpose(weights[0]), axis=0), multiples=[n_samples, 1, 1])\n z = tf.add(tf.matmul(x, expanded_weights), biases[0])\n h = tf.sigmoid(z)\n\n # Calculate the activation of the remaining hidden layers\n for i in range(hidden_layer_sizes.size - 1):\n expanded_weights = tf.tile(tf.expand_dims(tf.transpose(weights[i+1]), axis=0), multiples=[n_samples, 1, 1])\n z = tf.add(tf.matmul(h, expanded_weights), biases[i + 1])\n h = tf.sigmoid(z)\n\n # Calculating the output of the last layer\n expanded_weights = tf.tile(tf.expand_dims(tf.transpose(weights[-1]), axis=0), multiples=[n_samples, 1, 1])\n z = tf.add(tf.matmul(h, expanded_weights), biases[-1], name=\"output\")\n\n z_squeezed = tf.squeeze(z, axis=[-1])\n\n return z_squeezed",
"def forward(\n self, output: Union[torch.Tensor, List[torch.Tensor]], target: torch.Tensor\n ):\n if isinstance(output, torch.Tensor):\n output = [output]\n assert isinstance(\n output, list\n ), \"Model output should be a list of tensors. Got Type {}\".format(type(output))\n assert torch.is_tensor(target), \"Target should be a tensor. Got Type {}\".format(\n type(target)\n )\n\n loss = 0\n for i, pred in enumerate(output):\n if i >= len(self._losses):\n self._losses.append(self._create_loss_function())\n\n assert (\n target.max().item() < pred.shape[1]\n ), f\"pred.shape[1]={pred.shape[1]} and target.max().item()={target.max().item()}\"\n\n if self._normalize_output:\n pred = nn.functional.normalize(pred, dim=1, p=2)\n\n if self._label_smoothing > 0.0:\n target = self.apply_label_smoothing(\n target,\n num_labels=pred.shape[1],\n label_smoothing=self._label_smoothing,\n )\n\n loss += self._losses[i](pred / self._temperature, target)\n return loss",
"def forward(self, output: torch.Tensor, target: torch.Tensor, label_lengths: torch.LongTensor) ->torch.Tensor:\n orig_shapes = output.size(), target.size()\n output = output.view(-1, self.tgt_vocab_size)\n target = target.view(-1)\n model_prob = self.one_hot.repeat(target.size(0), 1)\n model_prob = model_prob\n model_prob.scatter_(1, target.unsqueeze(1), self.confidence)\n model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)\n output = output.view(orig_shapes[0])\n model_prob = model_prob.view(orig_shapes[0])\n return tx.losses.sequence_softmax_cross_entropy(labels=model_prob, logits=output, sequence_length=label_lengths, average_across_batch=False, sum_over_timesteps=False)",
"def infer_target_type(input: torch.Tensor, targets: torch.Tensor) -> str:\n if input.shape == targets.shape:\n return 'one_hot'\n elif input.ndim == targets.ndim + 1:\n return 'indices'\n else:\n raise RuntimeError(f'Unable to infer indices or one_hot. Targets has shape {targets.shape}'\n f' and the inputs to cross entropy has shape {input.shape}. For one_hot, '\n 'expect targets.shape == inputs.shape. For indices, expect '\n 'inputs.ndim == targets.ndim + 1')",
"def _learn_model(data, desc_ids, targ_ids, learner, out_kind=\"numeric\", **kwargs):\n\n i, o = data[:, desc_ids], data[:, targ_ids]\n\n if i.ndim == 1:\n # We always want 2D inputs\n i = i.reshape(-1, 1)\n if o.shape[1] == 1:\n # If output is single variable, we need 1D matrix\n o = o.ravel()\n\n try:\n model = learner(**kwargs)\n model.fit(i, o)\n except ValueError as e:\n print(e)\n\n # Bookkeeping\n model.desc_ids = desc_ids\n model.targ_ids = targ_ids\n model.out_kind = out_kind\n return model",
"def build_model(input_classes,output_classes):\n dimensions = 20\n inputs = []\n embedded_outputs = []\n for i in input_classes:\n input_layer = Input((1,))\n inputs.append(input_layer)\n embedder = Embedding(input_dim=i,output_dim=dimensions,input_length=1,embeddings_constraint=UnitNorm(axis=0))\n embedded_layer = embedder(input_layer)\n embedded_outputs.append(embedded_layer)\n\n embedded_concats = Concatenate()(embedded_outputs)\n flatten_layer = Flatten()\n\n dense_layer = Dense(output_classes)\n\n flattened_output = flatten_layer(embedded_concats)\n dense_output = dense_layer(flattened_output)\n\n # dense_output = dense_layer(embedded_concats)\n\n model = Model(inputs,dense_output)\n print(model.summary())\n model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')\n\n return model",
"def train(self, inputs_list, targets_list):\n # convert inputs list to 2d array\n inputs = cupy.array(inputs_list, ndmin=2).T\n targets = cupy.array(targets_list, ndmin=2).T\n\n # calculate signals into hidden layer\n hidden_inputs = cupy.dot(self.wih, inputs)\n # calculate the signals emerging from hidden layer\n hidden_outputs = self.activation_function(hidden_inputs)\n\n # calcute signals into final output layer\n final_inputs = cupy.dot(self.who, hidden_outputs)\n # calculate signals into final output layer\n final_outputs = self.activation_function(final_inputs)\n\n # output layer error is the (target - actual)\n output_errors = targets - final_outputs\n # hidden layer error is the output_errors, split by weights, recombined at hidden nodes\n hidden_errors = cupy.dot(self.who.T, output_errors)\n\n # update the weights for the links between the hidden and output layers\n # S' = S(1 - S)\n # dE / Dw = K * e * o * (1 - o)\n self.who += self.lr * cupy.dot((output_errors * final_outputs * (\n 1.0 - final_outputs)), cupy.transpose(hidden_outputs))\n self.wih += self.lr * \\\n cupy.dot((hidden_errors * hidden_outputs *\n (1.0 - hidden_outputs)), cupy.transpose(inputs))\n\n pass",
"def predict_from(self, inputs, to_layers):",
"def forward(self, y_pred: Dict[str, torch.Tensor], target: Union[torch.Tensor, rnn.PackedSequence]) -> torch.Tensor:\n # unpack\n if isinstance(target, rnn.PackedSequence):\n target, lengths = rnn.pad_packed_sequence(target, batch_first=True)\n # batch sizes reside on the CPU by default -> we need to bring them to GPU\n lengths = lengths.to(target.device)\n else:\n lengths = torch.ones(target.size(0), device=target.device, dtype=torch.long) * target.size(1)\n assert not target.requires_grad\n\n # calculate loss with \"none\" reduction\n if target.ndim == 3:\n weight = target[..., 1]\n target = target[..., 0]\n else:\n weight = None\n\n losses = self.loss(y_pred, target)\n # weight samples\n if weight is not None:\n losses = losses * weight.unsqueeze(-1)\n\n # mask loss\n mask = torch.arange(target.size(1), device=target.device).unsqueeze(0) >= lengths.unsqueeze(-1)\n if losses.ndim > 2:\n mask = mask.unsqueeze(-1)\n dim_normalizer = losses.size(-1)\n else:\n dim_normalizer = 1.0\n # reduce to one number\n if self.reduction == \"none\":\n loss = losses.masked_fill(mask, float(\"nan\"))\n else:\n if self.reduction == \"mean\":\n losses = losses.masked_fill(mask, 0.0)\n loss = losses.sum() / lengths.sum() / dim_normalizer\n elif self.reduction == \"sqrt-mean\":\n losses = losses.masked_fill(mask, 0.0)\n loss = losses.sum() / lengths.sum() / dim_normalizer\n loss = loss.sqrt()\n assert not torch.isnan(loss), (\n \"Loss should not be nan - i.e. something went wrong \"\n \"in calculating the loss (e.g. log of a negative number)\"\n )\n assert torch.isfinite(\n loss\n ), \"Loss should not be infinite - i.e. something went wrong (e.g. input is not in log space)\"\n return loss",
"def layer_output(x_tensor, num_outputs):\n weights = tf.Variable(\n tf.random_normal(\n [x_tensor.shape[1].value, num_outputs],\n stddev=0.1\n )\n )\n bias = tf.Variable(tf.zeros([num_outputs]))\n return tf.add(tf.matmul(x_tensor, weights), bias)"
] | [
"0.5898726",
"0.565797",
"0.55954695",
"0.55766785",
"0.555807",
"0.5468719",
"0.53791976",
"0.5375494",
"0.53694284",
"0.53404087",
"0.53404087",
"0.53265506",
"0.5286209",
"0.52710843",
"0.52633405",
"0.5224834",
"0.5223838",
"0.52131724",
"0.5201254",
"0.51955396",
"0.5181284",
"0.5171625",
"0.5170315",
"0.51683235",
"0.51534355",
"0.515113",
"0.514788",
"0.5144749",
"0.5134958",
"0.5123693"
] | 0.63880855 | 0 |
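
The query cell in the row above describes the legacy Keras `target_tensors` argument. Below is a minimal, hedged sketch of how it was passed to `compile` under the standalone Keras 2.x / TF 1.x-era API; the model, target tensor, and optimizer are illustrative assumptions, not part of the dataset, and the argument was removed from later TF 2.x Keras releases.

```python
# Hedged sketch of the legacy Keras `target_tensors` usage (Keras 2.x / TF 1.x era).
import tensorflow as tf
from tensorflow import keras

inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)

# A tensor already living in the graph that should serve as the training target,
# so no external Numpy target array is fed at training time.
target = tf.constant([[1.0]] * 8)

model.compile(optimizer='rmsprop',
              loss='mse',
              target_tensors=[target])  # single-output model: a one-element list
```

For a multi-output model the same argument accepts a list of tensors or a dict keyed by output name, mirroring the description in the query cell.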
Tests settings proxies creation and dereferencing | def test_settings_proxies_creation() -> None:
settings = Settings()
settings_proxy = settings.create_proxy() # We have one proxy
assert len(settings._proxies) == 1
second_proxy = settings.create_proxy() # Now we have two proxies
assert len(settings._proxies) == 2
# We are creating the third one, but the original `settings_proxy` is dereferenced
settings_proxy = settings.create_proxy()
assert len(settings._proxies) == 2
del second_proxy
assert len(settings._proxies) == 1
del settings_proxy
assert len(settings._proxies) == 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_settings_proxy_properties_setting(parameters: Dict[str, Any]) -> None:\n settings = Settings()\n settings_proxy = settings.create_proxy()\n\n for key, value in parameters.items():\n if isinstance(value, (int, float)):\n assert settings.__getattribute__(key) == settings_proxy.__getattribute__(key)\n else:\n assert settings.__getattribute__(key) == str(settings_proxy.__getattribute__(key))\n\n for key, value in parameters.items():\n settings.__setattr__(key, value)\n\n for key, value in parameters.items():\n if isinstance(value, (int, float)):\n assert settings.__getattribute__(key) == settings_proxy.__getattribute__(key)\n else:\n assert settings.__getattribute__(key) == str(settings_proxy.__getattribute__(key))",
"def test_create_hyperflex_proxy_setting_policy(self):\n pass",
"def test_patch_hyperflex_proxy_setting_policy(self):\n pass",
"def settings():\n return SettingsMock.instance()",
"def test_update_hyperflex_proxy_setting_policy(self):\n pass",
"def test_act_on_settings(self):\n pass # TODO(tlarsen)",
"def test_act_on_settings(self):\n pass # TODO(tlarsen)",
"def setUp(self):\n self.proxyServices = [\n Host('web', '127.0.0.1:8080', 'prod', leastc, 'host1', '127.0.0.1:7001', True),\n Host('web', '127.0.0.1:8080', 'prod', leastc, 'host2', '127.0.0.1:7002'),\n Host('web', '127.0.0.1:8080', 'prod', leastc, 'host3', '127.0.0.1:7003'),\n Host('web', '127.0.0.1:8080', 'test', leastc, 'host4', '127.0.0.1:7004', False),\n Host('web', '127.0.0.1:8080', 'test', leastc, 'host5', '127.0.0.1:7005'),\n Host('web', '127.0.0.1:8080', 'test', leastc, 'host6', '127.0.0.1:7006'),\n Host('dns', '127.0.0.1:8053', 'corp', roundr, 'host7', '127.0.0.1:7007', True),\n Host('dns', '127.0.0.1:8053', 'corp', roundr, 'host8', '127.0.0.1:7008'),\n ]",
"def test_settings_instantiation(self):\n ## no settings passed on instantiation\n bd = BorrowDirect() # no settings info\n self.assertEqual(\n True, isinstance(bd, BorrowDirect) )\n ## dict settings\n settings_dict = {} ## empty dct\n bd = BorrowDirect( settings_dict )\n self.assertEqual(\n None, bd.UNIVERSITY_CODE )\n settings_dict = { 'UNIVERSITY_CODE': '123' } ## populated dct\n bd = BorrowDirect( settings_dict )\n self.assertEqual(\n '123', bd.UNIVERSITY_CODE )\n ## module settings\n s = imp.new_module( 'settings' ) ## empty module\n bd = BorrowDirect( s )\n self.assertEqual(\n None, bd.UNIVERSITY_CODE )\n s = imp.new_module( 'settings' ) ## populated module\n s.UNIVERSITY_CODE = '234'\n bd = BorrowDirect( s )\n self.assertEqual(\n '234', bd.UNIVERSITY_CODE )",
"def testGetConfig():\n configs = GetConfig()\n # print(configs.host_ip)\n # print(configs.proxy_local)\n \n # print(configs.proxy_online)\n # print(configs.user_img_url)\n # print(configs.user_login_url)\n print(configs.user_start_id)\n\n # assert isinstance(configs.proxy_getter_functions, list)\n # print(configs.proxy_getter_functions)",
"def test_settings_doesnt_break(self):\r\n self.settingsDeploy()",
"def test_configure_proxy(matrix):\n mock_proxy = mock.Mock()\n matrix.charm_config[\"enable-tls\"] = False\n matrix.charm_config[\"enable-federation\"] = False\n matrix.charm_config[\"external-domain\"] = \"mock.external\"\n matrix.charm_config[\"prefer-internal-ip\"] = True\n matrix.configure_proxy(mock_proxy)\n assert mock_proxy.configure.called\n assert mock_proxy.configure.call_args == mock.call(\n [\n {\n \"mode\": \"http\",\n \"external_port\": 80,\n \"internal_host\": \"10.10.10.10\",\n \"internal_port\": 8008,\n \"subdomain\": \"mock.external\",\n }\n ]\n )\n\n \"\"\"Test configure_proxy with internal IP preference and internal host preference.\"\"\"\n mock_proxy = mock.Mock()\n matrix.charm_config[\"enable-tls\"] = False\n matrix.charm_config[\"enable-federation\"] = False\n matrix.charm_config[\"external-domain\"] = \"mock.external\"\n matrix.charm_config[\"prefer-internal-ip\"] = True\n matrix.configure_proxy(mock_proxy)\n assert mock_proxy.configure.called\n assert mock_proxy.configure.call_args == mock.call(\n [\n {\n \"mode\": \"http\",\n \"external_port\": 80,\n \"internal_host\": \"10.10.10.10\",\n \"internal_port\": 8008,\n \"subdomain\": \"mock.external\",\n }\n ]\n )\n\n \"\"\"Test configure_proxy.\"\"\"\n mock_proxy = mock.Mock()\n matrix.charm_config[\"enable-tls\"] = False\n matrix.charm_config[\"enable-federation\"] = False\n matrix.charm_config[\"external-domain\"] = \"\"\n matrix.charm_config[\"prefer-internal-ip\"] = False\n matrix.configure_proxy(mock_proxy)\n assert mock_proxy.configure.called\n assert mock_proxy.configure.call_args == mock.call(\n [\n {\n \"mode\": \"http\",\n \"external_port\": 80,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 8008,\n \"subdomain\": \"mock.fqdn\",\n }\n ]\n )\n\n \"\"\"Test configure_proxy with manual external domain.\"\"\"\n mock_proxy = mock.Mock()\n matrix.charm_config[\"enable-tls\"] = False\n matrix.charm_config[\"enable-federation\"] = False\n matrix.charm_config[\"external-domain\"] = \"matrix.mockhost\"\n matrix.configure_proxy(mock_proxy)\n assert mock_proxy.configure.called\n assert mock_proxy.configure.call_args == mock.call(\n [\n {\n \"mode\": \"http\",\n \"external_port\": 80,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 8008,\n \"subdomain\": \"matrix.mockhost\",\n }\n ]\n )\n\n # Test HTTPS\n mock_proxy.reset_mock()\n matrix.charm_config[\"enable-tls\"] = True\n matrix.charm_config[\"enable-federation\"] = False\n matrix.charm_config[\"external-domain\"] = \"\"\n matrix.configure_proxy(mock_proxy)\n assert mock_proxy.configure.called\n assert mock_proxy.configure.call_args == mock.call(\n [\n {\n \"mode\": \"http\",\n \"external_port\": 443,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 8008,\n \"subdomain\": \"mock.fqdn\",\n }\n ]\n )\n\n # Test manual server name\n mock_proxy.reset_mock()\n matrix.charm_config[\"enable-tls\"] = False\n matrix.charm_config[\"enable-federation\"] = False\n matrix.charm_config[\"server-name\"] = \"manual.mock.host\"\n matrix.charm_config[\"external-domain\"] = \"\"\n matrix.configure_proxy(mock_proxy)\n assert mock_proxy.configure.called\n assert mock_proxy.configure.call_args == mock.call(\n [\n {\n \"mode\": \"http\",\n \"external_port\": 80,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 8008,\n \"subdomain\": \"manual.mock.host\",\n }\n ]\n )\n\n # Test manual server name with external domain specified\n mock_proxy.reset_mock()\n matrix.charm_config[\"enable-tls\"] = False\n matrix.charm_config[\"enable-federation\"] = False\n 
matrix.charm_config[\"server-name\"] = \"manual.mock.host\"\n matrix.charm_config[\"external-domain\"] = \"matrix.manual.mock.host\"\n matrix.configure_proxy(mock_proxy)\n assert mock_proxy.configure.called\n assert mock_proxy.configure.call_args == mock.call(\n [\n {\n \"mode\": \"http\",\n \"external_port\": 80,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 8008,\n \"subdomain\": \"matrix.manual.mock.host\",\n }\n ]\n )\n\n # Test HTTPS with federation enabled\n mock_proxy.reset_mock()\n matrix.charm_config[\"enable-tls\"] = True\n matrix.charm_config[\"enable-federation\"] = True\n matrix.charm_config[\"server-name\"] = \"\"\n matrix.charm_config[\"external-domain\"] = \"\"\n matrix.configure_proxy(mock_proxy)\n assert mock_proxy.configure.called\n assert mock_proxy.configure.call_args == mock.call(\n [\n {\n \"mode\": \"http\",\n \"external_port\": 443,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 8008,\n \"subdomain\": \"mock.fqdn\",\n },\n {\n \"mode\": \"tcp+tls\",\n \"external_port\": 8448,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 8448,\n }\n ]\n )\n\n # Test HTTPS with federation enabled and manual server name\n mock_proxy.reset_mock()\n matrix.charm_config[\"enable-tls\"] = True\n matrix.charm_config[\"enable-federation\"] = True\n matrix.charm_config[\"server-name\"] = \"manual.mock.host\"\n matrix.charm_config[\"external-domain\"] = \"\"\n matrix.configure_proxy(mock_proxy)\n assert mock_proxy.configure.called\n assert mock_proxy.configure.call_args == mock.call(\n [\n {\n \"mode\": \"http\",\n \"external_port\": 443,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 8008,\n \"subdomain\": \"manual.mock.host\",\n },\n {\n \"mode\": \"tcp+tls\",\n \"external_port\": 8448,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 8448,\n }\n ]\n )\n\n # Test HTTPS with federation enabled\n mock_proxy.reset_mock()\n matrix.charm_config[\"enable-tls\"] = True\n matrix.charm_config[\"enable-federation\"] = True\n matrix.charm_config[\"server-name\"] = \"manual.mock.host\"\n matrix.charm_config[\"external-domain\"] = \"matrix.manual.mock.host\"\n matrix.configure_proxy(mock_proxy)\n assert mock_proxy.configure.called\n assert mock_proxy.configure.call_args == mock.call(\n [\n {\n \"mode\": \"http\",\n \"external_port\": 443,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 8008,\n \"subdomain\": \"matrix.manual.mock.host\",\n },\n {\n \"mode\": \"tcp+tls\",\n \"external_port\": 8448,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 8448,\n }\n ]\n )\n\n # Test IRC with TLS enabled\n mock_proxy.reset_mock()\n matrix.charm_config[\"enable-tls\"] = True\n matrix.charm_config[\"enable-federation\"] = True\n matrix.charm_config[\"server-name\"] = \"manual.mock.host\"\n matrix.charm_config[\"external-domain\"] = \"matrix.manual.mock.host\"\n matrix.charm_config[\"enable-ircd\"] = True\n matrix.configure_proxy(mock_proxy)\n assert mock_proxy.configure.called\n assert mock_proxy.configure.call_args == mock.call(\n [\n {\n \"mode\": \"http\",\n \"external_port\": 443,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 8008,\n \"subdomain\": \"matrix.manual.mock.host\",\n },\n {\n \"mode\": \"tcp+tls\",\n \"external_port\": 8448,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 8448,\n },\n {\n \"mode\": \"tcp+tls\",\n \"external_port\": 6697,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 6667,\n },\n ]\n )\n\n # Test IRC without TLS enabled\n mock_proxy.reset_mock()\n matrix.charm_config[\"enable-tls\"] = 
False\n matrix.configure_proxy(mock_proxy)\n assert mock_proxy.configure.called\n assert mock_proxy.configure.call_args == mock.call(\n [\n {\n \"mode\": \"http\",\n \"external_port\": 80,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 8008,\n \"subdomain\": \"matrix.manual.mock.host\",\n },\n {\n \"mode\": \"tcp\",\n \"external_port\": 8448,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 8448,\n },\n {\n \"mode\": \"tcp\",\n \"external_port\": 6667,\n \"internal_host\": \"mock.fqdn\",\n \"internal_port\": 6667,\n },\n ]\n )",
"def settings():\n raise NotImplementedError # pragma: nocoverage",
"def test_constructor(self):\n # Build the Settings objects\n self.assertEqual(self.extension, self.settings.extension)\n\n # Ensure that the registration settings dict gets\n # added to this Settings\n self.assertEqual(self.test_dict['test_key1'],\n self.settings['test_key1'])",
"def __init__( settings={} ):",
"def test_04_proxy_uri(self):\n\n pobj = publisher.ProxyURI(\"http://example.com\")\n self.assert_(pobj.uri == \"http://example.com\")\n\n tcert = os.path.join(self.test_root, \"test.cert\")\n tkey = os.path.join(self.test_root, \"test.key\")\n # check that we can't set several RepositoryURI attributes\n bad_props = {\n \"priority\": 1,\n \"ssl_cert\": tcert,\n \"ssl_key\": tkey,\n \"trailing_slash\": False\n }\n\n pobj = publisher.ProxyURI(\"http://example.com\")\n for prop in bad_props:\n self.assertRaises(ValueError,\n setattr, pobj, prop, bad_props[prop])\n\n # check bad values for system\n self.assertRaises(api_errors.BadRepositoryAttributeValue,\n setattr, pobj, \"system\", \"Carrots\")\n self.assertRaises(api_errors.BadRepositoryAttributeValue,\n setattr, pobj, \"system\", None)\n\n # check that we can set URI values that RespositoryURI would\n # choke on\n uri = \"http://user:pass@server\"\n pobj.uri = uri\n self.assert_(pobj.uri == uri)\n\n # check that setting system results in uri being overridden\n pobj.system = True\n self.assert_(pobj.system == True)\n self.assert_(pobj.uri == publisher.SYSREPO_PROXY)\n\n # check that clearing system also clears uri\n pobj.system = False\n self.assert_(pobj.system == False)\n self.assert_(pobj.uri == None)",
"def test_init():\n x = random.randint(0, 65536)\n y = random.randint(0, 65536)\n z = {'a': random.randint(0, 65536), 'b': random.randint(0, 65536)}\n o = SampleProxy(x=x, y=y, z=z)\n assert(o.x == x)\n assert(o.y == y)\n assert(o.z == z)",
"def set_proxy(self):",
"def test_get_mt_settings(self):\n pass",
"def switch_proxy(self, proxy):",
"def set_proxies(proxy_dict={}):\n global proxies\n proxies = proxy_dict",
"def test_get_hyperflex_proxy_setting_policy_list(self):\n pass",
"def test_settings_restored(self) -> None:\n from django.conf import settings\n\n assert TestLiveServer._test_settings_before_run is True # type: ignore[attr-defined]\n assert (\n f\"{settings.__class__.__module__}.{settings.__class__.__name__}\"\n == \"django.conf.Settings\"\n )\n assert settings.ALLOWED_HOSTS == [\"testserver\"]",
"def test_delete_hyperflex_proxy_setting_policy(self):\n pass",
"def test_reverse_proxy_config():\n\n class ReverseProxyConfig(TestingConfig):\n REVERSE_PROXY = \"1,2,3,4\"\n\n app = create_ctfd(config=ReverseProxyConfig)\n with app.app_context():\n assert app.wsgi_app.x_for == 1\n assert app.wsgi_app.x_proto == 2\n assert app.wsgi_app.x_host == 3\n assert app.wsgi_app.x_port == 4\n assert app.wsgi_app.x_prefix == 0\n destroy_ctfd(app)\n\n class ReverseProxyConfig(TestingConfig):\n REVERSE_PROXY = \"true\"\n\n app = create_ctfd(config=ReverseProxyConfig)\n with app.app_context():\n assert app.wsgi_app.x_for == 1\n assert app.wsgi_app.x_proto == 1\n assert app.wsgi_app.x_host == 1\n assert app.wsgi_app.x_port == 1\n assert app.wsgi_app.x_prefix == 1\n destroy_ctfd(app)",
"def setUp(self):\n\n apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()\n\n apiproxy_stub_map.apiproxy.RegisterStub('memcache',\n memcache_stub.MemcacheServiceStub())\n\n self.stub = apiproxy_stub_map.apiproxy.GetStub('memcache')",
"def test_need_proxy(self):\n os.environ['no_proxy'] = 'blah.com,blah2.com'\n self.assertTrue(dockerv2.need_proxy('proxy.blah3.com'))\n self.assertFalse(dockerv2.need_proxy('proxy.blah.com'))",
"def setUp(self):\n client.theResolver = FakeResolver()\n self.hostname = 'example.com'\n self.ghbntest = 'getHostByNameTest'",
"def test_settings(self):\n\n wf._items = []\n\n sys.argv = ['drive.py', '>']\n main(None)\n self.assertEqual(len(wf._items), 4)\n self.assertEqual(wf._items[0].title, SETTINGS['LOGIN']['title'])\n self.assertEqual(wf._items[1].title, SETTINGS['LOGOUT']['title'])\n self.assertEqual(wf._items[2].title, SETTINGS['CLEAR_CACHE']['title'])\n self.assertEqual(wf._items[3].title, SETTINGS['SET_CACHE']['title'] % '[seconds]')\n wf._items = []",
"def testConfigA(self):\n assert type(self.config) == dict, \"Read setting not returning a dictionary\""
] | [
"0.70535135",
"0.69610393",
"0.6671404",
"0.6658353",
"0.6654484",
"0.6590051",
"0.6590051",
"0.6581018",
"0.6452413",
"0.6448714",
"0.62878704",
"0.6280929",
"0.6219918",
"0.6213459",
"0.62046504",
"0.6198726",
"0.61963135",
"0.61654794",
"0.61559033",
"0.6146009",
"0.6126499",
"0.60178703",
"0.601564",
"0.59701884",
"0.5957728",
"0.59398705",
"0.5925679",
"0.59063035",
"0.58924633",
"0.5889252"
] | 0.7889519 | 0 |
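
The test in the row above implies a `Settings` class whose proxies are tracked by weak references, so a proxy that loses its last strong reference drops out of `settings._proxies`. The following is a hypothetical minimal sketch consistent with that behaviour; the `SettingsProxy` class and the `WeakSet` choice are assumptions, not the tested library's actual implementation.

```python
import weakref


class SettingsProxy:
    """Hypothetical lightweight view onto a Settings instance."""

    def __init__(self, settings):
        self._settings = settings


class Settings:
    def __init__(self):
        # WeakSet membership lapses as soon as a proxy is garbage-collected.
        self._proxies = weakref.WeakSet()

    def create_proxy(self):
        proxy = SettingsProxy(self)
        self._proxies.add(proxy)
        return proxy
```

Note that the counts asserted in the test rely on CPython's reference counting collecting an orphaned proxy immediately; on other interpreters the drop may only happen after a garbage-collection pass.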
Returns sorted string permutations >>> solve('hat') 'aht,ath,hat,hta,tah,tha' >>> solve('Zu6') '6Zu,6uZ,Z6u,Zu6,u6Z,uZ6' >>> solve('abc') 'abc,acb,bac,bca,cab,cba' | def solve(in_str):
return ','.join(sorted(imap(lambda x: ''.join(x),permutations(in_str)))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def permutation(s):\n if len(s) == 1:\n return [s]\n result = []\n first = s[0]\n ss = s[1:]\n pers = permutation(ss)\n for p in pers:\n for i in range(0,len(p)):\n result.append(p[:i]+first+p[i:])\n return result",
"def permute(s):\n output = []\n if len(s) <= 1:\n return s\n\n for i, letter in enumerate(s):\n # strip current letter from s and find perms on new string\n\n new_s = s[:i] + s[i + 1:]\n\n permutes = permute(new_s)\n\n for perm in permutes:\n output.append(letter + perm)\n\n return output",
"def permute(string):\n if len(string) == 0:\n return ['']\n prev_list = permute(string[1:len(string)])\n next_list = []\n\n for i in range(0, len(prev_list)):\n for j in range(0, len(string)):\n new_string = prev_list[i][0:j] + string[0] + prev_list[i][j:len(string)-1]\n if new_string not in next_list:\n next_list.append(new_string)\n return(next_list)",
"def word_perms(word):\n\t# Question 4a: Generates all strings that are permutations of the letters in word\n\treturn {''.join(w) for w in permutations(word)}",
"def permutation_strings(input, input_two):\n if len(input) != len(input_two):\n return False\n else:\n return sorted(input) == sorted(input_two)",
"def gen_all_strings(word):\n if DEBUG_GAS:\n print \"WORD\", word\n if len(word) < 1:\n if DEBUG_GAS:\n print \"BASE ZERO\"\n print \"len(word)\", len(word)\n print \"word\", word\n return ['']\n if len(word) == 1:\n if DEBUG_GAS:\n print \"BASE ONE\"\n print \"len(word)\", len(word)\n print \"word\", word\n return ['', word]\n else:\n first = word[0]\n rest = word[1:]\n rest_strings = gen_all_strings(rest)\n permutations = []\n if DEBUG_GAS:\n print \"rest_strings\", rest_strings\n print first, rest\n for item in rest_strings:\n if DEBUG_GAS:\n print \"rest_strings item\", item\n for dummy_idx in range(len(item)+1):\n if DEBUG_GAS:\n print \"dummy_idx\", dummy_idx\n print \"item\", item\n permutations.append(str(item[:dummy_idx] + first + item[dummy_idx:]))\n for item in permutations:\n rest_strings.append(item)\n return rest_strings",
"def all_permutations(q_1: Q) -> Qs:\n\n results = []\n\n for perm in itertools.permutations(\"txyz\"):\n results.append(permutation(q_1, perm=perm))\n\n return Qs(results)",
"def solve(chars, length):\n return generate_greedy(generate_string_list(length, chars))",
"def permutation(string):\n i = 0\n j = len(string) - 1\n while i < j:\n if string[i] != string[j]:\n return False\n i += 1\n j -= 1\n return True",
"def perm_gen_lex(a):\n\n if a == '':\n return []\n elif len(a) == 1:\n return [a]\n else:\n perm_list = []\n \"\"\"For each character in the input string\"\"\"\n for i in range(len(a)):\n\n \"\"\"Form a simpler string by removing the character from the input string\n Generate all permutations of the simpler string recursively\"\"\"\n if i == len(a)-1:\n simple_string = a[:i]\n else:\n simple_string = a[:i] + a[(i+1):]\n\n simple_string_list = perm_gen_lex(simple_string)\n\n \"\"\"Add the removed character to the front of each permutation of the simpler string, and\n add the resulting permutation to the list\"\"\"\n for val in simple_string_list:\n perm_list.append(a[i] + val)\n\n return perm_list",
"def get_permutations(nums: List[int]) -> List[List[int]]:\n\n # Stores all permutations\n all_permutations = []\n\n while True:\n\n all_permutations.append(nums.copy())\n\n i = len(nums) - 2\n\n while nums[i] >= nums[i + 1]:\n i -= 1\n if i == -1:\n return all_permutations\n\n j = len(nums) - 1\n while nums[j] <= nums[i]:\n j -= 1\n\n nums[i], nums[j] = nums[j], nums[i]\n nums[i + 1:] = nums[i + 1:][::-1]",
"def permutation(nums):\n list = []\n temp = []\n backtrack(list, temp, nums)\n return list",
"def permutation(q_1: Q, perm: str = \"txyz\") -> Q:\n\n if len(perm) != 4:\n raise ValueError(f\"The perm string must be 4 letters long: {perm}\")\n\n result = {}\n\n result[f\"{perm[0]}\"] = q_1.t\n result[f\"{perm[1]}\"] = q_1.x\n result[f\"{perm[2]}\"] = q_1.y\n result[f\"{perm[3]}\"] = q_1.z\n\n rearranged = []\n\n for letter in tuple(\"txyz\"):\n rearranged.append(result[letter])\n\n return Q(rearranged)",
"def perm_2_let():\r\n return {''.join(i) for i in permutations('abcdefghijklmnopqrstuvwxyz', 2)}\r\n # print(comb_2_let, sep='')\r",
"def solve(words):\n result = defaultdict(list)\n for s in words:\n result[tuple(sorted(s))].append(s)\n print(result.values())",
"def get_perms(n):\n \n from itertools import permutations\n bases = 'CATGN'\n return [''.join(perm) for perm in permutations(bases, n)]",
"def get_sorted_pandigital(m):\n perms = get_all_permutations(range(m,0,-1))\n\n for perm in perms: \n # per is a m-length tuple\n perm = list2num(perm)\n yield perm",
"def all_permutations(lst):\n # If lst is empty then there are no permutations\n if len(lst) == 0:\n return []\n # If there is only one element in lst then, only\n # one permuatation is possible\n if len(lst) == 1:\n return [lst]\n # Find the permutations for lst if there are\n # more than 1 characters\n\n result = [] # empty list that will store current permutation\n # Iterate the input(lst) and calculate the permutation\n for i in range(len(lst)):\n m = lst[i]\n # Extract lst[i] or m from the list. remLst is remaining list\n remLst = lst[:i] + lst[i + 1:]\n\n # Generating all permutations where m is first element\n for p in all_permutations(remLst):\n result.append([m] + p)\n return result",
"def build_permutation_dictionary(input_string):\n string_contents = {}\n\n for char in input_string:\n if char not in string_contents:\n string_contents[char] = 0\n else:\n string_contents[char] += 1\n\n return string_contents",
"def cands(inputs):\n # The below could probably be simplified a bit....\n return map(''.join, list(itertools.chain.from_iterable([ map (list, (itertools.permutations(inputs, x))) for x in range(4, len(inputs)+1)])))",
"def domain_permutations(s):\r\n ret = []\r\n r = s.split('.')\r\n\r\n for x in xrange(len(r)):\r\n ret.append('.'.join(r[x:len(r)]))\r\n for x in r:\r\n ret.append(x)\r\n\r\n return set(ret)",
"def sort(self, input):\n regex = re.compile('[%s]' % re.escape(string.punctuation))\n normal_input = regex.sub('', input.lower())\n array = list(normal_input.replace(' ',''))\n array.sort()\n return ''.join(array)",
"def permutations(value):\n seen = {value}\n possible = calulate_total_permutations(value)\n yield value\n LOG.info(\"%s has %s possible combinations\", value, possible)\n while True:\n if len(seen) >= possible:\n break\n anagram = shuffle_str(value)\n while True:\n if anagram not in seen:\n seen.add(anagram)\n break\n anagram = shuffle_str(anagram)\n yield anagram",
"def digPerms(digset, nzcharset, okzcharset):\n nzcnt = len(nzcharset) # How many letters are non-0\n okzcnt = len(okzcharset) # How many letters are allowed 0\n totcnt = nzcnt + okzcnt # Total number of letters\n if totcnt < 1: # if total numbers of letters is 0\n return [()] # return a singe empty permutation\n nzdigset = digset - set((0,)) # generate a non-zero digit set\n nzdigsetcnt = len(nzdigset) # how many non-zero digits are available\n digsetcnt = len(digset) # how many ok zero digits are available\n # if either fewer digits than letters at all or fewer non-0 digits\n # than letters that need to be non-zero\n if digsetcnt < totcnt or nzdigsetcnt < nzcnt:\n return [] # Return no permutations possible\n # Simple case when zeros are allowed everwhere\n # or no zero is containted within the given digits\n elif nzcnt == 0 or digsetcnt == nzdigsetcnt:\n return permutations(digset, totcnt)\n # Another simple case all letters are non-0\n elif okzcnt == 0:\n return permutations(nzdigset, totcnt)\n else:\n # General case\n # Generate a list of possible 0 positions\n poslst = list(range(nzcnt, totcnt))\n # Chain two iterators\n # first iterator with all non-0 permutations\n # second iterator with all permulations without 1 letter\n # insert 0 in all possible positions of that permutation\n return chain(permutations(nzdigset, totcnt),\n map(lambda x: x[0][:x[1]] + (0,) + x[0][x[1]:],\n product(permutations(nzdigset, totcnt - 1),\n poslst)))",
"def bigSorting(unsorted):\n lookup = defaultdict(lambda: [])\n print(lookup)\n for num_string in unsorted:\n lookup[len(num_string)].append(num_string)\n\n results = []\n lengths = list(lookup.keys())\n lengths.sort()\n for length in lengths:\n x = lookup[length]\n x.sort()\n results = results + x\n print(results)\n return results",
"def gen_permutations_re(outcomes):\r\n\r\n if len(outcomes) == 1:\r\n ans = set()\r\n temp = []\r\n temp.append(outcomes[0])\r\n ans.add(tuple(temp))\r\n return ans\r\n\r\n rest_permutations = gen_permutations_re(outcomes[1:])\r\n\r\n answer = []\r\n for perm in rest_permutations:\r\n perm = list(perm)\r\n for i in range(len(perm) + 1):\r\n temp = perm[:]\r\n temp.insert(i, outcomes[0])\r\n answer.append(tuple(temp))\r\n\r\n return set(answer)",
"def rearrange_chars_no_two_adj_same(txt):\n n = len(txt)\n txt_chars = list(txt)\n result = []\n count_map = {}\n heap = []\n\n for char in txt_chars:\n if char not in count_map:\n count_map[char] = -1\n else:\n count_map[char] = -(abs(count_map[char])+1)\n\n print(count_map)\n\n for key,value in count_map.items():\n heapq.heappush(heap, (value,key))\n\n print(heap)\n\n while heap:\n cur_key = heapq.heappop(heap)[1]\n result.append(cur_key)\n count_map[cur_key] = -(abs(count_map[cur_key])-1)\n prev_key = cur_key\n\n if heap:\n cur_key = heapq.heappop(heap)[1]\n result.append(cur_key)\n count_map[cur_key] = -(abs(count_map[cur_key])-1)\n if abs(count_map[prev_key]) > 0:\n heapq.heappush(heap, (count_map[prev_key], prev_key))\n if abs(count_map[cur_key]) > 0:\n heapq.heappush(heap, (count_map[cur_key], cur_key))\n\n else:\n break\n\n if n != len(result):\n print(\"Not possible to rearrange\")\n else:\n print(\"\".join(result))",
"def permutations(config):\r\n return list(set(itertools.permutations(config)))",
"def generate_permutations(search_space):\n expanded_ss = []\n for sp in search_space:\n new_se = generate_sign_permutation(sp)\n expanded_ss.extend(new_se)\n return expanded_ss",
"def list_permutations(self):\n return self.permutations(self._char_counts)"
] | [
"0.661948",
"0.6591128",
"0.63372636",
"0.62420875",
"0.61537945",
"0.61418396",
"0.6108773",
"0.61023647",
"0.59346366",
"0.59056646",
"0.58466256",
"0.57606375",
"0.57452804",
"0.57302254",
"0.5710946",
"0.56997097",
"0.56990546",
"0.5698452",
"0.56910104",
"0.5674147",
"0.5668412",
"0.5666714",
"0.5662571",
"0.5660114",
"0.5639199",
"0.56301385",
"0.5614278",
"0.56058466",
"0.5597954",
"0.55938166"
] | 0.82966363 | 0 |
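
The positive document in this row calls `imap`, which exists only in Python 2's `itertools`, and omits its imports. A Python 3 equivalent sketch that reproduces the docstring's expected output:

```python
from itertools import permutations


def solve(in_str):
    # Join each permutation tuple into a string, sort lexicographically,
    # and return one comma-separated string.
    return ','.join(sorted(''.join(p) for p in permutations(in_str)))


assert solve('hat') == 'aht,ath,hat,hta,tah,tha'
```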
reconstruct a sg from a scene json file | def recon_sg2(json_file_dir, if_add_bases=True):
id2color = {
"gray": [87, 87, 87],
"red": [173, 35, 35],
"blue": [42, 75, 215],
"green": [29, 105, 20],
"brown": [129, 74, 25],
"purple": [129, 38, 192],
"cyan": [41, 208, 208],
"yellow": [255, 238, 51],
"c1": [42, 87, 9],
"c2": [255, 102, 255],
"orange": [255, 140, 0]
}
color2id = {tuple(v): u for u, v in id2color.items()}
with open(json_file_dir, 'r') as json_file:
du = json.load(json_file)
location_dict = {}
objects = []
bboxes = []
for obj in du["objects"]:
color = tuple([int(du33*255) for du33 in obj["color"]][:-1])
object_id = color2id[color]
a_key = "%.3f" % obj["location"][0]
if a_key not in location_dict:
location_dict[a_key] = [(object_id, obj["location"][2])]
else:
location_dict[a_key].append((object_id, obj["location"][2]))
objects.append(object_id)
bboxes.append([
obj["bbox"][0]/128.0,
obj["bbox"][1]/128.0,
obj["bbox"][2]/128.0,
obj["bbox"][3]/128.0,
])
obj2id = {objects[du4]: objects[du4] for du4 in range(len(objects))}
if if_add_bases:
relationships = [
[obj2id["brown"], "left", obj2id["purple"]],
[obj2id["purple"], "left", obj2id["cyan"]],
]
else:
relationships = []
for du3 in location_dict:
location = sorted(location_dict[du3], key=lambda x: x[1])
while len(location) > 1:
o1 = location.pop()[0]
o2 = location[-1][0]
relationships.append([obj2id[o1], "up", obj2id[o2]])
assert o1 not in ["cyan", "purple", "brown"]
return relationships | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def utt_to_scene(file_name):\n with open(file_name, 'r') as file:\n data = file.readlines()\n data = [line.strip().split() for line in data if line.strip() != '']\n data = [[line[0], \" \".join(line[1:])] for line in data]\n preproc_data = [[line[0], list(map(lambda x: x[:x.find(\":\")], line[1].split(',')))[:-1]] for line in data]\n scene_mapping = {line[0]: line[1] for line in preproc_data}\n return scene_mapping",
"def import_scene(file_path):\n\n pass",
"def parse(vera, s):\n\n sd = SceneDefinition()\n \n sd.name = s[\"name\"]\n\n for i in s[\"triggers\"]:\n sd.triggers.append(Trigger.parse(vera, i))\n\n if s.has_key(\"timers\"):\n for i in s[\"timers\"]:\n sd.timers.append(Timer.parse(i))\n\n if s.has_key(\"groups\"):\n for i in s[\"groups\"]:\n sd.actions.append(Group.parse(vera, i))\n\n if s.has_key(\"room\"):\n if s[\"room\"] == 0:\n sd.room = None\n else:\n sd.room = vera.get_room_by_id(s[\"room\"])\n\n if s.has_key(\"modeStatus\"):\n sd.modes = Modes.parse(vera, s[\"modeStatus\"])\n\n return sd",
"def read(scene_name):\n routes, fixtures = read_fixtures(scene_name)\n scene = build_scene_from_fixtures(fixtures, scene_name)\n write_to_json(scene, scene_name)\n if routes:\n write_to_json(build_routes_file(routes, scene_name), scene_name + \"-routes\")",
"def load_streamed_scene(self, scene=\"tdw_room_2018\"):\n\n self.communicate([{\"$type\": \"load_streamed_scene\", \"scene_name\": scene}])",
"def load_scene():\n module_path = dirname(__file__)\n return _safe_unpickle(join(module_path, 'scene.pickle'))",
"def main(unused_argv):\n\n # Read the scene file.\n with open(FLAGS.scene_path, 'r') as file_id:\n scenes = json.load(file_id)\n\n # Read the synonyms file.\n with open(FLAGS.synonym_path, 'r') as file_id:\n synonyms = json.load(file_id)\n sorter = lambda x: len(x[0].split(' '))\n\n # Read the metainformation file.\n with open(FLAGS.metainfo_path, 'r') as file_id:\n gvars.METAINFO = json.load(file_id)\n tag_inv_map = {attr: tag for tag, attr in gvars.METAINFO['tag_map'].items()\n if tag != '<P>'}\n gvars.METAINFO['tag_inv_map'] = tag_inv_map\n gvars.METAINFO['synonym_keys'] = sorted(synonyms.items(),\n key=sorter, reverse=True)\n\n # Add ids to objects.\n scenes = utils.add_object_ids(scenes)\n scenes = utils.clean_object_attributes(scenes)\n\n # Read the caption templates.\n template_paths = os.listdir(FLAGS.caption_template_root)\n cap_templates = []\n for ii in template_paths:\n with open(os.path.join(FLAGS.caption_template_root, ii), 'r') as file_id:\n cur_templates = json.load(file_id)\n cap_templates.extend(cur_templates)\n #utils.pretty_print_templates(cap_templates, 1)\n\n # Read the question templates.\n template_paths = os.listdir(FLAGS.question_template_root)\n ques_templates = []\n for ii in template_paths:\n with open(os.path.join(FLAGS.question_template_root, ii), 'r') as file_id:\n cur_templates = json.load(file_id)\n ques_templates.extend(cur_templates)\n #utils.pretty_print_templates(ques_templates, 1)\n\n # 1. Check if there a scene_id_file specified.\n # 2. Check if num_images is -1\n if FLAGS.scene_id_file != '':\n with open(FLAGS.scene_id_file, 'r') as file_id:\n missing_ids = [int(ii.strip('\\n')) for ii in file_id.readlines()]\n print('Dialogs missing for scenes: %d' % len(missing_ids))\n\n # Create a image_index -> scenes list index dictionary\n image_list_id_dict = {ii['image_index']: index\n for index, ii in enumerate(scenes['scenes'])}\n scenes_subset = [scenes['scenes'][image_list_id_dict[scene_id]]\n for scene_id in missing_ids]\n\n elif FLAGS.num_images == -1:\n scenes_subset = scenes['scenes']\n\n else:\n scenes_subset = scenes['scenes'][0: FLAGS.num_images]\n\n # BFS for each scene.\n if FLAGS.num_workers == 1:\n # Single thread version.\n dialogs = []\n for index, scene in enumerate(scenes_subset):\n cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime())\n print('Generating [ %s ] [ Worker: %d, Progress: %d/%d Scene: %d ]' %\\\n (cur_time, 0, index, len(scenes_subset), scene['image_index']))\n gen_dialog = generate_dialog_bfs(scene, cap_templates, ques_templates)\n dialogs.append(gen_dialog)\n\n else:\n # Multithread version.\n output_q = multiprocessing.Queue()\n jobs = []\n for worker_id in range(FLAGS.num_workers):\n allotment = scenes_subset[worker_id::FLAGS.num_workers]\n inputs = (allotment, cap_templates, ques_templates)\n inputs += (worker_id, output_q)\n\n process = multiprocessing.Process(target=worker, args=inputs)\n jobs.append(process)\n process.start()\n\n # Wait for all the jobs to finish and collect the output.\n final_results = {}\n for _ in jobs:\n final_results.update(output_q.get())\n for job in jobs:\n job.join()\n\n # Flatten and sort.\n final_results = [jj for _, ii in final_results.items() for jj in ii]\n dialogs = sorted(final_results, key=lambda x: x['image_index'])\n # utils.pretty_print_dialogs(dialogs)\n\n # Save the dialogs.\n print('Saving dialog at: %s' % FLAGS.save_path)\n with open(FLAGS.save_path, 'w') as file_id:\n json.dump(dialogs, file_id)",
"def parse( self, data, baseURL, *args, **named ):\n sg = basenodes.sceneGraph(\n )\n \n # these three are shared among all shapes\n hash = md5( baseURL ).hexdigest()\n coord = basenodes.Coordinate( DEF='Coord-%s'%(hash,) )\n normal = basenodes.Normal(DEF='Norm-%s'%(hash,))\n texCoord = basenodes.TextureCoordinate(DEF='TexCoord-%s'%(hash,))\n\n mesh = None # transforms\n group = None # shape\n material = None # appearance, material, texture\n \n materials = {}\n \n # indices are 1-based, the first values are never used...\n vertices = [[0., 0., 0.]] \n normals = [[0., 0., 0.]]\n tex_coords = [[0., 0.]]\n \n current_vertex_indices = []\n current_normal_indices = []\n current_texcoord_indices = []\n\n for line in data.splitlines():\n if line.startswith('#'): \n continue\n values = line.split()\n if not values: \n continue\n\n if values[0] == 'v':\n vertices.append(map(float, values[1:4]))\n elif values[0] == 'vn':\n normals.append(map(float, values[1:4]))\n elif values[0] == 'vt':\n tex_coords.append(map(float, values[1:3]))\n elif values[0] == 'mtllib':\n self.load_material_library(values[1], materials, baseURL)\n elif values[0] in ('usemtl', 'usemat'):\n material = materials.get(values[1], None)\n if material is None:\n log.warn('Unknown material: %s', values[1])\n material = self.defaultMaterial()\n if mesh is not None:\n if group and current_vertex_indices:\n group.geometry.coordIndex = current_vertex_indices\n group.geometry.texCoordIndex = current_texcoord_indices\n group.geometry.normalIndex = current_normal_indices\n current_vertex_indices = []\n current_texcoord_indices = []\n current_normal_indices = []\n group = basenodes.Shape(\n geometry = basenodes.IndexedFaceSet(\n coord = coord,\n normal = normal,\n texCoord = texCoord,\n solid=False,\n ),\n appearance = material,\n )\n mesh.children.append(group)\n elif values[0] == 'o':\n mesh = basenodes.Transform( DEF = values[1] )\n sg.children.append( mesh )\n sg.regDefName( values[1], mesh )\n # previous shape is no longer current...\n group = None\n elif values[0] == 's':\n # a smoothing-group definition...\n # not currently supported...\n pass\n elif values[0] == 'f':\n # adds a single face\n if mesh is None:\n # anonymous transform\n mesh = basenodes.Transform()\n sg.children.append(mesh)\n if material is None:\n material = self.defaultMaterial()\n if group is None:\n group = basenodes.Shape( \n geometry = basenodes.IndexedFaceSet(\n coord = coord,\n normal = normal,\n texCoord = texCoord,\n solid=False,\n ),\n appearance = material,\n )\n mesh.children.append(group)\n\n for i, v in enumerate(values[1:]):\n v_index, t_index, n_index = self._cleanIndex( v )\n current_vertex_indices.append( v_index )\n current_texcoord_indices.append( t_index )\n current_normal_indices.append( n_index )\n current_vertex_indices.append( -1 )\n current_texcoord_indices.append( -1 )\n current_normal_indices.append( -1 )\n else:\n log.warn( \"\"\"Unrecognized operation: %r\"\"\", values )\n if group and current_vertex_indices:\n group.geometry.coordIndex = current_vertex_indices\n group.geometry.texCoordIndex = current_texcoord_indices\n group.geometry.normalIndex = current_normal_indices\n coord.point = vertices\n normal.normal = normals\n texCoord.texCoord = tex_coords\n return True,sg\n \n \n # this creates a pointset-only version of the geometry...",
"def stich_from_file(jsonname):\n with open(jsonname) as f:\n data = json.loads(f.read())\n stich(data)",
"def menu_load_scene(self):\n file_name = QtGui.QFileDialog().getOpenFileName(self, \"Choose Scene File\", get_data_path(), \"*.pkl\")\n with open(file_name, \"rb\") as f:\n self.scene = pickle.load(f)",
"def from_json(cls, path):\n\n if not isinstance(path, Path):\n path = Path(path)\n\n directory, prefix = path.parent, path.stem\n with open(path) as fp:\n params = json.load(fp)\n\n model = cls(params['elements'],\n params['r_cut'],\n params['gp']['sigma'],\n params['gp']['noise'],\n params['gp']['r0'])\n\n gp_filename = params['gp']['filename']\n model.gp.load(directory / gp_filename)\n\n if params['grid']:\n model.grid_start = params['grid']['r_min']\n model.grid_end = params['grid']['r_max']\n model.grid_num = params['grid']['r_num']\n\n for key, grid_filename in params['grid']['filename'].items():\n k = tuple(key)\n\n model.grid[k] = interpolation.Spline1D.load(\n directory / grid_filename)\n\n return model",
"def run():\n\n if lm.scene_object().type == 'env':\n read_layout(outFile=lm.scene_object().copy(ext='json').path_root)\n\n else:\n read_layout()",
"def create_scene(self, s):\n s = json.dumps(s.output())\n\n # URL-encoding. Vera not happy with Python's standard\n # URL-encoding.\n s = Vera.urlencode(s)\n \n payload = self.get('data_request?id=scene&action=create&json=%s' % s)\n return payload",
"def load(self) -> Scene:\n self.path = self.find_scene(self.meta.path)\n if not self.path:\n raise ImproperlyConfigured(\"Scene '{}' not found\".format(self.meta.path))\n\n self.scene = Scene(self.path)\n\n # Load gltf json file\n if self.path.suffix == \".gltf\":\n self.load_gltf()\n\n # Load binary gltf file\n if self.path.suffix == \".glb\":\n self.load_glb()\n\n self.gltf.check_version()\n self.gltf.check_extensions(self.supported_extensions)\n self.load_images()\n self.load_samplers()\n self.load_textures()\n self.load_materials()\n self.load_meshes()\n self.load_nodes()\n\n self.scene.calc_scene_bbox()\n self.scene.prepare()\n\n return self.scene",
"def create_scene(self, ):\n self.scene = create_scene(\n self.opt.splats_img_size, self.opt.splats_img_size, self.opt.fovy,\n self.opt.focal_length, self.opt.n_splats)",
"def load_skinned(file):\n try:\n option = pyassimp.postprocess.aiProcessPreset_TargetRealtime_MaxQuality\n scene = pyassimp.load(file, option)\n except pyassimp.errors.AssimpError:\n #print('ERROR: pyassimp unable to load', file)\n return []\n\n # ----- load animations\n def conv(assimp_keys, ticks_per_second):\n \"\"\" Conversion from assimp key struct to our dict representation \"\"\"\n return {key.time / ticks_per_second: key.value for key in assimp_keys}\n\n # load first animation in scene file (could be a loop over all animations)\n transform_keyframes = {}\n if scene.animations:\n anim = scene.animations[0]\n for channel in anim.channels:\n # for each animation bone, store trs dict with {times: transforms}\n # (pyassimp name storage bug, bytes instead of str => convert it)\n transform_keyframes[channel.nodename.data.decode('utf-8')] = (\n conv(channel.positionkeys, anim.tickspersecond),\n conv(channel.rotationkeys, anim.tickspersecond),\n conv(channel.scalingkeys, anim.tickspersecond)\n )\n\n # Note: embedded textures not supported at the moment\n path = os.path.dirname(file)\n for mat in scene.materials:\n mat.tokens = dict(reversed(list(mat.properties.items())))\n if 'file' in mat.tokens: # texture file token\n tname = mat.tokens['file'].split('/')[-1].split('\\\\')[-1]\n # search texture in file's whole subdir since path often screwed up\n tname = [os.path.join(d[0], f) for d in os.walk(path) for f in d[2]\n if tname.startswith(f) or f.startswith(tname)]\n if tname:\n mat.texture = tname[0]\n else:\n print('Failed to find texture:', tname)\n\n # ---- prepare scene graph nodes\n # create SkinningControlNode for each assimp node.\n # node creation needs to happen first as SkinnedMeshes store an array of\n # these nodes that represent their bone transforms\n nodes = {} # nodes: string name -> node dictionary\n\n def make_nodes(pyassimp_node):\n \"\"\" Recursively builds nodes for our graph, matching pyassimp nodes \"\"\"\n trs_keyframes = transform_keyframes.get(pyassimp_node.name, (None,))\n\n node = SkinningControlNode(*trs_keyframes, name=pyassimp_node.name,\n transform=pyassimp_node.transformation)\n nodes[pyassimp_node.name] = node, pyassimp_node\n node.add(*(make_nodes(child) for child in pyassimp_node.children))\n return node\n\n root_node = make_nodes(scene.rootnode)\n\n # ---- create SkinnedMesh objects\n for mesh in scene.meshes:\n # -- skinned mesh: weights given per bone => convert per vertex for GPU\n # first, populate an array with MAX_BONES entries per vertex\n v_bone = np.array([[(0, 0)]*MAX_BONES] * mesh.vertices.shape[0],\n dtype=[('weight', 'f4'), ('id', 'u4')])\n for bone_id, bone in enumerate(mesh.bones[:MAX_BONES]):\n for entry in bone.weights: # weight,id pairs necessary for sorting\n v_bone[entry.vertexid][bone_id] = (entry.weight, bone_id)\n\n v_bone.sort(order='weight') # sort rows, high weights last\n v_bone = v_bone[:, -MAX_VERTEX_BONES:] # limit bone size, keep highest\n\n # prepare bone lookup array & offset matrix, indexed by bone index (id)\n bone_nodes = [nodes[bone.name][0] for bone in mesh.bones]\n bone_offsets = [bone.offsetmatrix for bone in mesh.bones]\n\n try :\n # Si les textures sont définies : corp principal du dinosaure\n texture = scene.materials[mesh.materialindex].texture\n # tex coords in raster order: compute 1 - y to follow OpenGL convention\n if mesh.texturecoords.size:\n tex_uv = np.array((0, 1) + mesh.texturecoords[0][:, :2] * (1, -1), dtype=np.float32)\n else:\n tex_uv = None\n\n tangents = []\n bitangents = []\n\n for face in 
mesh.faces:\n # Calcul des tangentes et bitangentes pour chaque face de la figure\n v0 = mesh.vertices[face[0]]\n v1 = mesh.vertices[face[1]]\n v2 = mesh.vertices[face[2]]\n\n uv0 = tex_uv[face[0]]\n uv1 = tex_uv[face[1]]\n uv2 = tex_uv[face[2]]\n\n deltaPos1 = [v1[i] - v0[i] for i in range(3)]\n deltaPos2 = [v2[i] - v0[i] for i in range(3)]\n\n deltaUV1 = [uv1[i] - uv0[i] for i in range(2)]\n deltaUV2 = [uv2[i] - uv0[i] for i in range(2)]\n\n r = 1 / ((deltaUV1[0]*deltaUV2[1]) - (deltaUV2[0]*deltaUV1[1]))\n tangent = [(deltaPos1[i]*deltaUV2[1])-(deltaPos2[i]*deltaUV1[1]) for i in range(3)]\n bitangent = [(deltaPos2[i]*deltaUV2[1])-(deltaPos1[i]*deltaUV1[1]) for i in range(3)]\n\n tangents.append(tangent)\n bitangents.append(bitangent)\n\n\n # initialize skinned mesh and store in pyassimp_mesh for node addition\n # ajout des coordonnées uv de texture, des tangentes et bitangentes\n mesh.skinned_mesh = SkinnedTextMesh(\n [mesh.vertices, mesh.normals, v_bone['id'], v_bone['weight'], tex_uv, tangents, bitangents],\n bone_nodes, bone_offsets, texture, mesh.faces\n )\n except AttributeError:\n # cas sans textures définies (dents du dinosaure)\n mesh.skinned_mesh = SkinnedMesh(\n [mesh.vertices, mesh.normals, v_bone['id'], v_bone['weight']],\n bone_nodes, bone_offsets, mesh.faces\n )\n\n\n # ------ add each mesh to its intended nodes as indicated by assimp\n for final_node, assimp_node in nodes.values():\n final_node.add(*(_mesh.skinned_mesh for _mesh in assimp_node.meshes))\n\n nb_triangles = sum((mesh.faces.shape[0] for mesh in scene.meshes))\n # print('Loaded', file, '\\t(%d meshes, %d faces, %d nodes, %d animations)' %\n # (len(scene.meshes), nb_triangles, len(nodes), len(scene.animations)))\n pyassimp.release(scene)\n return [root_node]",
"def make_scene_folders_0001(jsonFile, rootDir):\n sceneShotList = [[1, ['A', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG', 'AH', 'AJ', 'AK',\n 'C', 'D', 'E', 'E_v2', 'G', 'H', 'N']],\n [8, ['D', 'DA', 'DB', 'DC', 'D_v2']],\n [9, ['A', 'A_v2', 'B', 'B_v2']],\n [11, ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'G_v2',\n 'K', 'K_v2', 'K_v3', 'K_v4', 'K_v5', 'K_v6',\n 'M', 'M_v2', 'M_v3', 'M_v4', 'M_v5', 'M_v6', 'M_v7',\n 'N', 'N_v2', 'N_v3', 'N_v4', 'N_v5', 'R', 'R_v2', 'R_v3', 'R_v4', 'R_v5']],\n [12, ['A', 'A_v2', 'B', 'C', 'C_v2', 'C_v3', 'C_v4', 'D',\n 'E', 'E_v2']],\n [13, ['A', 'C', 'D', 'E', 'F']],\n [14, ['A', 'B', 'B_v2', 'B_v3', 'B_v4', 'B_v5', 'B_v6',\n 'C', 'C_v2', 'C_v3', 'C_v4', 'D', 'F', 'FF', 'H',\n 'H_v2', 'J', 'JJ', 'JJ_v2', 'K', 'L', 'M', 'M_v2',\n 'M_v3', 'M_v4', 'M_v5', 'N', 'P', 'P_v2', 'P_v3',\n 'R', 'R_v2', 'T', 'T_v2']],\n [15, ['A', 'J', 'J_v2', 'K', 'K_v2', 'L']]]\n\n for i, l in enumerate(sceneShotList):\n shotFolder = \"s\" + str(l[0]).zfill(3) # gives padding of 4\n shotFolderFinal = shotFolder\n if len(l) == 1 or l[1] == []:\n shotFolderFinal = os.path.join(rootDir, shotFolder)\n make_tree_from_dict(jsonFile, shotFolderFinal)\n else:\n for shot in l[1]:\n shotFolderFinal = shotFolder + shot\n shotFolderFinal = os.path.join(rootDir, shotFolderFinal)\n make_tree_from_dict(jsonFile, shotFolderFinal)",
"def from_json(cls, path):\n\n if not isinstance(path, Path):\n path = Path(path)\n\n directory, prefix = path.parent, path.stem\n with open(path) as fp:\n params = json.load(fp)\n\n model = cls(params['element'],\n params['r_cut'],\n params['gp']['sigma'],\n params['gp']['noise'],\n params['gp']['r0'])\n\n gp_filename = params['gp']['filename']\n model.gp.load(directory / gp_filename)\n\n if params['grid']:\n grid_filename = params['grid']['filename']\n model.grid = interpolation.Spline1D.load(directory / grid_filename)\n\n model.grid_start = params['grid']['r_min']\n model.grid_end = params['grid']['r_max']\n model.grid_num = params['grid']['r_num']\n\n return model",
"def __init__(self, meta: SceneDescription):\n super().__init__(meta)\n self.scenes = []\n self.nodes = []\n self.meshes = []\n self.materials = []\n self.images = []\n self.samplers = []\n self.textures = []\n\n self.path = None\n self.scene = None\n self.gltf = None",
"def open_scene(file_path, save=True):\n\n pass",
"def __init__(self, designfile):\r\n with open(designfile, 'r') as fp:\r\n self.design = json.load(fp)",
"def from_json(cls, path):\n\n if not isinstance(path, Path):\n path = Path(path)\n\n directory, prefix = path.parent, path.stem\n\n with open(path) as fp:\n params = json.load(fp)\n model = cls(params['elements'],\n params['r_cut'],\n params['gp']['sigma'],\n params['gp']['theta'],\n params['gp']['noise'])\n\n gp_filename = params['gp']['filename']\n try:\n model.gp.load(directory / gp_filename)\n except:\n warnings.warn(\"The many-body GP file is missing\")\n pass\n\n return model",
"def loadGMSHModel(modelfile, scale, dx=0.0, dy=0.0, dz=0.0, avg=True,\n neg_normal=False, texture=None):\n\n # noinspection PyPep8Naming,PyUnboundLocalVariable,PyShadowingNames,PyUnusedLocal\n def load(gmshfile, scale, dx, dy, dz):\n \"\"\"Carga un archivo gmsh y retorna 3 listas, una lista de vertices, otra de normales y otra de normales promedio. \\n\n Toma como argumento el archivo, una escala y la posicion (dx,dy,dz)\"\"\"\n\n # noinspection PyPep8Naming,PyShadowingNames\n def getAveNormals(nodes, elems):\n \"\"\"Calcula las normales promedio por cada vertice\"\"\"\n nodetrilist = []\n for nodenum in range(len(nodes)):\n nodetrilist.append([])\n for elemnum in range(len(elems)):\n if nodenum in elems[elemnum]:\n nodetrilist[nodenum].append(elemnum)\n avenorms = []\n for tri in nodetrilist:\n aveNi = 0.0\n aveNj = 0.0\n aveNk = 0.0\n denom = max(float(len(tri)), 1)\n for elem in tri:\n vert1 = [nodes[elems[elem][0]][0], nodes[elems[elem][0]][1],\n nodes[elems[elem][0]][2]]\n vert2 = [nodes[elems[elem][1]][0], nodes[elems[elem][1]][1],\n nodes[elems[elem][1]][2]]\n vert3 = [nodes[elems[elem][2]][0], nodes[elems[elem][2]][1],\n nodes[elems[elem][2]][2]]\n normals = getNormals(vert1, vert2, vert3)\n aveNi += normals[0]\n aveNj += normals[1]\n aveNk += normals[2]\n avenorms.append([aveNi / denom, aveNj / denom, aveNk / denom])\n return avenorms\n\n # noinspection PyPep8Naming\n def getNormals(vertA, vertB, vertC):\n \"\"\"Calcula las normales por cada 3 vertices\"\"\"\n xA = vertA[0]\n xB = vertB[0]\n xC = vertC[0]\n yA = vertA[1]\n yB = vertB[1]\n yC = vertC[1]\n zA = vertA[2]\n zB = vertB[2]\n zC = vertC[2]\n ABx = xB - xA\n ABy = yB - yA\n ABz = zB - zA\n BCx = xC - xB\n BCy = yC - yB\n BCz = zC - zB\n Nx = ABy * BCz - ABz * BCy\n Ny = ABz * BCx - ABx * BCz\n Nz = ABx * BCy - ABy * BCx\n VecMag = math.sqrt(Nx ** 2 + Ny ** 2 + Nz ** 2)\n Ni = Nx / VecMag\n Nj = Ny / VecMag\n Nk = Nz / VecMag\n return [Ni, Nj, Nk]\n\n # Lee el archivo\n try:\n infile = open(gmshfile)\n except:\n raise Exception(\"el archivo del modelo no existe\")\n\n # Crea el modeo\n try:\n gmshlines = infile.readlines()\n readnodes = False\n readelems = False\n skipline = 0\n elems = []\n lnum = 0\n nnodes = 0\n for line in gmshlines:\n if \"$Nodes\" in line:\n readnodes = True\n skipline = 2\n nnodes = int(gmshlines[lnum + 1].strip())\n nodes = []\n for i in range(nnodes):\n nodes.append(99999.9)\n elif \"$EndNodes\" in line:\n readnodes = False\n skipline = 1\n elif \"$Elements\" in line:\n readelems = True\n skipline = 2\n elif \"$EndElements\" in line:\n readelems = False\n skipline = 1\n if skipline < 1:\n if readnodes:\n nXYZ = line.strip().split()\n nodenum = int(nXYZ[0]) - 1\n nX = float(nXYZ[1]) * scale + dx\n nY = float(nXYZ[2]) * scale + dy\n nZ = float(nXYZ[3]) * scale + dz\n if neg_normal:\n nZ *= -1\n nodes[nodenum] = [nX, nY, nZ]\n elif readelems:\n n123 = line.split()\n if n123[1] == \"2\":\n n1 = int(n123[-3]) - 1\n n2 = int(n123[-1]) - 1\n n3 = int(n123[-2]) - 1\n elems.append([n1, n2, n3])\n else:\n skipline -= 1\n lnum += 1\n triarray = []\n normarray = []\n avenorms = []\n nodeavenorms = getAveNormals(nodes, elems)\n for elem in elems:\n vert1 = [nodes[elem[0]][0], nodes[elem[0]][1],\n nodes[elem[0]][2]]\n vert2 = [nodes[elem[1]][0], nodes[elem[1]][1],\n nodes[elem[1]][2]]\n vert3 = [nodes[elem[2]][0], nodes[elem[2]][1],\n nodes[elem[2]][2]]\n avenorm0 = nodeavenorms[elem[0]]\n avenorm1 = nodeavenorms[elem[1]]\n avenorm2 = nodeavenorms[elem[2]]\n normals = getNormals(vert1, vert2, vert3)\n 
triarray.append(vert1)\n triarray.append(vert2)\n triarray.append(vert3)\n normarray.append(normals)\n normarray.append(normals)\n normarray.append(normals)\n avenorms.append(avenorm0)\n avenorms.append(avenorm1)\n avenorms.append(avenorm2)\n return triarray, normarray, avenorms\n\n except:\n raise Exception(\"error al cargar el modelo\")\n\n vertex, norm, avgnorm = load(modelfile, scale, float(dx), float(dy),\n float(dz))\n if avg:\n return VboObject(vbo.VBO(array(vertex, 'f')),\n vbo.VBO(array(avgnorm, 'f')), len(vertex), texture)\n else:\n return VboObject(vbo.VBO(array(vertex, 'f')), vbo.VBO(array(norm, 'f')),\n len(vertex), texture)",
"def load_stoic_from_sbml(file_name, split_bidirectional_fluxes=False):\n from lxml import etree\n for ext in ['', '.xml', '.xml.gz', '.gz']:\n if os.path.isfile(file_name+ext):\n file_name = file_name+ext\n break\n tree = etree.parse(file_name)\n root = tree.getroot()\n assert root.tag.endswith ('sbml'), `root.tag`\n version = int(root.attrib['version'])\n level = int(root.attrib['level'])\n if level in [2,3]:\n default_stoichiometry = '1'\n else:\n default_stoichiometry = None\n compartments = {}\n species = []\n modifiers = []\n species_all = []\n reactions = []\n species_reactions = defaultdict (lambda:[])\n reactions_species = defaultdict (lambda:[])\n reactions_info = defaultdict(lambda:dict(modifiers=[],reactants=[],products=[],\n boundary_specie_stoichiometry={},annotation=[],\n compartments = set()))\n species_info = defaultdict(lambda:dict())\n matrix = {}\n for model in root:\n for item in model:\n if item.tag.endswith('listOfCompartments'):\n for compartment in item:\n compartments[compartment.attrib['id']] = compartment.attrib\n elif item.tag.endswith('listOfSpecies'):\n for specie in item:\n species_all.append(specie.attrib['id'])\n species_info[specie.attrib['id']]['compartment'] = specie.attrib['compartment']\n species_info[specie.attrib['id']]['name'] = specie.attrib.get('name', specie.attrib['id'])\n elif item.tag.endswith('listOfReactions'):\n for reaction in item:\n\n reversible =eval(reaction.attrib.get('reversible', 'False').title())\n reaction_id = reaction.attrib['id']\n name = reaction.attrib.get('name', reaction_id)\n assert reaction_id not in reactions,`reaction_id`\n reactions.append(reaction_id)\n reaction_index = len(reactions)-1\n reactions_info[reaction_id]['name'] = name\n\n if split_bidirectional_fluxes and reversible:\n reaction_id2 = '%s_r' % (reaction_id)\n assert reaction_id2 not in reactions,`reaction_id2`\n reactions.append(reaction_id2)\n reaction_index2 = len(reactions)-1\n reactions_info[reaction_id2]['name'] = name+'_r'\n reactions_info[reaction_id]['reversible'] = False\n reactions_info[reaction_id2]['reversible'] = False\n else:\n reaction_id2 = reaction_index2 = None\n reactions_info[reaction_id]['reversible'] = reversible\n\n for part in reaction:\n if part.tag.endswith ('listOfReactants'):\n for reactant in part:\n assert reactant.tag.endswith('speciesReference'), `reactant.tag`\n specie_id = reactant.attrib['species']\n stoichiometry = -obj2num(reactant.attrib.get('stoichiometry', default_stoichiometry))\n reactions_info[reaction_id]['reactants'].append(specie_id)\n try:\n specie_index = species.index(specie_id)\n except ValueError:\n species.append(specie_id)\n specie_index = len(species)-1\n assert stoichiometry,`stoichiometry`\n matrix[specie_index, reaction_index] = stoichiometry\n species_reactions[specie_index].append(reaction_index)\n reactions_species[reaction_index].append(specie_index) \n reactions_info[reaction_id]['compartments'].add(species_info[specie_id]['compartment'])\n if reaction_index2 is not None:\n reactions_info[reaction_id2]['reactants'].append(specie_id)\n matrix[specie_index, reaction_index2] = -stoichiometry\n species_reactions[specie_index].append(reaction_index2)\n reactions_species[reaction_index2].append(specie_index) \n reactions_info[reaction_id2]['compartments'].add(species_info[specie_id]['compartment'])\n elif part.tag.endswith ('listOfProducts'):\n for product in part:\n assert product.tag.endswith('speciesReference'), `product.tag`\n specie_id = product.attrib['species']\n stoichiometry = 
obj2num(product.attrib.get('stoichiometry', default_stoichiometry))\n reactions_info[reaction_id]['products'].append(specie_id)\n try:\n specie_index = species.index(specie_id)\n except ValueError:\n species.append(specie_id)\n specie_index = len(species)-1\n assert stoichiometry,`stoichiometry`\n matrix[specie_index, reaction_index] = stoichiometry\n species_reactions[specie_index].append(reaction_index)\n reactions_species[reaction_index].append(specie_index)\n reactions_info[reaction_id]['compartments'].add(species_info[specie_id]['compartment'])\n if reaction_index2 is not None:\n reactions_info[reaction_id2]['products'].append(specie_id)\n matrix[specie_index, reaction_index2] = -stoichiometry\n species_reactions[specie_index].append(reaction_index2)\n reactions_species[reaction_index2].append(specie_index)\n reactions_info[reaction_id2]['compartments'].add(species_info[specie_id]['compartment'])\n elif part.tag.endswith ('listOfModifiers'):\n for modifier in part:\n assert modifier.tag.endswith('modifierSpeciesReference'), `modifier.tag`\n specie_id = product.attrib['species']\n reactions_info[reaction_id]['modifiers'].append(specie_id)\n reactions_info[reaction_id]['compartments'].add(species_info[specie_id]['compartment'])\n continue\n elif part.tag.endswith ('annotation'):\n reactions_info[reaction_id]['annotation'].append(part.text)\n continue\n elif re.match(r'.*(kineticLaw|notes)\\Z', part.tag): \n continue\n else:\n print 'get_stoichiometry:warning:unprocessed reaction element: %r' % (part.tag)\n continue\n\n\n elif re.match (r'.*(annotation|notes|listOfSpeciesTypes|listOfUnitDefinitions)\\Z', item.tag):\n pass\n else:\n print 'get_stoichiometry:warning:unprocessed model element: %r' % (item.tag)\n\n return matrix, species, reactions, species_info, reactions_info",
"def __init__(self, src_path, json, *args, **kwargs):\n super(SimState, self).__init__(*args, **kwargs)\n\n self.src_path = src_path\n self.json = json\n self.name = json['name']\n self.super = json['super']\n\n self.vars = []\n for json_var in json['vars']:\n self.vars.append(SimVar(json_var))",
"def __init__(self, image_dir, instances_json, stuff_json=None,\n stuff_only=True, image_size=(64, 64), mask_size=32, normalize_images=True, max_samples=None,\n include_relationships=True, min_object_size=0.02, min_objects=3, max_objects=8,\n include_other=False, instance_whitelist=None, stuff_whitelist=None, learned_transitivity=False,\n include_dummies=True, use_transitivity=False, use_converse=False, learned_symmetry=False,\n learned_converse=False):\n super(CocoSceneGraphDataset, self).__init__()\n self.use_converse = use_converse\n self.learned_transitivity = learned_transitivity\n self.learned_symmetry = learned_symmetry\n self.learned_converse = learned_converse\n self.include_dummies = include_dummies\n self.image_dir = image_dir\n # self.mask_size = image_size[0]\n self.mask_size = mask_size\n self.masks = True\n if self.mask_size == 0:\n self.masks = False\n self.mask_size = 32\n\n self.max_samples = max_samples\n self.normalize_images = normalize_images\n self.include_relationships = include_relationships\n self.set_image_size(image_size)\n self.use_transitivity = use_transitivity\n\n with open(instances_json, 'r') as f:\n instances_data = json.load(f)\n\n with open(stuff_json, 'r') as f:\n stuff_data = json.load(f)\n\n self.image_ids = []\n self.image_id_to_filename = {}\n self.image_id_to_size = {}\n for image_data in instances_data['images']:\n image_id = image_data['id']\n filename = image_data['file_name']\n width = image_data['width']\n height = image_data['height']\n self.image_ids.append(image_id)\n self.image_id_to_filename[image_id] = filename\n self.image_id_to_size[image_id] = (width, height)\n\n self.vocab = {\n 'object_name_to_idx': {},\n 'pred_name_to_idx': {},\n }\n object_idx_to_name = {}\n all_instance_categories = []\n for category_data in instances_data['categories']:\n category_id = category_data['id']\n category_name = category_data['name']\n all_instance_categories.append(category_name)\n object_idx_to_name[category_id] = category_name\n self.vocab['object_name_to_idx'][category_name] = category_id\n all_stuff_categories = []\n\n for category_data in stuff_data['categories']:\n category_name = category_data['name']\n category_id = category_data['id']\n all_stuff_categories.append(category_name)\n object_idx_to_name[category_id] = category_name\n self.vocab['object_name_to_idx'][category_name] = category_id\n\n if instance_whitelist is None:\n instance_whitelist = all_instance_categories\n if stuff_whitelist is None:\n stuff_whitelist = all_stuff_categories\n category_whitelist = set(instance_whitelist) | set(stuff_whitelist)\n\n # Add object data from instances\n self.image_id_to_objects = defaultdict(list)\n for object_data in instances_data['annotations']:\n image_id = object_data['image_id']\n _, _, w, h = object_data['bbox']\n W, H = self.image_id_to_size[image_id]\n box_area = (w * h) / (W * H)\n box_ok = box_area > min_object_size\n object_name = object_idx_to_name[object_data['category_id']]\n category_ok = object_name in category_whitelist\n other_ok = object_name != 'other' or include_other\n if box_ok and category_ok and other_ok:\n self.image_id_to_objects[image_id].append(object_data)\n\n # Add object data from stuff\n image_ids_with_stuff = set()\n for object_data in stuff_data['annotations']:\n image_id = object_data['image_id']\n image_ids_with_stuff.add(image_id)\n _, _, w, h = object_data['bbox']\n W, H = self.image_id_to_size[image_id]\n box_area = (w * h) / (W * H)\n box_ok = box_area > min_object_size\n object_name = 
object_idx_to_name[object_data['category_id']]\n category_ok = object_name in category_whitelist\n other_ok = object_name != 'other' or include_other\n if box_ok and category_ok and other_ok:\n self.image_id_to_objects[image_id].append(object_data)\n\n new_image_ids = []\n for image_id in self.image_ids:\n if image_id in image_ids_with_stuff:\n new_image_ids.append(image_id)\n self.image_ids = new_image_ids\n\n all_image_ids = set(self.image_id_to_filename.keys())\n image_ids_to_remove = all_image_ids - image_ids_with_stuff\n for image_id in image_ids_to_remove:\n self.image_id_to_filename.pop(image_id, None)\n self.image_id_to_size.pop(image_id, None)\n self.image_id_to_objects.pop(image_id, None)\n\n # COCO category labels start at 1, so use 0 for __image__\n self.vocab['object_name_to_idx']['__image__'] = 0\n\n # Build object_idx_to_name\n name_to_idx = self.vocab['object_name_to_idx']\n assert len(name_to_idx) == len(set(name_to_idx.values()))\n max_object_idx = max(name_to_idx.values())\n idx_to_name = ['NONE'] * (1 + max_object_idx)\n for name, idx in self.vocab['object_name_to_idx'].items():\n idx_to_name[idx] = name\n self.vocab['object_idx_to_name'] = idx_to_name\n\n # Prune images that have too few or too many objects\n new_image_ids = []\n total_objs = 0\n for image_id in self.image_ids:\n num_objs = len(self.image_id_to_objects[image_id])\n total_objs += num_objs\n if min_objects <= num_objs <= max_objects:\n new_image_ids.append(image_id)\n self.image_ids = new_image_ids\n\n self.register_augmented_relations()\n\n self.vocab[\"attributes\"] = {}\n self.vocab[\"attributes\"]['objects'] = self.vocab['object_name_to_idx']\n self.vocab[\"reverse_attributes\"] = {}\n for attr in self.vocab[\"attributes\"].keys():\n self.vocab[\"reverse_attributes\"][attr] = {v: k for k, v in self.vocab[\"attributes\"][attr].items()}",
"def from_json(cls, path):\n\n if not isinstance(path, Path):\n path = Path(path)\n\n directory, prefix = path.parent, path.stem\n\n with open(path) as fp:\n params = json.load(fp)\n model = cls(params['element'],\n params['r_cut'],\n params['gp']['sigma'],\n params['gp']['theta'],\n params['gp']['noise'])\n\n gp_filename = params['gp']['filename']\n try:\n model.gp.load(directory / gp_filename)\n except:\n warnings.warn(\"The many-body GP file is missing\")\n pass\n\n return model",
"def from_json(o, sd=None, fname=None, s=None, wts=None, gz=None, root_name=None):\n if gz is None:\n if isinstance(fname, str):\n gz = fname.endswith(\".gz\")\n else:\n gz = False\n\n # keeping track of elapsed time. want to make sure I don't do anything\n # that's too slow.\n start_time = time.time()\n # Get the model state dict from one of three sources\n if sd is not None: # Existing Python dict (for in-memory stuff).\n pass\n elif fname is not None: # Read in from a json file\n if gz:\n with gzip.open(fname, \"r\") as f:\n fr = f.read()\n sd = json.loads(fr)\n else:\n with open(fname, \"r\") as f:\n sd = json.load(f) # json file\n elif s is not None: # Use a json string (not really sure if useful)\n sd = json.loads(s) # json string\n else: # Didn't specify at least one source\n raise Exception(\"Need to specify a data source to load from\")\n dict_time = time.time() # To calculate how long it took to read file\n if wts is None: # if no StoreSpec object given use the default, which should\n wts = StoreSpec() # be the typical save everything important\n lookup = {} # A dict to use for a lookup tables\n suffixes = {} # A list of suffixes delayed to end so lookup is complete\n # Read toplevel component (is recursive)\n if root_name is None:\n for k in sd:\n if k.startswith(\"__\") and k.endswith(\"__\"):\n # This is metadata or maybe some similar future addition.\n continue\n else:\n root_name = k\n break # should be one root, use it's name\n _read_component(sd, o, wts, lookup=lookup, suffixes=suffixes, root_name=root_name)\n read_time = time.time() # to calc time to read model state minus suffixes\n # Now read in the suffixes\n _read_suffixes(lookup, suffixes)\n suffix_time = time.time() # to calculate time to read suffixes\n pdict = {} # return some performance information, to make sure not too slow\n pdict[\"etime_load_file\"] = dict_time - start_time\n pdict[\"etime_read_dict\"] = read_time - dict_time\n pdict[\"etime_read_suffixes\"] = suffix_time - read_time\n return pdict",
"def realize_text_and_extract_scene(scene, template, filter_objs):\n\n default_list = lambda: collections.defaultdict(list)\n graph = {'relationships': collections.defaultdict(default_list),\n 'counts': {}, 'exists': {}, 'history': [], 'objects': {}}\n\n # number of inputs\n n_inputs = template.get('inputs', 1)\n # sample a text template\n text_sample = random.choice(template['text'])\n text_sample_index = template['text'].index(text_sample)\n\n # extract attribute tags and get them into groups\n tags = re.findall('(<[\\d\\w]*>)', text_sample)\n\n tag_groups = collections.defaultdict(list)\n for tag in tags:\n group_id = get_tag_group(tag)\n tag_groups[group_id].append(tag)\n\n # sample a random element from filtered\n arg_sample = random.choice(filter_objs)\n # scene information obtained from the current round\n graph_item = arg_sample['graph']\n\n # remove tags from text not allowed by filter_objs\n for arg_ind in range(n_inputs):\n obj_sample = arg_sample['objects'][arg_ind]\n avail_attrs = obj_sample['optional'] + obj_sample['required']\n\n for ii in tag_groups[arg_ind][::-1]:\n if mapping(ii) not in avail_attrs:\n tag_groups[arg_ind].remove(ii)\n text_sample = replace_attribute(text_sample, ii, arg_sample, True)\n\n # assert that all required attributes are present as tags\n for attribute in obj_sample['required']:\n required_tag = inv_mapping(attribute, arg_ind)\n assert required_tag in tag_groups[arg_ind], \\\n 'A required attribute is missing in template!'\n\n # start compiling tags to keep\n tags_to_keep = [inv_mapping(ii, arg_ind) for ii in obj_sample['required']]\n\n # filter out those not present in text template\n optional_tags = [inv_mapping(ii,arg_ind) for ii in obj_sample['optional']]\n optional_tags = [ii for ii in optional_tags if ii in tag_groups[arg_ind]]\n\n # if tags_to_keep is empty, sample from optional with 1:70 2:25 3:5\n if len(optional_tags) > 0:\n if len(tags_to_keep) > 0:\n n_tags_sample = [0, 1, 2]\n else: n_tags_sample = [1, 2, 3]\n n_sample = np.random.choice(n_tags_sample, 1,\n p=gvars.METAINFO['probabilities'],\n replace=False)\n # lower cap at the length of optional\n n_sample = min(n_sample[0], len(optional_tags))\n if n_sample > 0:\n tags_to_keep += random.sample(optional_tags, n_sample)\n\n # now create a dictionary of placeholders with actual attribute values\n for tag in tag_groups[arg_ind]:\n remove = tag not in tags_to_keep\n text_sample = replace_attribute(text_sample, tag, arg_sample, remove)\n\n # remove attributes from objects not included in tags_to_keep\n if 'objects' in graph_item:\n for ii in gvars.METAINFO['attributes']:\n if inv_mapping(ii, arg_ind) not in tags_to_keep:\n if ii in graph_item['objects'][arg_ind]:\n del graph_item['objects'][arg_ind][ii]\n\n # record the caption info\n graph_item['round'] = 0\n\n sample = {}\n sample['template_info'] = [copy.deepcopy(template)]\n del sample['template_info'][-1]['text']\n sample['template_info'][-1]['index'] = text_sample_index\n sample['caption'] = text_sample\n sample['dialog'] = []\n\n # append history, update scene graph, and save the new scene graph\n graph['history'].append(graph_item)\n sample['graph'] = utils.merge_update_scene_graph(graph, graph_item)\n return sample",
"def build_from_file(path):\n with open(path) as obj:\n raw_file = obj.read()\n file_lines = [line.split(\" \") for line in raw_file.split(\"\\n\")]\n\n vertices = {}\n faces = []\n for number, line in enumerate(file_lines):\n if line[0] == \"v\":\n vertices[number + 1] = tuple(map(float, line[1:]))\n if line[0] == \"f\":\n face = []\n for index in line[1:]:\n face.append(vertices[int(index)])\n face.append(vertices[int(line[1])])\n faces.append(face)\n return Object(points=faces)"
] | [
"0.62307423",
"0.61761534",
"0.60450804",
"0.5899949",
"0.57367736",
"0.57106173",
"0.5701228",
"0.5658538",
"0.560505",
"0.55704886",
"0.5564936",
"0.55112326",
"0.54868126",
"0.548616",
"0.54856515",
"0.54752856",
"0.54382986",
"0.5383373",
"0.5370884",
"0.5354638",
"0.5324452",
"0.5314156",
"0.53111714",
"0.530899",
"0.53030884",
"0.529981",
"0.5295116",
"0.52938986",
"0.5275745",
"0.5275351"
] | 0.6196547 | 1 |
reconstruct a sg from object names and coordinates | def recon_sg(obj_names, locations, if_return_assigns=False, if_add_bases=True):
    location_dict = {}
    objects = []
    if type(locations) == torch.Tensor:
        locations = locations.cpu().numpy()
    elif isinstance(locations, list):
        locations = np.array(locations)
    locations = locations.reshape(-1, 2)
    k_means_assign = kmeans(locations[:, 0])
    for idx, object_id in enumerate(obj_names):
        a_key = k_means_assign[idx]
        if a_key not in location_dict:
            location_dict[a_key] = [(object_id, locations[idx][1])]
        else:
            location_dict[a_key].append((object_id, locations[idx][1]))
        objects.append(object_id)
    relationships = []
    if if_add_bases:
        relationships.extend([
            ["brown", "left", "purple"],
            ["purple", "left", "cyan"],
        ])
    for du3 in location_dict:
        location = sorted(location_dict[du3], key=lambda x: x[1])
        while len(location) > 1:
            o1 = location.pop()[0]
            o2 = location[-1][0]
            relationships.append([o1, "up", o2])
    if if_return_assigns:
        return relationships, k_means_assign
    return relationships | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _construct_new_2d_object(new_xp,\n half_w,\n new_yp,\n half_l):\n\n new_x1 = float(new_xp - half_w)\n new_x2 = float(new_xp + half_w)\n new_y1 = float(new_yp - half_l)\n new_y2 = float(new_yp + half_l)\n\n new_obj = od.ObjectLabel()\n new_obj.x1 = new_x1\n new_obj.x2 = new_x2\n new_obj.y1 = new_y1\n new_obj.y2 = new_y2\n\n new_box = np.array([new_x1, new_y1, new_x2, new_y2])\n\n return new_obj, new_box",
"def coords_to_structure(self) -> None:\n ...",
"def make_SHIGUCHI_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset):\n \"\"\"\n 1 Get information from m1_info, m2_info, m3_info\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n x_m2 = m2_info[0]\n y_m2 = m2_info[1]\n z_m2 = m2_info[2]\n\n m2_points = m2_info[3]\n\n m2_p0 = m2_points[0]\n m2_p1 = m2_points[1]\n m2_p2 = m2_points[2]\n m2_p3 = m2_points[3]\n\n x_m3 = m3_info[0]\n y_m3 = m3_info[1]\n z_m = m3_info[2]\n\n m3_points = m3_info[3]\n\n m3_p0 = m3_points[0]\n m3_p1 = m3_points[1]\n m3_p2 = m3_points[2]\n m3_p3 = m3_points[3]\n\n x_m4 = m4_info[0]\n y_m4 = m4_info[1]\n z_m = m4_info[2]\n\n m4_points = m4_info[3]\n\n m4_p0 = m4_points[0]\n m4_p1 = m4_points[1]\n m4_p2 = m4_points[2]\n m4_p3 = m4_points[3]\n\n \"\"\"\n 2 Get base point to make SHIGUCHI\n m1 & m2 -> base point = m2_p3 = (dx_U_right, dy_U_right)\n m1 & m3 -> base point = m3_p2 = (dx_L_right, dy_L_right)\n\n m4 & m2 -> base point = m2_p0 = (dx_U_left, dy_U_left)\n m4 & m3 -> base point = m3_p1 = (dx_L_left, dy_L_left)\n \"\"\"\n dx_U_right = m2_p3[0]\n dy_U_right = m2_p3[1]\n\n dx_L_right = m3_p2[0]\n dy_L_right = m3_p2[1]\n\n dx_U_left = m2_p0[0]\n dy_U_left = m2_p0[1]\n\n dx_L_left = m3_p1[0]\n dy_L_left = m3_p1[1]\n\n \"\"\"\n 3 Call appropriate function.\n \"\"\"\n if SHIGUCHI_name == 'TOME':\n pass\n elif SHIGUCHI_name == 'IRIWA':\n # Right side\n dx = dx_U_right\n dy = dy_U_right\n m_info = m2_info\n choice = 'UpperRight'\n m2_right_KUMIKI_points1, m2_right_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m2_right_KUMIKI_points1)\n # rs.AddPolyline(m2_right_KUMIKI_points2)\n\n dx = dx_L_right\n dy = dy_L_right\n m_info = m3_info\n choice = 'LowerRight'\n m3_right_KUMIKI_points1, m3_right_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m3_right_KUMIKI_points1)\n # rs.AddPolyline(m3_right_KUMIKI_points2)\n\n # Left side\n dx = dx_U_left\n dy = dy_U_left\n m_info = m2_info\n choice = 'UpperLeft'\n m2_left_KUMIKI_points1, m2_left_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m2_left_KUMIKI_points1)\n # rs.AddPolyline(m2_left_KUMIKI_points2)\n\n dx = dx_L_left\n dy = dy_L_left\n m_info = m3_info\n choice = 'LowerLeft'\n m3_left_KUMIKI_points1, m3_left_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m3_left_KUMIKI_points1)\n # rs.AddPolyline(m3_left_KUMIKI_points2)\n\n elif SHIGUCHI_name == 'SANMAIKUMI':\n pass\n elif SHIGUCHI_name == 'AIKAKI':\n pass\n elif SHIGUCHI_name == 'HAKO':\n pass\n else:\n sys.exit()\n\n SHIGUCHI_list =\\\n [m2_right_KUMIKI_points1, m2_right_KUMIKI_points2,\\\n m3_right_KUMIKI_points1, m3_right_KUMIKI_points2,\\\n m2_left_KUMIKI_points1, m2_left_KUMIKI_points2,\\\n m3_left_KUMIKI_points1, m3_left_KUMIKI_points2]\n\n return SHIGUCHI_list",
"def psana_geom_splitter(psf, returned_units='mm'):\n #geom = cspad.geometry(event)\n origin_64 = np.zeros((64, 3))\n FS_64 = np.zeros_like(origin_64)\n SS_64 = np.zeros_like(origin_64)\n\n origin_32, SS_32, FS_32 = map(np.array, zip(*psf))\n for i in range(32):\n # create the origins of each sub asic\n origin_A = origin_32[i]\n shift = 194. * 109.92 + (274.8 - 109.92) * 2.\n unit_f = FS_32[i] / np.linalg.norm(FS_32[i])\n origin_B = origin_A + unit_f * shift\n\n # save two sub-asics per each of the 32 actual asics\n idx_A = 2 * i\n idx_B = 2 * i + 1\n origin_64[idx_A] = origin_A\n origin_64[idx_B] = origin_B\n FS_64[idx_A] = FS_64[idx_B] = FS_32[i]\n SS_64[idx_A] = SS_64[idx_B] = SS_32[i]\n\n if returned_units == \"mm\": # dials convention\n return origin_64 / 1000., SS_64 / 1000., FS_64 / 1000.,\n elif returned_units == \"um\": # psgeom convention\n return origin_64, SS_64, FS_64\n elif returned_units == \"pixels\": # crystfel convention\n return origin_64 / 109.92, SS_64 / 109.92, FS_64 / 109.92",
"def model_geom_fr_scratch_2d(): \n\n geom = [[],[]]\n geom2 = [[],[]]\n\n obj = object25d()\n\n #add new geom and auto increment the ids\n polys = [(1,2,3), (2,3,4) ]\n pts = [(1,1,1),(0,1,1),(-1,-1,1),(2,-2,1)]\n geom = obj.insert_polygons(polys, pts, geom=geom) \n\n \n polys = [(1,2,3,4) ]\n pts = [(4,-4.3,-3),(1.5,-2.5,-2.1),(-2,2,-4),(4,-4.2,1)]\n geom2 = obj.insert_polygons(polys, pts, geom=geom2) \n\n # use insert to add geom to object \n obj.insert(geom) \n obj.insert(geom2) \n \n # see what we have done, or not done \n obj.show() \n\n obj.save(\"3d_obj/foo.obj\")",
"def __init__(self, name, geometry):\n self.name = name\n self.geometry = geometry",
"def update_from(self, grp_names):\n import GEOM, SMESH\n mesh_types = {\n GEOM.VERTEX : SMESH.NODE,\n GEOM.EDGE : SMESH.EDGE,\n GEOM.WIRE : SMESH.EDGE,\n GEOM.FACE : SMESH.FACE,\n GEOM.SHELL : SMESH.FACE,\n GEOM.SOLID : SMESH.VOLUME,\n GEOM.COMPSOLID : SMESH.VOLUME,\n }\n smesh = self.get_smesh()\n\n\n smesh_grps_MA = []\n smesh_grps_NO = []\n for grp in smesh.GetGroups() :\n if str(grp.GetType()) == 'NODE' :\n smesh_grps_NO.append(grp.GetName())\n else :\n smesh_grps_MA.append(grp.GetName())\n\n print smesh_grps_MA,smesh_grps_NO\n done = False\n for geom in self.give_geom().get_children():\n grp_name = geom.read_name()\n #if grp_name in smesh_grps:\n # continue\n #Modif Fournier\n print grp_name\n if grp_name in grp_names[0]:\n if grp_name in smesh_grps_MA:\n pass\n else :\n mesh_type = mesh_types.get(geom.get_shape_type())\n if mesh_type:\n #smesh.CreateGroup(mesh_type, grp_name)\n smesh.CreateGroupFromGEOM(mesh_type,grp_name,geom.get_sgeom())\n done = True\n if grp_name in grp_names[1]:\n if grp_name in smesh_grps_NO:\n continue\n #smesh.CreateGroup(SMESH.NODE,grp_name)\n smesh.CreateGroupFromGEOM(SMESH.NODE,grp_name,geom.get_sgeom())\n done = True\n return done",
"def fromVertices(cls,\n xp0, yp0, zp0, xp1, yp1, zp1,\n xp2, yp2, zp2, xp3, yp3, zp3,\n origin,\n group_index=None,\n reference=None):\n if len(xp0) == len(yp0) == len(zp0) == len(xp1) == len(yp1) == \\\n len(zp1) == len(xp2) == len(yp2) == len(zp2) == len(xp3) == \\\n len(yp3) == len(zp3):\n pass\n else:\n raise ShakeLibException('All vectors specifying quadrilateral '\n 'vertices must have the same length.')\n\n nq = len(xp0)\n if group_index is not None:\n if len(group_index) != nq:\n raise Exception(\n \"group_index must have same length as vertices.\")\n else:\n group_index = np.array(range(nq))\n\n xp0 = np.array(xp0, dtype='d')\n yp0 = np.array(yp0, dtype='d')\n zp0 = np.array(zp0, dtype='d')\n xp1 = np.array(xp1, dtype='d')\n yp1 = np.array(yp1, dtype='d')\n zp1 = np.array(zp1, dtype='d')\n xp2 = np.array(xp2, dtype='d')\n yp2 = np.array(yp2, dtype='d')\n zp2 = np.array(zp2, dtype='d')\n xp3 = np.array(xp3, dtype='d')\n yp3 = np.array(yp3, dtype='d')\n zp3 = np.array(zp3, dtype='d')\n\n #----------------------------------------------------------------------\n # Create GeoJSON object\n #----------------------------------------------------------------------\n\n coords = []\n u_groups = np.unique(group_index)\n n_groups = len(u_groups)\n for i in range(n_groups):\n ind = np.where(u_groups[i] == group_index)[0]\n lons = np.concatenate(\n [xp0[ind[0]].reshape((1,)),\n xp1[ind],\n xp2[ind][::-1],\n xp3[ind][::-1][-1].reshape((1,)),\n xp0[ind[0]].reshape((1,))\n ])\n lats = np.concatenate(\n [yp0[ind[0]].reshape((1,)),\n yp1[ind],\n yp2[ind][::-1],\n yp3[ind][::-1][-1].reshape((1,)),\n yp0[ind[0]].reshape((1,))\n ])\n deps = np.concatenate(\n [zp0[ind[0]].reshape((1,)),\n zp1[ind],\n zp2[ind][::-1],\n zp3[ind][::-1][-1].reshape((1,)),\n zp0[ind[0]].reshape((1,))\n ])\n\n poly = []\n for lon, lat, dep in zip(lons, lats, deps):\n poly.append([lon, lat, dep])\n coords.append(poly)\n\n d = {\"type\": \"FeatureCollection\",\n \"metadata\": {},\n \"features\": [{\n \"type\": \"Feature\",\n \"properties\": {\n \"rupture type\": \"rupture extent\",\n \"reference\": reference,\n },\n \"geometry\": {\n \"type\": \"MultiPolygon\",\n \"coordinates\": [coords]\n }\n }]}\n\n # Add origin information to metadata\n odict = origin.__dict__\n for k, v in odict.items():\n if isinstance(v, HistoricTime):\n d['metadata'][k] = v.strftime('%Y-%m-%dT%H:%M:%SZ')\n else:\n d['metadata'][k] = v\n if hasattr(origin, 'id'):\n d['metadata']['eventid'] = origin.id\n\n return cls(d, origin)",
"def buildSingle( name, shape=\"circle\", color=\"yellow\", position=(0, 0, 0)):\n master = \"{0}_GRP\".format(name)\n cmds.createNode(\"transform\", n=master)\n #create joint\n cmds.select( cl=True )\n jnt = cmds.joint( n=(name + \"_JNT\") )\n #joint null\n jNull = cmds.group(n=(jnt+\"_NULL\"))\n #control\n conBuffer = control.Control( name = (name+\"_CON\"), shape=shape, color=color)\n conBuffer.create()\n con = conBuffer.getName()\n #control null\n cNull = conBuffer.getNull()\n cmds.parent( jNull, master )\n cmds.parent( cNull, master)\n attrs = [\"t\", \"r\", \"s\"]\n for attr in attrs:\n cmds.connectAttr(\n \"{0}.{1}\".format(con, attr),\n \"{0}.{1}\".format(jnt, attr)\n )\n #position the mofo\n #cmds.xform( cNull, t=position, ws=True )\n #kill it off by selecting the con. It pleases me.\n cmds.select(con)\n result = {\n \"master\": master,\n \"cNull\": cNull,\n \"jNull\": jNull,\n \"con\": con,\n \"jnt\": jnt\n }\n return result",
"def createScatter(self):\n\t\tcurv = str( self.curve_le.text() )\n\t\tobjCount = self.controlCount_sbx.value()\n\t\trandom = self.random_chb.isChecked()\n\t\tuseTip = self.useTips_chb.isChecked()\n\t\tkeepConn = self.keepConnected_chb.isChecked()\n\t\ttangent = self.tangent_chb.isChecked()\n\t\tgroupIt = self.groupIt_chb.isChecked()\n\t\tanimated = self.animated_chb.isChecked()\n\t\tobjs = []\n\t\tfor index in xrange(self.objects_lw.count()):\n\t\t\tobjs.append( mn.Node( str ( self.objects_lw.item(index).text() ) ) )\n\t\tcrvScat.CurveScatter( \n\t\t\t\tcurve = crv.Curve( curv ), \n\t\t\t\tobjects = objs,\n\t\t\t\tpointsCount = objCount, \n\t\t\t\tuseTips = useTip, \n\t\t\t\tkeepConnected = keepConn,\n\t\t\t\ttangent = tangent,\n\t\t\t\trand = random,\n\t\t\t\tgroupit = groupIt,\n\t\t\t\tanimated = animated)",
"def generate(pts):\n cmds.polyCreateFacet(name=\"shirt\", p=points)\n cmds.polyTriangulate()\n cmds.polySubdivideFacet(dv=SUBDIVISIONS)\n cmds.polyTriangulate()",
"def __init__(self):\n self.superelevations = []\n self.shapes = []",
"def create_spheres(self,depth_arr):\n\n\n '''\n depth_arr- depth image as numpy array\n '''\n\n try:\n #points=[nose,left_wrist,right,wrist,left_ankle,right ankle]\n points=[self.rpts[0],self.rpts[15],self.rpts[16],self.rpts[27],self.rpts[28]]\n self.spheres.points=[]\n self.spheres.header.frame_id = \"kinect_frame\"\n self.spheres.header.stamp= rospy.Time.now()\n \n self.spheres.id = 0\n self.spheres.action =Marker.ADD\n \n #points\n self.spheres.type = Marker.SPHERE_LIST\n self.spheres.color.r = 1.0\n self.spheres.color.a = 1.0\n \n self.spheres.scale.x = 0.08\n self.spheres.scale.y = 0.08\n self.spheres.scale.z = 0.01\n for p in points:\n depth_val=float(depth_arr[p[1], p[0]])\n pts_x,pts_y,pts_z=self.depth_to_xyz(p[0],p[1],depth_val)\n \n self.sphere_point=Point()\n self.sphere_point.x = pts_x\n self.sphere_point.y = pts_y\n self.sphere_point.z = pts_z\n self.spheres.points.append(self.sphere_point)\n \n except:\n pass",
"def transform_geometries(datasource, src_epsg, dst_epsg):\n # Part 1\n src_srs = osr.SpatialReference()\n src_srs.ImportFromEPSG(src_epsg)\n dst_srs = osr.SpatialReference()\n dst_srs.ImportFromEPSG(dst_epsg)\n transformation = osr.CoordinateTransformation(src_srs, dst_srs)\n layer = datasource.GetLayerByIndex(0)\n \n # Part 2\n geoms = []\n layer.ResetReading()\n for feature in layer:\n geom = feature.GetGeometryRef().Clone()\n geom.Transform(transformation)\n geoms.append(geom)\n return geoms",
"def m1_make_lower_shape_points_list(tx, ty, m1_info, SEN_info):\n \"\"\"\n 1 Get information from m1_info & SEN_info\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n u_n = SEN_info[4]\n l_n = SEN_info[5]\n set = SEN_info[6]\n u_offset = SEN_info[7]\n l_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n lower_shape_upper_left_row list\n lower_shape_upper_right_row list\n\n lower_shape_lower_left_row list\n lower_shape_lower_right_row list\n \"\"\"\n # upper side\n lower_shape_upper_left_row = []\n lower_shape_upper_right_row = []\n\n for i in range(u_n):\n # left row\n ix = tx + t_sen\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p4, p3, p2, p1, p8, p7, p6, p5]\n lower_shape_upper_left_row.extend((left_points))\n\n for i in range(u_n - 1, -1, -1):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p8, p7, p6, p5, p4, p3, p2, p1]\n lower_shape_upper_right_row.extend(right_points)\n\n # lower side\n lower_shape_lower_left_row = []\n lower_shape_lower_right_row = []\n\n for i in range(l_n - 1, -1, -1):\n # left row\n ix = tx + t_sen\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p4, p3, p2, p1, p8, p7, p6, p5]\n lower_shape_lower_left_row.extend((left_points))\n\n for i in range(l_n):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p8, p7, p6, p5, p4, p3, p2, p1]\n lower_shape_lower_right_row.extend(right_points)\n\n lower_shape_upper = [lower_shape_upper_left_row, lower_shape_upper_right_row]\n lower_shape_lower = [lower_shape_lower_left_row, lower_shape_lower_right_row]\n\n return lower_shape_upper, lower_shape_lower",
"def m1_make_upper_shape_points_list(tx, ty, m1_info, SEN_info):\n \"\"\"\n 1 Get information from m1_info & SEN_info\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n u_n = SEN_info[4]\n l_n = SEN_info[5]\n set = SEN_info[6]\n u_offset = SEN_info[7]\n l_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n upper_shape_upper_left_row list\n upper_shape_upper_right_row list\n\n upper_shape_lower_left_row list\n upper_shape_lower_right_row list\n \"\"\"\n # upper side\n upper_shape_upper_left_row = []\n upper_shape_upper_right_row = []\n\n for i in range(u_n):\n # left row\n ix = tx + t_sen\n iy = ty + (i * u_offset + set) + 10 # have to \"+\" something now its magic number\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p1, p2, p3, p4, p5, p6, p7, p8]\n upper_shape_upper_left_row.extend((left_points))\n\n for i in range(u_n - 1, -1, -1):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p5, p6, p7, p8, p1, p2, p3, p4]\n upper_shape_upper_right_row.extend(right_points)\n\n # lower side\n upper_shape_lower_left_row = []\n upper_shape_lower_right_row = []\n\n for i in range(l_n -1, -1, -1):\n # left row\n ix = tx + t_sen\n iy = ty - (i * l_offset + set) - 10 # have to \"-\" something now its magic number\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p1, p2, p3, p4, p5, p6, p7, p8]\n upper_shape_lower_left_row.extend((left_points))\n\n for i in range(l_n):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = Y_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p5, p6, p7, p8, p1, p2, p3, p4]\n upper_shape_lower_right_row.extend(right_points)\n\n upper_shape_upper = [upper_shape_upper_left_row, upper_shape_upper_right_row]\n upper_shape_lower = [upper_shape_lower_left_row, upper_shape_lower_right_row]\n\n return upper_shape_upper, upper_shape_lower",
"def fromTrace(cls, xp0, yp0, xp1, yp1, zp, widths, dips, origin,\n strike=None, group_index=None, reference=\"\"):\n if len(xp0) == len(yp0) == len(xp1) == len(\n yp1) == len(zp) == len(dips) == len(widths):\n pass\n else:\n raise ShakeLibException(\n 'Number of xp0,yp0,xp1,yp1,zp,widths,dips points must be '\\\n 'equal.')\n if strike is None:\n pass\n else:\n if (len(xp0) == len(strike)) | (len(strike) == 1):\n pass\n else:\n raise ShakeLibException(\n 'Strike must be None, scalar, or same length as '\n 'trace coordinates.')\n\n if group_index is None:\n group_index = np.array(range(len(xp0)))\n\n # Convert dips to radians\n dips = np.radians(dips)\n\n # Ensure that all input sequences are numpy arrays\n xp0 = np.array(xp0, dtype='d')\n xp1 = np.array(xp1, dtype='d')\n yp0 = np.array(yp0, dtype='d')\n yp1 = np.array(yp1, dtype='d')\n zp = np.array(zp, dtype='d')\n widths = np.array(widths, dtype='d')\n dips = np.array(dips, dtype='d')\n\n # Get a projection object\n west = np.min((xp0.min(), xp1.min()))\n east = np.max((xp0.max(), xp1.max()))\n south = np.min((yp0.min(), yp1.min()))\n north = np.max((yp0.max(), yp1.max()))\n\n # Projected coordinates are in km\n proj = get_orthographic_projection(west, east, north, south)\n xp2 = np.zeros_like(xp0)\n xp3 = np.zeros_like(xp0)\n yp2 = np.zeros_like(xp0)\n yp3 = np.zeros_like(xp0)\n zpdown = np.zeros_like(zp)\n for i in range(0, len(xp0)):\n # Project the top edge coordinates\n p0x, p0y = proj(xp0[i], yp0[i])\n p1x, p1y = proj(xp1[i], yp1[i])\n\n # Get the rotation angle defined by these two points\n if strike is None:\n dx = p1x - p0x\n dy = p1y - p0y\n theta = np.arctan2(dx, dy) # theta is angle from north\n elif len(strike) == 1:\n theta = np.radians(strike[0])\n else:\n theta = np.radians(strike[i])\n\n R = np.array([[np.cos(theta), -np.sin(theta)],\n [np.sin(theta), np.cos(theta)]])\n\n # Rotate the top edge points into a new coordinate system (vertical\n # line)\n p0 = np.array([p0x, p0y])\n p1 = np.array([p1x, p1y])\n p0p = np.dot(R, p0)\n p1p = np.dot(R, p1)\n\n # Get right side coordinates in project, rotated system\n dz = np.sin(dips[i]) * widths[i]\n dx = np.cos(dips[i]) * widths[i]\n p3xp = p0p[0] + dx\n p3yp = p0p[1]\n p2xp = p1p[0] + dx\n p2yp = p1p[1]\n\n # Get right side coordinates in un-rotated projected system\n p3p = np.array([p3xp, p3yp])\n p2p = np.array([p2xp, p2yp])\n Rback = np.array([[np.cos(-theta), -np.sin(-theta)],\n [np.sin(-theta), np.cos(-theta)]])\n p3 = np.dot(Rback, p3p)\n p2 = np.dot(Rback, p2p)\n p3x = np.array([p3[0]])\n p3y = np.array([p3[1]])\n p2x = np.array([p2[0]])\n p2y = np.array([p2[1]])\n\n # project lower edge points back to lat/lon coordinates\n lon3, lat3 = proj(p3x, p3y, reverse=True)\n lon2, lat2 = proj(p2x, p2y, reverse=True)\n\n xp2[i] = lon2\n xp3[i] = lon3\n yp2[i] = lat2\n yp3[i] = lat3\n zpdown[i] = zp[i] + dz\n\n #----------------------------------------------------------------------\n # Create GeoJSON object\n #----------------------------------------------------------------------\n\n coords = []\n u_groups = np.unique(group_index)\n n_groups = len(u_groups)\n for i in range(n_groups):\n ind = np.where(u_groups[i] == group_index)[0]\n lons = np.concatenate(\n [xp0[ind[0]].reshape((1,)),\n xp1[ind], xp2[ind][::-1],\n xp3[ind][::-1][-1].reshape((1,)),\n xp0[ind[0]].reshape((1,))\n ])\n lats = np.concatenate(\n [yp0[ind[0]].reshape((1,)),\n yp1[ind],\n yp2[ind][::-1],\n yp3[ind][::-1][-1].reshape((1,)),\n yp0[ind[0]].reshape((1,))\n ])\n deps = np.concatenate(\n 
[zp[ind[0]].reshape((1,)),\n zp[ind],\n zpdown[ind][::-1],\n zpdown[ind][::-1][-1].reshape((1,)),\n zp[ind[0]].reshape((1,))])\n\n poly = []\n for lon, lat, dep in zip(lons, lats, deps):\n poly.append([lon, lat, dep])\n coords.append(poly)\n\n d = {\"type\": \"FeatureCollection\",\n \"metadata\": {},\n \"features\": [{\n \"type\": \"Feature\",\n \"properties\": {\n \"rupture type\": \"rupture extent\",\n \"reference\": reference,\n },\n \"geometry\": {\n \"type\": \"MultiPolygon\",\n \"coordinates\": [coords]\n }\n }]}\n\n # Add origin information to metadata\n odict = origin.__dict__\n for k, v in odict.items():\n if isinstance(v, HistoricTime):\n d['metadata'][k] = v.strftime('%Y-%m-%dT%H:%M:%SZ')\n else:\n d['metadata'][k] = v\n\n return cls(d, origin)",
"def spatial(self):",
"def __init__(self, geom):\n self.geom = deepcopy(geom)",
"def pristine_coords_to_objects(list_of_coords):\n list_of_objects = []\n for element in range(len(list_of_coords)):\n list_of_objects.append(Atom(element, \"CX\", \"GGG\", element, list_of_coords[element][0], list_of_coords[element][1], list_of_coords[element][2]))\n return list_of_objects",
"def BeamShapes2SID(s_G, s_span, m, EI, U, dU, ddU, int_method='trapz', damping=None, consistency='', shapeIntegrals=False):\r\n # --- Method 1 using shape integrals\r\n if shapeIntegrals:\r\n from welib.yams.flexibility import shapeIntegrals\r\n\r\n p = shapeIntegrals(s_G, s_span, m, U, dU, ddU, method=int_method, EI=EI)\r\n sid = shapeIntegrals2SID(p, consistency=consistency)\r\n sid.p=p\r\n return sid\r\n\r\n # --- Method 2 relying on Generalized functions\r\n from welib.yams.flexibility import GMBeam, GKBeam, GKBeamStiffnening, GKBeamStiffneningSplit\r\n MM, IT = GMBeam(s_G, s_span, m, U, rot_terms=True, method=int_method, main_axis='z', M1=True) \r\n Gr, Ge, Oe, Oe6 = IT['Gr'], IT['Ge'], IT['Oe'], IT['Oe6']\r\n\r\n KK = GKBeam(s_span, EI, ddU, bOrth = False, method = int_method)\r\n pKg = GKBeamStiffneningSplit(s_G, s_span, dU, m , main_axis='z', method = int_method)\r\n\r\n nq = U.shape[0]\r\n nNodes = U.shape[2]\r\n if int_method=='OpenFAST':\r\n I = np.arange(nNodes)[1:-1]\r\n else:\r\n I = np.arange(nNodes)\r\n\r\n sid=SID(nq=nq, GrOrder=1)\r\n\r\n sid.refmod.mass= MM[0,0]\r\n for i in np.arange(nq):\r\n sid.refmod.ielastq[i]= 'Eigen Mode {:4d}'.format(i+1)\r\n\r\n for ii, i in enumerate(I):\r\n f=Node(nq=nq)\r\n f.name= ii\r\n f.rframe= 'body ref'\r\n # Origin\r\n f.origin.M0[:,0]= s_G[:,i]\r\n for j in np.arange(nq):\r\n f.origin.M1[:, 0, j] = U[j][:,i]\r\n # Phi - Node translations/shape functions (6.46),(6.80), (6.337), (5.122), (6.415)\r\n for j in np.arange(nq):\r\n f.phi.M0[:,j] = U[j][:,i]\r\n ## Stiffnening due to force\r\n #if 'Fend' in GKg.keys():\r\n # if i==nNodes-1:\r\n # f.phi.setorder(1)\r\n # f.phi.M1[iMaxDim, :, :]= GKg['Fend']\r\n # Psi - Node rotations/slope (6.81), (6.337)\r\n for j in np.arange(nq):\r\n f.psi.M0[:,j] = dU[j][:,i]\r\n\r\n # Orientation\r\n f.APmod.M0 = np.eye(3) # keep original orientation for now E{i_elem};\r\n for j in np.arange(nq):\r\n f.APmod.M1[:, :, j]= skew(f.psi.M0[:, j]);\r\n\r\n # --- Pretension\r\n #f.sigma.M0 = np.zeros((6, 1)) # no pretension for now.\r\n sid.frame.append(f)\r\n\r\n # --- mdCM\r\n sid.mdCM.M0= np.array([\r\n np.sum(skew([0.5, 0 , 0 ])*IT['Mxt'].T), \r\n np.sum(skew([0, 0.5, 0 ])*IT['Mxt'].T), \r\n np.sum(skew([0, 0 , 0.5])*IT['Mxt'].T)\r\n ]).reshape(3,1)\r\n for j in np.arange(nq):\r\n sid.mdCM.M1[:, 0, j]= IT['Mxg'][:, j].T # Ct0 Mgt\r\n # sid.mdCM.M1[:, 0, j]= p['Ct'][j, :] # Ct0 Mgt # Verify\r\n\r\n # --- J\r\n sid.J.M0= IT['Mtt']\r\n for j in np.arange(nq):\r\n sid.J.M1[:, :, j]= IT['Mtt_M1'][:,:,j]\r\n # sid.J.M1[:, :, j]= -C4[:, :, j] - C4[:, :, j].T;\r\n\r\n\r\n # --- Cr - Mtg = \\int [~s] Phi dm Or: Mrg, Cr^T\r\n sid.Cr.M0 = IT['Mtg'].T\r\n # TODO TODO TODO M1 term\r\n sid.Cr.M1[:,0,:] = IT['Mtg_M1'][0,:,:] # Kr[0][:,:]; # nq x nq # TODO TODO TODO WEIRD NO NEED FOR TRANSPOSE\r\n sid.Cr.M1[:,1,:] = IT['Mtg_M1'][1,:,:] # Kr[1][:,:]; \r\n sid.Cr.M1[:,2,:] = IT['Mtg_M1'][2,:,:] # Kr[2][:,:]; \r\n\r\n # --- Ct - Mxg = \\int Phi dm Or: Psi , Ct^T\r\n # NOTE: Ct.M1 = K0t is geometrical stiffening due to translational acceleration/gravity\r\n sid.Ct.M0= IT['Mxg'].T\r\n #sid.Ct.M0= p['C1'].T\r\n sid.Ct.M1[:, 0, :] = pKg['K0t'][0,:,:] # Gkg['t_ax'], K0t is (3,nf,nf)\r\n sid.Ct.M1[:, 1, :] = pKg['K0t'][1,:,:]\r\n sid.Ct.M1[:, 2, :] = pKg['K0t'][2,:,:]\r\n\r\n # --- Gr \r\n for j in np.arange(nq):\r\n sid.Gr.M0[0:3, 3*j:3*j+3]= IT['Gr'][j][:,:]; # columns concatenation 3 per shapes\r\n for j in np.arange(nq):\r\n for k in np.arange(nq):\r\n sid.Gr.M1[0:3, 3*j:3*j+3, k]= IT['Gr_M1'][j][:,:,k]; # columns concatenation 
3 per shapes\r\n\r\n # --- Ge = 2 \\int Phi^t phi_j~^t dm = [2C5']\r\n # Ge Taylor(0,nq,3*nq,0 ,0) # Gyroscopic matrix for modal coordinates\r\n # IT['Ge'] = np.zeros((nf,nf,3))\r\n for j in np.arange(nq):\r\n sid.Ge.M0[0:nq, 3*j:3*j+3]= IT['Ge'][j,:,:] \r\n # TODO revisit this\r\n \r\n # --- Oe \r\n # see [2] (6.407) \r\n # M0: nq x 6\r\n # M1: nq x 6 x nq\r\n sid.Oe.M0= IT['Oe6'] # nq x 6\r\n\r\n sid.Oe.M1_base = np.zeros(sid.Oe.M1.shape)\r\n sid.Oe.M1_geom = np.zeros(sid.Oe.M1.shape)\r\n\r\n sid.Oe.M1_base= IT['Oe6_M1']\r\n\r\n sid.Oe.M1_geom[:, 0, :]= pKg['K0omega'][0, 0] \r\n sid.Oe.M1_geom[:, 1, :]= pKg['K0omega'][1, 1] \r\n sid.Oe.M1_geom[:, 2, :]= pKg['K0omega'][2, 2] \r\n sid.Oe.M1_geom[:, 3, :]= pKg['K0omega'][0, 1] + pKg['K0omega'][1, 0] \r\n sid.Oe.M1_geom[:, 4, :]= pKg['K0omega'][1, 2] + pKg['K0omega'][2, 1] \r\n sid.Oe.M1_geom[:, 5, :]= pKg['K0omega'][0, 2] + pKg['K0omega'][2, 0] \r\n\r\n sid.Oe.M1 = sid.Oe.M1_base + sid.Oe.M1_geom \r\n #sid.Oe.M1 = sid.Oe.M1_base \r\n #sid.Oe.M1 = sid.Oe.M1_geom \r\n\r\n ## --- Me, Ke, De\r\n sid.Me.M0= MM[6:,6:]\r\n sid.Ke.M0= KK[6:,6:]\r\n #sid.De.M0= p['De']\r\n\r\n # --- remove some off-diagonal terms to conform to FAST approach\r\n if consistency=='OpenFAST':\r\n sid.Me.M0 = np.diag(np.diag(sid.Me.M0))\r\n for a in [0,1,2]:\r\n sid.Ct.M1[:, a, :]= np.diag(np.diag(np.squeeze(sid.Ct.M1[:, a, :])))\r\n for m in np.arange(6):\r\n if m<3:\r\n sid.Oe.M1[:, m, :]+= - pKg['K0omega'][m, m] + np.diag(np.diag(pKg['K0omega'][m, m]))\r\n else:\r\n c= m-3;\r\n d= np.mod(c+1, 3)\r\n sid.Oe.M1[:, m, :]+= - pKg['K0omega'][c, d] - pKg['K0omega'][c, d].T + 2*np.diag(np.diag(pKg['K0omega'][c, d]))\r\n\r\n return sid",
"def to_geom_args(self, conversion=1.0, name=None): # pragma: lpy\n import openalea.plantgl.all as pgl\n smb_class, args, kwargs = super(ObjDict, self).to_geom_args(\n conversion=conversion, name=name, _as_obj=True)\n index_class = pgl.Index\n array_class = pgl.IndexArray\n # Texture coords\n if self.get('texcoords', []):\n obj_texcoords = []\n for t in self['texcoords']:\n obj_texcoords.append(pgl.Vector2(np.float64(t['u']),\n np.float64(t.get('v', 0.0))))\n kwargs['texCoordList'] = pgl.Point2Array(obj_texcoords)\n obj_ftexcoords = []\n for i, f in enumerate(self['faces']):\n entry = []\n for _f in f:\n if 'texcoord_index' not in _f:\n if i > 0: # pragma: debug\n warnings.warn((\"'texcoord_index' missing from face\"\n + \"%d, texcoord indices will be \"\n + \"ignored.\") % i)\n obj_ftexcoords = []\n entry = []\n break\n entry.append(int(_f['texcoord_index']))\n if not entry:\n break\n obj_ftexcoords.append(index_class(*entry))\n if obj_ftexcoords:\n kwargs['texCoordIndexList'] = array_class(obj_ftexcoords)\n # Normals\n if self.get('normals', []):\n obj_normals = []\n for n in self['normals']:\n obj_normals.append(pgl.Vector3(np.float64(n['i']),\n np.float64(n['j']),\n np.float64(n['k'])))\n kwargs['normalList'] = pgl.Point3Array(obj_normals)\n obj_fnormals = []\n for i, f in enumerate(self['faces']):\n entry = []\n for _f in f:\n if 'normal_index' not in _f:\n if i > 0: # pragma: debug\n warnings.warn((\"'normal_index' missing from face\"\n + \"%d, normal indices will be \"\n + \"ignored.\") % i)\n obj_fnormals = []\n entry = []\n break\n entry.append(int(_f['normal_index']))\n if not entry:\n break\n obj_fnormals.append(index_class(*entry))\n if obj_fnormals:\n kwargs['normalIndexList'] = array_class(obj_fnormals)\n return smb_class, args, kwargs",
"def create_from_segments(self, segment, origin=0):\r\n n = origin\r\n if segment[origin]['T'] != 'soma': # if it's a soma, only one compartment\r\n while (len(segment[n]['children']) == 1) and (segment[n]['T'] != 'soma'): # Go to the end of the branch\r\n n += 1\r\n # End of branch\r\n branch = segment[origin:n + 1]\r\n # Set attributes\r\n self.diameter, self.length, self.area, self.x, self.y, self.z = \\\r\n zip(*[(seg['diameter'], seg['length'], seg['area'], seg['x'], seg['y'], seg['z']) for seg in branch])\r\n self.diameter, self.length, self.area, self.x, self.y, self.z = array(self.diameter), array(self.length), \\\r\n array(self.area), array(self.x), array(self.y), array(self.z)\r\n self.type = segment[n]['T'] # normally same type for all compartments in the branch\r\n # Create children (list)\r\n self.children = [Morphology().create_from_segments(segment, origin=c) for c in segment[n]['children']]\r\n # Create dictionary of names (enumerates children from number 1)\r\n for i, child in enumerate(self.children):\r\n self._namedkid[str(i + 1)] = child\r\n # Name the child if possible\r\n if child.type in ['soma', 'axon', 'dendrite']:\r\n if child.type in self._namedkid:\r\n self._namedkid[child.type] = None # two children with the same name: erase (see next block)\r\n else:\r\n self._namedkid[child.type] = child\r\n # Erase useless names\r\n for k in self._namedkid.keys():\r\n if self._namedkid[k] is None:\r\n del self._namedkid[k]\r\n # If two kids, name them L (left) and R (right)\r\n if len(self.children) == 2:\r\n self._namedkid['L'] = self._namedkid['1']\r\n self._namedkid['R'] = self._namedkid['2']\r\n return self",
"def recon_sg2(json_file_dir, if_add_bases=True):\n id2color = {\n \"gray\": [87, 87, 87],\n \"red\": [173, 35, 35],\n \"blue\": [42, 75, 215],\n \"green\": [29, 105, 20],\n \"brown\": [129, 74, 25],\n \"purple\": [129, 38, 192],\n \"cyan\": [41, 208, 208],\n \"yellow\": [255, 238, 51],\n \"c1\": [42, 87, 9],\n \"c2\": [255, 102, 255],\n \"orange\": [255, 140, 0]\n }\n\n color2id = {tuple(v): u for u, v in id2color.items()}\n with open(json_file_dir, 'r') as json_file:\n du = json.load(json_file)\n location_dict = {}\n objects = []\n bboxes = []\n for obj in du[\"objects\"]:\n color = tuple([int(du33*255) for du33 in obj[\"color\"]][:-1])\n object_id = color2id[color]\n a_key = \"%.3f\" % obj[\"location\"][0]\n if a_key not in location_dict:\n location_dict[a_key] = [(object_id, obj[\"location\"][2])]\n else:\n location_dict[a_key].append((object_id, obj[\"location\"][2]))\n objects.append(object_id)\n bboxes.append([\n obj[\"bbox\"][0]/128.0,\n obj[\"bbox\"][1]/128.0,\n obj[\"bbox\"][2]/128.0,\n obj[\"bbox\"][3]/128.0,\n ])\n obj2id = {objects[du4]: objects[du4] for du4 in range(len(objects))}\n if if_add_bases:\n relationships = [\n [obj2id[\"brown\"], \"left\", obj2id[\"purple\"]],\n [obj2id[\"purple\"], \"left\", obj2id[\"cyan\"]],\n ]\n else:\n relationships = []\n for du3 in location_dict:\n location = sorted(location_dict[du3], key=lambda x: x[1])\n while len(location) > 1:\n o1 = location.pop()[0]\n o2 = location[-1][0]\n relationships.append([obj2id[o1], \"up\", obj2id[o2]])\n assert o1 not in [\"cyan\", \"purple\", \"brown\"]\n\n return relationships",
"def vsn_func_1(ns, traj, vs_def_beads_ids):\n for ts in ns.aa2cg_universe.trajectory:\n traj[ts.frame] = ns.aa2cg_universe.atoms[vs_def_beads_ids].center_of_geometry(pbc=None)",
"def __init__(self, island, x = 0, y = 0, s=\"A\"):\n self.island = island\n self.name = s\n self.x = x\n self.y = y",
"def __init__(self, geom_input, srs=None):\n str_instance = isinstance(geom_input, str)\n\n # If HEX, unpack input to a binary buffer.\n if str_instance and hex_regex.match(geom_input):\n geom_input = memoryview(bytes.fromhex(geom_input))\n str_instance = False\n\n # Constructing the geometry,\n if str_instance:\n wkt_m = wkt_regex.match(geom_input)\n json_m = json_regex.match(geom_input)\n if wkt_m:\n if wkt_m[\"srid\"]:\n # If there's EWKT, set the SRS w/value of the SRID.\n srs = int(wkt_m[\"srid\"])\n if wkt_m[\"type\"].upper() == \"LINEARRING\":\n # OGR_G_CreateFromWkt doesn't work with LINEARRING WKT.\n # See https://trac.osgeo.org/gdal/ticket/1992.\n g = capi.create_geom(OGRGeomType(wkt_m[\"type\"]).num)\n capi.import_wkt(g, byref(c_char_p(wkt_m[\"wkt\"].encode())))\n else:\n g = capi.from_wkt(\n byref(c_char_p(wkt_m[\"wkt\"].encode())), None, byref(c_void_p())\n )\n elif json_m:\n g = self._from_json(geom_input.encode())\n else:\n # Seeing if the input is a valid short-hand string\n # (e.g., 'Point', 'POLYGON').\n OGRGeomType(geom_input)\n g = capi.create_geom(OGRGeomType(geom_input).num)\n elif isinstance(geom_input, memoryview):\n # WKB was passed in\n g = self._from_wkb(geom_input)\n elif isinstance(geom_input, OGRGeomType):\n # OGRGeomType was passed in, an empty geometry will be created.\n g = capi.create_geom(geom_input.num)\n elif isinstance(geom_input, self.ptr_type):\n # OGR pointer (c_void_p) was the input.\n g = geom_input\n else:\n raise GDALException(\n \"Invalid input type for OGR Geometry construction: %s\"\n % type(geom_input)\n )\n\n # Now checking the Geometry pointer before finishing initialization\n # by setting the pointer for the object.\n if not g:\n raise GDALException(\n \"Cannot create OGR Geometry from input: %s\" % geom_input\n )\n self.ptr = g\n\n # Assigning the SpatialReference object to the geometry, if valid.\n if srs:\n self.srs = srs\n\n # Setting the class depending upon the OGR Geometry Type\n self.__class__ = GEO_CLASSES[self.geom_type.num]",
"def copyGeom(geom):\n geomJson = geom.ExportToJson()\n newGeom = ogr.CreateGeometryFromJson(geomJson)\n return newGeom",
"def m2_m3_make_lower_shape_points_list(dx, dy, m_info, SEN_info):\n \"\"\"\n 1 Get information from m_info & SEN_info.\n \"\"\"\n x_m = m_info[0]\n y_m = m_info[1]\n z_m = m_info[2]\n\n m_points = m_info[3]\n\n m_p0 = m_points[0]\n m_p1 = m_points[1]\n m_p2 = m_points[2]\n m_p3 = m_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n l_n = SEN_info[4]\n r_n = SEN_info[5]\n set = SEN_info[6]\n l_offset = SEN_info[7]\n r_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n lower_shape_left_upper_row list\n lower_shape_left_lower_row list\n\n lower_shape_right_upper_row list\n lower_shape_right_lower_row list\n \"\"\"\n # Leftside\n lower_shape_left_upper_row = []\n lower_shape_left_lower_row = []\n\n for i in range(l_n):\n # upper row\n ix = i * l_offset + set\n iy = y_m - t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n upper_points = [p1, p2, p3, p4, p5, p6, p7, p8]\n lower_shape_left_upper_row.extend((upper_points))\n\n for i in range(l_n - 1, -1, -1):\n # lower row\n ix = i * l_offset + set\n iy = t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n lower_points = [p5, p6, p7, p8, p1, p2, p3, p4]\n lower_shape_left_lower_row.extend(lower_points)\n\n # Rightside\n lower_shape_right_upper_row = []\n lower_shape_right_lower_row = []\n\n for i in range(r_n):\n # upper row\n ix = x_m - i * r_offset - set\n iy = y_m - t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n upper_points = [p8, p7, p6, p5, p4, p3, p2, p1]\n lower_shape_right_upper_row.extend((upper_points))\n\n for i in range(r_n - 1, -1, -1):\n # lower row\n ix = x_m - i * r_offset - set\n iy = t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_lower_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n lower_points = [p4, p3, p2, p1, p8, p7, p6, p5]\n lower_shape_right_lower_row.extend(lower_points)\n\n lower_shape_left = [lower_shape_left_upper_row, lower_shape_left_lower_row]\n lower_shape_right = [lower_shape_right_upper_row, lower_shape_right_lower_row]\n\n return lower_shape_left, lower_shape_right",
"def m1_make_middle_shape_points_list(tx, ty, m1_info, SEN_info):\n \"\"\"\n 1 Get information from m1_info & SEN_info\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n u_n = SEN_info[4]\n l_n = SEN_info[5]\n set = SEN_info[6]\n u_offset = SEN_info[7]\n l_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n middle_shape_upper_left_row list\n middle_shape_upper_right_row list\n\n middle_shape_lower_left_row list\n middle_shape_lower_right_row list\n \"\"\"\n # upper side\n middle_shape_upper_left_row = []\n middle_shape_upper_right_row = []\n\n for i in range(u_n):\n # left row\n ix = tx + t_sen\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p2, p1, p4, p3]\n middle_shape_upper_left_row.extend((left_points))\n\n for i in range(u_n - 1, -1, -1):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p4, p3, p2, p1]\n middle_shape_upper_right_row.extend(right_points)\n\n # lower side\n middle_shape_lower_left_row = []\n middle_shape_lower_right_row = []\n\n for i in range(l_n - 1, -1, -1):\n # left row\n ix = tx + t_sen\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p2, p1, p4, p3]\n middle_shape_lower_left_row.extend((left_points))\n\n for i in range(l_n):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p4, p3, p2, p1]\n middle_shape_lower_right_row.extend(right_points)\n\n middle_shape_upper = [middle_shape_upper_left_row, middle_shape_upper_right_row]\n middle_shape_lower = [middle_shape_lower_left_row, middle_shape_lower_right_row]\n\n return middle_shape_upper, middle_shape_lower"
] | [
"0.549307",
"0.54439116",
"0.54079205",
"0.53954226",
"0.5255821",
"0.5238671",
"0.5142048",
"0.5113373",
"0.510141",
"0.50817597",
"0.5079542",
"0.50776017",
"0.506408",
"0.50393265",
"0.502448",
"0.50152045",
"0.50018567",
"0.49921232",
"0.49887058",
"0.4985654",
"0.49647623",
"0.4951741",
"0.49451986",
"0.49362275",
"0.49336317",
"0.49305645",
"0.49154478",
"0.48929882",
"0.4890987",
"0.48905"
] | 0.6250433 | 0 |
returns an empty cuboidal room with source and mic somewhat in center | def empty_room():
room_material = pra.Material(energy_absorption=0.6, scattering=None)
room_faces = make_polygon(
centre=[0,0,2.5],
radius=10,
height=5,
N=4,
rpy=[0,0,np.pi/4]
)
# create room
walls = []
walls.extend(create_walls(room_faces, room_material))
room = pra.Room(walls, fs=fs, max_order=3, ray_tracing=True, air_absorption=False)
room.add_source([0, 0, 2.])
room.add_microphone([0, 0.2, 2.1])
# compute rir
room.image_source_model()
room.ray_tracing()
room.compute_rir()
return room | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sample_room(config):\n room = np.zeros((3,))\n room[0] = get_sample(config['length'], n_sample=1)[0]\n room[1] = get_sample(config['width'], n_sample=1)[0]\n room[2] = get_sample(config['height'], n_sample=1)[0]\n return room",
"def zeros(self):\n super(TimeCube, self).zeros()\n self.data = np.zeros([self.time_range[1]-self.time_range[0]]+self.cubesize, np.uint8)",
"def generate_sphere_full():\n \n num_voxels = 31\n c = (15.0, 15.0, 15.0)\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if numpy.sqrt((x-c[0])**2 + (y-c[1])**2 + (z-c[2])**2) - 7.5 < 1.5:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume",
"def empty_diff_walls():\n\t# 4 side walls are absorptive\n\troom_materials = [pra.Material(energy_absorption=0.1, scattering=None)] * 4\n\t# floor and ceiling are reflective\n\troom_materials.extend([pra.Material(energy_absorption=0.98, scattering=None)] * 2)\n\t\n\troom_faces = make_polygon(\n\t\tcentre=[0,0,2.5],\n\t\tradius=10,\n\t\theight=5,\n\t\tN=4,\n\t\trpy=[0,0,np.pi/4]\n\t)\n\n\t# create room\n\twalls = []\n\twalls.extend(create_walls(room_faces, room_materials))\n\n\troom = pra.Room(walls, fs=fs, max_order=3, ray_tracing=False, air_absorption=False)\n\n\troom.add_source([-5, 2, 2.])\n\troom.add_microphone([1, 0, 2.])\n\n\t# compute rir\n\troom.image_source_model()\n\troom.compute_rir()\n\n\treturn room",
"def generate_cube():\n \n num_voxels = 31\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if 5 < x < 10 and 5 < y < 10:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume",
"def createCube():\n subjects, detections, antigen = getAxes()\n cube = np.full([len(subjects), len(detections), len(antigen)], np.nan)\n\n IGG = importIGG()\n glycan, dfGlycan = importGlycan()\n glyCube = np.full([len(subjects), len(glycan)], np.nan)\n\n for k, curAnti in enumerate(antigen):\n lumx = importLuminex(curAnti)\n\n for _, row in lumx.iterrows():\n i = subjects.index(row[\"subject\"])\n j = detections.index(row[\"variable\"])\n cube[i, j, k] = row[\"value\"]\n\n for _, row in dfGlycan.iterrows():\n i = subjects.index(row[\"subject\"])\n j = glycan.index(row[\"variable\"])\n glyCube[i, j] = row[\"value\"]\n\n # Add IgG data on the end as another detection\n for _, row in IGG.iterrows():\n i = subjects.index(row[\"subject\"])\n k = antigen.index(row[\"variable\"])\n cube[i, -1, k] = row[\"value\"]\n\n # Clip to 0 as there are a few strongly negative outliers\n cube = np.clip(cube, 1.0, None)\n glyCube = np.clip(glyCube, 0.1, None)\n\n cube = np.log10(cube)\n glyCube = np.log10(glyCube)\n\n # Mean center each measurement\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n cube -= np.nanmean(cube, axis=0)\n glyCube -= np.nanmean(glyCube, axis=0)\n\n # Check that there are no slices with completely missing data\n assert ~np.any(np.all(np.isnan(cube), axis=(0, 1)))\n assert ~np.any(np.all(np.isnan(cube), axis=(0, 2)))\n assert ~np.any(np.all(np.isnan(cube), axis=(1, 2)))\n\n glyCube *= np.sqrt(np.nanvar(cube) / np.nanvar(glyCube))\n return cube, glyCube",
"def room_with_box():\n\troom_material = pra.Material(energy_absorption=0.6, scattering=None)\n\troom_faces = make_polygon(\n\t\tcentre=[0,0,2.5],\n\t\tradius=10,\n\t\theight=5,\n\t\tN=4,\n\t\trpy=[0,0,np.pi/4]\n\t)\n\n\t# define obstacle\n\tobstacle_faces = make_polygon(\n\t\tcentre=[2.5,0,2.5],\n\t\tradius=1.8,\n\t\theight=3,\n\t\tN=4,\n\t\trpy=[0,0,np.pi/4],\n\t\treverse_normals=True\n\t)\n\tobstacle_material = pra.Material(energy_absorption=0.1, scattering=0.1)\n\n\t# create room\n\twalls = []\n\twalls.extend(create_walls(room_faces, room_material))\n\twalls.extend(create_walls(obstacle_faces, obstacle_material))\n\n\troom = pra.Room(walls, fs=fs, max_order=3, ray_tracing=False, air_absorption=False)\n\n\troom.add_source([0, 0, 2.])\n\troom.add_microphone([0, 0.2, 2.1])\n\n\t# compute rir\n\troom.image_source_model()\n\troom.compute_rir()\n\n\treturn room",
"def to_pycuber(self) -> pycuber.Cube:\n self.soft_align_faces()\n qpos_copy = self.sim.data.qpos.copy()\n\n cubies = []\n\n for i in range(27):\n cubelet_meta = self.cubelet_meta_info[i]\n\n if cubelet_meta[\"type\"] == \"cubelet\":\n mtx = self._cubelet_rotation_matrix(cubelet_meta, qpos_copy)\n\n original_coords = cubelet_meta[\"coords\"]\n # current_coords = (mtx @ cubelet_meta['coords'].astype(float)).round().astype(int)\n\n cubie_desc = {}\n\n for prev_axis, sign in enumerate(original_coords):\n if sign != 0:\n vec = mtx[:, prev_axis] * sign\n new_axis = np.abs(vec).argmax()\n new_sign = vec[new_axis]\n\n color = PYCUBER_REVERSE_COLORS[prev_axis, sign]\n loc = PYCUBER_REVERSE_LOCATIONS[new_axis, new_sign]\n\n cubie_desc[loc] = pycuber.Square(color)\n\n if len(cubie_desc) == 3:\n cubies.append(pycuber.Corner(**cubie_desc))\n elif len(cubie_desc) == 2:\n cubies.append(pycuber.Edge(**cubie_desc))\n if cubelet_meta[\"type\"] == \"driver\":\n original_coords = cubelet_meta[\"coords\"]\n axis = np.abs(original_coords).argmax()\n sign = original_coords[axis]\n\n color = PYCUBER_REVERSE_COLORS[axis, sign]\n loc = PYCUBER_REVERSE_LOCATIONS[axis, sign]\n\n cubie_desc = {loc: pycuber.Square(color)}\n cubies.append(pycuber.Centre(**cubie_desc))\n\n return pycuber.Cube(cubies=cubies)",
"def cuboid(geometry,\n network,\n propname,\n **params):\n print('cuboid: nothing yet')",
"def default(self):\n self._reset_env()\n\n scale = self.block_size / 2\n\n o = Cube2D(transform = Transform2D([-0.71322928, -0.68750558], 0.50, scale))\n self.add_object(o)\n # o = Cube2D(transform = Transform2D([-0.5, 0.3], 0.60, scale))\n o = Cube2D(transform = Transform2D([-0.2344808, -0.16797299], 0.60, scale))\n self.add_object(o)\n\n last_frames = self.capture_last(frames = 2, mode = SPEED)\n\n # Set the first observation\n observation = self._get_observation(last_frames)\n\n return observation",
"def initialize_from_ramp(self):\n self.center_position = self.ramp_object.top_center",
"def set_up_threshold_cube():\n test_data = 50*np.arange(16).reshape(4, 4)\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"surface_altitude\",\n units=\"m\",\n dim_coords_and_dims=[(grid_y, 0), (grid_x, 1)])\n return test_cube",
"def molmapCube(model_id, resolution):\n\n ####create grid and save as model\n #get gabarities\n min_x, max_x, min_y, max_y, min_z, max_z = getGabarities(model_id)\n #create empty cube map\n min_cor = min(min_x,min_y,min_z)\n max_cor = max(max_x,max_y,max_z)\n d_grid = resolution/3\n\n\n\n #run molmap\n molmap_com = 'molmap #'+str(model_id) + ' ' + str(resolution)+' gridSpacing ' + str(resolution/3.0)\n chimera.runCommand(molmap_com)\n map_orig = active_volume();\n\n # interpolation\n createCubeMapfromGivenMap(map_orig,min_cor, max_cor, d_grid)\n\n #delete the grid\n map_orig.destroy()",
"def cube(self):\n\n dims = self.voxels.shape\n max_dim = max(dims)\n \n x_target = (max_dim - dims[0]) / 2\n y_target = (max_dim - dims[1]) / 2\n z_target = (max_dim - dims[2]) / 2\n\n self.voxels = np.pad(self.voxels,\n ((int(np.ceil(x_target)), int(np.floor(x_target))),\n (int(np.ceil(y_target)), int(np.floor(y_target))),\n (int(np.ceil(z_target)), int(np.floor(z_target)))),\n 'constant',\n constant_values=(0))\n\n self.point_position = self.point_position + [np.ceil(z_target),\n np.ceil(y_target),\n np.ceil(x_target)]\n\n return(self)",
"def unoriented_cube():\n faces = get_oriented_cube_faces()\n for face in faces:\n np.random.shuffle(face)\n poly = Polyhedron(get_cube_points(), faces, faces_are_convex=True)\n poly.sort_faces()\n return poly",
"def get_real_samples(self):\n # Define the camera poses\n if not self.opt.same_view:\n if self.opt.full_sphere_sampling:\n self.cam_pos = uniform_sample_sphere(\n radius=self.opt.cam_dist, num_samples=self.opt.batchSize,\n axis=None, angle=self.opt.angle,\n theta_range=np.deg2rad(self.opt.theta), phi_range=np.deg2rad(self.opt.phi))\n else:\n self.cam_pos = uniform_sample_sphere(\n radius=self.opt.cam_dist, num_samples=self.opt.batchSize,\n axis=None, angle=self.opt.angle,\n theta_range=np.deg2rad(self.opt.theta),\n phi_range=np.deg2rad(self.opt.phi))\n if self.opt.full_sphere_sampling_light:\n self.light_pos1 = uniform_sample_sphere(\n radius=self.opt.cam_dist, num_samples=self.opt.batchSize,\n axis=None, angle=self.opt.angle,\n theta_range=np.deg2rad(self.opt.theta),\n phi_range=np.deg2rad(self.opt.phi))\n # self.light_pos2 = uniform_sample_sphere(radius=self.opt.cam_dist, num_samples=self.opt.batchSize,\n # axis=self.opt.axis, angle=np.deg2rad(40),\n # theta_range=self.opt.theta, phi_range=self.opt.phi)\n else:\n print(\"inbox\")\n light_eps = 0.15\n self.light_pos1 = np.random.rand(self.opt.batchSize,3)*self.opt.cam_dist + light_eps\n self.light_pos2 = np.random.rand(self.opt.batchSize,3)*self.opt.cam_dist + light_eps\n\n # TODO: deg2rad in all the angles????\n\n # Create a splats rendering scene\n large_scene = create_scene(self.opt.width, self.opt.height,\n self.opt.fovy, self.opt.focal_length,\n self.opt.n_splats)\n lookat = self.opt.at if self.opt.at is not None else [0.0, 0.0, 0.0, 1.0]\n large_scene['camera']['at'] = tch_var_f(lookat)\n\n # Render scenes\n data, data_depth, data_normal, data_cond = [], [], [], []\n inpath = self.opt.vis_images + '/'\n for idx in range(self.opt.batchSize):\n # Save the splats into the rendering scene\n if self.opt.use_mesh:\n if 'sphere' in large_scene['objects']:\n del large_scene['objects']['sphere']\n if 'disk' in large_scene['objects']:\n del large_scene['objects']['disk']\n if 'triangle' not in large_scene['objects']:\n large_scene['objects'] = {\n 'triangle': {'face': None, 'normal': None,\n 'material_idx': None}}\n samples = self.get_samples()\n\n large_scene['objects']['triangle']['material_idx'] = tch_var_l(\n np.zeros(samples['mesh']['face'][0].shape[0],\n dtype=int).tolist())\n large_scene['objects']['triangle']['face'] = Variable(\n samples['mesh']['face'][0].cuda(), requires_grad=False)\n large_scene['objects']['triangle']['normal'] = Variable(\n samples['mesh']['normal'][0].cuda(),\n requires_grad=False)\n else:\n if 'sphere' in large_scene['objects']:\n del large_scene['objects']['sphere']\n if 'triangle' in large_scene['objects']:\n del large_scene['objects']['triangle']\n if 'disk' not in large_scene['objects']:\n large_scene['objects'] = {\n 'disk': {'pos': None,\n 'normal': None,\n 'material_idx': None}}\n large_scene['objects']['disk']['radius'] = tch_var_f(\n np.ones(self.opt.n_splats) * self.opt.splats_radius)\n large_scene['objects']['disk']['material_idx'] = tch_var_l(\n np.zeros(self.opt.n_splats, dtype=int).tolist())\n large_scene['objects']['disk']['pos'] = Variable(\n samples['splats']['pos'][idx].cuda(),\n requires_grad=False)\n large_scene['objects']['disk']['normal'] = Variable(\n samples['splats']['normal'][idx].cuda(),\n requires_grad=False)\n\n # Set camera position\n if not self.opt.same_view:\n large_scene['camera']['eye'] = tch_var_f(self.cam_pos[idx])\n else:\n large_scene['camera']['eye'] = tch_var_f(self.cam_pos[0])\n\n large_scene['lights']['pos'][0,:3]=tch_var_f(self.light_pos1[idx])\n 
#large_scene['lights']['pos'][1,:3]=tch_var_f(self.light_pos2[idx])\n\n # Render scene\n res = render(large_scene,\n norm_depth_image_only=self.opt.norm_depth_image_only,\n double_sided=True, use_quartic=self.opt.use_quartic)\n\n # Get rendered output\n if self.opt.render_img_nc == 1:\n depth = res['depth']\n im_d = depth.unsqueeze(0)\n else:\n depth = res['depth']\n im_d = depth.unsqueeze(0)\n im = res['image'].permute(2, 0, 1)\n target_normal_ = get_data(res['normal'])\n target_normalmap_img_ = get_normalmap_image(target_normal_)\n im_n = tch_var_f(\n target_normalmap_img_).view(im.shape[1], im.shape[2],\n 3).permute(2, 0, 1)\n\n # Add depth image to the output structure\n if self.iteration_no % self.opt.save_image_interval == 0:\n imsave((inpath + str(self.iteration_no) +\n 'real_normalmap_{:05d}.png'.format(idx)),\n target_normalmap_img_)\n imsave((inpath + str(self.iteration_no) +\n 'real_depth_{:05d}.png'.format(idx)), get_data(depth))\n # imsave(inpath + str(self.iteration_no) + 'real_depthmap_{:05d}.png'.format(idx), im_d)\n # imsave(inpath + str(self.iteration_no) + 'world_normalmap_{:05d}.png'.format(idx), target_worldnormalmap_img_)\n data.append(im)\n data_depth.append(im_d)\n data_normal.append(im_n)\n data_cond.append(large_scene['camera']['eye'])\n # Stack real samples\n real_samples = torch.stack(data)\n real_samples_depth = torch.stack(data_depth)\n real_samples_normal = torch.stack(data_normal)\n real_samples_cond = torch.stack(data_cond)\n self.batch_size = real_samples.size(0)\n if not self.opt.no_cuda:\n real_samples = real_samples.cuda()\n real_samples_depth = real_samples_depth.cuda()\n real_samples_normal = real_samples_normal.cuda()\n real_samples_cond = real_samples_cond.cuda()\n\n # Set input/output variables\n\n self.input.resize_as_(real_samples.data).copy_(real_samples.data)\n self.input_depth.resize_as_(real_samples_depth.data).copy_(real_samples_depth.data)\n self.input_normal.resize_as_(real_samples_normal.data).copy_(real_samples_normal.data)\n self.input_cond.resize_as_(real_samples_cond.data).copy_(real_samples_cond.data)\n self.label.resize_(self.batch_size).fill_(self.real_label)\n # TODO: Remove Variables\n self.inputv = Variable(self.input)\n self.inputv_depth = Variable(self.input_depth)\n self.inputv_normal = Variable(self.input_normal)\n self.inputv_cond = Variable(self.input_cond)\n self.labelv = Variable(self.label)",
"def _build(self,\n size=_DEFAULT_PITCH_SIZE,\n goal_size=None,\n top_camera_distance=_TOP_CAMERA_DISTANCE,\n field_box=False,\n name='pitch'):\n super(Pitch, self)._build(name=name)\n self._size = size\n self._goal_size = goal_size\n self._top_camera_distance = top_camera_distance\n\n self._top_camera = self._mjcf_root.worldbody.add(\n 'camera',\n name='top_down',\n pos=[0, 0, top_camera_distance],\n zaxis=[0, 0, 1],\n fovy=_top_down_cam_fovy(self._size, top_camera_distance))\n\n self._mjcf_root.visual.headlight.set_attributes(\n ambient=[.4, .4, .4], diffuse=[.8, .8, .8], specular=[.1, .1, .1])\n\n # Ensure close up geoms are rendered by egocentric cameras.\n self._mjcf_root.visual.map.znear = 0.0005\n\n # Build groundplane.\n if len(self._size) != 2:\n raise ValueError('`size` should be a sequence of length 2: got {!r}'\n .format(self._size))\n self._ground_texture = self._mjcf_root.asset.add(\n 'texture',\n type='2d',\n builtin='checker',\n name='groundplane',\n rgb1=[0.3, 0.8, 0.3],\n rgb2=[0.1, 0.6, 0.1],\n width=300,\n height=300,\n mark='edge',\n markrgb=[0.8, 0.8, 0.8])\n self._ground_material = self._mjcf_root.asset.add(\n 'material', name='groundplane', texture=self._ground_texture)\n self._ground_geom = self._mjcf_root.worldbody.add(\n 'geom',\n type='plane',\n material=self._ground_material,\n size=list(self._size) + [max(self._size) * _GROUND_GEOM_GRID_RATIO])\n\n # Build walls.\n self._walls = []\n for wall_pos, wall_xyaxes in _wall_pos_xyaxes(self._size):\n self._walls.append(\n self._mjcf_root.worldbody.add(\n 'geom',\n type='plane',\n rgba=[.1, .1, .1, .8],\n pos=wall_pos,\n size=[1e-7, 1e-7, 1e-7],\n xyaxes=wall_xyaxes))\n\n # Build goal position detectors.\n # If field_box is enabled, offset goal by 1.0 such that ball reaches the\n # goal position detector before bouncing off the field_box.\n self._fb_offset = 0.5 if field_box else 0.0\n goal_size = self._get_goal_size()\n self._home_goal = props.PositionDetector(\n pos=(-self._size[0] + goal_size[0] + self._fb_offset, 0,\n goal_size[2]),\n size=goal_size,\n rgba=(0, 0, 1, 0.5),\n visible=True,\n name='home_goal')\n self.attach(self._home_goal)\n\n self._away_goal = props.PositionDetector(\n pos=(self._size[0] - goal_size[0] - self._fb_offset, 0, goal_size[2]),\n size=goal_size,\n rgba=(1, 0, 0, 0.5),\n visible=True,\n name='away_goal')\n self.attach(self._away_goal)\n\n # Build inverted field position detectors.\n self._field = props.PositionDetector(\n pos=(0, 0),\n size=(self._size[0] - 2 * goal_size[0],\n self._size[1] - 2 * goal_size[0]),\n rgba=(1, 0, 0, 0.1),\n inverted=True,\n visible=True,\n name='field')\n self.attach(self._field)\n\n # Build field box.\n self._field_box = []\n if field_box:\n for wall_pos, wall_xyaxes in _wall_pos_xyaxes(\n (self._field.upper - self._field.lower) / 2.0):\n self._field_box.append(\n self._mjcf_root.worldbody.add(\n 'geom',\n type='plane',\n rgba=[.3, .3, .3, .3],\n pos=wall_pos,\n size=[1e-7, 1e-7, 1e-7],\n xyaxes=wall_xyaxes))",
"def getCube(unique_name):",
"def sample_source_position_by_random_coordinate(config, n_spk, room_size, array_center, forbidden_rect=None):\n source_position = np.zeros((3, n_spk))\n\n d_from_wall = config['min_dist_from_wall'] if \"min_dist_from_wall\" in config.keys() else [0.0, 0.0] # minimum distance from wall\n d_from_array = config['min_dist_from_array'] if \"min_dist_from_array\" in config.keys() else 0.1 # minimum distnace from mic array\n d_from_other = config['min_dist_from_other'] if \"min_dist_from_other\" in config.keys() else 0.2 # minimum distance from other sources\n x_distribution = get_distribution_template(\"comment\", min=d_from_wall[0],\n max=room_size[0] - d_from_wall[0])\n y_distribution = get_distribution_template(\"comment\", min=d_from_wall[0],\n max=room_size[1] - d_from_wall[1])\n if \"height\" in config.keys():\n z_distribution = config['height']\n else:\n z_distribution = get_distribution_template(\"comment\", min=0.0, max=room_size[2])\n\n for i in range(n_spk):\n cnt = 0\n while 1:\n cnt += 1\n x = get_sample(x_distribution)[0]\n y = get_sample(y_distribution)[0]\n z = get_sample(z_distribution)[0]\n curr_pos = np.asarray([x, y, z])\n if np.linalg.norm(curr_pos[:2]-array_center[:2]) >= d_from_array:\n if forbidden_rect is None or (np.prod(curr_pos[0] - forbidden_rect[0, :]) > 0 or np.prod(curr_pos[1] - forbidden_rect[1, :]) > 0):\n if i == 0 or (np.linalg.norm(curr_pos[:2,np.newaxis]-source_position[:2, :i], axis=0) >= d_from_other).all():\n source_position[:, i] = curr_pos[:]\n break\n if cnt > 1000:\n raise Exception(\"Maximum number (1000) of trial finished but still not able to find acceptable position for speaker position. \")\n\n return source_position",
"def coroot_space(self, base_ring = QQ):\n return self.root_system.coroot_space(base_ring = base_ring)",
"def generated_cube(self) -> Optional[xr.Dataset]:\n return self._generated_cube",
"def cubes_and_cuboids(\n target,\n pore_diameter='pore.diameter',\n throat_diameter='throat.diameter'\n):\n from openpnm.models.geometry import conduit_lengths\n out = conduit_lengths.cubes_and_cuboids(\n target, pore_diameter=pore_diameter, throat_diameter=throat_diameter\n )\n return out[:, 1]",
"def zeros_like(self):\n return MultiterminalDevice.zeros(\n self.dims,\n self.center.shape,\n tuple(i.shape for i in self.leads),\n self.connections,\n )",
"def __init__(self):\r\n\t\tsuper(Empty, self).__init__()\r\n\r\n\t\t# Initialize all of the objects\r\n\t\tground = self.world.CreateStaticBody(\r\n\t\t\tshapes=[ \r\n\t\t\t\tb2EdgeShape(vertices=[(-40,0),(40,0)])\r\n\t\t\t\t#~ b2EdgeShape(vertices=[(-1.1,-40),(-1.1,40)]),\r\n\t\t\t\t]\r\n\t\t) \r\n\t\tbox=b2FixtureDef(\r\n\t\t\tshape=b2PolygonShape(box=(0.3,0.3)),\r\n\t\t\tdensity=0.01,\r\n\t\t\tfriction=0.3)\t\t\r\n\t\t\r\n\t\tself.q = [qcopter(self.world,self.renderer,1,0.05,-8,8,0,1)]##,qcopter(self.world,self.renderer,1,0.1,-12,8,0,0)]\r\n\t\r\n\t\t(self.kp,self.ki,self.kd) = self.q[0].GetCoefs(self.q[0].configer)\r\n\t\t\r\n\t\t# The platform\r\n\t\t#~ fixture=b2FixtureDef(\r\n\t\t\t#~ shape=b2PolygonShape(box=(0.02,4.5)), \r\n\t\t\t#~ density=1,\r\n\t\t\t#~ friction=0.6,\r\n\t\t#~ )\t\t\t\r\n\r\n\t\t#~ self.platform=self.world.CreateDynamicBody(position=(-8,4.5), fixtures=fixture, )\r\n\t\t\r\n\t\t#~ self.platform.type=b2_staticBody\r\n\t\tif not(self.q[0].x_pid or self.q[0].y_pid):\t\r\n\t\t\tself.world.CreateRevoluteJoint(\r\n\t\t\t\tbodyA=self.q[0].body,\r\n\t\t\t\tbodyB=ground,\r\n\t\t\t\tanchor=(self.q[0].GetPos().x,self.q[0].GetPos().y),\r\n\t\t\t\tmaxMotorTorque=0,\r\n\t\t\t\tenableMotor=False\r\n\t\t\t)\r\n\t\t\t\t\r\n\t\tfor i in xrange(10):\r\n\t\t\tfor j in xrange(5+i/5):\r\n\t\t\t\tself.world.CreateDynamicBody(\r\n\t\t\t\t\tfixtures=box,\r\n\t\t\t\t\tposition=(i*3-10, 1+1*j)\r\n\t\t\t\t)",
"def geocube():",
"def place_cube(self,\n cube_xy,\n player=None,\n weight=1,\n azimuth=None,\n return_azimuth=False):\n\n self.color_idx += 1\n if self.color_idx == len(self.colors):\n self.color_idx = 0\n if azimuth is None:\n azimuth = np.random.randint(0, 180)\n else:\n assert azimuth >= 0 and azimuth <= 180\n cube_rot = self.p0.getQuaternionFromEuler([\n 0, 0, np.deg2rad(azimuth)\n ]) # rotated around which axis? # np.deg2rad(90)\n\n alpha = 1 # this could be set to .5 for some transparency\n\n if weight == 1:\n if player is None or self.four_colors:\n color = self.colors[self.color_idx] + [alpha]\n elif player == Player.Player:\n color = [0, 0, 1, 1]\n if DEBUG:\n print(\"Player putting down cube at\", cube_xy)\n elif player == Player.Enemy:\n color = [1, 0, 0, 1]\n if DEBUG:\n print(\"Opponent putting down cube at\", cube_xy)\n elif player == Player.Starter:\n color = [0, 0, 0, 1]\n if self.dark:\n color = [1, 1, 1, 1]\n if DEBUG:\n print(\"Starter cube at\", cube_xy)\n else:\n color = WEIGHT_COLORS[weight]\n\n max_z = self.find_highest_z(cube_xy, azimuth)\n\n cube_pos = [cube_xy[0], cube_xy[1], max_z + 1.0001]\n # print (\"placing cube at\",cube_pos)\n\n cube_visual = self.p0.createVisualShape(\n shapeType=self.p0.GEOM_BOX,\n rgbaColor=color,\n halfExtents=[1, 1, 1]\n # specularColor=[0.4, .4, 0],\n )\n\n cube = self.p0.createMultiBody(\n baseMass=weight,\n # baseInertialFramePosition=[0, 0, 0],\n baseCollisionShapeIndex=self.cube_collision,\n baseVisualShapeIndex=cube_visual,\n basePosition=cube_pos,\n baseOrientation=cube_rot,\n useMaximalCoordinates=True)\n\n self.cubes.append(cube)\n\n if max_z > self.current_max_z:\n self.current_max_z = np.around(max_z)\n out = True\n else:\n out = False\n\n if not return_azimuth:\n return out\n else:\n return out, azimuth",
"def cc(self):\n return MultiterminalDevice(\n self.center.cc(),\n list(i.cc() for i in self.leads),\n list(i.conj() for i in self.connections),\n )",
"def surface_area_of_cube(side):\n return side",
"def Cube(center=(0.0, 0.0, 0.0), x_length=1.0, y_length=1.0, z_length=1.0, bounds=None, clean=True):\n src = _vtk.vtkCubeSource()\n if bounds is not None:\n if np.array(bounds).size != 6:\n raise TypeError(\n 'Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)'\n )\n src.SetBounds(bounds)\n else:\n src.SetCenter(center)\n src.SetXLength(x_length)\n src.SetYLength(y_length)\n src.SetZLength(z_length)\n src.Update()\n cube = wrap(src.GetOutput())\n\n # add face index data for compatibility with PlatonicSolid\n # but make it inactive for backwards compatibility\n cube.cell_data.set_array([1, 4, 0, 3, 5, 2], 'FaceIndex')\n\n # clean duplicate points\n if clean:\n cube.clean(inplace=True)\n\n return cube",
"def corridor(x,z, emap, width=10, length=10, height=10, details=None, walls=\"ns\", name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n solid_objects = []\r\n\r\n if \"n\" in walls:\r\n # TODO: abstract out the mostly-duplicate code in these cases...\r\n nwall = SolidObject(name+str(wallnum),\r\n Size(length, height, 1),\r\n Position(x, emap.calcHeight(x, z) + height / 2, n-0.5), 0)\r\n solid_objects.append(nwall)\r\n nwallmodel = createMyCuboid(nwall.w() * 2, nwall.h() * 2, nwall.d() * 2,\r\n name=name+str(wallnum),\r\n x=nwall.x(),y=nwall.y(),z=nwall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(nwallmodel)\r\n else:\r\n nwall.setmodel(nwallmodel, details)\r\n\r\n\r\n wallnum += 1\r\n\r\n if \"s\" in walls:\r\n swall = SolidObject(name+str(wallnum), Size(length, height, 1), Position(x, emap.calcHeight(x, z)+height / 2, s+0.5), 0)\r\n solid_objects.append(swall)\r\n swallmodel = createMyCuboid(swall.w()*2, swall.h()*2, swall.d()*2,\r\n name=name+str(wallnum),\r\n x=swall.x(), y=swall.y(), z=swall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0,cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(swallmodel)\r\n else:\r\n swall.setmodel(swallmodel, details)\r\n\r\n wallnum += 1\r\n\r\n if \"e\" in walls:\r\n ewall = SolidObject(name+str(wallnum), Size(1, height, width), Position(e-0.5, emap.calcHeight(x, z)+height / 2, z), 0)\r\n solid_objects.append(ewall)\r\n ewallmodel = createMyCuboid(ewall.w()*2, ewall.h()*2, ewall.d()*2,\r\n name=name+str(wallnum),\r\n x=ewall.x(), y=ewall.y(), z=ewall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(ewallmodel)\r\n else:\r\n ewall.setmodel(ewallmodel, details)\r\n\r\n wallnum += 1\r\n\r\n if \"w\" in walls:\r\n wwall = SolidObject(name+str(wallnum), Size(1, height, width), Position(w+0.5, emap.calcHeight(x, z)+height / 2, z), 0)\r\n solid_objects.append(wwall)\r\n wwallmodel = createMyCuboid(wwall.w()*2, wwall.h()*2, wwall.d()*2,\r\n name=name+str(wallnum),\r\n x=wwall.x(), y=wwall.y(), z=wwall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(wwallmodel)\r\n else:\r\n wwall.setmodel(wwallmodel, details)\r\n wallnum += 1\r\n\r\n if \"o\" not in walls:\r\n ceiling = SolidObject(name+str(wallnum), Size(length, 1, width), Position(x, emap.calcHeight(x, z)+height+0.5, z), 0)\r\n solid_objects.append(ceiling)\r\n ceilingmodel = createMyCuboid(ceiling.w()*2, ceiling.h()*2, ceiling.d()*2,\r\n name=name+str(wallnum),\r\n x=ceiling.x(), y=ceiling.y(), z=ceiling.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(ceilingmodel)\r\n else:\r\n ceiling.setmodel(ceilingmodel, details)\r\n\r\n wallnum += 1\r\n\r\n return solid_objects"
] | [
"0.5908637",
"0.56457776",
"0.5444895",
"0.5426376",
"0.5395292",
"0.5354499",
"0.5339258",
"0.52996683",
"0.5294323",
"0.5251623",
"0.52373815",
"0.51490283",
"0.51358646",
"0.5119024",
"0.50912035",
"0.5087778",
"0.5083692",
"0.5081936",
"0.50569177",
"0.5044609",
"0.50421",
"0.50277054",
"0.50209963",
"0.50193626",
"0.5016786",
"0.4996781",
"0.4996358",
"0.49813658",
"0.49683842",
"0.4958906"
] | 0.6427652 | 0 |
Returns cuboidal room with box | def room_with_box():
room_material = pra.Material(energy_absorption=0.6, scattering=None)
room_faces = make_polygon(
centre=[0,0,2.5],
radius=10,
height=5,
N=4,
rpy=[0,0,np.pi/4]
)
# define obstacle
obstacle_faces = make_polygon(
centre=[2.5,0,2.5],
radius=1.8,
height=3,
N=4,
rpy=[0,0,np.pi/4],
reverse_normals=True
)
obstacle_material = pra.Material(energy_absorption=0.1, scattering=0.1)
# create room
walls = []
walls.extend(create_walls(room_faces, room_material))
walls.extend(create_walls(obstacle_faces, obstacle_material))
room = pra.Room(walls, fs=fs, max_order=3, ray_tracing=False, air_absorption=False)
room.add_source([0, 0, 2.])
room.add_microphone([0, 0.2, 2.1])
# compute rir
room.image_source_model()
room.compute_rir()
return room | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def box(x, y, z):\n global _cmds\n _cmds = (f\"cube({[x,y,z]},\"\n f\"center=false);\\n\\n\") + _cmds",
"def cuboid(geometry,\n network,\n propname,\n **params):\n print('cuboid: nothing yet')",
"def genCubes():\n offset = vpy.vector(.5, .5, .5)\n size = vpy.vector(.2, .2, .2)\n B1 = vpy.box(pos=vpy.vector(0, 0, 0)-offset,\n color=vpy.vector(0, 0, 0), size=size, make_trail=True)\n B2 = vpy.box(pos=vpy.vector(0, 0, 1)-offset,\n color=vpy.vector(0, 0, 1), size=size, make_trail=True)\n B3 = vpy.box(pos=vpy.vector(0, 1, 1)-offset,\n color=vpy.vector(0, 1, 1), size=size, make_trail=True)\n B4 = vpy.box(pos=vpy.vector(0, 1, 0)-offset,\n color=vpy.vector(0, 1, 0), size=size, make_trail=True)\n\n B5 = vpy.box(pos=vpy.vector(1, 0, 0)-offset,\n color=vpy.vector(1, 0, 0), size=size, make_trail=True)\n B6 = vpy.box(pos=vpy.vector(1, 0, 1)-offset,\n color=vpy.vector(1, 0, 1), size=size, make_trail=True)\n B7 = vpy.box(pos=vpy.vector(1, 1, 0)-offset,\n color=vpy.vector(1, 1, 0), size=size, make_trail=True)\n B8 = vpy.box(pos=vpy.vector(1, 1, 1)-offset,\n color=vpy.vector(1, 1, 1), size=size, make_trail=True)\n\n return [B1, B2, B3, B4, B5, B6, B7, B8]",
"def generate_cube():\n \n num_voxels = 31\n\n data_x = []\n data_y = []\n data_z = []\n data_intensity = []\n\n volume = numpy.zeros((num_voxels, num_voxels, num_voxels))\n\n for x in range(num_voxels):\n for y in range(num_voxels):\n for z in range(num_voxels):\n\n if 5 < x < 10 and 5 < y < 10:\n data_x.append(x)\n data_y.append(y)\n data_z.append(z)\n data_intensity.append(200.0)\n\n volume[x,y,z] = 200.0\n\n\n return data_x, data_y, data_z, data_intensity, volume",
"def surface_area_of_cube(side):\n return side",
"def create_cube():\n new_cube = RubicsCube2x2()\n show_cube_console(new_cube)\n\n seed = [10, 9, 17, 14, 11, 8, 3, 2, 17, 3, 9, 7, 15, 4, 14, 14, 3, 3, \\\n 13, 7, 15, 9, 14, 13, 11, 17, 7, 10, 5, 16, 11, 5, 7, 10, 14, \\\n 7, 17, 7, 8, 6, 12, 3, 6, 1, 16, 12, 5, 13, 3, 4]\n for move in seed:\n new_cube.do_move(move)\n return new_cube",
"def getCube(unique_name):",
"def corridor(x,z, emap, width=10, length=10, height=10, details=None, walls=\"ns\", name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n solid_objects = []\r\n\r\n if \"n\" in walls:\r\n # TODO: abstract out the mostly-duplicate code in these cases...\r\n nwall = SolidObject(name+str(wallnum),\r\n Size(length, height, 1),\r\n Position(x, emap.calcHeight(x, z) + height / 2, n-0.5), 0)\r\n solid_objects.append(nwall)\r\n nwallmodel = createMyCuboid(nwall.w() * 2, nwall.h() * 2, nwall.d() * 2,\r\n name=name+str(wallnum),\r\n x=nwall.x(),y=nwall.y(),z=nwall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(nwallmodel)\r\n else:\r\n nwall.setmodel(nwallmodel, details)\r\n\r\n\r\n wallnum += 1\r\n\r\n if \"s\" in walls:\r\n swall = SolidObject(name+str(wallnum), Size(length, height, 1), Position(x, emap.calcHeight(x, z)+height / 2, s+0.5), 0)\r\n solid_objects.append(swall)\r\n swallmodel = createMyCuboid(swall.w()*2, swall.h()*2, swall.d()*2,\r\n name=name+str(wallnum),\r\n x=swall.x(), y=swall.y(), z=swall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0,cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(swallmodel)\r\n else:\r\n swall.setmodel(swallmodel, details)\r\n\r\n wallnum += 1\r\n\r\n if \"e\" in walls:\r\n ewall = SolidObject(name+str(wallnum), Size(1, height, width), Position(e-0.5, emap.calcHeight(x, z)+height / 2, z), 0)\r\n solid_objects.append(ewall)\r\n ewallmodel = createMyCuboid(ewall.w()*2, ewall.h()*2, ewall.d()*2,\r\n name=name+str(wallnum),\r\n x=ewall.x(), y=ewall.y(), z=ewall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(ewallmodel)\r\n else:\r\n ewall.setmodel(ewallmodel, details)\r\n\r\n wallnum += 1\r\n\r\n if \"w\" in walls:\r\n wwall = SolidObject(name+str(wallnum), Size(1, height, width), Position(w+0.5, emap.calcHeight(x, z)+height / 2, z), 0)\r\n solid_objects.append(wwall)\r\n wwallmodel = createMyCuboid(wwall.w()*2, wwall.h()*2, wwall.d()*2,\r\n name=name+str(wallnum),\r\n x=wwall.x(), y=wwall.y(), z=wwall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(wwallmodel)\r\n else:\r\n wwall.setmodel(wwallmodel, details)\r\n wallnum += 1\r\n\r\n if \"o\" not in walls:\r\n ceiling = SolidObject(name+str(wallnum), Size(length, 1, width), Position(x, emap.calcHeight(x, z)+height+0.5, z), 0)\r\n solid_objects.append(ceiling)\r\n ceilingmodel = createMyCuboid(ceiling.w()*2, ceiling.h()*2, ceiling.d()*2,\r\n name=name+str(wallnum),\r\n x=ceiling.x(), y=ceiling.y(), z=ceiling.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(ceilingmodel)\r\n else:\r\n ceiling.setmodel(ceilingmodel, details)\r\n\r\n wallnum += 1\r\n\r\n return solid_objects",
"def mk_room(self, bounding_box):\n if (bounding_box.top + self.edge_min + self.room_min\n > bounding_box.bottom):\n raise ValueError(\"Region too small to make room\")\n if (bounding_box.left + self.edge_min + self.room_min\n > bounding_box.right):\n raise ValueError(\"Region too small to make room\")\n h_max = bounding_box.bottom - bounding_box.top - self.edge_min\n w_max = bounding_box.right - bounding_box.left - self.edge_min\n height = random.randint(self.room_min, h_max)\n width = random.randint(self.room_min, w_max)\n\n # we now have a room height and width that fit within our bounding box.\n # Just need to decide where to put the top left corner\n y_start = random.randint(bounding_box.top + self.edge_min,\n bounding_box.bottom - height)\n x_start = random.randint(bounding_box.left + self.edge_min,\n bounding_box.right - width)\n room = Box(y_start, x_start, y_start + height - 1, x_start + width - 1)\n for i in range(y_start, y_start + height):\n for j in range(x_start, x_start + width):\n self.set_tile(Point(j, i))\n return room",
"def plasm_cube(self, size=0.1, color=WHITE):\n return COLOR(color)(T([1,2,3])(self.coords)(CUBOID([size, size, size])))",
"def volume_of_a_cuboid(length, width, height):\n return length * width * height",
"def draw_box(self) -> None:\n from math import pi, sin, cos\n import pymol\n from pymol import cmd\n\n # Convert angle\n angle1 = (self.angle1.value() / 180.0) * pi\n angle2 = (self.angle2.value() / 180.0) * pi\n\n # Get positions of box vertices\n # P1\n x1 = -self.min_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n\n y1 = -self.min_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n \n z1 = self.min_x.value() * sin(angle2) + self.min_y.value() * sin(angle1) * cos(angle2) - self.min_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P2\n x2 = self.max_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n \n y2 = (-self.min_y.value()) * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n \n z2 = (-self.max_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P3\n x3 = (-self.min_x.value()) * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n\n y3 = self.max_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n\n z3 = -(-self.min_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P4\n x4 = (-self.min_x.value()) * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y4 = (-self.min_y.value()) * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z4 = -(-self.min_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P5\n x5 = self.max_x.value() * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + (-self.min_z.value()) * cos(angle1) * sin(angle2) + self.x\n \n y5 = self.max_y.value() * cos(angle1) + (-self.min_z.value()) * sin(angle1) + self.y\n\n z5 = (-self.max_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + (-self.min_z.value()) * cos(angle1) * cos(angle2) + self.z\n\n # P6\n x6 = self.max_x.value() * cos(angle2) - (-self.min_y.value()) * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y6 = (-self.min_y.value()) * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z6 = (-self.max_x.value()) * sin(angle2) - (-self.min_y.value()) * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P7\n x7 = (-self.min_x.value()) * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n\n y7 = self.max_y.value() * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n\n z7 = -(-self.min_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # P8\n x8 = self.max_x.value() * cos(angle2) - self.max_y.value() * sin(angle1) * sin(angle2) + self.max_z.value() * cos(angle1) * sin(angle2) + self.x\n \n y8 = self.max_y.value() * cos(angle1) + self.max_z.value() * sin(angle1) + self.y\n \n z8 = (-self.max_x.value()) * sin(angle2) - self.max_y.value() * sin(angle1) * cos(angle2) + self.max_z.value() * cos(angle1) * cos(angle2) + self.z\n\n # Create box object\n pymol.stored.list = []\n if \"box\" in 
cmd.get_names(\"selections\"):\n cmd.iterate(\"box\", \"stored.list.append((name, color))\", quiet=1)\n list_color = pymol.stored.list\n cmd.delete(\"box\")\n if len(list_color) > 0:\n for item in list_color:\n at_name = item[0]\n at_c = item[1]\n cmd.set_color(at_name + \"color\", cmd.get_color_tuple(at_c))\n else:\n for at_name in [\"v2\", \"v3\", \"v4\", \"v5\", \"v6\", \"v7\", \"v8\", \"v1x\", \"v1y\", \"v1z\", \"v2x\", \"v3y\", \"v4z\"]:\n cmd.set_color(at_name + \"color\", [0.86, 0.86, 0.86])\n\n # Create vertices\n cmd.pseudoatom(\"box\", name=\"v2\", pos=[x2, y2, z2], color=\"v2color\")\n cmd.pseudoatom(\"box\", name=\"v3\", pos=[x3, y3, z3], color=\"v3color\")\n cmd.pseudoatom(\"box\", name=\"v4\", pos=[x4, y4, z4], color=\"v4color\")\n cmd.pseudoatom(\"box\", name=\"v5\", pos=[x5, y5, z5], color=\"v5color\")\n cmd.pseudoatom(\"box\", name=\"v6\", pos=[x6, y6, z6], color=\"v6color\")\n cmd.pseudoatom(\"box\", name=\"v7\", pos=[x7, y7, z7], color=\"v7color\")\n cmd.pseudoatom(\"box\", name=\"v8\", pos=[x8, y8, z8], color=\"v8color\")\n\n # Connect vertices\n cmd.select(\"vertices\", \"(name v3,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v5,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v3,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v6,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v7,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v1x\", pos=[x1, y1, z1], color='red')\n cmd.pseudoatom(\"box\", name=\"v2x\", pos=[x2, y2, z2], color='red')\n cmd.select(\"vertices\", \"(name v1x,v2x)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v1y\", pos=[x1, y1, z1], color='forest')\n cmd.pseudoatom(\"box\", name=\"v3y\", pos=[x3, y3, z3], color='forest')\n cmd.select(\"vertices\", \"(name v1y,v3y)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"box\", name=\"v4z\", pos=[x4, y4, z4], color='blue')\n cmd.pseudoatom(\"box\", name=\"v1z\", pos=[x1, y1, z1], color='blue')\n cmd.select(\"vertices\", \"(name v1z,v4z)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.delete(\"vertices\")",
"def find_square_box(box):\n width = box['bottom_right_x'] - box['top_left_x']\n height = box['bottom_right_y'] - box['top_left_y']\n if width <= height:\n offset = int((width - height) / 2)\n box['top_left_x'] = box['top_left_x'] - offset\n box['bottom_right_x'] = box['bottom_right_x'] + offset\n else:\n offset = int((height - width) / 2)\n box['top_left_y'] = box['top_left_y'] - offset\n box['bottom_right_y'] = box['bottom_right_y'] + offset\n return box",
"def GetBox(quad):\n x0, y0, _, _, x1, y1, _, _ = quad\n return (x0, y0, x1, y1)",
"def cube_from_bbox(bbox):\n cube = pm.polyCube(\n width=bbox.width(),\n height=bbox.height(),\n depth=bbox.depth(),\n ch=False\n )\n cube[0].setAttr('t', bbox.center())\n return cube[0]",
"def covering_box(boxes):\n x_min = np.amin([b.x for b in boxes])\n x_max = np.amax([b.x + b.width for b in boxes])\n y_min = np.amin([b.y for b in boxes])\n y_max = np.amax([b.y + b.height for b in boxes])\n cover = Box(x_min, y_min, x_max - x_min, y_max - y_min)\n return cover",
"def testCube(self):\n cube = {i:(i^1,i^2,i^4) for i in range(8)}\n self.check(cube,6)",
"def __get_box(self, position):\n return self.__board[position//self.__length][position%self.__length]",
"def __drawCube(self):\n self.cubePos = [[[(160, 160), (200, 160), (240, 160)],\n [(160, 200), (200, 200), (240, 200)],\n [(160, 240), (200, 240), (240, 240)]],\n [[(400, 160), (440, 160), (480, 160)],\n [(400, 200), (440, 200), (480, 200)],\n [(400, 240), (440, 240), (480, 240)]],\n [[(280, 160), (320, 160), (360, 160)],\n [(280, 200), (320, 200), (360, 200)],\n [(280, 240), (320, 240), (360, 240)]],\n [[(40, 160), (80, 160), (120, 160)],\n [(40, 200), (80, 200), (120, 200)],\n [(40, 240), (80, 240), (120, 240)]],\n [[(160, 40), (200, 40), (240, 40)],\n [(160, 80), (200, 80), (240, 80)],\n [(160, 120), (200, 120), (240, 120)]],\n [[(160, 280), (200, 280), (240, 280)],\n [(160, 320), (200, 320), (240, 320)],\n [(160, 360), (200, 360), (240, 360)]]]\n self.cubeColor = {1: 'green', 2: 'blue', 3: 'red', 4: 'orange',\\\n 5: 'white', 6: 'yellow'}\n for x in range(6):\n for y in range(3):\n for z in range(3):\n pos = self.cubePos[x][y][z]\n color = self.cubeColor[self.cube.cube[x][y][z]]\n self.cv.create_rectangle(pos[0], pos[1], pos[0]+40, pos[1]+40,\n fill=color, width='2')",
"def get_boxes(self):\r\n\r\n boxes = [(\" \", self.worldbox.tl, self.worldbox.br)]\r\n# boxes = []\r\n boxes += [(\".\", b.tl, b.br) for b in self.wallboxes]\r\n boxes += [(\"x\", b.tl, b.br) for b in self.targetboxes]\r\n agentscale = 100\r\n boxes += [(\"a\", (self.i_state[0] - self.dx * agentscale, self.i_state[1] - self.dx * agentscale),\r\n (self.i_state[0] + self.dx * agentscale, self.i_state[1] + self.dx * agentscale))]\r\n return boxes",
"def create_room(self):\n # iterate through array of room types\n rooms = []\n prob_block_5_list = []\n prob_block_6_list = []\n\n for row in self.room_type:\n for col in row:\n rooms.append(self.import_template(col))\n # iterate through rooms to fill screen\n # this number will be part of how we find location of top left corner of room\n # based on 5x5 grid of rooms\n for pos in range(25):\n # this will iterate through the number of columns in array\n # the number y will be part of how we find where to place the block on the y axis (according to pygame.draw)\n for y in range(self.blocks_per_room_y):\n # this will iterate through the number of rows in array\n # the number x will be part of how we find where to place the block on the x axis (according to pygame.draw)\n for x in range(self.blocks_per_room_x):\n # if cell is a 1 add a platform sprite\n if rooms[pos][y][x] is 1:\n #check if platform has another above it for graphics\n if rooms[pos][y - 1][x] in (0, 3, 4, 7) and y - 1 >= 0:\n # the cases checked in each of these conditionals are the basic case that check surrounding blocks\n # to see what platform we should be using, the edge cases, such as if a block is at the edge of\n # the room, in which case we need to check the neighboring room (array in this case)\n\n #check conditions to see if we are using the sprite with with rounded edges on the bottom right and top right\n if ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 0 and rooms[pos][y][x + 1] is 0 and rooms[pos][y][x - 1] is 1)\\\n or (x is self.blocks_per_room_x - 1 and y < self.blocks_per_room_y - 1 and pos < 24 and rooms[pos][y + 1][x] is 0 and rooms[pos + 1][y][0] is 0)\\\n or (y is self.blocks_per_room_y - 1 and x < self.blocks_per_room_x - 1 and pos < 20 and rooms[pos][y][x + 1] is 0):\n block = Platform(self.block_width, self.block_height, 'right', self.theme)\n #check conditionals to see if we are using the sprite with rounded edges on the bottom left and top left\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 0 and rooms[pos][y][x - 1] is 0 and rooms[pos][y][x + 1] is 1)\\\n or (x is 0 and y < self.blocks_per_room_y - 1 and pos > 0 and rooms[pos][y + 1][x] is 0 and rooms[pos - 1][y][self.blocks_per_room_x - 1] is 0) \\\n or (y is self.blocks_per_room_y - 1 and x > 0 and pos < 20 and rooms[pos][y][x - 1] is 0):\n block = Platform(self.block_width, self.block_height, 'left', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corners on top left and top right\n elif ((x + 1) < self.blocks_per_room_x and (x - 1) >= 0 and rooms[pos][y][x + 1] in (0, 3, 4) and rooms[pos][y][x - 1] in (0, 3, 4))\\\n or (x is 0 and pos > 0 and rooms[pos - 1][y][self.blocks_per_room_x - 1] in (0, 3, 4) and rooms[pos][y][x + 1] in (0, 3, 4))\\\n or (x is self.blocks_per_room_x - 1 and pos < 24 and rooms[pos + 1][y][0] in (0, 3, 4) and rooms[pos][y][x - 1] in (0, 3, 4)):\n block = Platform(self.block_width, self.block_height, 'round top', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corner in the top left\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 1 and rooms[pos][y][x - 1] is 0 and rooms[pos][y][x + 1] is 1) \\\n or (x is 0 and y < self.blocks_per_room_y - 1 and pos > 0 and rooms[pos][y + 1][x] is 1 and rooms[pos - 1][y][self.blocks_per_room_x - 
1] is 0) \\\n or (y is self.blocks_per_room_y - 1 and x > 0 and pos < 20 and rooms[pos][y][x - 1] is 0):\n block = Platform(self.block_width, self.block_height, 'top left', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corner in the top right\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 1 and rooms[pos][y][x + 1] is 0 and rooms[pos][y][x - 1] is 1)\\\n or (x is self.blocks_per_room_x - 1 and y < self.blocks_per_room_y - 1 and pos < 24 and rooms[pos][y + 1][x] is 0 and rooms[pos + 1][y][0] is 0)\\\n or (y is self.blocks_per_room_y - 1 and x < self.blocks_per_room_x - 1 and pos < 20 and rooms[pos][y][x + 1] is 0):\n block = Platform(self.block_width, self.block_height, 'top right', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'top', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'middle', self.theme)\n coord_x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n block.rect.x = coord_x\n block.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n block.player = self.player\n self.platform_list.add(block)\n #if the space above this block is empty see if we spawn an enemy on the spot above current block\n if rooms[pos][y-1][x] is 0 and y - 1 >= 0:\n self.enemy_generation(coord_x, self.block_height + (pos // 5) * self.room_side_length_y + (y - 1) * self.block_height)\n # if the cell is a 3 then it will be an item pickup\n elif rooms[pos][y][x] is 3:\n rand = random.randrange(0, 4)\n if rand == 0:\n #calculate coordinates of the bag\n bag = pickupSprite('rope')\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n elif rand == 1:\n #calculate coordinates of the bag\n bag = pickupSprite('knife')\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n elif rand == 2:\n bag = pickupSprite('health')\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n\n\n # if the cell is a 4 then it will be either a spike, if the space is on the bottom of the room,\n # otherwise it is a randomized block or nothing\n elif rooms[pos][y][x] is 4:\n # if the cell is at the bottom of the level, randomly choose whether to place a spike or not\n rand = random.randrange(0, 3)\n rand2 = random.randrange(0, 2)\n if y is 6 and rand is 1:\n spike = enemies.Spikes()\n spike.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n spike.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n spike.player = self.player\n self.enemy_list.add(spike)\n # elif y is 6 and rand is 2:\n # dart = enemies.Darts(self.theme, 'up')\n # dart.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * 
self.block_width\n # dart.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n # dart.player = self.player\n # self.enemy_list.add(dart)\n elif y != 6 and rand2 is 0:\n if rooms[pos][y - 1][x] is 0:\n block = Platform(self.block_width, self.block_height, 'top', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'middle', self.theme)\n block.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n block.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n block.player = self.player\n self.platform_list.add(block)\n elif y != 6 and rand2 is 1:\n if x-1 >= 0 and x+1 <= self.blocks_per_room_x and y-1 >= 0 and y+1 < self.blocks_per_room_y:\n if rooms[pos][y][x-1] is 0:\n direction = 'left'\n blockType = 'middle'\n elif rooms[pos][y][x+1] is 0:\n direction = 'right'\n blockType = 'middle'\n elif rooms[pos][y-1][x] is 0:\n direction = 'up'\n blockType = 'top'\n elif rooms[pos][y+1][x] is 0:\n direction = 'down'\n blockType = 'middle'\n else:\n direction = None\n if direction is not None:\n # use for both block and dart\n rectX = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n rectY = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n\n block = Platform(self.block_width, self.block_height, blockType, self.theme)\n block.rect.x = rectX\n block.rect.y = rectY\n block.player = self.player\n self.platform_list.add(block)\n\n dart = enemies.Darts(self.theme, direction)\n dart.rect.x = rectX\n dart.rect.y = rectY\n dart.player = self.player\n self.enemy_list.add(dart)\n # this is the starting and ending points of the level\n elif rooms[pos][y][x] is 7:\n # exit of the game on the top row of the level\n if pos // 5 is 0:\n #calculate coordinates of the exit\n self.exit_coords['x'] = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n self.exit_coords['y'] = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n exit = exit_door_sprite(self.block_width, self.block_height)\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n exit.rect.x = self.exit_coords['x']\n exit.rect.y = self.exit_coords['y']\n exit.player = self.player\n self.exit_sprite.add(exit)\n #entance of the game on the bottom row of the level\n elif pos // 5 is 4:\n #calculate coordinates of the entrance\n self.entrance_coords['x'] = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n self.entrance_coords['y'] = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height",
"def cube(x):\n return x*x*x",
"def box(self):\n r2 = self.radius\n res = [self.x - r2, self.y - r2, self.x + r2, self.y + r2]\n return res",
"def _make_test_cube(long_name):\n cs = GeogCS(EARTH_RADIUS)\n data = np.array([[1.0, 1.0, 1.0], [0.0, 0.0, 0.0], [1.0, 0.0, 1.0]])\n cube = Cube(data, long_name=long_name)\n x_coord = DimCoord(\n np.linspace(-45.0, 45.0, 3), \"latitude\", units=\"degrees\", coord_system=cs\n )\n y_coord = DimCoord(\n np.linspace(120, 180, 3), \"longitude\", units=\"degrees\", coord_system=cs\n )\n cube.add_dim_coord(x_coord, 0)\n cube.add_dim_coord(y_coord, 1)\n return cube",
"def return_box(self, num):\n result = []\n three = [0, 1, 2]\n for a in three: #row\n for b in three: #column\n result += [self.board[a+((num-1)//3)*3][b+(num%3-1)*3]]\n return result",
"def make_box_square(box, offset_scale=0.05):\n\n x_min, y_min, x_max, y_max = box[:4]\n center_x = (x_max + x_min) / 2.\n center_y = (y_max + y_min) / 2.\n width = x_max - x_min\n height = y_max - y_min\n\n if height >= width:\n half_box = height / 2.\n x_min = center_x - half_box\n x_max = center_x + half_box\n if width > height:\n half_box = width / 2.\n y_min = center_y - half_box\n y_max = center_y + half_box\n\n box_side_lenght = (x_max + x_min) / 2.\n offset = offset_scale * box_side_lenght\n x_min = x_min - offset\n x_max = x_max + offset\n y_min = y_min - offset\n y_max = y_max + offset\n return (int(x_min), int(y_min), int(x_max), int(y_max))",
"def print_cube(num):\n print(\"Cube: {}\".format(num * num * num))",
"def print_cube(num):\n print(\"Cube: {}\".format(num * num * num))",
"def box_mul():\n\tu0=r.uniform(0,1)\n\tu1=r.uniform(0,1)\n\tz0 = m.sqrt((-2) * m.log(u0)) * m.cos(2 * m.pi * u1)\n\tz1 = m.sqrt((-2) * m.log(u0)) * m.sin(2 * m.pi * u1)\n\treturn (z0, z1)",
"def create_room(room):\n global map\n for x in range(room.x1+1, room.x2):\n for y in range(room.y1+1, room.y2):\n map[x][y].blocked = False\n map[x][y].block_sight = False"
] | [
"0.6676445",
"0.6549599",
"0.6418574",
"0.6388707",
"0.63560385",
"0.63021725",
"0.6228042",
"0.62078243",
"0.6170989",
"0.6027921",
"0.60269403",
"0.6023139",
"0.6019739",
"0.5979369",
"0.59714824",
"0.59581614",
"0.59423727",
"0.5935897",
"0.5934828",
"0.59304893",
"0.5890355",
"0.5886195",
"0.58794683",
"0.5876552",
"0.5873121",
"0.5862503",
"0.5839324",
"0.5839324",
"0.5832779",
"0.58311665"
] | 0.7044862 | 0 |
Returns an empty room with walls of different materials | def empty_diff_walls():
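	# NOTE: make_polygon, create_walls and the sampling rate fs come from elsewhere in the source module; they are not reproduced in this dataset row.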
	# 4 side walls are mostly reflective (low energy absorption)
room_materials = [pra.Material(energy_absorption=0.1, scattering=None)] * 4
	# floor and ceiling are highly absorptive
room_materials.extend([pra.Material(energy_absorption=0.98, scattering=None)] * 2)
room_faces = make_polygon(
centre=[0,0,2.5],
radius=10,
height=5,
N=4,
rpy=[0,0,np.pi/4]
)
# create room
walls = []
walls.extend(create_walls(room_faces, room_materials))
room = pra.Room(walls, fs=fs, max_order=3, ray_tracing=False, air_absorption=False)
room.add_source([-5, 2, 2.])
room.add_microphone([1, 0, 2.])
# compute rir
room.image_source_model()
room.compute_rir()
return room | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def empty_room():\n\troom_material = pra.Material(energy_absorption=0.6, scattering=None)\n\troom_faces = make_polygon(\n\t\tcentre=[0,0,2.5],\n\t\tradius=10,\n\t\theight=5,\n\t\tN=4,\n\t\trpy=[0,0,np.pi/4]\n\t)\n\n\t# create room\n\twalls = []\n\twalls.extend(create_walls(room_faces, room_material))\n\n\troom = pra.Room(walls, fs=fs, max_order=3, ray_tracing=True, air_absorption=False)\n\n\troom.add_source([0, 0, 2.])\n\troom.add_microphone([0, 0.2, 2.1])\n\n\t# compute rir\n\troom.image_source_model()\n\troom.ray_tracing()\n\troom.compute_rir()\n\n\treturn room",
"def room_with_box():\n\troom_material = pra.Material(energy_absorption=0.6, scattering=None)\n\troom_faces = make_polygon(\n\t\tcentre=[0,0,2.5],\n\t\tradius=10,\n\t\theight=5,\n\t\tN=4,\n\t\trpy=[0,0,np.pi/4]\n\t)\n\n\t# define obstacle\n\tobstacle_faces = make_polygon(\n\t\tcentre=[2.5,0,2.5],\n\t\tradius=1.8,\n\t\theight=3,\n\t\tN=4,\n\t\trpy=[0,0,np.pi/4],\n\t\treverse_normals=True\n\t)\n\tobstacle_material = pra.Material(energy_absorption=0.1, scattering=0.1)\n\n\t# create room\n\twalls = []\n\twalls.extend(create_walls(room_faces, room_material))\n\twalls.extend(create_walls(obstacle_faces, obstacle_material))\n\n\troom = pra.Room(walls, fs=fs, max_order=3, ray_tracing=False, air_absorption=False)\n\n\troom.add_source([0, 0, 2.])\n\troom.add_microphone([0, 0.2, 2.1])\n\n\t# compute rir\n\troom.image_source_model()\n\troom.compute_rir()\n\n\treturn room",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n if len(rooms)==0:\n return rooms\n n, m = len(rooms), len(rooms[0])\n for i in range(n):\n for j in range(m):\n if rooms[i][j] == 0:\n q= [(i,j,0)]\n while q:\n ci, cj , d = q.pop(0)\n for ni, nj in [(ci+1,cj),(ci-1,cj),(ci,cj+1),(ci,cj-1)]:\n if 0<=ni<n and 0<=nj<m and rooms[ni][nj]!=-1:\n if rooms[ni][nj]>d+1:\n rooms[ni][nj] = d+1\n q.append((ni,nj,d+1))\n return rooms",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n offsets = [[0,1],[1,0],[0,-1],[-1,0]]\n size = [len(rooms), len(rooms[0])]\n q = []\n for i in range(len(rooms)):\n for j in range(len(rooms[i])):\n if rooms[i][j] == 0:\n q.append([i,j])\n while len(q) != 0:\n cur = q.pop(0)\n for i in offsets:\n x = cur[0]+i[0]\n y = cur[1]+i[1]\n if x >= size[0] or x<0 or y>=size[1] or y<0 or rooms[x][y] != 2147483647:\n continue\n rooms[x][y] = rooms[cur[0]][cur[1]]+1\n q.append([x,y])",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n rows = len(rooms)\n if rows == 0:\n return\n cols = len(rooms[0])\n GATE = 0\n EMPTY = pow(2, 31) - 1\n DIRECTIONS = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n q = deque([])\n for i in range(rows):\n for j in range(cols):\n if rooms[i][j] == GATE:\n q.append((i, j))\n while q:\n x, y = q.popleft()\n for idx, idy in DIRECTIONS:\n if x + idx < 0 or x + idx >= rows or y + idy < 0 or y + idy >= cols or rooms[x + idx][y + idy] != EMPTY:\n continue\n rooms[x + idx][y + idy] = rooms[x][y] + 1\n q.append((x + idx, y + idy))",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n # start with the gate and search the empty cells it can reach\n # then we take the smallest one\n if not rooms:\n return\n\n m, n = len(rooms), len(rooms[0])\n \n queue = []\n for i in range(m):\n for j in range(n):\n if rooms[i][j] == 0:\n queue.append((i, j))\n\n\n for x, y in queue:\n dist = rooms[x][y] + 1\n\n for dx, dy in ((-1, 0), (1, 0), (0, 1), (0, -1)):\n new_x, new_y = x+dx, y+dy\n if 0 <= new_x < m and 0 <= new_y < n and rooms[new_x][new_y] == 2147483647:\n rooms[new_x][new_y] = dist\n queue.append((new_x, new_y))",
"def create_room(room):\n global map\n for x in range(room.x1+1, room.x2):\n for y in range(room.y1+1, room.y2):\n map[x][y].blocked = False\n map[x][y].block_sight = False",
"def create_room(self):\n # iterate through array of room types\n rooms = []\n prob_block_5_list = []\n prob_block_6_list = []\n\n for row in self.room_type:\n for col in row:\n rooms.append(self.import_template(col))\n # iterate through rooms to fill screen\n # this number will be part of how we find location of top left corner of room\n # based on 5x5 grid of rooms\n for pos in range(25):\n # this will iterate through the number of columns in array\n # the number y will be part of how we find where to place the block on the y axis (according to pygame.draw)\n for y in range(self.blocks_per_room_y):\n # this will iterate through the number of rows in array\n # the number x will be part of how we find where to place the block on the x axis (according to pygame.draw)\n for x in range(self.blocks_per_room_x):\n # if cell is a 1 add a platform sprite\n if rooms[pos][y][x] is 1:\n #check if platform has another above it for graphics\n if rooms[pos][y - 1][x] in (0, 3, 4, 7) and y - 1 >= 0:\n # the cases checked in each of these conditionals are the basic case that check surrounding blocks\n # to see what platform we should be using, the edge cases, such as if a block is at the edge of\n # the room, in which case we need to check the neighboring room (array in this case)\n\n #check conditions to see if we are using the sprite with with rounded edges on the bottom right and top right\n if ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 0 and rooms[pos][y][x + 1] is 0 and rooms[pos][y][x - 1] is 1)\\\n or (x is self.blocks_per_room_x - 1 and y < self.blocks_per_room_y - 1 and pos < 24 and rooms[pos][y + 1][x] is 0 and rooms[pos + 1][y][0] is 0)\\\n or (y is self.blocks_per_room_y - 1 and x < self.blocks_per_room_x - 1 and pos < 20 and rooms[pos][y][x + 1] is 0):\n block = Platform(self.block_width, self.block_height, 'right', self.theme)\n #check conditionals to see if we are using the sprite with rounded edges on the bottom left and top left\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 0 and rooms[pos][y][x - 1] is 0 and rooms[pos][y][x + 1] is 1)\\\n or (x is 0 and y < self.blocks_per_room_y - 1 and pos > 0 and rooms[pos][y + 1][x] is 0 and rooms[pos - 1][y][self.blocks_per_room_x - 1] is 0) \\\n or (y is self.blocks_per_room_y - 1 and x > 0 and pos < 20 and rooms[pos][y][x - 1] is 0):\n block = Platform(self.block_width, self.block_height, 'left', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corners on top left and top right\n elif ((x + 1) < self.blocks_per_room_x and (x - 1) >= 0 and rooms[pos][y][x + 1] in (0, 3, 4) and rooms[pos][y][x - 1] in (0, 3, 4))\\\n or (x is 0 and pos > 0 and rooms[pos - 1][y][self.blocks_per_room_x - 1] in (0, 3, 4) and rooms[pos][y][x + 1] in (0, 3, 4))\\\n or (x is self.blocks_per_room_x - 1 and pos < 24 and rooms[pos + 1][y][0] in (0, 3, 4) and rooms[pos][y][x - 1] in (0, 3, 4)):\n block = Platform(self.block_width, self.block_height, 'round top', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corner in the top left\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 1 and rooms[pos][y][x - 1] is 0 and rooms[pos][y][x + 1] is 1) \\\n or (x is 0 and y < self.blocks_per_room_y - 1 and pos > 0 and rooms[pos][y + 1][x] is 1 and rooms[pos - 1][y][self.blocks_per_room_x - 
1] is 0) \\\n or (y is self.blocks_per_room_y - 1 and x > 0 and pos < 20 and rooms[pos][y][x - 1] is 0):\n block = Platform(self.block_width, self.block_height, 'top left', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corner in the top right\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 1 and rooms[pos][y][x + 1] is 0 and rooms[pos][y][x - 1] is 1)\\\n or (x is self.blocks_per_room_x - 1 and y < self.blocks_per_room_y - 1 and pos < 24 and rooms[pos][y + 1][x] is 0 and rooms[pos + 1][y][0] is 0)\\\n or (y is self.blocks_per_room_y - 1 and x < self.blocks_per_room_x - 1 and pos < 20 and rooms[pos][y][x + 1] is 0):\n block = Platform(self.block_width, self.block_height, 'top right', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'top', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'middle', self.theme)\n coord_x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n block.rect.x = coord_x\n block.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n block.player = self.player\n self.platform_list.add(block)\n #if the space above this block is empty see if we spawn an enemy on the spot above current block\n if rooms[pos][y-1][x] is 0 and y - 1 >= 0:\n self.enemy_generation(coord_x, self.block_height + (pos // 5) * self.room_side_length_y + (y - 1) * self.block_height)\n # if the cell is a 3 then it will be an item pickup\n elif rooms[pos][y][x] is 3:\n rand = random.randrange(0, 4)\n if rand == 0:\n #calculate coordinates of the bag\n bag = pickupSprite('rope')\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n elif rand == 1:\n #calculate coordinates of the bag\n bag = pickupSprite('knife')\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n elif rand == 2:\n bag = pickupSprite('health')\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n\n\n # if the cell is a 4 then it will be either a spike, if the space is on the bottom of the room,\n # otherwise it is a randomized block or nothing\n elif rooms[pos][y][x] is 4:\n # if the cell is at the bottom of the level, randomly choose whether to place a spike or not\n rand = random.randrange(0, 3)\n rand2 = random.randrange(0, 2)\n if y is 6 and rand is 1:\n spike = enemies.Spikes()\n spike.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n spike.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n spike.player = self.player\n self.enemy_list.add(spike)\n # elif y is 6 and rand is 2:\n # dart = enemies.Darts(self.theme, 'up')\n # dart.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * 
self.block_width\n # dart.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n # dart.player = self.player\n # self.enemy_list.add(dart)\n elif y != 6 and rand2 is 0:\n if rooms[pos][y - 1][x] is 0:\n block = Platform(self.block_width, self.block_height, 'top', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'middle', self.theme)\n block.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n block.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n block.player = self.player\n self.platform_list.add(block)\n elif y != 6 and rand2 is 1:\n if x-1 >= 0 and x+1 <= self.blocks_per_room_x and y-1 >= 0 and y+1 < self.blocks_per_room_y:\n if rooms[pos][y][x-1] is 0:\n direction = 'left'\n blockType = 'middle'\n elif rooms[pos][y][x+1] is 0:\n direction = 'right'\n blockType = 'middle'\n elif rooms[pos][y-1][x] is 0:\n direction = 'up'\n blockType = 'top'\n elif rooms[pos][y+1][x] is 0:\n direction = 'down'\n blockType = 'middle'\n else:\n direction = None\n if direction is not None:\n # use for both block and dart\n rectX = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n rectY = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n\n block = Platform(self.block_width, self.block_height, blockType, self.theme)\n block.rect.x = rectX\n block.rect.y = rectY\n block.player = self.player\n self.platform_list.add(block)\n\n dart = enemies.Darts(self.theme, direction)\n dart.rect.x = rectX\n dart.rect.y = rectY\n dart.player = self.player\n self.enemy_list.add(dart)\n # this is the starting and ending points of the level\n elif rooms[pos][y][x] is 7:\n # exit of the game on the top row of the level\n if pos // 5 is 0:\n #calculate coordinates of the exit\n self.exit_coords['x'] = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n self.exit_coords['y'] = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n exit = exit_door_sprite(self.block_width, self.block_height)\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n exit.rect.x = self.exit_coords['x']\n exit.rect.y = self.exit_coords['y']\n exit.player = self.player\n self.exit_sprite.add(exit)\n #entance of the game on the bottom row of the level\n elif pos // 5 is 4:\n #calculate coordinates of the entrance\n self.entrance_coords['x'] = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n self.entrance_coords['y'] = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n lenm = len(rooms)\n lenn = len(rooms[0])\n \n queue = []\n visited = set()\n \n direction = [(0,1),(1,0),(0,-1),(-1,0)]\n \n for i in range(lenm):\n for j in range(lenn):\n if rooms[i][j] == 0:\n queue.append(((i,j),0))\n visited.add((i,j))\n while queue:\n n = len(queue)\n for _ in range(n):\n (a,b),val = queue.pop(0)\n if rooms[a][b] != -1:\n rooms[a][b] = val\n for x,y in direction:\n i = a+x\n j = b+y\n if i <0 or i>=lenm or j<0 or j>=lenn or rooms[i][j] == -1:\n continue\n if (i,j) not in visited:\n visited.add((i,j))\n queue.append(((i,j),val+1))\n \n \n \n \n return rooms",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n if not rooms or not rooms[0]:\n return \n \n queue = deque([])\n for i in range(len(rooms)):\n for j in range(len(rooms[i])):\n if rooms[i][j] == 0:\n queue.append((i,j, 0))\n \n visited = set()\n while queue:\n i, j, distance = queue.popleft()\n \n if not (0 <= i < len(rooms) and 0 <= j < len(rooms[0])):\n continue\n \n if rooms[i][j] == -1:\n continue\n \n if (i, j) in visited:\n continue\n \n visited.add((i, j))\n if rooms[i][j] != 0:\n rooms[i][j] = distance\n \n for offset in OFFSETS:\n queue.append((i + offset[0], j + offset[1], distance + 1))",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n def get_gates(rooms):\n res = []\n M, N = len(rooms), len(rooms[0])\n for i in range(M):\n for j in range(N):\n if rooms[i][j] == 0:\n res.append((i, j))\n return res\n \n que = [(i, j, 0) for i, j in get_gates(rooms)]\n #seen = set()\n deltas = [[0, 1], [0, -1], [-1, 0], [1, 0]]\n R, C = len(rooms), len(rooms[0])\n INF = 2**31 - 1\n while que:\n i, j, distance = que.pop(0)\n for dx, dy in deltas:\n x, y = i + dx, j + dy\n if 0 <= x < R and 0 <= y < C and rooms[x][y] == INF:\n rooms[x][y] = distance + 1\n que.append((x, y, distance + 1))",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\r\n if not rooms or not rooms[0]:\r\n return\r\n \r\n \r\n m, n = len(rooms), len(rooms[0])\r\n q = deque([])\r\n for i in range(m):\r\n for j in range(n):\r\n if rooms[i][j] == 0:\r\n # 把所有为0坐标一次加入队列\r\n q.append((i,j))\r\n \r\n self.BFS(rooms, q)\r\n return",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n if rooms==[]: return\n xcord=len(rooms)\n ycord=len(rooms[0])\n indexstack=[(i,j) for i in range(len(rooms)) for j in range(len(rooms[0])) if rooms[i][j] == 0]\n direction=[(0,1),(1,0),(0,-1),(-1,0)]\n gatenum=1\n while indexstack != []:\n newindex=[]\n for item in indexstack:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if 0<=xpoint <len(rooms) and 0<=ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=gatenum\n newindex.append((xpoint,ypoint))\n indexstack=newindex\n gatenum+=1\n ''''\n for item in index_0:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <len(rooms) and ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=1\n index_1.append((xpoint,ypoint))\n for item in index_1:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <len(rooms) and ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=2\n index_2.append((xpoint,ypoint))\n for item in index_2:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <len(rooms) and ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=3\n index_3.append((xpoint,ypoint))\n for item in index_3:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <=len(rooms) and ypoint<=len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=4\n #index_3.append((xpoint,ypoint))'''",
"def get_room_list():\n room_list = []\n # 0\n room = Room(\n \"Patio. You are in a patio, it is surrounded by four long walls.\\n\\\n The south wall has a metal door.\",\n north = None,\n east = None,\n south = 2,\n west = None\n )\n room_list.append(room)\n # 1\n room = Room(\n \"Bedroom. You are in a bedroom, there is nothing uncommon.\\n\\\n The door lead to east\",\n north = None,\n east = 2,\n south = None,\n west = None\n )\n room_list.append(room)\n # 2\n room = Room(\n \"Kitchen. You are in a kitchen, there is some porridge on the oven.\\n\\\n There are three doors, north, west, south\",\n north = 0,\n east = None,\n south = 4,\n west = 1\n )\n room_list.append(room)\n # 3\n room = Room(\n \"Tv room. You are in the tv room, there is an old tv set, \\n\\\n seems like it hasn't being used for years.\\n\\\n The door lead to east\",\n north = None,\n east = 4,\n south = None,\n west = None\n )\n room_list.append(room)\n # 4\n room = Room(\n \"Dinning room. You are in a dinning room, there is a big round table.\\n\\\n There are three doors, north, west, south\",\n north = 2,\n east = None,\n south = 6,\n west = 3\n )\n room_list.append(room)\n # 5\n room = Room(\n \"Master bedroom. You are in the master bedroom, there are many religious things all over.\\n\\\n The door lead to east\",\n north = None,\n east = 6,\n south = None,\n west = None\n )\n room_list.append(room)\n # 6\n room = Room(\n \"Living room. You are in the living room, you can see the exit door to the south.\\n\\\n There are three doors, north, west, south\",\n north = 4,\n east = None,\n south = 7,\n west = 5\n )\n room_list.append(room)\n return room_list",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n ## bfs:\n queue= []\n for r in range(len(rooms)):\n for c in range(len(rooms[0])):\n if rooms[r][c]==0:\n queue.append((r,c, 0)) \n \n while queue:\n r,c, l= queue.pop(0)\n for new_r, new_c in [(r+1, c), (r-1, c), (r, c+1), (r, c-1)]:\n if new_r>=0 and new_c>=0 and new_r<len(rooms) and \\\n new_c<len(rooms[0]) and rooms[new_r][new_c]==2**31 -1:\n rooms[new_r][new_c]=l+1\n queue.append((new_r, new_c, l+1))\n return rooms",
"def create_outer_walls(space,width,height):\n static_lines = [pymunk.Segment(space.static_body, (0.0, 0.0), (width, 0.0), 0.0),\n pymunk.Segment(space.static_body, (width, 0.0), (width, height), 0.0),\n pymunk.Segment(space.static_body, (width, height), (0.0, height), 0.0),\n pymunk.Segment(space.static_body, (0.0, 600.0), (0.0, 0.0), 0.0)]\n for line in static_lines:\n line.friction = 0.5\n line.elasticity = 0.9\n\n return static_lines",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n if not rooms or not rooms[0]:\n return\n m, n = len(rooms), len(rooms[0])\n INF = 2147483647\n q = []\n for i in range(m):\n for j in range(n):\n if rooms[i][j] == 0:\n q.append((i, j))\n # bfs\n while q:\n newq = []\n for i, j in q:\n for u, v in [(i-1,j), (i,j+1), (i+1,j), (i,j-1)]:\n if 0 <= u < m and 0 <= v < n and rooms[u][v] == INF:\n rooms[u][v] = rooms[i][j] + 1\n newq.append((u, v))\n q = newq",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\r\n if not rooms:\r\n return rooms\r\n \r\n ind_0s = []\r\n len_row = len(rooms)\r\n len_col = len(rooms[0])\r\n for i in range(len_row):\r\n for j in range(len_col):\r\n if rooms[i][j] == 0:\r\n ind_0s.append((i,j))\r\n que = ind_0s\r\n while que:\r\n i,j = que.pop()\r\n neighbors = [(i,j-1), (i-1,j), (i,j+1), (i+1,j)]\r\n if rooms[i][j] == 0:\r\n parent_distance = 0\r\n else:\r\n parent_distance = rooms[i][j]\r\n for x, y in neighbors:\r\n if x>=0 and x<len_row and y>=0 and y<len_col and rooms[x][y]!=-1 and rooms[x][y] and \\\r\n parent_distance+1 < rooms[x][y]:\r\n que.append((x,y))",
"def create_rooms(width=10, height=10):\n # create blank rooms\n rooms = [Room(id) for id in range(1, (width * height + 1))]\n\n # make some rooms inaccessible\n for _ in range(0, height*width//10):\n rand_num = randint(0, height*width-1)\n while rooms[rand_num].is_accessible == False:\n rand_num = randint(0, height*width-1)\n rooms[rand_num].is_accessible = False\n\n # set each room's directions\n for i in range(len(rooms)):\n room = rooms[i]\n room.title = f\"Room {id}\"\n room.description += f\"{id}\"\n n_index = i - width\n s_index = i + width\n e_index = i + 1\n w_index = i - 1\n\n if not rooms[i].is_accessible:\n continue\n\n if n_index >= 0:\n if rooms[n_index].is_accessible:\n room.n_to = n_index + 1\n if s_index < height * width:\n if rooms[s_index].is_accessible:\n room.s_to = s_index +1\n if e_index % width != 0 and e_index < len(rooms):\n if rooms[e_index].is_accessible:\n room.e_to = e_index + 1\n if i % width != 0:\n if rooms[w_index].is_accessible:\n room.w_to = w_index + 1\n\n # put rooms into rows\n rooms_in_rows = []\n\n start_index = 0\n for _ in range(height):\n row = rooms[start_index:start_index+width]\n start_index += width\n rooms_in_rows.append(row)\n\n return rooms\n # for room in rooms:\n # print(room.id, room.is_accessible, room.n_to, room.s_to, room.e_to, room.w_to)\n # for row in rooms_in_rows:\n # print(len(row))",
"def makeRoom(cls, size, center, environment, wallThickness = 0.01):\n wallList = []\n sx,sy,sz = size\n cx, cy, cz = center\n t = wallThickness#*environment.lengthScale\n\n # top (y+)\n w = cls((sx, t, sz), (cx, cy+sy/2+t/2, cz), environment)\n wallList.append(w)\n\n #bottom (y-)\n w = cls((sx, t, sz), (cx, cy-sy/2-t/2, cz), environment)\n wallList.append(w)\n\n #right (x+)\n w = cls((t, sy, sz), (cx+sx/2+t/2, cy, cz), environment)\n wallList.append(w)\n\n #left (x-)\n w = cls((t, sy, sz), (cx-sx/2-t/2, cy, cz), environment)\n wallList.append(w)\n\n #front (z+)\n w = cls((sx, sy, t), (cx, cy, cz+sz/2+t/2), environment)\n wallList.append(w)\n\n #back (z-)\n w = cls((sx, sy, t), (cx, cy, cz-sz/2-t/2), environment)\n wallList.append(w)\n\n return wallList",
"def all_rooms(self):\n pass",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n if rooms == []:\n return\n \n row = len(rooms)\n column = len(rooms[0])\n visited = [[False for i in range(column)] for j in range(row)]\n def valid(row_index, column_index):\n if row_index < row and row_index >= 0 and column_index< column and column_index >= 0:\n return True\n return False\n \n \n def bfs_traverse(row_index, column_index, distance):\n if valid(row_index, column_index) == False or rooms[row_index][column_index] < distance:\n return\n else:\n # if rooms[row_index] [column_index] != -1 and rooms[row_index] [column_index] != 0:\n\n rooms[row_index][column_index] = distance\n \n bfs_traverse(row_index+1, column_index, distance+1)\n\n bfs_traverse(row_index, column_index +1 , distance+1)\n\n bfs_traverse(row_index-1, column_index, distance+1)\n\n bfs_traverse(row_index, column_index-1, distance+1)\n \n \n for row_index in range(row):\n for column_index in range(column):\n if rooms[row_index][column_index] == 0:\n bfs_traverse(row_index, column_index, 0)",
"def room_wall(self):\n print(f\"You are in {self.room}.\")\n print(\"Which wall would you like to look at?\")\n for key, walls in wall.items():\n print(f\" - {key}: {walls}\")",
"def door_world(self):\n maze = self.create_maze_world(height= 6, width = 9)\n for i in range(maze.dims[0]):\n if i is not 3:\n maze.add_wall( (i, 6), \"W\")\n for j in range(6):\n if j is not 0:\n maze.add_wall( (2 , j), \"N\")\n maze.add_wall((2,2), \"E\")\n maze.add_wall((0,2), \"E\")\n return maze",
"def select_walls(matrix, xaxis, yaxis, zaxis):\n clear()\n print(\"Now we're going to construct the interior of the rooms.\")\n print(\"I'm going to present you with all rooms that have been selected.\")\n print(\"I want you to indicate where the walls should be placed in each room.\")\n print(\"Just like room selection, you will indicate, by a list of numbers,\")\n print(\"where the walls should be placed.\")\n print()\n print(\"In locations where a room is adjacent to a void space, a non-optional\")\n print(\"will automatically be inserted. Later some of these can be removed\")\n print(\"to create vertical trasversals, but for now, just consider each\")\n print(\"floor individually.\")",
"def generate_rooms(self):\n\t\tall_rooms = []\n\t\t\n\t\trow_num = 0\n\n\t\tfor r in self.grid_matrix:\t\t# r is a list, but it's really just used as a range here\n\n\t\t\tcol_num = 0\n\t\t\t\n\t\t\tfor c in r:\t\t\t\t\t# c is a string (literally a ' * ') but is also used as range here \n\t\t\t\troom_type = self.get_room_type()\n\t\t\t\troom = {'Coords':[row_num, col_num], 'Type':room_type} #add 'Visited' with Bool value\n\t\t\t\tall_rooms.append(room)\n\t\t\t\tcol_num += 1\n\n\t\t\trow_num += 1\n\n\t\treturn all_rooms\n\n\t\t# you think all_rooms is a list of lists, and that each nested list contains dictionaries (rooms), but is it?\n\t\t# I see the outer list created at the start of the function, but where are the inner lists being created?\n\t\t# isn't this just creating a bunch of dictionaries inside the all_rooms list? you need to test this code\n\t\t# and print out all_rooms to see what it actually contains....",
"def get_walls(world):\r\n return set(((x,y) for x in range(world.get_width()) for y in range(world.get_height()) if world.is_wall((x,y))))",
"def add_walls(self):\n for x in range(self.width + 1):\n if not self.some_things_at((x, 0), Wall):\n self.add_thing(Wall(), (x, 0))\n if not self.some_things_at((x, self.height), Wall):\n self.add_thing(Wall(), (x, self.height))\n\n for y in range(self.height + 1):\n if not self.some_things_at((0, y), Wall):\n self.add_thing(Wall(), (0, y))\n if not self.some_things_at((self.width, y), Wall):\n self.add_thing(Wall(), (self.width, y))\n #self.add_thing(Wumpus(),(1,3))\n #self.add_thing(Pit(),(3,3))\n #self.add_thing(Pit(),(3,1))\n #self.add_thing(Gold(),(2,3))\n #self.add_thing(Pit(),(4,4))",
"def _create_room(new_map, room):\n for x in range(room.x1 + 1, room.x2):\n for y in range(room.y1 + 1, room.y2):\n new_map.terrain[x][y] = 1",
"def all_clean(room):\r\n\r\n for row in room:\r\n for cell in row:\r\n if cell == \"dirt\":\r\n return False\r\n\r\n return True"
] | [
"0.8141561",
"0.66774863",
"0.66051066",
"0.65975714",
"0.6447375",
"0.64353395",
"0.6392355",
"0.6391305",
"0.63583034",
"0.6332799",
"0.6323636",
"0.63046914",
"0.62797344",
"0.62634623",
"0.61977875",
"0.6187254",
"0.61689997",
"0.6131876",
"0.61124665",
"0.6069279",
"0.59698915",
"0.59282464",
"0.59269166",
"0.5919579",
"0.5909206",
"0.58659965",
"0.58365273",
"0.57936007",
"0.5769243",
"0.5757642"
] | 0.79700404 | 1 |
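The empty_diff_walls document above depends on make_polygon and create_walls, helper functions from its source repository that are not part of this row, so it cannot be run as pasted here. For orientation only, the following is a minimal self-contained sketch of the same idea (one absorption coefficient for the side walls, a different one for floor and ceiling, then an RIR computation) using pyroomacoustics' built-in ShoeBox room. The room dimensions, coefficients and source/microphone positions are illustrative assumptions, not values taken from this dataset.

import pyroomacoustics as pra

fs = 16000  # assumed sampling rate in Hz; the original module defines its own fs

# Per-wall materials: nearly reflective side walls, highly absorptive floor and ceiling.
materials = {
    "east": pra.Material(0.1), "west": pra.Material(0.1),
    "north": pra.Material(0.1), "south": pra.Material(0.1),
    "floor": pra.Material(0.98), "ceiling": pra.Material(0.98),
}

room = pra.ShoeBox([10.0, 10.0, 5.0], fs=fs, materials=materials, max_order=3)
room.add_source([2.0, 7.0, 2.0])
room.add_microphone([6.0, 5.0, 2.0])

room.image_source_model()
room.compute_rir()
print(len(room.rir[0][0]))  # number of samples in the RIR from source 0 to microphone 0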
Return the number of complementary regions of the train track. | def num_complementary_regions(self):
g = self._get_puncturefinder_graph()
# return g.connected_components_number()
return nx.number_connected_components(g) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_contours_number(self):\n ncontour = len(self.x)\n logger.info(\"Number of contours: {0}\".format(ncontour))\n return ncontour",
"def number_of_new_components(self):\n t_low = self.lower_binary_tree().to_tilting()\n t_up = self.upper_binary_tree().to_tilting()\n return len([p for p in t_low if p in t_up])",
"def number_of_carnivores_island(self):\n return np.sum(self.carnivores_on_island)",
"def n_facets(self):\n return self.n_inequalities()",
"def voxel_count(self):\n return self.cols * self.rows * self.sections",
"def num_cusps_of_regions(self):\n G = self._get_puncturefinder_graph()\n # return [sum(G.subgraph(vertices=region).edge_labels())\n # for region in G.connected_components()]\n return [sum(edge[2]['weight']\n for edge in subgraph.edges(data=True))\n for subgraph in nx.connected_component_subgraphs(G)]",
"def countComponents26(cube):\n n,l = labelComponents26(cube);\n return n;",
"def part_one():\n return len(numpy.where(grid > 1)[0])",
"def cardinality(self):\n estimate = self._alpha * math.pow(self._m, 2) / sum(math.pow(2, -x) for x in self._registers)\n\n if estimate <= 2.5 * self._m:\n # get number of registers equal to zero\n empty_registers = self._registers.count(0)\n if empty_registers != 0:\n return self._linear_count(empty_registers)\n else:\n return estimate\n elif estimate <= ((1 << 32) / 30):\n return estimate\n else:\n return self._large_range_correction(estimate)",
"def trace_region_count(self):\n cmd = enums.JLinkTraceCommand.GET_NUM_REGIONS\n data = ctypes.c_uint32(0)\n res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n if (res == 1):\n raise errors.JLinkException('Failed to get trace region count.')\n return data.value",
"def N(self):\n return len(self.cavity_grid.cavities) + 1",
"def number_of_electrodes(self):\n return self._pre_kernel.shape[1]",
"def count_hits_region(location, region):\n l=len(region)\n c=0\n for i in range(0,l-1):\n if hits_border(location,region[i],region[i+1])==True:\n c=c+1\n return c",
"def get_num_carn_landscape(self):\n return len(self.carn_pop)",
"def n_cs(self):\n return np.size(self._cs, 0)",
"def num_conll(self):\n pass",
"def itkRGBAPixelUC_GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetNumberOfComponents()",
"def number_of_herbivores_island(self):\n return np.sum(self.herbivores_on_island)",
"def area(cnt):\n\treturn cv2.contourArea(cnt)",
"def contig_count(contig):\n return sum([1 for line in open(contig, 'rU').readlines() if line.startswith('>')])",
"def number_bits_in_cardinality(self,card):\n return 32 - self.count_lead_zs(card)",
"def GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetNumberOfComponents()",
"def cumulativeCadenceRevolutionCount(self):\n return (self.raw[4] << 8) | self.raw[3]",
"def number_weakly_connected_components(G):\n return sum(1 for wcc in weakly_connected_components(G))",
"def _cv_len(cv, X, y):\n return len(cv) if not SK18 else cv.get_n_splits(X, y)",
"def num_clbits(self):\n return 0",
"def number_of_constituents(bc_class):\n num_trn = 0\n cn = bc_class.constituent_properties\n if cn.salinity:\n num_trn += 1\n if cn.temperature:\n num_trn += 1\n if cn.vorticity:\n num_trn += 1\n if not cn.general_constituents.empty:\n num_trn += len(cn.general_constituents.index)\n if not cn.sand.empty:\n num_trn += len(cn.sand.index)\n if not cn.clay.empty:\n num_trn += len(cn.clay.index)\n return num_trn",
"def num_regions(image_data):\n if len(image_data.shape) > 2:\n image_data = skimage.color.rgb2gray(image_data)\n _, num_labels = ndimage.label(image_data)\n return num_labels",
"def __get_contour_num(self,coord_tuple):\r\n boundRect = self.boundRect\r\n x,y = coord_tuple\r\n res = -1\r\n for i in range(len(self.top_contours)):\r\n if ((self.top_contours[i] == True) and (x >= boundRect[i][0]) and (x <= boundRect[i][0]+boundRect[i][2]) and\r\n (y >= boundRect[i][1]) and (y <= boundRect[i][1]+boundRect[i][2])):\r\n res = i\r\n\r\n break\r\n return res",
"def getNumberOfTraces(self) -> int:\n\n if not self.debug:\n self.myFieldFox.write(\"CALC:PAR:COUN?\")\n ret = self.myFieldFox.read()\n else:\n ret = 4\n return ret"
] | [
"0.621802",
"0.61952245",
"0.61242294",
"0.58201116",
"0.57810885",
"0.5775374",
"0.57539856",
"0.5722044",
"0.5713514",
"0.5689252",
"0.56840074",
"0.56836957",
"0.5657429",
"0.56544477",
"0.5639411",
"0.5637723",
"0.5621052",
"0.560351",
"0.56030804",
"0.55957466",
"0.5593106",
"0.5582395",
"0.5569299",
"0.55507046",
"0.5548019",
"0.5542969",
"0.5526294",
"0.5503032",
"0.5500042",
"0.5493815"
] | 0.748708 | 0 |
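Both the num_complementary_regions document above and the complementary_regions row that follows reduce the train-track computation to connected components of an auxiliary "puncture finder" graph, calling networkx where the commented-out lines suggest an older graph API. A tiny standalone illustration of the two networkx calls involved is given below; the graph here is made up for illustration, while the real one comes from _get_puncturefinder_graph, which is defined elsewhere in the source class.

import networkx as nx

g = nx.Graph()
g.add_edges_from([(1, 2), (2, 3)])  # first component
g.add_edge(4, 5)                    # second component
g.add_node(6)                       # isolated node, third component

print(nx.number_connected_components(g))                # -> 3
print([sorted(c) for c in nx.connected_components(g)])  # -> [[1, 2, 3], [4, 5], [6]]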
Return the boundary paths of the complementary regions. Each region lies on the right side of its boundary paths. | def complementary_regions(self):
g = self._get_puncturefinder_graph()
# return g.connected_components()
return list(nx.connected_components(g)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def boundary_polygon_by_edges(self):\n lines=self.boundary_linestrings()\n polys=join_features.lines_to_polygons(lines,close_arc=False)\n if len(polys)>1:\n raise GridException(\"somehow there are multiple boundary polygons\")\n return polys[0]",
"def boundary_polygon(self):\n try:\n return self.boundary_polygon_by_edges()\n except Exception as exc:\n self.log.warning('Warning, boundary_polygon() failed using edges! Trying polygon union method')\n self.log.warning(exc,exc_info=True)\n return self.boundary_polygon_by_union()",
"def for_boundary(self, south, west, north, east):\n south = float(south)\n west = float(west)\n north = float(north)\n east = float(east)\n if west * east < 0:\n # Boundary overlaps a hemisphere line, look up either side of it.\n if (180 - west) < west:\n # Closest to International Date Line.\n lookup = (self._boundary_filter(south, west, north, 180) | \n self._boundary_filter(south, -180, north, east))\n else:\n # Closest to Prime Meridian.\n lookup = (self._boundary_filter(south, west, north, 0) | \n self._boundary_filter(south, 0, north, east))\n else:\n lookup = self._boundary_filter(south, west, north, east)\n return self.filter(lookup)",
"def build_constraints_boundaries(self):\n\n # Trapezoidal and Hermite-Simpson methods can't compute\n # defects at the last node contrary to pseudospectral methods\n coll_method = self.options['tr_method'] in [\n 'trapezoidal', 'hermite-simpson']\n n_nodes = self.problem.prm['n_nodes'] - \\\n 1 if coll_method else self.problem.prm['n_nodes']\n\n # Defects lower and upper boundaries\n defects_low = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n defects_upp = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n\n # Path lower and upper boundaries\n path_low = np.hstack([self.problem.low_bnd.path]\n * (self.problem.prm['n_nodes']))\n path_upp = np.hstack([self.problem.upp_bnd.path]\n * (self.problem.prm['n_nodes']))\n\n # Events lower and upper boundaries\n event_low = self.problem.low_bnd.event\n event_upp = self.problem.upp_bnd.event\n\n # Assembly of the lower and upper boundaries vectors\n low = np.concatenate((defects_low, path_low, event_low))\n upp = np.concatenate((defects_upp, path_upp, event_upp))\n\n return low, upp",
"def GetInteriorEdgesTri(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesTri()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesTri()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags",
"def boundary_polygon(self, time):\n ti = np.where(time == self.times)[0][0]\n com_x, com_y = self.center_of_mass(time)\n # If at least one point along perimeter of the mask rectangle is unmasked, find_boundaries() works.\n # But if all perimeter points are masked, find_boundaries() does not find the object.\n # Therefore, pad the mask with zeroes first and run find_boundaries on the padded array.\n padded_mask = np.pad(self.masks[ti], 1, 'constant', constant_values=0)\n chull = convex_hull_image(padded_mask)\n boundary_image = find_boundaries(chull, mode='inner', background=0)\n # Now remove the padding.\n boundary_image = boundary_image[1:-1, 1:-1]\n boundary_x = self.x[ti].ravel()[boundary_image.ravel()]\n boundary_y = self.y[ti].ravel()[boundary_image.ravel()]\n r = np.sqrt((boundary_x - com_x) ** 2 + (boundary_y - com_y) ** 2)\n theta = np.arctan2((boundary_y - com_y), (boundary_x - com_x)) * 180.0 / np.pi + 360\n polar_coords = np.array([(r[x], theta[x]) for x in range(r.size)], dtype=[('r', 'f4'), ('theta', 'f4')])\n coord_order = np.argsort(polar_coords, order=['theta', 'r'])\n ordered_coords = np.vstack([boundary_x[coord_order], boundary_y[coord_order]])\n return ordered_coords",
"def roi_boundary(roi, pts_per_side=2):\n yy, xx = roi\n xx = np.linspace(xx.start, xx.stop, pts_per_side, dtype='float32')\n yy = np.linspace(yy.start, yy.stop, pts_per_side, dtype='float32')\n\n return polygon_path(xx, yy).T[:-1]",
"def GetInteriorEdgesQuad(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesQuad()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesQuad()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags",
"def boundary_polygon_by_union(self):\n cell_geoms = [None]*self.Ncells()\n\n for i in self.valid_cell_iter():\n xy = self.nodes['x'][self.cell_to_nodes(i)]\n cell_geoms[i] = geometry.Polygon(xy)\n return ops.cascaded_union(cell_geoms)",
"def _add_boundaries(self):\n logger.info(\"add_boundaries: start of method\")\n\n import scipy.spatial as sptl\n import scipy.sparse as sprs\n Lx = self._Lx\n Ly = self._Ly\n Lz = self._Lz\n Np = self.num_pores()\n btype = self._btype\n boffset = 0.05\n\n #Translate internal pores to each face of domain\n poffset = np.zeros((7,3))\n poffset[[2,5],0] = [-Lx, Lx]\n poffset[[3,4],1] = [-Ly, Ly]\n poffset[[1,6],2] = [-Lz, Lz]\n pcoords = pcoords0 = self['pore.coords']\n for i in np.r_[1:7]:\n pcoords = np.concatenate((pcoords,pcoords0 + poffset[i,:]),axis=0)\n\n #Use some twisted logic to get bval list of + for boundary and - for periodic faces\n bval = [0, 1, 2, 3, 4, 5, 6]*(np.array([0, btype[2], btype[0], btype[1], btype[1], btype[0], btype[2]])*-2+1)\n ptype = np.zeros((Np,),dtype=int)\n for i in np.r_[1:7]:\n ptype = np.concatenate((ptype,np.ones((Np,),dtype=int)*bval[i]),axis=0)\n\n #pnum contains the internal ID number of the boundary pores (for connecting periodic points)\n pnum = self.pores()\n pnum = np.tile(pnum,7)\n\n Tri = sptl.Delaunay(pcoords)\n adjmat = sprs.lil_matrix((np.shape(pcoords)[0],np.shape(pcoords)[0]),dtype=int)\n for i in np.arange(0,np.shape(Tri.simplices)[0]):\n #Keep only simplices that are fully in real domain\n adjmat[Tri.simplices[i],Tri.simplices[i]] = 1\n adjmat = sprs.triu(adjmat,k=1,format=\"lil\")\n for i in np.arange(0,Np):\n #Add periodic throats to the netowrk (if any)\n tpore2 = pnum[adjmat.rows[i]][ptype[adjmat.rows[i]]<0]\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = self['throat.conns']\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n #Add boundary pores and throats to the network\n newporetyps = np.unique(ptype[adjmat.rows[i]][ptype[adjmat.rows[i]]>0])\n newporenums = np.r_[self.num_pores():self.num_pores()+np.size(newporetyps)]\n tpore2 = newporenums\n tpore1 = np.ones_like(tpore2,dtype=int)*i\n conns = np.concatenate((conns,np.vstack((tpore1,tpore2)).T),axis=0)\n self['throat.conns'] = conns\n bcoords = np.zeros((7,3),dtype=float)\n coords = self['pore.coords']\n bcoords[1,:] = [coords[i,0], coords[i,1], 0-Lz*boffset]\n bcoords[2,:] = [0-Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[3,:] = [coords[i,0], -Ly*boffset, coords[i,2]]\n bcoords[4,:] = [coords[i,0], Ly+Ly*boffset, coords[i,2]]\n bcoords[5,:] = [Lx+Lx*boffset, coords[i,1], coords[i,2]]\n bcoords[6,:] = [coords[i,0], coords[i,1], Lz+Lz*boffset]\n newporecoords = bcoords[newporetyps,:]\n coords = np.concatenate((coords,newporecoords),axis=0)\n self['pore.coords'] = coords\n #Reset number of pores and throats (easier than tracking it)\n nums = np.r_[0:np.shape(coords)[0]]\n self['pore.numbering'] = nums\n self['pore.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n nums = np.r_[0:np.shape(conns)[0]]\n self['throat.numbering'] = nums\n self['throat.numbering'] = np.ones((nums[-1]+1,),dtype=bool)\n logger.debug(\"add_boundaries: end of method\")",
"def boundary_contour(self, time):\n ti = np.where(time == self.times)[0][0]\n image_mask = binary_erosion(binary_fill_holes(binary_dilation(self.masks[ti])))\n padded_mask = np.pad(image_mask, 1, 'constant', constant_values=0)\n c_out = find_contours(padded_mask, level=0.5, fully_connected=\"high\")\n x_cont = self.x[ti][np.floor(c_out[0][:, 0]).astype(int), np.floor(c_out[0][:, 1]).astype(int)]\n y_cont = self.y[ti][np.floor(c_out[0][:, 0]).astype(int), np.floor(c_out[0][:, 1]).astype(int)]\n ordered_coords = np.vstack([x_cont, y_cont])\n return ordered_coords",
"def GetInteriorEdgesPent(self):\n\n if not isinstance(self.all_edges,np.ndarray):\n self.GetEdgesPent()\n if not isinstance(self.edges,np.ndarray):\n self.GetBoundaryEdgesPent()\n\n sorted_all_edges = np.sort(self.all_edges,axis=1)\n sorted_boundary_edges = np.sort(self.edges,axis=1)\n\n x = []\n for i in range(self.edges.shape[0]):\n current_sorted_boundary_edge = np.tile(sorted_boundary_edges[i,:],\n self.all_edges.shape[0]).reshape(self.all_edges.shape[0],self.all_edges.shape[1])\n interior_edges = np.linalg.norm(current_sorted_boundary_edge - sorted_all_edges,axis=1)\n pos_interior_edges = np.where(interior_edges==0)[0]\n if pos_interior_edges.shape[0] != 0:\n x.append(pos_interior_edges)\n\n edge_aranger = np.arange(self.all_edges.shape[0])\n edge_aranger = np.setdiff1d(edge_aranger,np.array(x)[:,0])\n interior_edges = self.all_edges[edge_aranger,:]\n\n # GET FLAGS FOR BOUNDRAY AND INTERIOR\n edge_flags = np.ones(self.all_edges.shape[0],dtype=np.int64)\n edge_flags[edge_aranger] = 0\n\n self.interior_edges = interior_edges\n return interior_edges, edge_flags",
"def add_boundaries(self):\n\n bound_conns=[]\n bound_coords=[]\n bound_vert_index=[]\n throat_vert_index=[]\n #Find boundary extent\n [x_min,x_max,y_min,y_max,z_min,z_max]=vo.vertex_dimension(self,self.pores(),parm='minmax')\n min_point = np.around(np.array([x_min,y_min,z_min]),10)\n max_point = np.around(np.array([x_max,y_max,z_max]),10)\n Np = self.num_pores()\n Nt = self.num_throats()\n new_throat_count = 0\n # ridge_dict contains a dictionary where the key is a set of 2 neighbouring pores and the value is the vertex indices\n # that form the throat or ridge between them\n for p,v in self._vor.ridge_dict.items():\n # if the vertex with index -1 is contained in list then the ridge is unbounded - ignore these\n if np.all(np.asarray(v) >=0):\n #boundary throats will be those connecting one pore inside the original set and one out\n if (p[0] in range(Np) and p[1] not in range(Np)) or\\\n (p[0] not in range(Np) and p[1] in range(Np)):\n # the dictionary key is not in numerical order so find the pore index inside\n if p[0] in range(Np):\n my_pore=p[0]\n else:\n my_pore=p[1]\n my_pore_coord = self[\"pore.coords\"][my_pore]\n new_pore_coord = my_pore_coord.copy()\n #rounding necessary here to identify the plane as Voronoi can have 1e-17 and smaller errors\n throat_verts = np.around(self._vor.vertices[v],10)\n #find which plane we are aligned with (if any) and align new_pore with throat plane\n if len(np.unique(throat_verts[:,0])) == 1:\n new_pore_coord[0]=np.unique(throat_verts[:,0])\n elif len(np.unique(throat_verts[:,1])) == 1:\n new_pore_coord[1]=np.unique(throat_verts[:,1])\n elif len(np.unique(throat_verts[:,2])) == 1:\n new_pore_coord[2]=np.unique(throat_verts[:,2])\n else:\n new_pore_coord = throat_verts.mean()\n bound_coords.append(new_pore_coord)\n bound_conns.append(np.array([my_pore,new_throat_count+Np]))\n bound_vert_index.append(dict(zip(v,throat_verts)))\n throat_vert_index.append(dict(zip(v,throat_verts)))\n new_throat_count += 1\n\n #Add new pores and connections\n self.extend(pore_coords=bound_coords, throat_conns=bound_conns)\n #Record new number of pores\n Mp = self.num_pores()\n Mt = self.num_throats()\n new_pore_ids = np.arange(Np,Mp)\n new_throat_ids = np.arange(Nt,Mt)\n #Identify which boundary the pore sits on\n front = self.pores()[self['pore.coords'][:,0]==min_point[0]]\n back = self.pores()[self['pore.coords'][:,0]==max_point[0]]\n left = self.pores()[self['pore.coords'][:,1]==min_point[1]]\n right = self.pores()[self['pore.coords'][:,1]==max_point[1]]\n bottom = self.pores()[self['pore.coords'][:,2]==min_point[2]]\n top = self.pores()[self['pore.coords'][:,2]==max_point[2]]\n #Assign labels\n self['pore.boundary'] = False\n self['pore.boundary'][new_pore_ids] = True\n self['pore.right_boundary'] = False\n self['pore.left_boundary'] = False\n self['pore.front_boundary'] = False\n self['pore.back_boundary'] = False\n self['pore.top_boundary'] = False\n self['pore.bottom_boundary'] = False\n self['pore.right_boundary'][right] = True\n self['pore.left_boundary'][left] = True\n self['pore.front_boundary'][front] = True\n self['pore.back_boundary'][back] = True\n self['pore.top_boundary'][top] = True\n self['pore.bottom_boundary'][bottom] = True\n #Save the throat verts\n self[\"pore.vert_index\"][new_pore_ids] = bound_vert_index\n self[\"throat.vert_index\"][new_throat_ids] = throat_vert_index",
"def store_region_boundaries(self): \n # Generate mask that distinguishes cells from white space. \n bwMask, _ = getMaskForSlideImage(self.file)\n \n # Get the indices for mask border. \n _, contours, _ = cv2.findContours(bwMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n # Get region boundaries for each row. \n regionDict = self.get_boundaries_dict(contours)\n return regionDict",
"def boundary(self):\n\n\t\tif not hasattr(self,\"_hessian_boundary\"):\n\t\t\tself.maskBoundaries()\n\n\t\treturn self._hessian_boundary",
"def compute_bb(self):\n all_shapes = list(self.parts.values()) + list(self.edges.values())\n bbox_vertices = cascaded_union(all_shapes).envelope.exterior.coords.xy\n min_x = min(bbox_vertices[0])\n max_x = max(bbox_vertices[0])\n min_y = min(bbox_vertices[1])\n max_y = max(bbox_vertices[1])\n return [min_x, max_x,min_y, max_y]",
"def detect_area_with_boundary(self, boundary): \r\n # create a polynome approximation with the boundary curve\r\n #update boundary\r\n self.set_boundary(boundary)\r\n \r\n # step 1: find the line connected head and tail\r\n head = boundary.get_head(boundary.mode)\r\n tail = boundary.get_tail(boundary.mode)\r\n \r\n major_axis_angle = self.compute_angle(head, tail) \r\n major_axis_length = math.sqrt( (head[1]- tail[1])**2 +(head[0]- tail[0])**2)\r\n # step 2: find cropping positions\r\n tail_dropoff_pos = self.coordinate_shift(tail, \\\r\n major_axis_length * self.tail_dropoff_threshold, \\\r\n major_axis_angle) \r\n \r\n head_dropoff_pos = self.coordinate_shift(head, \\\r\n - major_axis_length * self.head_dropoff_threshold,\\\r\n major_axis_angle)\r\n \r\n # find the intersection points with boundary_curve\r\n # minor_axis_angle is range from (-pi/2 , pi/2)\r\n if major_axis_angle < 0:\r\n minor_axis_angle = major_axis_angle + math.pi / 2\r\n else:\r\n minor_axis_angle = major_axis_angle - math.pi / 2\r\n \r\n if minor_axis_angle > 0:\r\n minor_axis_angle_prime = minor_axis_angle - math.pi\r\n else:\r\n minor_axis_angle_prime = minor_axis_angle + math.pi\r\n \r\n \r\n # find the two vetices of polygone which are close to tail \r\n min_diff_angle_0 = math.inf\r\n min_diff_angle_pi = math.inf\r\n \r\n curve = boundary.get_approx_curve(boundary.mode)\r\n \r\n for (r,c) in curve:\r\n # compute the angle for the line connecting (bd_x, bd_y) and tail_dropoff_pos\r\n bd_angle = self.compute_angle( (c,r), tail_dropoff_pos )\r\n \r\n diff_angle = np.abs( bd_angle - minor_axis_angle) \r\n if (diff_angle < min_diff_angle_0):\r\n min_diff_angle_0 = diff_angle\r\n pos_1 = (r,c)\r\n \r\n diff_angle = np.abs(bd_angle - minor_axis_angle_prime ) \r\n if (diff_angle < min_diff_angle_pi):\r\n min_diff_angle_pi = diff_angle\r\n pos_2 = (r,c)\r\n \r\n if pos_1[0] < pos_2[0]:\r\n self.__tail_poly_vertice_upper = pos_1\r\n self.__tail_poly_vertice_lower = pos_2\r\n else:\r\n self.__tail_poly_vertice_upper = pos_2\r\n self.__tail_poly_vertice_lower = pos_1\r\n \r\n # find the two vertices of polygone which are close to head\r\n min_diff_angle_0 = math.inf\r\n min_diff_angle_pi = math.inf\r\n for (r,c) in curve:\r\n # compute the angle for the line connecting (bd_x, bd_y) and head_dropoff_pos\r\n bd_angle = self.compute_angle( (c,r), head_dropoff_pos )\r\n # diff_angle is range from (0, pi)\r\n diff_angle = np.abs( bd_angle - minor_axis_angle) \r\n if (diff_angle < min_diff_angle_0):\r\n min_diff_angle_0 = diff_angle\r\n pos_1 = (r,c)\r\n diff_angle = np.abs(bd_angle - minor_axis_angle_prime ) \r\n if (diff_angle < min_diff_angle_pi):\r\n min_diff_angle_pi = diff_angle\r\n pos_2 = (r,c)\r\n \r\n if pos_1[0] < pos_2[0]:\r\n self.__head_poly_vertice_upper = pos_1\r\n self.__head_poly_vertice_lower = pos_2\r\n else:\r\n self.__head_poly_vertice_upper = pos_2\r\n self.__head_poly_vertice_lower = pos_1\r\n \r\n # construct the vertices of polygone\r\n self.vertices = np.array([self.__tail_poly_vertice_upper, \\\r\n self.__tail_poly_vertice_lower, \\\r\n self.__head_poly_vertice_lower, \\\r\n self.__head_poly_vertice_upper])\r\n \r\n self.vertices = np.round(self.vertices).astype(int)\r\n \r\n # creat area form vertices\r\n self.vertex2poly()",
"def boundary(self):\n return self.substrates.boundary",
"def apply_boundary_conditions(self):\n E = self.__mesh.get_edge_list()\n\n # Top and bottom wall Dirichlet bcs (boundary_id = 21)\n \n e21_iterator = self.__mesh.edge_iterator(21)\n\n self.edge_center_value[e21_iterator[0]:e21_iterator[1]+1] = 0.0 \n \n # Left Dirichlet bc (boundary_id = 2)\n \n e2_iterator = self.__mesh.edge_iterator(2)\n\n b = np.sin(np.pi*self.y_e[e2_iterator[0]:e2_iterator[1]+1])\n\n self.edge_center_value[e2_iterator[0]:e2_iterator[1]+1] \\\n = b\n \n # Right Neumann bc (Zero flux, boundary_id = 3)\n \n e3_iterator = self.__mesh.edge_iterator(3)\n \n for i in range(e3_iterator[0], e3_iterator[1]+1):\n LC = E[i].get_straddling_cells()\n n = LC.get_global_cell_number() - 1\n self.edge_center_value[i] = self.cell_centroid_value[n]",
"def getBoundariesOfimage(image):\n if np.ndim(image) == 2:\n sElement = ndimage.generate_binary_structure(2, 1)\n else:\n sElement = ndimage.generate_binary_structure(3, 1)\n erode_im = scipy.ndimage.morphology.binary_erosion(image, sElement)\n b = image - erode_im\n return b",
"def GetBoundaryEdgesTri(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n\n node_arranger = NodeArrangementTri(p-1)[0]\n\n # CONCATENATE ALL THE EDGES MADE FROM ELEMENTS\n all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]]),axis=0)\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES\n freqs_inv = itemfreq(inv)\n edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.edges = uniques[edges_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF EDGES\n all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)\n all_edges_in_edges = np.where(all_edges_in_edges==True)[0]\n\n boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]\n boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]\n self.edges = self.edges.astype(np.uint64)\n self.boundary_edge_to_element = boundary_edge_to_element\n\n return self.edges",
"def get_bounds(self):\n bottom_right = np.asarray([self.coords[k][0] for k in range(self.dim)])\n upper_left = np.asarray([self.coords[k][-1] for k in range(self.dim)])\n return bottom_right, upper_left",
"def get_bounds(self):\r\n left, bottom, front = 10000, 10000, 10000\r\n right, top, back = -10000, -10000, -10000\r\n for b in self.buf:\r\n for v in b.vertices:\r\n if v[0] < left:\r\n left = v[0]\r\n if v[0] > right:\r\n right = v[0]\r\n if v[1] < bottom:\r\n bottom = v[1]\r\n if v[1] > top:\r\n top = v[1]\r\n if v[2] < front:\r\n front = v[2]\r\n if v[2] > back:\r\n back = v[2]\r\n\r\n return (left, bottom, front, right, top, back)",
"def interior(self):\n return self - self.boundary",
"def real_boundaries(self):\n return (self._points[0][1], self._points[0][3])",
"def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages",
"def boundary_of_set(i):\n b = self.args[i].boundary\n for j, a in enumerate(self.args):\n if j != i:\n b = b - a.interior\n return b",
"def getCoveringRanges( self, left_ranges, right_ranges, parent_ranges ):\n \n child_ranges = map( lambda x: (x[0], x[1], 0), left_ranges)\n child_ranges += map( lambda x: (x[0], x[1], 1), right_ranges)\n \n child_ranges.sort()\n parent_ranges.sort()\n \n new_left_ranges = []\n new_right_ranges = []\n \n parent_index = 0\n last_to = 0\n \n parent_left, parent_right = parent_ranges[parent_index]\n\n self.debug( \"child_ranges=%s\" % str(child_ranges) )\n self.debug( \"parent_ranges=%s\" % str(parent_ranges))\n \n last_left, last_right, last_is_right = child_ranges[0]\n \n for this_left, this_right, this_is_right in child_ranges[1:]:\n \n ## look at previous segment last_left to last_right:\n ## find matching parent_index:\n old_parent_index = parent_index\n while (min(parent_right, last_right) - max(parent_left, last_left)) < 0:\n parent_index += 1\n if parent_index == len(parent_ranges): break\n parent_left, parent_right = parent_ranges[parent_index]\n \n ## skip fragments that do not overlap\n if parent_index == len(parent_ranges):\n parent_index = old_parent_index\n last_left, last_right, last_is_right = this_left, this_right, this_is_right\n continue\n \n ## firstly: make segment covering\n new_left = min(parent_left, last_left)\n new_right = min(max(parent_right, last_right), this_left)\n \n if last_is_right:\n new_right_ranges.append((new_left, new_right))\n else:\n new_left_ranges.append((new_left, new_right))\n \n ## reduce parent on left side\n parent_left=max(new_right, parent_left)\n \n last_left, last_right, last_is_right = this_left, this_right, this_is_right\n \n ## process last segment\n while (min(parent_right, last_right) - max(parent_left, last_left)) < 0:\n parent_index += 1\n if parent_index >= len(parent_ranges): break \n parent_left, parent_right = parent_ranges[parent_index]\n \n new_left = min(parent_left, last_left)\n new_right = max(parent_right, last_right)\n \n if last_is_right:\n new_right_ranges.append((new_left, new_right))\n else:\n new_left_ranges.append((new_left, new_right))\n \n self.debug( \"old left ranges=%s\" % str(left_ranges))\n self.debug( \"new left ranges=%s\" % str(new_left_ranges))\n self.debug( \"old right ranges=%s\" % str(right_ranges))\n self.debug( \"new right ranges=%s\" % str(new_right_ranges))\n \n return new_left_ranges, new_right_ranges",
"def num_complementary_regions(self):\n g = self._get_puncturefinder_graph()\n # return g.connected_components_number()\n return nx.number_connected_components(g)",
"def find_boundary(edges):\n\n inputs = set([x[0] for x in edges])\n outputs = set([x[1] for x in edges])\n for e in edges:\n inputs.discard(e[1])\n outputs.discard(e[0])\n return inputs, outputs"
] | [
"0.609764",
"0.60245264",
"0.58544064",
"0.5849384",
"0.57595223",
"0.57295775",
"0.56954753",
"0.5641807",
"0.5619031",
"0.55793244",
"0.5564037",
"0.5553801",
"0.55465585",
"0.554616",
"0.545568",
"0.54502296",
"0.5446974",
"0.5438838",
"0.5392316",
"0.53638506",
"0.53225857",
"0.52969897",
"0.52948296",
"0.5289381",
"0.5286825",
"0.5271726",
"0.52439433",
"0.5243813",
"0.5232911",
"0.5208898"
] | 0.6510515 | 0 |
Return the surface that is a regular neighborhood of ``self``. | def regular_neighborhood(self):
euler_char = self.num_switches() - self.num_branches()
return Surface(num_punctures=self.num_complementary_regions(),
euler_char=euler_char) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def surface(self):\n return self._surface",
"def getSurfAlongZ(self):\n\n return self._surf",
"def wireframe_only(self):\n return self._wireframe_only",
"def get_surface(self, new: bool = True) -> 'pygame.Surface':\n if new:\n return self.get_crop_rect(self.get_rect())\n return self._surface",
"def surface(self):\n return BRep_Tool_Surface(self.topods_shape())",
"def surface_mask(self):\n return np.vectorize(lambda name: name in self.nvertices.keys())(self.name)",
"def neighbor(self) -> Node:\r\n return self._neighbor",
"def interior(self):\n return Shape(self - self.edge('inner'))",
"def spatial(self):\n return self._spatial",
"def backbone(self, tol=1e-8):\n areneighbors, _ = self.neighbors(tol)\n\n # self.ndim + 1 for stability, +1 for itself\n notfloaters = np.sum(areneighbors, axis=0) >= self.ndim + 2\n\n oldNpack = -1\n Npack = np.sum(notfloaters)\n while Npack != oldNpack:\n areneighbors[~notfloaters] = 0\n areneighbors[:, ~notfloaters] = 0\n notfloaters = np.sum(areneighbors, axis=0) >= self.ndim + 2\n oldNpack, Npack = Npack, np.sum(notfloaters)\n\n return self.Backbone(notfloaters, areneighbors)",
"def isosurface(self):\n return self._isosurface()",
"def neighbors(self):\n return self.mesh.neighbors()",
"def get_neighbours(self):\n return self._neighbours",
"def get_neighbours(self):\n return self.neighbours",
"def surface_atoms(self):\n indx = self.surface_indices\n return self.cluster[indx]",
"def exposedSurf(self):\n if self.precision:\n h = self.evaluations.exposedWing.edges[1].point1.x # height of trapezoid\n B = self.chordRootW # major base of trapezoid\n b = self.evaluations.chordIntersected.edges[1].length # minor base of trapezoid\n internalS = 2 * (0.5 * (b + B) * h) # wing surface internal at fuselage\n return self.surfaceW - internalS\n else:\n return self.surfaceW - self.fuselageDiameter * self.cMACW # first guess for a faster evaluation",
"def findImmediateNeighbours(self):\n immediateNeighbours = []\n\n if self.xCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate - 1, self.yCoordinate))\n\n if self.xCoordinate + 1 < 395:\n immediateNeighbours.append(PixelPosition(self.xCoordinate + 1, self.yCoordinate))\n\n if self.yCoordinate + 1 < 500:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate + 1))\n\n if self.yCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate - 1))\n\n return immediateNeighbours",
"def facet_with_holes(self,):\n return self.facet_with_holes_",
"def getneighbors(self):\r\n\t\ti=self.cell[0]\r\n\t\tj=self.cell[1]\r\n\t\t\r\n\t\tw = self.width-1\r\n\t\tCenter = self.base[i][j]\r\n\t\tif(self.type==\"Neumann\"):\r\n\t\t\tif(j==w and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(i==w and 0<j<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(j==0 and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\tif(i==0 and 0<j<w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\r\n\t\t\tif(j==w and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(j==0 and i==0):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\r\n\t\t\tif(j==0 and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\r\n\t\t\tif(i==0 and j==w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tif(0<i<w and 0<j<w):\t\t\t\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\r\n\t\t\tself.surrounding = [North,South,East,West]\r\n\t\t\tself.binary= str(East)+str(West)+str(South)+str(North)+str(Center)\r\n\t\t\t\r\n\t\telif(self.type==\"Moore\"):\r\n\t\t\t\r\n\t\t\tif(j==w and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][0]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[i+1][0]\r\n\t\t\t\tSW = self.base[i+1][j-1]\r\n\t\t\tif(i==w and 0<j<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[0][j+1]\r\n\t\t\t\tSW = self.base[0][j-1]\r\n\t\t\tif(j==0 and 0<i<w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][w]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = self.base[i+1][w]\r\n\t\t\tif(i==0 and 0<j<w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[w][j+1]\r\n\t\t\t\tNW = self.base[w][j-1]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = self.base[i+1][j-1]\r\n\t\t\t\t\t\t\t\r\n\t\t\tif(j==w and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][0]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[0][0]\r\n\t\t\t\tSW = self.base[0][j-1]\r\n\t\t\tif(j==0 and 
i==0):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\t\tNE = self.base[w][j+1]\r\n\t\t\t\tNW = self.base[w][w]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = self.base[i+1][w]\r\n\t\t\tif(j==0 and i==w):\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[0][j]\r\n\t\t\t\tWest=self.base[i][w]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][w]\r\n\t\t\t\tSE = self.base[0][j+1]\r\n\t\t\t\tSW = self.base[0][w]\r\n\t\t\tif(i==0 and j==w):\r\n\t\t\t\tNorth=self.base[w][j]\r\n\t\t\t\tEast=self.base[i][0]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[w][0]\r\n\t\t\t\tNW = self.base[w][j-1]\r\n\t\t\t\tSE = self.base[i+1][0]\r\n\t\t\t\tSW = self.base[i+1][j-1]\r\n\t\t\tif(0<i<w and 0<j<w):\t\t\t\r\n\t\t\t\tNorth=self.base[i-1][j]\r\n\t\t\t\tEast=self.base[i][j+1]\r\n\t\t\t\tSouth=self.base[i+1][j]\r\n\t\t\t\tWest=self.base[i][j-1]\r\n\t\t\t\tNE = self.base[i-1][j+1]\r\n\t\t\t\tNW = self.base[i-1][j-1]\r\n\t\t\t\tSE = self.base[i+1][j+1]\r\n\t\t\t\tSW = self.base[i+1][j-1]\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tself.surrounding = [North,South,East,West,NE,NW,SE,SW]\r\n\t\t\tself.binary= str(East)+str(West)+str(South)+str(North)+str(Center)+str(NE)+str(NW)+str(SE)+str(SW)",
"def mask(self):\n mask = np.full(self._grid.shape, False)\n mask[np.ix_(self._lat_indices, self._lon_indices)] = True\n return mask",
"def get_ground_truth_camera_poses(self):\n return self._ground_truth_trajectory",
"def implicit_surface(self, F, y):\n y = y[:, :, None].expand(-1, -1, self.n_primitives, -1)\n y_latent, ldj = self.invertible_network.inverse(F, y)\n norm = torch.sqrt((y_latent**2).sum(-1))\n\n # <0 denotes internal points\n # >0 denotes external points\n # 0 is the boundary hence our primitive\n return norm - self.radius, ldj",
"def ground_vis(self) -> torch.Tensor:\n\n return utils.packed_cube_to_ground_cube(self.vis)",
"def get_surfaces(self):\n if not self.surfaces is None:\n return self.surfaces.copy()\n else:\n return None",
"def get_subsurface(self):\n w, h = self.rect.w, self.rect.h\n surface = pg.Surface((w, h))\n surface.set_colorkey((0, 0, 0))\n return pg.Surface((w, h))",
"def get_mask(self):\n w, h = self.rect.w, self.rect.h\n colorkey = (0, 0, 0)\n surface = pg.Surface((w, h))\n surface.set_colorkey(colorkey)\n # fill the surface with the spherical object\n color, center, radius = (255, 255, 255), self.rect.center, round(self.rect.w/2)\n pg.draw.circle(surface, color, center, radius)\n mask = pg.mask.from_surface(surface)\n return mask",
"def use_neighbor_placement(self):\n\n return self._use_neighbor_placement",
"def interior(self):\n return self - self.boundary",
"def solid(self):\r\n return not not self.prototype.solid",
"def test_d2_get_neighborhood_small(self):\n config.NR_COLS = 3\n config.NR_ROWS = 3\n gamefield = [\n [1, 0, 0],\n [1, 0, 0],\n [0, 1, 1],\n ]\n # top left\n nh = logic.get_neighborhood(gamefield, 0, 0)\n self.assertEqual(nh, 3)\n # top right\n nh = logic.get_neighborhood(gamefield, 0, 2)\n self.assertEqual(nh, 4)\n # bottom left\n nh = logic.get_neighborhood(gamefield, 2, 0)\n self.assertEqual(nh, 4)\n # bottom right\n nh = logic.get_neighborhood(gamefield, 2, 2)\n self.assertEqual(nh, 3)\n # center\n nh = logic.get_neighborhood(gamefield, 1, 1)\n self.assertEqual(nh, 4)"
] | [
"0.61511374",
"0.606822",
"0.59080124",
"0.5831154",
"0.5789176",
"0.5744674",
"0.5698036",
"0.56867176",
"0.568455",
"0.56453556",
"0.5628502",
"0.5598237",
"0.55960137",
"0.55879205",
"0.55872387",
"0.5559835",
"0.55027723",
"0.5469857",
"0.54697555",
"0.54540193",
"0.544791",
"0.5444681",
"0.54442185",
"0.5429617",
"0.54283834",
"0.5427631",
"0.5421475",
"0.54136884",
"0.5406401",
"0.5403724"
] | 0.7588659 | 0 |
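
A minimal sketch of the bookkeeping in the regular_neighborhood record above, under stated assumptions: the name Surface is taken from the record, but the stand-in class and helper below are hypothetical illustrations, not the library's own API.

class Surface:
    """Hypothetical stand-in: just stores the two defining numbers."""
    def __init__(self, num_punctures, euler_char):
        self.num_punctures = num_punctures
        self.euler_char = euler_char

    def __repr__(self):
        return f"Surface(num_punctures={self.num_punctures}, euler_char={self.euler_char})"

def regular_neighborhood_sketch(num_switches, num_branches, num_complementary_regions):
    # Euler characteristic of the neighborhood = #switches - #branches;
    # one puncture per complementary region, exactly as in the record above.
    return Surface(num_punctures=num_complementary_regions,
                   euler_char=num_switches - num_branches)

print(regular_neighborhood_sketch(1, 3, 2))  # Surface(num_punctures=2, euler_char=-2)
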
Return the number of cusps for each complementary region. | def num_cusps_of_regions(self):
G = self._get_puncturefinder_graph()
# return [sum(G.subgraph(vertices=region).edge_labels())
# for region in G.connected_components()]
return [sum(edge[2]['weight']
for edge in subgraph.edges(data=True))
for subgraph in nx.connected_component_subgraphs(G)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_complementary_regions(self):\n g = self._get_puncturefinder_graph()\n # return g.connected_components_number()\n return nx.number_connected_components(g)",
"def n_cs(self):\n return np.size(self._cs, 0)",
"def ncusps(self):\n n = self.level()\n return sum([arith.euler_phi(arith.gcd(d,n//d)) for d in n.divisors()])",
"def number_of_carnivores_island(self):\n return np.sum(self.carnivores_on_island)",
"def n_cf(self):\n return np.size(self._ref_ii, 0)",
"def get_contours_number(self):\n ncontour = len(self.x)\n logger.info(\"Number of contours: {0}\".format(ncontour))\n return ncontour",
"def voxel_count(self):\n return self.cols * self.rows * self.sections",
"def num_conll(self):\n pass",
"def carn_count(self):\n return len(self.carnivores)",
"def current_nbc_coverage():\n covered = 0\n total = 0\n for layer in layer_to_compute:\n covered = covered + np.count_nonzero(nbc_cov_dict[layer.name])\n total = total + np.size(nbc_cov_dict[layer.name])\n return covered / float(total)",
"def number_of_constituents(bc_class):\n num_trn = 0\n cn = bc_class.constituent_properties\n if cn.salinity:\n num_trn += 1\n if cn.temperature:\n num_trn += 1\n if cn.vorticity:\n num_trn += 1\n if not cn.general_constituents.empty:\n num_trn += len(cn.general_constituents.index)\n if not cn.sand.empty:\n num_trn += len(cn.sand.index)\n if not cn.clay.empty:\n num_trn += len(cn.clay.index)\n return num_trn",
"def nClumps(self):\n \n return len(self)",
"def cumulativeCadenceRevolutionCount(self):\n return (self.raw[4] << 8) | self.raw[3]",
"def countComponents26(cube):\n n,l = labelComponents26(cube);\n return n;",
"def N(self):\n return len(self.cavity_grid.cavities) + 1",
"def GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetNumberOfComponents()",
"def n_cf(self):\n return self.meta.n_caps - self.n_cs",
"def get_cpus_stats_count(self):\n\t\treturn call_sdk_function('PrlStat_GetCpusStatsCount', self.handle)",
"def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)",
"def n_cs(self):\n return self._configurations[0].n_cs",
"def count(self):\n return sum([self.bits[x][y] for x in range(self.n_rows)\n for y in range(self.n_columns)])",
"def num_clbits(self):\n return 0",
"def contig_count(contig):\n return sum([1 for line in open(contig, 'rU').readlines() if line.startswith('>')])",
"def itkRGBAPixelUC_GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetNumberOfComponents()",
"def n_cs(self):\n pass",
"def find_numerical_contours(counts):\n\tone_sigma_boundary = sigma_boundary(counts, 68)\n\tone_sigma = counts > one_sigma_boundary\n\ttwo_sigma_boundary = sigma_boundary(counts, 95)\n\ttwo_sigma = (counts > two_sigma_boundary) & (counts < one_sigma_boundary)\n\tthree_sigma_boundary = sigma_boundary(counts, 99)\n\tthree_sigma = (counts > three_sigma_boundary) & (counts < two_sigma_boundary)\n\n\t# Check method: Output actual percentages in each region\n\tprint('total no. samples:')\n\tprint(np.sum(counts))\n\tprint('included in 1st sigma region:')\n\tprint(np.sum(one_sigma * counts) / np.sum(counts))\n\tprint('included in 2 sigma region:')\n\tprint((np.sum(one_sigma * counts) + np.sum(two_sigma * counts)) / np.sum(counts))\n\tprint('included in 3 sigma region:')\n\tprint((np.sum(one_sigma * counts) + np.sum(two_sigma * counts) + np.sum(three_sigma * counts)) / np.sum(counts))\n\n\tfilled_numerical_contours = one_sigma * 1 + two_sigma * 2 + three_sigma * 3\n\n\treturn filled_numerical_contours",
"def csize(grades):\n\tp = 0\n\tfor k in grades:\n\t\tl = _comb(n,k)\n\t\tp += l\n\treturn p",
"def csize(grades):\n\tp = 0\n\tfor k in grades:\n\t\tl = _comb(n,k)\n\t\tp += l\n\treturn p",
"def get_num_carn_landscape(self):\n return len(self.carn_pop)",
"def _find_cusps(self):\n N = self.level()\n s = []\n\n for d in arith.divisors(N):\n w = arith.gcd(d, N//d)\n if w == 1:\n if d == 1:\n s.append(Cusp(1,0))\n elif d == N:\n s.append(Cusp(0,1))\n else:\n s.append(Cusp(1,d))\n else:\n for a in range(1, w):\n if arith.gcd(a, w) == 1:\n while arith.gcd(a, d//w) != 1:\n a += w\n s.append(Cusp(a,d))\n return sorted(s)"
] | [
"0.76018685",
"0.69323325",
"0.6927314",
"0.6893569",
"0.643692",
"0.6348874",
"0.62144196",
"0.61052084",
"0.61034214",
"0.6092677",
"0.6069627",
"0.6052347",
"0.5982089",
"0.597497",
"0.59650195",
"0.5963505",
"0.59482235",
"0.5927196",
"0.5901739",
"0.58945924",
"0.5891758",
"0.5891274",
"0.5891207",
"0.5861923",
"0.58540565",
"0.58310986",
"0.58184844",
"0.58184844",
"0.5810225",
"0.5807101"
] | 0.7510573 | 1 |
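
A hedged, self-contained re-run of the per-region weight sum from the num_cusps_of_regions record above. nx.connected_component_subgraphs was removed in NetworkX 2.4, so this sketch uses the current G.subgraph(...) idiom instead; the toy graph is a made-up stand-in for the puncture-finder graph.

import networkx as nx

G = nx.Graph()
G.add_edge(1, 2, weight=2)
G.add_edge(2, 3, weight=1)
G.add_edge(10, 11, weight=4)  # a second connected component

cusps_per_region = [
    sum(d['weight'] for _, _, d in G.subgraph(component).edges(data=True))
    for component in nx.connected_components(G)
]
print(cusps_per_region)  # [3, 4] -- one weight total per connected component
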
Return a graph to determine recurrence. | def _get_recurrence_graph(self):
try:
return self._recurrence_graph
except AttributeError:
pass
# g = DiGraph()
g = nx.DiGraph()
for i in range(self.num_switches()):
for ii in {-i-1, i+1}:
g.add_edges_from([(j, -k)
for j in self.outgoing_branches(ii)
for k in self.outgoing_branches(-ii)])
self._recurrence_graph = g
return g | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def recurrence(self):\n return self.__recurrence",
"def recurrence(self):\n if \"recurrence\" in self._prop_dict:\n if isinstance(self._prop_dict[\"recurrence\"], OneDriveObjectBase):\n return self._prop_dict[\"recurrence\"]\n else :\n self._prop_dict[\"recurrence\"] = PatternedRecurrence(self._prop_dict[\"recurrence\"])\n return self._prop_dict[\"recurrence\"]\n\n return None",
"def gen_graph(self):",
"def graph(self):\n ...",
"def graph(self):\n return self.__graph",
"def graph():\n graph = Graph()\n graph.insert(1, 2)\n graph.insert(0, 2, 3)\n graph.insert(0, 1, 3)\n graph.insert(1, 2)\n return graph",
"def graph(self) -> dict:\n return self.flat_graph()",
"def build_graph(self):\n raise NotImplementedError",
"def generate_dag_graph(self):\n # generate ranom graph\n G = nx.DiGraph()\n G.add_nodes_from(range(self.nodes))\n return self.fix_graph(G)",
"def graph(self):\n return self._graph",
"def graph(self):\n return self._graph",
"def build_graph(self):\n pass",
"def _get_full_graph(self):",
"def _build_graph(self):\n pass",
"def graph(self):\n\n return self._graph",
"def subgraphs_of_length(self, days=None, periods=None):\n graphs = []\n if days:\n sg_length = datetime.timedelta(days=days)\n else:\n sg_length = periods\n\n start_date = self.min_date\n end_date = start_date + sg_length\n done = False\n while not done:\n if start_date > self.max_date:\n break\n if end_date > self.max_date:\n # end_date = self.max_date\n done = True\n print(start_date, end_date)\n new = self.subgraph_within_dates(start_date, end_date)\n if new.nx_graph.number_of_edges():\n graphs.append(new)\n start_date += sg_length\n end_date += sg_length\n return graphs",
"def get_graph(self):\n return self._graph",
"def find_cycle(self):\n # from guido's blog :\n # http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html\n worklist = set(self.successors)\n while worklist:\n stack = [worklist.pop()]\n while stack:\n top = stack[-1]\n for node in self.successors.get(top, ()):\n try:\n # raises ValueError if node is not in stack.\n cycle = stack[stack.index(node) :]\n succs = dict(\n (source, [cycle[(i + 1) % len(cycle)]])\n for i, source in enumerate(cycle)\n )\n return Digraph(succs, self.get_score, self.get_label)\n except ValueError:\n pass\n if node in worklist:\n stack.append(node)\n worklist.remove(node)\n break\n else:\n stack.pop()\n return None",
"def get_graph(self, path):\n raise NotImplementedError",
"def get_dependency_graph(self):\n return self.graph",
"def build_graph(self):\n self._reset_iterator_memory()\n self._construct_graph_handler()\n assert self.graph_handler\n for rxn_id in self.graph_handler.get_valid_reaction_ids():\n rxn = db.Reaction(rxn_id, self._reactions)\n self.graph_handler.add_rxn(rxn)",
"def getGraph(self):\n\t\treturn self.graph",
"def build_graphs(self):\n kw = dict(weighted_by=self.attribute, logratio=self.logratio)\n return {_id: self.build_graph(r, **kw) for _id, r in self.replicates}",
"def find_reticulation_nodes(graph):\n # 2. If a node with more than two incoming edges is found, then\n # return null.\n degrees = graph.in_degree()\n if any(deg > 2 for deg in degrees.values()):\n return\n\n # 1. Perform a simple graph traversal in order to locate the\n # reticulation nodes.\n # 3. For every reticulation node find its two parents. Each of these\n # parents belongs to a chain of the gall.\n return {node: get_parents(graph, node) for node in graph\n if degrees[node] == 2}",
"def get_graph(self):\n return copy.deepcopy(self.graph)",
"def buildGraph(self):\n return None",
"def make_graph(self):\n # update the neighbors in the graph\n self.update_neighbors()\n\n # Go through each node and get their neighbors\n self.edges = []\n for node_name in self.nodes:\n\n # get the neighbors\n node_neighbors = self.nodes[node_name].neighbors\n\n # go through neighbors\n for neighbor_name in node_neighbors:\n\n # Make the edge key\n edge_key = \"-\".join(sorted([node_name, neighbor_name]))\n\n # Add it to the edge list if it is not already present\n if edge_key not in self.edges:\n\n self.edges.append(edge_key)\n\n return self.edges",
"def gen_graph(self, seed=None):\n block = make_residue_graph(self.molecule, attrs=('resid', 'resname'))\n resnames = nx.get_node_attributes(block, 'resname')\n graph = nx.Graph()\n graph.add_nodes_from(block.nodes)\n graph.add_edges_from(block.edges)\n nx.set_node_attributes(graph, resnames, \"resname\")\n return graph",
"def make_frequency_network( self, use_rmsd=False ):\n G = nx.DiGraph()\n min_nodes = []\n max_nodes = []\n\n def window(seq, n=2):\n it = iter(seq)\n result = tuple(itertools.islice(it, n))\n if len(result) == n:\n yield result\n for elem in it:\n result = result[1:] + (elem,)\n yield result\n\n data = {}\n for tp, df in self.groupby([\"frame\", \"neighbor\"]):\n transitions = list(window(list(df[\"position\"].unique())))\n rmsd = self[self[\"frame\"] == tp[0]][\"rmsd\"].max() - df[\"rmsd\"].values[0]\n rmsd = int(rmsd * 100)\n for tr in transitions:\n n = df[df[\"position\"] == tr[0]][\"aa\"].values[0]\n c = df[df[\"position\"] == tr[1]][\"aa\"].values[0]\n if use_rmsd:\n data.setdefault(tr[0], []).extend([(n, c), ] * rmsd)\n else:\n data.setdefault(tr[0], []).append((n, c))\n\n for k in data:\n options = len(data[k])\n data[k] = Counter(data[k])\n options = data[k].most_common(1)[0][1]\n for p in data[k]:\n n = str(k) + p[0]\n G.add_node(n, order=k, type=p[0])\n c = str(k + 1) + p[1]\n G.add_node(c, order=k + 1, type=p[1])\n G.add_edge(n, c, weight=(options - data[k][p]) / options)\n if k == min(data):\n min_nodes.append(n)\n if k == max(data):\n max_nodes.append(c)\n\n for node in min_nodes:\n G.add_node(\"0X\", order=0, type=\"X\")\n G.add_edge(\"0X\", node, weight=0)\n for node in max_nodes:\n G.add_node(\"-1X\", order=max(data) + 2, type=\"X\")\n G.add_edge(node, \"-1X\", weight=0)\n\n return G",
"def get_graph(self):\n self.array = self.get_field_array()\n self.r_long_row = self.__around_long_row()\n self.r_short_row = self.__around_short_row()\n grath = {}\n max_y = self.number_of_squares[1]\n max_x = self.number_of_squares[0]\n for i in range(1, max_y + 1):\n if i % 2 == 0:\n end = 2\n r = self.r_long_row\n else:\n end = 1\n r = self.r_short_row\n for j in range(1, max_x + end):\n\n if self.array[i - 1][j - 1] not in grath:\n grath[self.array[i - 1][j - 1]] = set()\n for i_s, j_s in r:\n if 0 < i + i_s < max_y + 1 and 0 < j + j_s < len(self.array[i + i_s - 1]) + 1:\n grath[self.array[i - 1][j - 1]].add(self.array[i + i_s - 1][j + j_s - 1])\n self.grath = grath\n return self.grath"
] | [
"0.65752923",
"0.5953535",
"0.5814856",
"0.5806418",
"0.58059746",
"0.5798361",
"0.5738831",
"0.57215935",
"0.5717185",
"0.5680185",
"0.5680185",
"0.5676411",
"0.5653283",
"0.56387323",
"0.56341726",
"0.5588331",
"0.5572233",
"0.557046",
"0.5558288",
"0.55452794",
"0.55316305",
"0.5515264",
"0.55083275",
"0.54882693",
"0.54855895",
"0.5478912",
"0.5476385",
"0.5422391",
"0.5394754",
"0.53772336"
] | 0.7925507 | 0 |
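
A toy illustration of the edge construction in the _get_recurrence_graph record above: for each switch, every outgoing branch j on one side is joined to -k for every outgoing branch k on the opposite side. The outgoing dict below is an invented stand-in for outgoing_branches().

import networkx as nx

outgoing = {1: [1, 2], -1: [-3]}  # hypothetical switch: branches 1, 2 on one side, 3 on the other

g = nx.DiGraph()
for side in (1, -1):
    g.add_edges_from((j, -k)
                     for j in outgoing[side]
                     for k in outgoing[-side])
print(sorted(g.edges()))  # [(-3, -2), (-3, -1), (1, 3), (2, 3)]
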
Test if ``self`` is recurrent. A train track is recurrent if it admits a strictly positive measure. Equivalently, it is recurrent if it is possible to get from any branch to any other branch (not necessarily in both directions) along train paths. | def is_recurrent(self):
G = self._get_recurrence_graph()
# C = G.strongly_connected_components()
first_component = next(nx.strongly_connected_components(G))
abs_numbers = {abs(x) for x in first_component}
# return sorted(list(set([abs(x) for x in C[0]]))) == \
# range(1, self.num_branches()+1)
return abs_numbers == set(range(1, self.num_branches()+1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def recurrent(self):\n if isinstance(self.network, BaseRNN):\n return True\n else:\n return False",
"def recurrent(self):\n return False",
"def is_cyclically_reduced(self):\n if not self:\n return True\n return self[0] != self[-1]**-1",
"def recurrent(self):\n pass",
"def convergence_on_track(self):\n\n on_track = True\n threshold = 5. # used to check condition if at least one of charnge_neutrality, rms-error goes down fast enough\n\n # first check if previous calculation was stopped due to reaching the QBOUND limit\n try:\n calc_reached_qbound = self.ctx.last_calc.outputs.output_parameters.get_dict(\n )['convergence_group']['calculation_converged']\n except AttributeError: # captures error when last_calc dies not have an output node\n calc_reached_qbound = False\n except KeyError: # captures\n calc_reached_qbound = False\n\n if self.ctx.kkrimp_step_success and not calc_reached_qbound:\n first_rms = self.ctx.last_rms_all[0]\n # skip first if this is the initial LDA+U iteration because there we see the original non-LDAU convergence value\n if 'settings_LDAU' in self.inputs and self.ctx.loop_count < 2 and len(self.ctx.last_rms_all) > 1:\n first_rms = self.ctx.last_rms_all[1]\n last_rms = self.ctx.last_rms_all[-1]\n # use this trick to avoid division by zero\n if last_rms == 0:\n last_rms = 10**-16\n r = last_rms / first_rms\n message = f'INFO: convergence check: first/last rms {first_rms}, {last_rms}'\n self.report(message)\n if r < 1:\n message = 'INFO: convergence check: rms goes down'\n self.report(message)\n on_track = True\n elif r > threshold:\n message = 'INFO: convergence check: rms goes up too fast, convergence is not expected'\n self.report(message)\n on_track = False\n elif len(self.ctx.last_rms_all) == 1:\n message = 'INFO: convergence check: already converged after single iteration'\n self.report(message)\n on_track = True\n else:\n message = 'INFO: convergence check: rms does not shrink fast enough, convergence is not expected'\n self.report(message)\n on_track = False\n elif calc_reached_qbound:\n message = 'INFO: convergence check: calculation reached QBOUND'\n self.report(message)\n on_track = True\n else:\n message = 'INFO: convergence check: calculation unsuccessful'\n self.report(message)\n on_track = False\n\n message = f'INFO: convergence check result: {on_track}'\n self.report(message)\n\n return on_track",
"def recall(self):\n tpos, fneg = len(self.__true_positives), len(self.__false_negatives)\n return float(tpos) / (tpos + fneg)",
"def iswalking(self):\r\n return self.model.coord!=self.model.targetcoord",
"def __bool__(self) -> bool:\n if self.initial_value == 1 and self.number_of_steps == 0:\n return True\n return False",
"def is_reflexive(self):\n return self.prop.is_reflexive()",
"def should_reset(self, current_time_step: ts.TimeStep) -> bool:\n handle_auto_reset = getattr(self, '_handle_auto_reset', False)\n return handle_auto_reset and np.all(current_time_step.is_last())",
"def __call__(self, trainer):\n updater = trainer.updater\n if self.unit == 'epoch':\n prev = self.count\n self.count = updater.epoch_detail // self.period\n return prev != self.count\n else:\n iteration = updater.iteration\n return iteration > 0 and iteration % self.period == 0",
"def recalcRelativeRates(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n TreeLikelihoodBase.recalcRelativeRates(self)",
"def one_step_back(self):\n if (self.row -1<0):\n return False\n elif (self.battery == 0):\n return False\n elif (self.maze[self.row - 1][self.column] == False):\n return False\n else:\n self.row -= 1\n self.battery -= 1\n return True",
"def include_renditions(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"include_renditions\")",
"def converged(self) -> bool:\n assert self._coords is not None\n\n if self._converged_translation:\n logger.info(\n \"Converged purely based on translation of the \"\n \"dimer midpoint\"\n )\n return True\n\n rms_g0 = np.sqrt(np.mean(np.square(self._coords.g0)))\n return self.iteration > 0 and rms_g0 < self.gtol",
"def IsRerun(self):\n return self.prev_test_context is not None",
"def trend_retrace_up(self):\n raise NotImplementedError()",
"def trend_retrace_down(self):\n raise NotImplementedError()",
"def one_step_left(self):\n if (self.column-1 <0):\n return False\n elif (self.battery == 0):\n return False\n elif (self.maze[self.row][self.column-1] == False):\n return False\n else:\n self.column-=1\n self.battery -= 1\n return True",
"def check_status(self, base):\n change = False\n # Trigger intensification\n if self.curr_i == self.I:\n self.curr_i = 0\n base = self.search_intensification()\n change = True\n # Trigger diversification\n elif self.curr_d == self.D:\n self.curr_d = 0\n base = self.search_diversification()\n change = True\n # Trigger step reduction\n elif self.curr_r == self.R:\n self.curr_r = 0\n # Start from best point found so far\n base = self.MTM[[-1], :-1].T\n self.update_STM(base)\n self.update_LTM(base)\n self.step = self.step_red * self.step\n\n if change:\n curr_obj = self.obj_wrap(base)\n self.update_MTM(base, curr_obj)\n self.update_STM(base)\n self.update_LTM(base)\n self.bases = np.block([[self.bases],[base.T, curr_obj]])\n\n return base",
"def relu(self):\n return self * self.ge(0)",
"def one_step_right(self):\n if (self.column+ 1 >=len(self.maze[0])):\n return False\n elif (self.battery == 0):\n return False\n elif (self.maze[self.row][self.column+1] == False):\n return False\n else:\n self.column += 1\n self.battery -= 1\n return True",
"def goal_test(self):\n if -1 in self.state:\n return False\n else:\n return True",
"def isrelative(self):\n return bool(self.control & gdef.SE_SELF_RELATIVE)",
"def one_step_forward(self):\n if(self.row+1>=len(self.maze)):\n return False\n elif(self.battery==0):\n return False\n elif(self.maze[self.row+1][self.column]==False):\n return False\n else:\n self.row+=1\n self.battery-=1\n return True",
"def solveOneStep(self):\n ### Student code goes here\n if self.currentState not in self.visited:\n self.visited[self.currentState]=True\n return self.currentState.state == self.victoryCondition\n\n if self.currentState.state == self.victoryCondition:\n self.visited[self.currentState]=True\n return True\n\n if not self.currentState.children:\n for move in self.gm.getMovables():\n self.gm.makeMove(move)\n childrenState = GameState(self.gm.getGameState(), self.currentState.depth+1, move)\n if childrenState not in self.visited:\n childrenState.parent = self.currentState\n self.currentState.children.append(childrenState)\n self.gm.reverseMove(move)\n\n if self.currentState.nextChildToVisit<len(self.currentState.children):\n nextState = self.currentState.children[self.currentState.nextChildToVisit]\n self.currentState.nextChildToVisit += 1\n self.gm.makeMove(nextState.requiredMovable)\n self.currentState = nextState\n return self.solveOneStep()\n else:\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n return self.solveOneStep()",
"def decreasing(self):\n return not self.direction()",
"def get_resave(self) -> bool:\n return self.resave",
"def recall(y_true, y_pred):\n true_positives = backend.sum(backend.round(backend.clip(y_true * y_pred, 0, 1)))\n possible_positives = backend.sum(backend.round(backend.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + backend.epsilon())\n return recall",
"def prev(self):\n return bool(self._ll_tree.prev())"
] | [
"0.70996857",
"0.6100841",
"0.5575293",
"0.5572439",
"0.55266136",
"0.5422163",
"0.53721553",
"0.5236663",
"0.52052224",
"0.52017254",
"0.5199006",
"0.5194531",
"0.5170744",
"0.5151156",
"0.51300734",
"0.5124644",
"0.50830173",
"0.50633717",
"0.5056852",
"0.5052298",
"0.50262195",
"0.5004036",
"0.49997872",
"0.49770424",
"0.49438736",
"0.49346536",
"0.4931211",
"0.49210906",
"0.4909212",
"0.48974293"
] | 0.65484834 | 1 |
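
A small, self-contained version of the recurrence test in the is_recurrent record above: take one strongly connected component and check that its branch labels, up to sign, cover every branch number. The digraph here is invented for illustration.

import networkx as nx

g = nx.DiGraph([(1, 2), (2, 3), (3, 1), (-1, -3), (-3, -2), (-2, -1)])
num_branches = 3

component = next(nx.strongly_connected_components(g))
is_recurrent = {abs(x) for x in component} == set(range(1, num_branches + 1))
print(is_recurrent)  # True for this toy graph
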
Find the coarse location of an input PSF by finding the brightest checkbox. This function uses a 2-dimensional image as input, and finds the brightest checkbox of a given size in the image. | def checkbox_2D(image, checkbox, debug=False):
# Calculate the checkbox half-width
chw = (checkbox - 1) / 2
# Calculate the image size
xsize, ysize = image.shape[1], image.shape[0]
# Calculate the x and y widths of checkbox region
xwidth, ywidth = xsize - checkbox + 1, ysize - checkbox + 1
# If the checkbox size is not equal to both the X and Y sizes,
# find the pixel with the brightest checkbox
if checkbox != xsize and checkbox != ysize:
xpeak = 0
ypeak = 0
sumpeak = 0
for ii in xrange(xsize - checkbox):
for jj in xrange(ysize - checkbox):
t = np.sum(image[jj:jj+checkbox, ii:ii+checkbox])
if t > sumpeak:
xpeak = ii + chw + 1
ypeak = jj + chw + 1
sumpeak = t
print('(checkbox_2D): Checkbox not equal to both x/ysize.')
print()
# If the checkbox size is equal to both the X and Y sizes
if checkbox == xsize and checkbox == ysize:
xpeak = xsize / 2
ypeak = ysize / 2
sumpeak = np.sum(image, axis=None)
print('(checkbox_2D): Checkbox equal to x/ysize.')
print()
# Print calculated checkbox center, and sum within checkbox centroid
# Find the checkbox region half-width in x and y
xhw = xwidth / 2
yhw = ywidth / 2
if xpeak < xhw or xpeak > xsize - xhw or ypeak < yhw or ypeak > ysize - yhw:
print('(checkbox_2D): WARNING - Peak too close to edge of image.')
print()
# NOTE: Use this section of the input image is a subset of a larger image
# Not currently needed for this analysis
# # Determine the center of the brightest checkbox, in extracted
# # image coordinates
# xpeak = xpeak + xhw
# ypeak = ypeak + yhw
# Debug messages
if debug:
print('(checkbox_2D): chw = ', chw)
print('(checkbox_2D): xsize, ysize = {}, {}'.format(xsize, ysize))
print('(checkbox_2D): xwidth, ywidth = {}, {}'.format(xwidth, ywidth))
print('(checkbox_2D): xpeak, ypeak = {}, {}'.format(xpeak, ypeak))
print('(checkbox_2D): sumpeak = ', sumpeak)
print('(checkbox_2D): xhw, yhw = {}, {}'.format(xhw, yhw))
print()
checkbox_ctr = np.array((xpeak, ypeak))
checkbox_hfw = np.array((xhw, yhw))
return checkbox_ctr, checkbox_hfw | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkbox_1D(image, checkbox, debug=False):\n \n # Collapse input image, currently onto X axis\n # Reshape to reflect collapse onto x axis\n vector = np.sum(image, axis=0)\n print('(checkbox_1D): Image collapsed into 1D vector.')\n print()\n \n # Calculate the checkbox half-width\n chw = (checkbox - 1) / 2\n\n \n # Calculate the image size\n xsize, ysize = image.shape[1], image.shape[0]\n \n # Calculate the x and y widths of checkbox region\n xwidth = xsize - checkbox + 1\n\n # If the checkbox size is not equal to both the X and Y sizes, \n # find the pixel with the brightest checkbox\n if checkbox != xsize and checkbox != ysize:\n xpeak = 0\n ypeak = 1\n sumpeak = 0\n for ii in xrange(xsize - checkbox):\n t = np.sum(vector[ii:ii+checkbox])\n if t > sumpeak:\n xpeak = ii + 1\n sumpeak = t\n\n print('(checkbox_1D): Checkbox not equal to xsize.')\n \n \n # If the checkbox size is equal to both the X and Y sizes\n if checkbox == xsize:\n xpeak = xsize / 2\n sumpeak = np.sum(vector, axis=None)\n \n print('(checkbox_1D): Checkbox equal to xsize.')\n \n # Print checkbox center and peak around centroid region\n\n # Find the checkbox region half-width in x and y\n xhw = xwidth / 2\n \n if xpeak < xhw or xpeak > xsize - xhw:\n print('(checkbox_1D): WARNING - Peak too close to edge of image.')\n \n \n # Debug messages\n if debug:\n print('(checkbox_1D): chw = ', chw)\n print('(checkbox_1D): xhw = ', xhw)\n print('(checkbox_1D): xsize = ', xsize)\n print('(checkbox_1D): xwidth = ', xwidth)\n print('(checkbox_1D): xpeak = ', xpeak)\n print('(checkbox_1D): sumpeak = ', sumpeak)\n print() \n \n# NOTE: Use this section of the input image is a subset of a larger image\n# Not currently needed for this analysis\n# # Determine the center of the brightest checkbox, in extracted\n# # image coordinates\n# xpeak = xpeak + xhw\n \n return xpeak, xhw",
"def _locate_finder_in_square(image, transform, size):\n radius = int(round(size/2))\n center = transform.trans\n angle = transform.rot\n\n rotated = image.rotate(angle, center)\n\n sx1, sy1 = center.x-radius, center.y-radius\n sx2, sy2 = center.x+radius, center.y+radius\n thick = int(round(size / 14))\n\n # Top\n x1, y1 = sx1, sy1\n x2, y2 = sx2, sy1 + thick\n top = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Left\n x1, y1 = sx1, sy1\n x2, y2 = sx1 + thick, sy2\n left = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Bottom\n x1, y1 = sx1, sy2 - thick\n x2, y2 = sx2, sy2\n bottom = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Right\n x1, y1 = sx2 - thick, sy1\n x2, y2 = sx2, sy2\n right = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Identify finder edges\n if top < bottom and left < right:\n c1 = [sx1, sy1]\n c2 = [sx1, sy2]\n c3 = [sx2, sy1]\n elif top < bottom and right < left:\n c1 = [sx2, sy1]\n c2 = [sx1, sy1]\n c3 = [sx2, sy2]\n elif bottom < top and left < right:\n c1 = [sx1, sy2]\n c2 = [sx2, sy2]\n c3 = [sx1, sy1]\n elif bottom < top and right < left:\n c1 = [sx2, sy2]\n c2 = [sx2, sy1]\n c3 = [sx1, sy2]\n else:\n return None\n\n # rotate points around center of square\n c1 = _rotate_around_point(Point.from_array(c1), angle, center)\n c2 = _rotate_around_point(Point.from_array(c2), angle, center)\n c3 = _rotate_around_point(Point.from_array(c3), angle, center)\n\n # Create finder pattern\n c1 = c1.intify()\n side1 = (c2 - c1).intify()\n side2 = (c3 - c1).intify()\n fp = FinderPattern(c1, side1, side2)\n\n return fp",
"def find_eyes(\n image_path: str, base_img_size: int\n ) -> Tuple[Tuple[int, int], Tuple[int, int]]:\n # TODO Make this a filter_eyes thing that also checks relative coords \n eye_cascade = cv2.CascadeClassifier('templates/cascades/haarcascade_eye.xml')\n cv2_img = cv2.imread(image_path)\n gray = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2GRAY)\n\n image_path = eye_cascade.detectMultiScale(gray)\n largest_eyes = sorted(image_path, key=lambda eye: eye[3], reverse=True)[:min(3, len(image_path))]\n\n # Find the eyes that are the most similar in size and\n\n size_thresh = base_img_size // 15\n for i in range(len(largest_eyes)):\n for j in range(len(largest_eyes[i:])):\n if i == j:\n continue\n elif abs(largest_eyes[i][3] - largest_eyes[j][3]) < size_thresh:\n return largest_eyes[i], largest_eyes[j]",
"def extract_roi(img):\n roi1 = get_largest_blob(img, invert=True)\n roi2 = get_largest_blob(img, invert=False)\n if roi1.shape[0]*roi1.shape[1] > roi2.shape[0]*roi2.shape[1]:\n return roi1\n else:\n return roi2",
"def pick_largest(self, cut_off):\r\n for i in range(self.dimension):\r\n m = self.masked[int(self.rank_yx(self.rank[i])[0]) # locating the corresponding mark array\r\n ,int(self.rank_yx(self.rank[i])[1])]\r\n if m * self.image_data[i] == self.image_data[i]:\r\n if self.image_data[i] <= cut_off:\r\n print(\"Surveying completed\")\r\n return -1,-1 # returns -1,-1 if scan is completed\r\n else:\r\n return self.image_data[i], np.array(self.rank[i])",
"def find_img2d_candidates(image, **kwargs):\n\n # filter_kernel = np.array([[-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225]])\n\n filter_kernel = np.array([[-2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2/324, -2/324, -2/324],\n [-2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -2/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, 
-1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -2/324, -2/324],\n [-2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2/324, -2/324, -2/324]])\n\n res = sg.convolve2d(image, filter_kernel, mode='same', boundary='fill', fillvalue=0)\n coord_x, coord_y = find_max_coords(np.absolute(res))\n\n return coord_x, coord_y",
"def thresh_frame_sobel(frame, kernel_size):\n\tgray = frame.gray\n\t# side x\n\tsobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel_size)\n\t# side y\n\tsobel_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel_size)\n\n\tsobel_mag = np.sqrt(sobel_x ** 2 + sobel_y ** 2)\n\tsobel_mag = np.uint8(sobel_mag / np.max(sobel_mag) * 255)\n\n\t_, sobel_mag = cv2.threshold(sobel_mag, 50, 1, cv2.THRESH_BINARY)\n\treturn sobel_mag.astype(bool)",
"def find_brightest_biggest(filename, catname=\"sources.cat\", config=\"config.sex\", minsize=10,\n minflux=450000):\n # Get dimensions of the image\n hdulist = pf.open(filename)\n (height, width) = hdulist[0].data.shape\n bloblist = manual_blob_finder(hdulist[0].data)\n hdulist.close()\n\n if bloblist is None:\n return None\n\n sort_ind = np.argsort(bloblist['max'])[::-1]\n blob_ind = None\n for ind in sort_ind:\n if bloblist['width'][ind] >= minsize and bloblist['flux'][ind] > minflux:\n blob_ind = ind\n break\n\n if blob_ind is None:\n return None\n\n return (bloblist['cent_x'][blob_ind] - (float(width) / 2.),\n bloblist['cent_y'][blob_ind] - (float(height) / 2.),\n bloblist['flux'][blob_ind],\n bloblist['width'][blob_ind],\n bloblist['max'][blob_ind])\n\n '''\n # Use sextractor to find blobs.\n # N.B. may be tuning of parameters, but this was mostly unreliable and noisy.\n\n hdulist.close()\n\n # Source extract\n call([\"sextractor\", filename, \"-c\", config, \"-CATALOG_NAME\", catname])\n\n # Load the catalog file\n srclist = pf.open(catname)\n srctable = srclist[2].data\n sort_ind = np.argsort(srctable['FLUX_MAX'])[::-1]\n blob_ind = None\n for ind in sort_ind:\n if (srctable['FLUX_RADIUS'][ind] > minsize and srctable['FLUX_MAX'][ind] > minflux and\n srctable['FLUX_RADIUS'][ind] < maxradius and srctable['FLUX_MAX'][ind] < maxflux):\n blob_ind = ind;\n break\n if blob_ind is None:\n return None\n return (srctable['X_IMAGE'][blob_ind] - (float(width) / 2.), \n srctable['Y_IMAGE'][blob_ind] - (float(height) / 2.),\n srctable['FLUX_MAX'][blob_ind],\n srctable['FLUX_RADIUS'][blob_ind],\n srctable['SNR_WIN'][blob_ind])\n '''",
"def locate_face(image, minNeighbors=5, scaleFactor=1.05):\n rects = cc_face.detectMultiScale(image, scaleFactor=scaleFactor, minNeighbors=minNeighbors)\n return max(rects, key=rect_area)",
"def getCrossKernel(size = (5,5)):\n\treturn cv2.getStructuringElement(cv2.MORPH_CROSS, size)",
"def get_location(bbox, image_size):\n\n # Get the center point for the face bounding-box\n face_x, face_y = get_center_point(bbox)\n\n # Get coordinates for each box in the raspimon field of view (FOV)\n fov_bboxes = get_fov_bboxes(image_size)\n\n # Find which FOV box currently holds the center point\n for index, fov_bbox in enumerate(fov_bboxes):\n if is_point_in_box(face_x, face_y, fov_bbox):\n return index\n return None",
"def identify_image(im):\n score_cures = np.mean(im[1025:1065, 1130:1180, 0])\n score_ingredients = np.mean(im[1025:1065, 675:720, 0])\n if score_cures < 177.5:\n return 'cures'\n if score_ingredients < 177.5:\n return 'ingredients'\n else:\n return 'other'",
"def get_bounding_box(img):\n rows = np.any(img, axis=1)\n cols = np.any(img, axis=0)\n rmin, rmax = np.where(rows)[0][[0, -1]]\n cmin, cmax = np.where(cols)[0][[0, -1]]\n # due to python indexing, need to add 1 to max\n # else accessing will be 1px in the box, not out\n rmax += 1\n cmax += 1\n return [rmin, rmax, cmin, cmax]",
"def find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size,\n hist_bins):\n draw_img = np.copy(img)\n img_tosearch = img[ystart:ystop, :, :]\n ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')\n if scale != 1:\n imshape = ctrans_tosearch.shape\n ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1] / scale), np.int(imshape[0] / scale)))\n\n # Channel extraction\n ch1 = ctrans_tosearch[:, :, 0]\n ch2 = ctrans_tosearch[:, :, 1]\n ch3 = ctrans_tosearch[:, :, 2]\n\n # Define blocks and steps as above\n nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1\n nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1\n\n # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell\n window = 64\n nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1\n cells_per_step = 2 # Instead of overlap, define how many cells to step\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step\n\n # Compute individual channel HOG features for the entire image\n hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)\n detections = []\n\n for xb in range(nxsteps):\n for yb in range(nysteps):\n ypos = yb * cells_per_step\n xpos = xb * cells_per_step\n # Extract HOG for this patch\n\n # Flatten the HOG features for each channel position\n hog_feat1 = hog1[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat2 = hog2[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat3 = hog3[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n # Build hog_features array by stacking each channel\n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n xleft = xpos * pix_per_cell\n ytop = ypos * pix_per_cell\n\n # Extract the image patch\n subimg = cv2.resize(ctrans_tosearch[ytop:ytop + window, xleft:xleft + window], (64, 64))\n\n # Get color features\n spatial_features = bin_spatial(subimg, size=spatial_size)\n hist_features = color_hist(subimg, nbins=hist_bins)\n\n # Scale features and make a prediction\n test_features = X_scaler.transform(\n np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))\n test_prediction = svc.predict(test_features)\n confidence = svc.decision_function(test_features)\n\n if test_prediction == 1 and confidence > 0.6:\n xbox_left = np.int(xleft * scale)\n ytop_draw = np.int(ytop * scale)\n win_draw = np.int(window * scale)\n x1 = xbox_left\n y1 = ytop_draw + ystart\n x2 = xbox_left + win_draw\n y2 = ytop_draw + win_draw + ystart\n detections.append((x1, y1, x2, y2))\n return detections",
"def find_cars_image(image, clf, hyperparams, box_color=None, old_heatmap=None):\n heat_threshold = hyperparams[\"HEAT_THRESHOLD\"]\n # Scan the image and get the new heatmap\n _, heatmap = scan_multiple_win_sizes(image, clf, hyperparams, box_colors=None)\n # Build an aggregated heatmap of this heatmap and the old heatmap\n agg_heatmap = old_heatmap+heatmap if old_heatmap is not None else heatmap\n # Apply threshold to find cars\n thresh_heatmap = apply_threshold(agg_heatmap, heat_threshold)\n # Label cars\n labels_heatmap, n_cars = label(thresh_heatmap)\n # Draw labeled boxes and get bounding boxes\n draw_image, bboxes = draw_labeled_bboxes(np.zeros_like(image), labels_heatmap, n_cars, box_color=box_color)\n # Return values\n return bboxes, draw_image, labels_heatmap, heatmap, agg_heatmap",
"def get_largest_two_component(img, print_info = False, threshold = None):\n s = ndimage.generate_binary_structure(3,2) # iterate structure\n labeled_array, numpatches = ndimage.label(img,s) # labeling\n sizes = ndimage.sum(img,labeled_array,range(1,numpatches+1)) \n sizes_list = [sizes[i] for i in range(len(sizes))]\n sizes_list.sort()\n if(print_info):\n print('component size', sizes_list)\n if(len(sizes) == 1):\n out_img = img\n else:\n if(threshold):\n out_img = np.zeros_like(img)\n for temp_size in sizes_list:\n if(temp_size > threshold):\n temp_lab = np.where(sizes == temp_size)[0] + 1\n temp_cmp = labeled_array == temp_lab\n out_img = (out_img + temp_cmp) > 0\n return out_img\n else: \n max_size1 = sizes_list[-1]\n max_size2 = sizes_list[-2]\n max_label1 = np.where(sizes == max_size1)[0] + 1\n max_label2 = np.where(sizes == max_size2)[0] + 1\n component1 = labeled_array == max_label1\n component2 = labeled_array == max_label2\n if(max_size2*10 > max_size1):\n component1 = (component1 + component2) > 0\n out_img = component1\n return out_img",
"def get_largest_two_component(img, print_info = False, threshold = None):\n s = ndimage.generate_binary_structure(3,2) # iterate structure\n labeled_array, numpatches = ndimage.label(img,s) # labeling\n sizes = ndimage.sum(img,labeled_array,range(1,numpatches+1)) \n sizes_list = [sizes[i] for i in range(len(sizes))]\n sizes_list.sort()\n if(print_info):\n print('component size', sizes_list)\n if(len(sizes) == 1):\n out_img = img\n else:\n if(threshold):\n out_img = np.zeros_like(img)\n for temp_size in sizes_list:\n if(temp_size > threshold):\n temp_lab = np.where(sizes == temp_size)[0] + 1\n temp_cmp = labeled_array == temp_lab\n out_img = (out_img + temp_cmp) > 0\n return out_img\n else: \n max_size1 = sizes_list[-1]\n max_size2 = sizes_list[-2]\n max_label1 = np.where(sizes == max_size1)[0] + 1\n max_label2 = np.where(sizes == max_size2)[0] + 1\n component1 = labeled_array == max_label1\n component2 = labeled_array == max_label2\n if(max_size2*10 > max_size1):\n component1 = (component1 + component2) > 0\n out_img = component1\n return out_img",
"def get_params(img, output_size):\r\n h, w, _ = img.shape\r\n th, tw = output_size\r\n\r\n if h + 1 < th or w + 1 < tw:\r\n raise ValueError(\r\n \"Required crop size {} is larger then input image size {}\".format((th, tw), (h, w))\r\n )\r\n\r\n if w == tw and h == th:\r\n return 0, 0, h, w\r\n\r\n i = torch.randint(0, h - th + 1, size=(1,)).item()\r\n j = torch.randint(0, w - tw + 1, size=(1,)).item()\r\n return i, j, th, tw",
"def getEllipticalKernel(size = (5,5)):\n\treturn cv2.getStructuringElement(cv2.MORPH_ELLIPSE, size)",
"def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 0)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size",
"def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 0)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size",
"def find_cars(img, scale):\n img_boxes = [] # Clears img_boxes so we don't keep unwanted heatmap history\n count = 0\n draw_img = np.copy(img)\n\n # Make a heatmap of zeros\n heatmap = np.zeros_like(img[:, :, 0])\n\n # IMPORTANT : reading *.jpeg's (scaled 0-255, aka scaling needed), but\n # # trained on *.png's (scaled 0-1, aka scaling not needed)\n if img.dtype == 'uint8':\n img = img.astype(np.float32) / 255 # aka scaling needed\n\n img_tosearch = img[ystart:ystop, :, :]\n ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')\n\n if scale != 1: # resize whole image instead of separate windows\n imshape = ctrans_tosearch.shape\n ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1] / scale), np.int(imshape[0] / scale)))\n\n ch1 = ctrans_tosearch[:, :, 0]\n ch2 = ctrans_tosearch[:, :, 1]\n ch3 = ctrans_tosearch[:, :, 2]\n\n # Define blocks and steps as above\n # These hold the number of HOG cells\n nxblocks = (ch1.shape[1] // pix_per_cell) - 1 # Note : '//' causes integers to be result, instead of floats\n nyblocks = (ch1.shape[0] // pix_per_cell) - 1\n # How many features per block are we going to be extracting\n nfeat_per_block = orient * cell_per_block ** 2\n window = 64\n nblocks_per_window = (window // pix_per_cell) - 1\n # aka 75% overlap between cells\n cells_per_step = 2 # Instead of overlap, define how many cells to step\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step\n\n # Compute individual channel HOG features for the entire image\n hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)\n\n for xb in range(nxsteps):\n for yb in range(nysteps):\n count += 1\n ypos = yb * cells_per_step\n xpos = xb * cells_per_step\n # Extract HOG for this patch\n hog_feat1 = hog1[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat2 = hog2[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_feat3 = hog3[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()\n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n xleft = xpos * pix_per_cell\n ytop = ypos * pix_per_cell\n\n # Extract the image patch\n subimg = cv2.resize(ctrans_tosearch[ytop:ytop + window, xleft:xleft + window], (64, 64))\n\n # Get colour features\n spatial_features = bin_spatial(subimg, size=spatial_size)\n hist_features = color_hist(subimg, nbins=hist_bins)\n\n # Scale features and make a prediction\n test_features = X_scaler.transform(\n np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))\n test_prediction = svc.predict((test_features))\n\n if test_prediction == 1:\n xbox_left = np.int(xleft * scale)\n ytop_draw = np.int(ytop * scale)\n win_draw = np.int(window * scale)\n cv2.rectangle(draw_img, (xbox_left, ytop_draw + ystart),\n (xbox_left + win_draw, ytop_draw + win_draw + ystart), (0, 0, 255))\n img_boxes.append(\n ((xbox_left, ytop_draw + ystart), (xbox_left + win_draw, ytop_draw + win_draw + ystart)))\n heatmap[ytop_draw + ystart:ytop_draw + win_draw + ystart, xbox_left:xbox_left + win_draw] += 1\n\n return draw_img, img_boxes, heatmap",
"def centroid_2D(image, checkbox_center, checkbox_halfwidth, max_iter=0, threshold=0, debug=False):\n \n # First calculate centroid to use for the first iteration\n c_sum = 0\n xsum = 0\n ysum = 0\n \n convergence_flag = 'N/A'\n \n # Unpack the checkbox_center and checkbox_halfwidth into \n # their appropriate variables\n xpeak, ypeak = checkbox_center\n xhw, yhw = checkbox_halfwidth \n \n for ii in xrange(xpeak - xhw - 1, xpeak + xhw - 1):\n for jj in xrange(ypeak - yhw - 1, ypeak + yhw - 1):\n xloc = ii + 1\n yloc = jj + 1\n c_sum = c_sum + image[jj, ii]\n xsum += xloc * image[jj, ii]\n ysum += yloc * image[jj, ii]\n \n if debug:\n # Initial sum calculation (before iterations)\n print('(centroid_2D): Init. Sum (before iterations) = ', c_sum)\n print()\n\n if c_sum == 0:\n print('(centroid_2D): ERROR - divide by zero.')\n print()\n exit\n else:\n xcen = xsum / c_sum\n ycen = ysum / c_sum\n \n # Iteratively calculate centroid until solution converges, using \n # neighboring pixels to apply weighting...\n old_xcen = xcen\n old_ycen = ycen\n num_iter = 0\n \n for kk in xrange(max_iter):\n num_iter += 1\n c_sum = 0\n xsum = 0\n ysum = 0\n \n # Set up x and y centroid scanning ranges\n x_range = np.array((np.floor(old_xcen - xhw) - 1, np.ceil(old_xcen + xhw) - 1))\n y_range = np.array((np.floor(old_ycen - yhw) - 1, np.ceil(old_ycen + yhw) - 1))\n \n for ii in xrange(np.int(x_range[0]), np.int(x_range[1])):\n for jj in xrange(np.int(y_range[0]), np.int(y_range[1])):\n \n # Initalize weights to zero\n xweight = 0\n yweight = 0\n \n # Adjust weights given distance from current centroid\n xoff = np.abs((ii + 1) - old_xcen)\n yoff = np.abs((jj + 1) - old_ycen)\n \n # If within the original centroid box, set weight to 1\n # for both x and y.\n # If on the border, the scale weight\n if xoff <= xhw:\n xweight = 1\n elif xhw < xoff < (xhw + 1):\n xweight = xhw + 1 - xoff\n \n if yoff <= yhw:\n yweight = 1\n elif yhw < yoff < (yhw + 1):\n yweight = yhw + 1 - yoff\n \n # Compute cummulative weight\n weight = xweight * yweight\n \n # Calculate centroid\n xloc = ii + 1\n yloc = jj + 1\n\n c_sum += image[jj, ii] * weight\n xsum += xloc * image[jj, ii] * weight\n ysum += yloc * image[jj, ii] * weight\n \n if c_sum == 0:\n print('(centroid_2D): ERROR - Divide by zero.')\n print()\n else:\n xcen = xsum / c_sum\n ycen = ysum / c_sum\n \n # Check for convergence\n if np.abs(xcen - old_xcen) <= threshold and np.abs(ycen - old_ycen) <= threshold:\n convergence_flag = 'Success'\n break\n elif kk == max_iter:\n convergence_flag = 'Fail'\n break\n else:\n old_xcen = xcen\n old_ycen = ycen\n \n # Debug messages\n if debug:\n print('(centroid_2D): xpeak, ypeak = {}, {}'.format(xpeak, ypeak))\n print('(centroid_2D): xhw, yhw = {}, {}'.format(xhw, yhw))\n print('(centroid_2D): xcen, ycen = {}, {} '.format(xcen, ycen)) \n print()\n \n \n print('(centroid_2D): Centroid = [{}, {}] for num_iter = {}.'.format(xcen-1, ycen-1, num_iter))\n print('(centroid_2D): Converged? ', convergence_flag)\n print()\n \n # -1 on both axes, as Python is 0 major \n centroid = np.array((xcen-1, ycen-1))\n return centroid, c_sum",
"def get_params(img, output_size):\n c, h, w = img.shape\n th, tw = output_size\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = (h - th)//2\n j = (w - tw)//2\n return i, j, th, tw",
"def harrisSubPixel(img, blockSize=2, ksize=3, k=0.04):\n\ttmp = img.copy()\n\tgray = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)\n\tgray = np.float32(gray)\n\tdst = cv2.cornerHarris(gray, blockSize, ksize, k)\n\tdst = cv2.dilate(dst, None)\n\tret, dst = cv2.threshold(dst, 0.01*dst.max(), 255, 0)\n\tdst = np.uint8(dst)\n\tret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)\n\tcriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)\n\tcorners = cv2.cornerSubPix(gray, np.float32(centroids), (5,5), (-1,-1), criteria)\n\treturn corners",
"def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 10)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size",
"def getRectangularKernel(size = (5,5)):\n\treturn cv2.getStructuringElement(cv2.MORPH_RECT, size)",
"def get_largest_blob(img, invert):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n gray_threshed = np.zeros_like(gray)\n if invert:\n gray_threshed[gray<np.mean(gray)] = 255\n else:\n gray_threshed[gray>np.mean(gray)] = 255\n\n # First, detect very large regions and remove them\n if DEBUG:\n print(\"gray_threshed start\")\n disp(gray_threshed)\n retval, labels, stats, centroids = cv2.connectedComponentsWithStats(gray_threshed)\n for i in range(len(stats)):\n show = show_cc(gray_threshed, stats[i])\n x0, y0, w, h, _ = stats[i]\n if (w*h) > 0.5*(gray_threshed.shape[0]*gray_threshed.shape[1]):\n gray_threshed[labels == i] = 0\n if DEBUG:\n print(\"gray_threshed large removed\")\n disp(gray_threshed)\n\n # ds = 2\n # element = cv2.getStructuringElement(cv2.MORPH_RECT, (ds*2+1, ds*2+1), (ds, ds))\n element = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 2), (1, 1))\n gray_threshed_dilated = cv2.dilate(gray_threshed, element)\n gray_threshed_dilated = cv2.erode(gray_threshed_dilated, element)\n\n if DEBUG:\n print(\"gray_threshed dilated\")\n disp(gray_threshed_dilated)\n # return\n\n retval, labels, stats, centroids = cv2.connectedComponentsWithStats(gray_threshed_dilated)\n sorted_indices = np.argsort(-1.0*stats[:,-1]) #Sort by size descending \n idx = sorted_indices[1] # Take the largest component aside from full the bounding box \n\n show = show_cc(img, stats[idx])\n if DEBUG:\n print('CC')\n disp(show)\n\n x0, y0, x1, y1, _ = stats[idx]\n x1 += x0 + 5\n y1 += y0 + 5\n x0 -= 5\n y0 -= 5\n x0 = max(0, x0)\n y0 = max(0, y0)\n x1 = min(x1, img.shape[1]-1)\n y1 = min(y1, img.shape[0]-1)\n\n return img[y0 : y1, x0 : x1, :]",
"def get_bounding_box(im):\n coords = np.where(im)\n \n return np.array([np.min(coords[0]), np.max(coords[0]), \n np.min(coords[1]), np.max(coords[1])])",
"def identify_watermark(exam, image):\n img = cv2.imread(image)\n img = cv2.copyMakeBorder(img, 100, 100, 100, 100, cv2.BORDER_CONSTANT)\n\n corners = []\n bits = []\n\n def handle_click(event, x, y, flags, params):\n if event == cv2.EVENT_LBUTTONDOWN:\n bits.append(Point(x, y))\n cv2.circle(img, (x, y), 5, (255, 0, 0), -1)\n if event == cv2.EVENT_RBUTTONDOWN:\n corners.append(Point(x, y))\n cv2.circle(img, (x, y), 5, (0, 255, 0), -1)\n\n cv2.namedWindow(\"image\")\n cv2.setMouseCallback(\"image\", handle_click)\n while True:\n cv2.imshow(\"image\", img)\n if cv2.waitKey(20) & 0xFF == 13:\n break\n\n print(decode_watermark(get_exam(exam=exam), get_roster(exam=exam), corners, bits))"
] | [
"0.66205657",
"0.56624645",
"0.56198984",
"0.5569849",
"0.5483061",
"0.5444478",
"0.543192",
"0.53851235",
"0.5374986",
"0.5329282",
"0.5318074",
"0.5252045",
"0.5229608",
"0.5215915",
"0.5213296",
"0.5213153",
"0.5213153",
"0.5213136",
"0.51987267",
"0.5176628",
"0.5176628",
"0.51756984",
"0.51751965",
"0.51682055",
"0.5147997",
"0.5145346",
"0.51415545",
"0.5129913",
"0.51222366",
"0.50982696"
] | 0.6849945 | 0 |
Find the coarse location of a flattened input PSF by finding the brightest checkbox. This function uses an image as input, flattens it into a vector and finds the brightest checkbox of given size in the image. | def checkbox_1D(image, checkbox, debug=False):
# Collapse input image, currently onto X axis
# Reshape to reflect collapse onto x axis
vector = np.sum(image, axis=0)
print('(checkbox_1D): Image collapsed into 1D vector.')
print()
# Calculate the checkbox half-width
chw = (checkbox - 1) / 2
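    # (Illustrative note, added for clarity: a checkbox of 5 pixels gives a half-width
    # of 2; under Python 3 the division above yields 2.0, which is harmless here
    # because chw is only echoed in the debug printout below.)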
# Calculate the image size
xsize, ysize = image.shape[1], image.shape[0]
# Calculate the x and y widths of checkbox region
xwidth = xsize - checkbox + 1
# If the checkbox size is not equal to both the X and Y sizes,
# find the pixel with the brightest checkbox
if checkbox != xsize and checkbox != ysize:
xpeak = 0
ypeak = 1
sumpeak = 0
        for ii in range(xsize - checkbox):
t = np.sum(vector[ii:ii+checkbox])
if t > sumpeak:
xpeak = ii + 1
sumpeak = t
print('(checkbox_1D): Checkbox not equal to xsize.')
# If the checkbox size is equal to both the X and Y sizes
if checkbox == xsize:
xpeak = xsize / 2
sumpeak = np.sum(vector, axis=None)
print('(checkbox_1D): Checkbox equal to xsize.')
# Print checkbox center and peak around centroid region
# Find the checkbox region half-width in x and y
xhw = xwidth / 2
if xpeak < xhw or xpeak > xsize - xhw:
print('(checkbox_1D): WARNING - Peak too close to edge of image.')
# Debug messages
if debug:
print('(checkbox_1D): chw = ', chw)
print('(checkbox_1D): xhw = ', xhw)
print('(checkbox_1D): xsize = ', xsize)
print('(checkbox_1D): xwidth = ', xwidth)
print('(checkbox_1D): xpeak = ', xpeak)
print('(checkbox_1D): sumpeak = ', sumpeak)
print()
# NOTE: Use this section if the input image is a subset of a larger image
# Not currently needed for this analysis
# # Determine the center of the brightest checkbox, in extracted
# # image coordinates
# xpeak = xpeak + xhw
return xpeak, xhw | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkbox_2D(image, checkbox, debug=False):\n \n # Calculate the checkbox half-width\n chw = (checkbox - 1) / 2\n \n # Calculate the image size\n xsize, ysize = image.shape[1], image.shape[0]\n \n # Calculate the x and y widths of checkbox region\n xwidth, ywidth = xsize - checkbox + 1, ysize - checkbox + 1\n \n # If the checkbox size is not equal to both the X and Y sizes, \n # find the pixel with the brightest checkbox\n if checkbox != xsize and checkbox != ysize:\n xpeak = 0\n ypeak = 0\n sumpeak = 0\n for ii in xrange(xsize - checkbox):\n for jj in xrange(ysize - checkbox):\n t = np.sum(image[jj:jj+checkbox, ii:ii+checkbox])\n if t > sumpeak:\n xpeak = ii + chw + 1\n ypeak = jj + chw + 1\n sumpeak = t\n \n print('(checkbox_2D): Checkbox not equal to both x/ysize.')\n print() \n\n \n # If the checkbox size is equal to both the X and Y sizes\n if checkbox == xsize and checkbox == ysize:\n xpeak = xsize / 2\n ypeak = ysize / 2\n sumpeak = np.sum(image, axis=None)\n \n print('(checkbox_2D): Checkbox equal to x/ysize.')\n print()\n \n # Print calculated checkbox center, and sum within checkbox centroid\n\n # Find the checkbox region half-width in x and y\n xhw = xwidth / 2\n yhw = ywidth / 2\n \n if xpeak < xhw or xpeak > xsize - xhw or ypeak < yhw or ypeak > ysize - yhw:\n print('(checkbox_2D): WARNING - Peak too close to edge of image.')\n print()\n \n# NOTE: Use this section of the input image is a subset of a larger image\n# Not currently needed for this analysis\n# # Determine the center of the brightest checkbox, in extracted\n# # image coordinates\n# xpeak = xpeak + xhw\n# ypeak = ypeak + yhw\n\n # Debug messages\n if debug:\n print('(checkbox_2D): chw = ', chw)\n print('(checkbox_2D): xsize, ysize = {}, {}'.format(xsize, ysize))\n print('(checkbox_2D): xwidth, ywidth = {}, {}'.format(xwidth, ywidth))\n print('(checkbox_2D): xpeak, ypeak = {}, {}'.format(xpeak, ypeak))\n print('(checkbox_2D): sumpeak = ', sumpeak)\n print('(checkbox_2D): xhw, yhw = {}, {}'.format(xhw, yhw))\n print()\n \n checkbox_ctr = np.array((xpeak, ypeak))\n checkbox_hfw = np.array((xhw, yhw))\n\n return checkbox_ctr, checkbox_hfw",
"def find_img2d_candidates(image, **kwargs):\n\n # filter_kernel = np.array([[-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225]])\n\n filter_kernel = np.array([[-2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2/324, -2/324, -2/324],\n [-2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -2/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, 
-1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -2/324, -2/324],\n [-2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2/324, -2/324, -2/324]])\n\n res = sg.convolve2d(image, filter_kernel, mode='same', boundary='fill', fillvalue=0)\n coord_x, coord_y = find_max_coords(np.absolute(res))\n\n return coord_x, coord_y",
"def getCrossKernel(size = (5,5)):\n\treturn cv2.getStructuringElement(cv2.MORPH_CROSS, size)",
"def _locate_finder_in_square(image, transform, size):\n radius = int(round(size/2))\n center = transform.trans\n angle = transform.rot\n\n rotated = image.rotate(angle, center)\n\n sx1, sy1 = center.x-radius, center.y-radius\n sx2, sy2 = center.x+radius, center.y+radius\n thick = int(round(size / 14))\n\n # Top\n x1, y1 = sx1, sy1\n x2, y2 = sx2, sy1 + thick\n top = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Left\n x1, y1 = sx1, sy1\n x2, y2 = sx1 + thick, sy2\n left = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Bottom\n x1, y1 = sx1, sy2 - thick\n x2, y2 = sx2, sy2\n bottom = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Right\n x1, y1 = sx2 - thick, sy1\n x2, y2 = sx2, sy2\n right = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Identify finder edges\n if top < bottom and left < right:\n c1 = [sx1, sy1]\n c2 = [sx1, sy2]\n c3 = [sx2, sy1]\n elif top < bottom and right < left:\n c1 = [sx2, sy1]\n c2 = [sx1, sy1]\n c3 = [sx2, sy2]\n elif bottom < top and left < right:\n c1 = [sx1, sy2]\n c2 = [sx2, sy2]\n c3 = [sx1, sy1]\n elif bottom < top and right < left:\n c1 = [sx2, sy2]\n c2 = [sx2, sy1]\n c3 = [sx1, sy2]\n else:\n return None\n\n # rotate points around center of square\n c1 = _rotate_around_point(Point.from_array(c1), angle, center)\n c2 = _rotate_around_point(Point.from_array(c2), angle, center)\n c3 = _rotate_around_point(Point.from_array(c3), angle, center)\n\n # Create finder pattern\n c1 = c1.intify()\n side1 = (c2 - c1).intify()\n side2 = (c3 - c1).intify()\n fp = FinderPattern(c1, side1, side2)\n\n return fp",
"def centroid_2D(image, checkbox_center, checkbox_halfwidth, max_iter=0, threshold=0, debug=False):\n \n # First calculate centroid to use for the first iteration\n c_sum = 0\n xsum = 0\n ysum = 0\n \n convergence_flag = 'N/A'\n \n # Unpack the checkbox_center and checkbox_halfwidth into \n # their appropriate variables\n xpeak, ypeak = checkbox_center\n xhw, yhw = checkbox_halfwidth \n \n for ii in xrange(xpeak - xhw - 1, xpeak + xhw - 1):\n for jj in xrange(ypeak - yhw - 1, ypeak + yhw - 1):\n xloc = ii + 1\n yloc = jj + 1\n c_sum = c_sum + image[jj, ii]\n xsum += xloc * image[jj, ii]\n ysum += yloc * image[jj, ii]\n \n if debug:\n # Initial sum calculation (before iterations)\n print('(centroid_2D): Init. Sum (before iterations) = ', c_sum)\n print()\n\n if c_sum == 0:\n print('(centroid_2D): ERROR - divide by zero.')\n print()\n exit\n else:\n xcen = xsum / c_sum\n ycen = ysum / c_sum\n \n # Iteratively calculate centroid until solution converges, using \n # neighboring pixels to apply weighting...\n old_xcen = xcen\n old_ycen = ycen\n num_iter = 0\n \n for kk in xrange(max_iter):\n num_iter += 1\n c_sum = 0\n xsum = 0\n ysum = 0\n \n # Set up x and y centroid scanning ranges\n x_range = np.array((np.floor(old_xcen - xhw) - 1, np.ceil(old_xcen + xhw) - 1))\n y_range = np.array((np.floor(old_ycen - yhw) - 1, np.ceil(old_ycen + yhw) - 1))\n \n for ii in xrange(np.int(x_range[0]), np.int(x_range[1])):\n for jj in xrange(np.int(y_range[0]), np.int(y_range[1])):\n \n # Initalize weights to zero\n xweight = 0\n yweight = 0\n \n # Adjust weights given distance from current centroid\n xoff = np.abs((ii + 1) - old_xcen)\n yoff = np.abs((jj + 1) - old_ycen)\n \n # If within the original centroid box, set weight to 1\n # for both x and y.\n # If on the border, the scale weight\n if xoff <= xhw:\n xweight = 1\n elif xhw < xoff < (xhw + 1):\n xweight = xhw + 1 - xoff\n \n if yoff <= yhw:\n yweight = 1\n elif yhw < yoff < (yhw + 1):\n yweight = yhw + 1 - yoff\n \n # Compute cummulative weight\n weight = xweight * yweight\n \n # Calculate centroid\n xloc = ii + 1\n yloc = jj + 1\n\n c_sum += image[jj, ii] * weight\n xsum += xloc * image[jj, ii] * weight\n ysum += yloc * image[jj, ii] * weight\n \n if c_sum == 0:\n print('(centroid_2D): ERROR - Divide by zero.')\n print()\n else:\n xcen = xsum / c_sum\n ycen = ysum / c_sum\n \n # Check for convergence\n if np.abs(xcen - old_xcen) <= threshold and np.abs(ycen - old_ycen) <= threshold:\n convergence_flag = 'Success'\n break\n elif kk == max_iter:\n convergence_flag = 'Fail'\n break\n else:\n old_xcen = xcen\n old_ycen = ycen\n \n # Debug messages\n if debug:\n print('(centroid_2D): xpeak, ypeak = {}, {}'.format(xpeak, ypeak))\n print('(centroid_2D): xhw, yhw = {}, {}'.format(xhw, yhw))\n print('(centroid_2D): xcen, ycen = {}, {} '.format(xcen, ycen)) \n print()\n \n \n print('(centroid_2D): Centroid = [{}, {}] for num_iter = {}.'.format(xcen-1, ycen-1, num_iter))\n print('(centroid_2D): Converged? ', convergence_flag)\n print()\n \n # -1 on both axes, as Python is 0 major \n centroid = np.array((xcen-1, ycen-1))\n return centroid, c_sum",
"def thresh_frame_sobel(frame, kernel_size):\n\tgray = frame.gray\n\t# side x\n\tsobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel_size)\n\t# side y\n\tsobel_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel_size)\n\n\tsobel_mag = np.sqrt(sobel_x ** 2 + sobel_y ** 2)\n\tsobel_mag = np.uint8(sobel_mag / np.max(sobel_mag) * 255)\n\n\t_, sobel_mag = cv2.threshold(sobel_mag, 50, 1, cv2.THRESH_BINARY)\n\treturn sobel_mag.astype(bool)",
"def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 0)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size",
"def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 0)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size",
"def getEllipticalKernel(size = (5,5)):\n\treturn cv2.getStructuringElement(cv2.MORPH_ELLIPSE, size)",
"def find_eyes(\n image_path: str, base_img_size: int\n ) -> Tuple[Tuple[int, int], Tuple[int, int]]:\n # TODO Make this a filter_eyes thing that also checks relative coords \n eye_cascade = cv2.CascadeClassifier('templates/cascades/haarcascade_eye.xml')\n cv2_img = cv2.imread(image_path)\n gray = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2GRAY)\n\n image_path = eye_cascade.detectMultiScale(gray)\n largest_eyes = sorted(image_path, key=lambda eye: eye[3], reverse=True)[:min(3, len(image_path))]\n\n # Find the eyes that are the most similar in size and\n\n size_thresh = base_img_size // 15\n for i in range(len(largest_eyes)):\n for j in range(len(largest_eyes[i:])):\n if i == j:\n continue\n elif abs(largest_eyes[i][3] - largest_eyes[j][3]) < size_thresh:\n return largest_eyes[i], largest_eyes[j]",
"def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 10)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size",
"def find_brightest_biggest(filename, catname=\"sources.cat\", config=\"config.sex\", minsize=10,\n minflux=450000):\n # Get dimensions of the image\n hdulist = pf.open(filename)\n (height, width) = hdulist[0].data.shape\n bloblist = manual_blob_finder(hdulist[0].data)\n hdulist.close()\n\n if bloblist is None:\n return None\n\n sort_ind = np.argsort(bloblist['max'])[::-1]\n blob_ind = None\n for ind in sort_ind:\n if bloblist['width'][ind] >= minsize and bloblist['flux'][ind] > minflux:\n blob_ind = ind\n break\n\n if blob_ind is None:\n return None\n\n return (bloblist['cent_x'][blob_ind] - (float(width) / 2.),\n bloblist['cent_y'][blob_ind] - (float(height) / 2.),\n bloblist['flux'][blob_ind],\n bloblist['width'][blob_ind],\n bloblist['max'][blob_ind])\n\n '''\n # Use sextractor to find blobs.\n # N.B. may be tuning of parameters, but this was mostly unreliable and noisy.\n\n hdulist.close()\n\n # Source extract\n call([\"sextractor\", filename, \"-c\", config, \"-CATALOG_NAME\", catname])\n\n # Load the catalog file\n srclist = pf.open(catname)\n srctable = srclist[2].data\n sort_ind = np.argsort(srctable['FLUX_MAX'])[::-1]\n blob_ind = None\n for ind in sort_ind:\n if (srctable['FLUX_RADIUS'][ind] > minsize and srctable['FLUX_MAX'][ind] > minflux and\n srctable['FLUX_RADIUS'][ind] < maxradius and srctable['FLUX_MAX'][ind] < maxflux):\n blob_ind = ind;\n break\n if blob_ind is None:\n return None\n return (srctable['X_IMAGE'][blob_ind] - (float(width) / 2.), \n srctable['Y_IMAGE'][blob_ind] - (float(height) / 2.),\n srctable['FLUX_MAX'][blob_ind],\n srctable['FLUX_RADIUS'][blob_ind],\n srctable['SNR_WIN'][blob_ind])\n '''",
"def get_bounding_box(img):\n rows = np.any(img, axis=1)\n cols = np.any(img, axis=0)\n rmin, rmax = np.where(rows)[0][[0, -1]]\n cmin, cmax = np.where(cols)[0][[0, -1]]\n # due to python indexing, need to add 1 to max\n # else accessing will be 1px in the box, not out\n rmax += 1\n cmax += 1\n return [rmin, rmax, cmin, cmax]",
"def getRectangularKernel(size = (5,5)):\n\treturn cv2.getStructuringElement(cv2.MORPH_RECT, size)",
"def threshold_image(self, image, ksize, sobel_thresh, mag_thresh, dir_thresh, s_thresh, l_thresh, b_thresh):\n # Note: Magnitude and direction thresholds were not needed for the project. Probably they are for the challenges\n # For Sobel, the light channel will be used\n hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS).astype(np.float)\n l_channel = hls[:, :, 1]\n # Sobel x\n sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0, ksize=ksize) # Take the derivative in x\n abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal\n scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))\n\n # Threshold x gradient\n gradient_binary = np.zeros_like(scaled_sobel)\n gradient_binary[(scaled_sobel >= sobel_thresh[0]) & (scaled_sobel <= sobel_thresh[1])] = 1\n\n s_binary, l_binary = hls_filter(image, s_thresh, l_thresh)\n l_color_channel = lab_filter(image, b_thresh)\n binary = np.zeros_like(gradient_binary)\n binary[((l_binary == 1) | (l_color_channel == 1))] = 1\n binary = 255 * np.dstack((binary, binary, binary)).astype('uint8')\n images = [\n [{'title': 'Original', 'data': image},\n {'title': 'Full Combined', 'data': binary}\n ]\n ]\n title = 'Kernel = {}; sobel = {}, mag = {}, dir = {}, s_filter = {}, l_filter = {}' \\\n .format(ksize, sobel_thresh, mag_thresh, dir_thresh, s_thresh, l_thresh)\n if self.args.is_test:\n self.image_logger.plot_results(images, title)\n\n return binary",
"def sobelxy(img, ksize=5):\n\tgray = grayscale(img)\n\treturn cv2.Sobel(gray, cv2.CV_64F, 1, 1, ksize= ksize)",
"def get_largest_blob(img, invert):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n gray_threshed = np.zeros_like(gray)\n if invert:\n gray_threshed[gray<np.mean(gray)] = 255\n else:\n gray_threshed[gray>np.mean(gray)] = 255\n\n # First, detect very large regions and remove them\n if DEBUG:\n print(\"gray_threshed start\")\n disp(gray_threshed)\n retval, labels, stats, centroids = cv2.connectedComponentsWithStats(gray_threshed)\n for i in range(len(stats)):\n show = show_cc(gray_threshed, stats[i])\n x0, y0, w, h, _ = stats[i]\n if (w*h) > 0.5*(gray_threshed.shape[0]*gray_threshed.shape[1]):\n gray_threshed[labels == i] = 0\n if DEBUG:\n print(\"gray_threshed large removed\")\n disp(gray_threshed)\n\n # ds = 2\n # element = cv2.getStructuringElement(cv2.MORPH_RECT, (ds*2+1, ds*2+1), (ds, ds))\n element = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 2), (1, 1))\n gray_threshed_dilated = cv2.dilate(gray_threshed, element)\n gray_threshed_dilated = cv2.erode(gray_threshed_dilated, element)\n\n if DEBUG:\n print(\"gray_threshed dilated\")\n disp(gray_threshed_dilated)\n # return\n\n retval, labels, stats, centroids = cv2.connectedComponentsWithStats(gray_threshed_dilated)\n sorted_indices = np.argsort(-1.0*stats[:,-1]) #Sort by size descending \n idx = sorted_indices[1] # Take the largest component aside from full the bounding box \n\n show = show_cc(img, stats[idx])\n if DEBUG:\n print('CC')\n disp(show)\n\n x0, y0, x1, y1, _ = stats[idx]\n x1 += x0 + 5\n y1 += y0 + 5\n x0 -= 5\n y0 -= 5\n x0 = max(0, x0)\n y0 = max(0, y0)\n x1 = min(x1, img.shape[1]-1)\n y1 = min(y1, img.shape[0]-1)\n\n return img[y0 : y1, x0 : x1, :]",
"def edge_detect(im):\n return np.max(np.array([sobel(im[:,:, 0]), sobel(im[:,:, 1]), sobel(im[:,:, 2]) ]), axis=0)",
"def belt(image):\n\n # Belt Detector\n x, y = circular_detector(image, 70, 80)\n\n return x, y",
"def extract_roi(img):\n roi1 = get_largest_blob(img, invert=True)\n roi2 = get_largest_blob(img, invert=False)\n if roi1.shape[0]*roi1.shape[1] > roi2.shape[0]*roi2.shape[1]:\n return roi1\n else:\n return roi2",
"def extract_blobs_closest_points(this_robot, in_image, active_mask):\n\n out_image = PointSampleImage(in_image.calib_array, in_image.neighbour_array)\n\n G = nx.Graph()\n\n # First add all nodes, where each node consists of an index into\n # calib_array for one of the active pixels.\n for i in range(in_image.n_rows):\n G.add_node(i)\n\n # We will add edges between neighbouring pixels. See\n # sensors/pointsamplecam for the definition of neighbouring.\n node_list = G.nodes()\n n = len(node_list)\n for i in range(n):\n if in_image.masks[i] & active_mask != 0:\n (ixi, iyi) = in_image.calib_array[i,0], in_image.calib_array[i,1]\n for j in in_image.neighbour_array[i]:\n if in_image.masks[j] & active_mask != 0:\n G.add_edge(i, j)\n\n clusters = nx.connected_component_subgraphs(G, copy=False)\n n_clusters = 0\n for cluster in clusters:\n n_clusters += 1\n # Find the closest pixel to the robot in this cluster. \n closest_i = None\n closest_distance = float('inf')\n for i in cluster.nodes():\n #(xr, yr) = in_image.calib_array[i,2], in_image.calib_array[i,3]\n #d = sqrt(xr*xr + yr*yr)\n\n # The pre-computed distance sqrt(xr*xr + yr*yr)\n d = in_image.calib_array[i,5]\n\n if d < closest_distance:\n closest_i = i\n closest_distance = d\n if closest_i != None:\n out_image.masks[closest_i] = in_image.masks[closest_i]\n\n return out_image",
"def locate_shape(shape):",
"def optimize_bbox(img_shape,\n bbox,\n edge_width=8):\n (rows,columns) = img_shape\n (x1,y1,x2,y2) = bbox\n\n return max(0,x1-edge_width),max(0,y1-edge_width),min(rows-1,x2+edge_width),min(columns-1,y2+edge_width)",
"def get_largest_two_component(img, print_info = False, threshold = None):\n s = ndimage.generate_binary_structure(3,2) # iterate structure\n labeled_array, numpatches = ndimage.label(img,s) # labeling\n sizes = ndimage.sum(img,labeled_array,range(1,numpatches+1)) \n sizes_list = [sizes[i] for i in range(len(sizes))]\n sizes_list.sort()\n if(print_info):\n print('component size', sizes_list)\n if(len(sizes) == 1):\n out_img = img\n else:\n if(threshold):\n out_img = np.zeros_like(img)\n for temp_size in sizes_list:\n if(temp_size > threshold):\n temp_lab = np.where(sizes == temp_size)[0] + 1\n temp_cmp = labeled_array == temp_lab\n out_img = (out_img + temp_cmp) > 0\n return out_img\n else: \n max_size1 = sizes_list[-1]\n max_size2 = sizes_list[-2]\n max_label1 = np.where(sizes == max_size1)[0] + 1\n max_label2 = np.where(sizes == max_size2)[0] + 1\n component1 = labeled_array == max_label1\n component2 = labeled_array == max_label2\n if(max_size2*10 > max_size1):\n component1 = (component1 + component2) > 0\n out_img = component1\n return out_img",
"def get_largest_two_component(img, print_info = False, threshold = None):\n s = ndimage.generate_binary_structure(3,2) # iterate structure\n labeled_array, numpatches = ndimage.label(img,s) # labeling\n sizes = ndimage.sum(img,labeled_array,range(1,numpatches+1)) \n sizes_list = [sizes[i] for i in range(len(sizes))]\n sizes_list.sort()\n if(print_info):\n print('component size', sizes_list)\n if(len(sizes) == 1):\n out_img = img\n else:\n if(threshold):\n out_img = np.zeros_like(img)\n for temp_size in sizes_list:\n if(temp_size > threshold):\n temp_lab = np.where(sizes == temp_size)[0] + 1\n temp_cmp = labeled_array == temp_lab\n out_img = (out_img + temp_cmp) > 0\n return out_img\n else: \n max_size1 = sizes_list[-1]\n max_size2 = sizes_list[-2]\n max_label1 = np.where(sizes == max_size1)[0] + 1\n max_label2 = np.where(sizes == max_size2)[0] + 1\n component1 = labeled_array == max_label1\n component2 = labeled_array == max_label2\n if(max_size2*10 > max_size1):\n component1 = (component1 + component2) > 0\n out_img = component1\n return out_img",
"def getMostContour(img,svm,knn,filterArr,digits,wThresh,hThresh):\r\n # append the filter to filter array, this approach is used in case of \r\n # multiple filter methods would be used.\r\n counts = []\r\n # iterare through every filter\r\n for flt in filterArr:\r\n # copy the image so we don't draw on same image\r\n flt_img = img.copy()\r\n last_img = img.copy()\r\n flt_contour,cntfound_fltr = drawcntMap(img.copy(),flt,wThresh,hThresh) \r\n if not digits:\r\n flt_contour,cntfound_fltr = drawcntMap(img.copy(),flt,wThresh,hThresh)\r\n flt_contour_map = []\r\n labels = []\r\n for crop,(x,y,w,h),contour in cropNwriteBBs(img,cntfound_fltr):\r\n #crop = np.array(crop,dtype='float32')\r\n crop = cv2.cvtColor(crop,cv2.COLOR_BGR2GRAY)\r\n crop = cv2.resize(crop,(25,25))\r\n # winSize is the size of the image cropped to an multiple of the cell size\r\n hog_fts = hog.compute(crop)\\\r\n .reshape(n_cells[1] - block_size[1] + 1,\r\n n_cells[0] - block_size[0] + 1,\r\n block_size[0], block_size[1], nbins) \\\r\n .transpose((1, 0, 2, 3, 4))\r\n hog_fts = np.resize(hog_fts.flatten(),(1,576))\r\n # make the resulted crop same type with the trained values\r\n hog_fts.dtype = 'float32'\r\n # get predicted labels\r\n label_svm=svm.predict(hog_fts)[1]\r\n label_knn = knn.findNearest(hog_fts,k=5)[1]\r\n # label 10 is considered as 'not digit' or 'thrash'\r\n # so if predicted label is not 10, draw the bounding box\r\n if digits:\r\n if(label_svm!=10 and label_knn != 10 and label_svm!=11 and label_knn != 11):\r\n flt_contour_map.append(contour)\r\n labels.append(str(label_knn[0])[1])\r\n else:\r\n if(label_svm!=2 and label_knn != 2):\r\n flt_contour_map.append(contour)\r\n labels.append(str(label_knn[0])[1])\r\n #cv2.putText(flt_img,str(label_knn[0])[1],(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=0.8,color=(0,0,255))\r\n #cv2.putText(flt_img,str(label_knn[0])[1],(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=0.8,color=(0,0,255))\r\n last_cnt,last_labels = secondElimination(flt_contour_map,labels)\r\n for cnt in last_cnt:\r\n x,y,w,h = cv2.boundingRect(cnt)\r\n cv2.rectangle(flt_img,(x,y),(x+w,y+h),[0,255,0],2)\r\n #showWait(flt_img,'fltres')\r\n _,xx,res_boxes,_,_ = mergeBoundingBoxes(flt_img,last_cnt,last_labels)\r\n cnt = len(res_boxes)\r\n counts.append([cnt,flt_img,last_cnt,last_labels])\r\n # append resulted image and contours to an array\r\n counts = np.asarray(counts)\r\n # get the resulted image which contain more digits (bounding boxes)\r\n tmp = counts[:,0]\r\n resulted_img = counts[np.argmax(tmp),1]\r\n result_labels = counts[np.argmax(tmp),3]\r\n resulted_contour = counts[np.argmax(tmp),2]\r\n return resulted_contour,result_labels,resulted_img",
"def get_bounding_box(im):\n coords = np.where(im)\n \n return np.array([np.min(coords[0]), np.max(coords[0]), \n np.min(coords[1]), np.max(coords[1])])",
"def bbox(img):\n a = np.where(img != 0)\n bbox = np.min(a[0]), np.max(a[0]), np.min(a[1]), np.max(a[1])\n return bbox",
"def clConvolution(self, size, mask):",
"def isFusion(event,buff):\n index,diff,label = event\n label = label[0]\n if diff>0:\n return False,[]\n img_before = np.copy(buff[:,:,index-1])\n img_after = np.copy(buff[:,:,index])\n mask_before = (img_before==label).astype(np.uint8)\n nb_elts_before = np.amax(img_before)\n kernel = np.ones((7,7),np.uint8)\n neighbouring_mask = cv2.dilate(mask_before,kernel,iterations=8)\n\n new_map = np.multiply(img_before,neighbouring_mask.astype(np.uint8))\n \n #Removing the element we are currently looking at\n new_map[img_before==label]=0\n possible_candidates = []\n for i in range(nb_elts_before):\n if np.any(new_map==i+1):\n possible_candidates.append(i+1)\n #Computes the area of the cells and compares them\n size_cell_disappearing = np.count_nonzero(img_before==label)\n match = [] #lists the ratios sizeAfter/sizeBefore for possible matches\n \n for vals in possible_candidates:\n size_other_cell = np.count_nonzero(img_before==vals)\n size_before = size_other_cell+size_cell_disappearing\n size_after = np.count_nonzero(img_after==vals)\n ratio = float(size_after)/float(size_before)\n if ratio>0.8 and ratio<1.2:\n match.append((vals,abs(1-ratio)))\n if len(match)==0:\n return False,[]\n if len(match)>1:\n #Several matches, so pick the best\n values = [y for x,y in match]\n result_label,osef = match[np.argmin(values)]\n else:\n result_label, osef = match[0]\n return True,result_label"
] | [
"0.6801461",
"0.55033886",
"0.55019563",
"0.54685766",
"0.5424752",
"0.53917885",
"0.5366351",
"0.5366351",
"0.5352218",
"0.52790505",
"0.5261697",
"0.52307206",
"0.52187544",
"0.52073383",
"0.51733804",
"0.5172248",
"0.51645684",
"0.5158878",
"0.51432467",
"0.51419896",
"0.5125112",
"0.51229036",
"0.50851786",
"0.50800145",
"0.50800145",
"0.50792104",
"0.5077512",
"0.5046245",
"0.50181544",
"0.5000347"
] | 0.69018906 | 0 |
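A minimal usage sketch for the coarse checkbox step documented in the record above (added for illustration; it is not part of the dataset, and all names below are placeholders). It assumes numpy is importable and that checkbox_1D, as defined above, is available in the same module; a real target-acquisition flow would follow this coarse step with the fine centroid step documented in the next record.

import numpy as np

# Hypothetical 64x64 stamp with a bright 5x5 patch standing in for the PSF core.
psf_image = np.zeros((64, 64))
psf_image[28:33, 30:35] = 1.0

# Coarse step: locate the brightest 5-pixel checkbox along the collapsed x axis.
xpeak, xhw = checkbox_1D(psf_image, checkbox=5)
print('Brightest checkbox: xpeak =', xpeak, ', region half-width xhw =', xhw)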
Fine location of the target, obtained by calculating the centroid of the region centered on the brightest checkbox. Performs the centroid calculation on the checkbox region returned by the function checkbox_2D(). | def centroid_2D(image, checkbox_center, checkbox_halfwidth, max_iter=0, threshold=0, debug=False):
# First calculate centroid to use for the first iteration
c_sum = 0
xsum = 0
ysum = 0
convergence_flag = 'N/A'
# Unpack the checkbox_center and checkbox_halfwidth into
# their appropriate variables
xpeak, ypeak = checkbox_center
xhw, yhw = checkbox_halfwidth
    for ii in range(xpeak - xhw - 1, xpeak + xhw - 1):
        for jj in range(ypeak - yhw - 1, ypeak + yhw - 1):
xloc = ii + 1
yloc = jj + 1
c_sum = c_sum + image[jj, ii]
xsum += xloc * image[jj, ii]
ysum += yloc * image[jj, ii]
if debug:
# Initial sum calculation (before iterations)
print('(centroid_2D): Init. Sum (before iterations) = ', c_sum)
print()
if c_sum == 0:
print('(centroid_2D): ERROR - divide by zero.')
print()
        # The original bare `exit` statement was a no-op; raise so the failure is explicit.
        raise ZeroDivisionError('(centroid_2D): zero total signal in checkbox region.')
else:
xcen = xsum / c_sum
ycen = ysum / c_sum
# Iteratively calculate centroid until solution converges, using
# neighboring pixels to apply weighting...
old_xcen = xcen
old_ycen = ycen
num_iter = 0
    for kk in range(max_iter):
num_iter += 1
c_sum = 0
xsum = 0
ysum = 0
# Set up x and y centroid scanning ranges
x_range = np.array((np.floor(old_xcen - xhw) - 1, np.ceil(old_xcen + xhw) - 1))
y_range = np.array((np.floor(old_ycen - yhw) - 1, np.ceil(old_ycen + yhw) - 1))
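        # (Illustrative note, added for clarity: with old_xcen = 30.4 and xhw = 5,
        # x_range works out to (24.0, 35.0); the floor/ceil pair widens the window
        # to whole pixels, and the trailing -1 converts the 1-based centroid back
        # to 0-based array indices for the loops below.)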
        for ii in range(int(x_range[0]), int(x_range[1])):
            for jj in range(int(y_range[0]), int(y_range[1])):
                # Initialize weights to zero
xweight = 0
yweight = 0
# Adjust weights given distance from current centroid
xoff = np.abs((ii + 1) - old_xcen)
yoff = np.abs((jj + 1) - old_ycen)
# If within the original centroid box, set weight to 1
# for both x and y.
# If on the border, the scale weight
if xoff <= xhw:
xweight = 1
elif xhw < xoff < (xhw + 1):
xweight = xhw + 1 - xoff
if yoff <= yhw:
yweight = 1
elif yhw < yoff < (yhw + 1):
yweight = yhw + 1 - yoff
                # Compute cumulative weight
weight = xweight * yweight
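                # (Worked example, added for clarity: with xhw = 5 and a pixel at
                # xoff = 5.6 from the current centroid, xweight = 5 + 1 - 5.6 = 0.4,
                # so pixels straddling the box edge contribute only fractionally.)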
# Calculate centroid
xloc = ii + 1
yloc = jj + 1
c_sum += image[jj, ii] * weight
xsum += xloc * image[jj, ii] * weight
ysum += yloc * image[jj, ii] * weight
if c_sum == 0:
print('(centroid_2D): ERROR - Divide by zero.')
print()
else:
xcen = xsum / c_sum
ycen = ysum / c_sum
# Check for convergence
if np.abs(xcen - old_xcen) <= threshold and np.abs(ycen - old_ycen) <= threshold:
convergence_flag = 'Success'
break
        elif kk == max_iter - 1:  # final iteration reached without converging
convergence_flag = 'Fail'
break
else:
old_xcen = xcen
old_ycen = ycen
# Debug messages
if debug:
print('(centroid_2D): xpeak, ypeak = {}, {}'.format(xpeak, ypeak))
print('(centroid_2D): xhw, yhw = {}, {}'.format(xhw, yhw))
print('(centroid_2D): xcen, ycen = {}, {} '.format(xcen, ycen))
print()
print('(centroid_2D): Centroid = [{}, {}] for num_iter = {}.'.format(xcen-1, ycen-1, num_iter))
print('(centroid_2D): Converged? ', convergence_flag)
print()
    # -1 on both axes, as Python indexing is 0-based
centroid = np.array((xcen-1, ycen-1))
return centroid, c_sum | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkbox_1D(image, checkbox, debug=False):\n \n # Collapse input image, currently onto X axis\n # Reshape to reflect collapse onto x axis\n vector = np.sum(image, axis=0)\n print('(checkbox_1D): Image collapsed into 1D vector.')\n print()\n \n # Calculate the checkbox half-width\n chw = (checkbox - 1) / 2\n\n \n # Calculate the image size\n xsize, ysize = image.shape[1], image.shape[0]\n \n # Calculate the x and y widths of checkbox region\n xwidth = xsize - checkbox + 1\n\n # If the checkbox size is not equal to both the X and Y sizes, \n # find the pixel with the brightest checkbox\n if checkbox != xsize and checkbox != ysize:\n xpeak = 0\n ypeak = 1\n sumpeak = 0\n for ii in xrange(xsize - checkbox):\n t = np.sum(vector[ii:ii+checkbox])\n if t > sumpeak:\n xpeak = ii + 1\n sumpeak = t\n\n print('(checkbox_1D): Checkbox not equal to xsize.')\n \n \n # If the checkbox size is equal to both the X and Y sizes\n if checkbox == xsize:\n xpeak = xsize / 2\n sumpeak = np.sum(vector, axis=None)\n \n print('(checkbox_1D): Checkbox equal to xsize.')\n \n # Print checkbox center and peak around centroid region\n\n # Find the checkbox region half-width in x and y\n xhw = xwidth / 2\n \n if xpeak < xhw or xpeak > xsize - xhw:\n print('(checkbox_1D): WARNING - Peak too close to edge of image.')\n \n \n # Debug messages\n if debug:\n print('(checkbox_1D): chw = ', chw)\n print('(checkbox_1D): xhw = ', xhw)\n print('(checkbox_1D): xsize = ', xsize)\n print('(checkbox_1D): xwidth = ', xwidth)\n print('(checkbox_1D): xpeak = ', xpeak)\n print('(checkbox_1D): sumpeak = ', sumpeak)\n print() \n \n# NOTE: Use this section of the input image is a subset of a larger image\n# Not currently needed for this analysis\n# # Determine the center of the brightest checkbox, in extracted\n# # image coordinates\n# xpeak = xpeak + xhw\n \n return xpeak, xhw",
"def find_centroid_cell(self):\n\n x_min, y_min = self.find_min()\n x_max, y_max = self.find_max()\n x_centroid = int((x_max+x_min)/2)\n y_centroid = int((y_max+y_min)/2)\n centroide = x_centroid, y_centroid\n return centroide",
"def find_centroid_for_each(self):",
"def checkbox_2D(image, checkbox, debug=False):\n \n # Calculate the checkbox half-width\n chw = (checkbox - 1) / 2\n \n # Calculate the image size\n xsize, ysize = image.shape[1], image.shape[0]\n \n # Calculate the x and y widths of checkbox region\n xwidth, ywidth = xsize - checkbox + 1, ysize - checkbox + 1\n \n # If the checkbox size is not equal to both the X and Y sizes, \n # find the pixel with the brightest checkbox\n if checkbox != xsize and checkbox != ysize:\n xpeak = 0\n ypeak = 0\n sumpeak = 0\n for ii in xrange(xsize - checkbox):\n for jj in xrange(ysize - checkbox):\n t = np.sum(image[jj:jj+checkbox, ii:ii+checkbox])\n if t > sumpeak:\n xpeak = ii + chw + 1\n ypeak = jj + chw + 1\n sumpeak = t\n \n print('(checkbox_2D): Checkbox not equal to both x/ysize.')\n print() \n\n \n # If the checkbox size is equal to both the X and Y sizes\n if checkbox == xsize and checkbox == ysize:\n xpeak = xsize / 2\n ypeak = ysize / 2\n sumpeak = np.sum(image, axis=None)\n \n print('(checkbox_2D): Checkbox equal to x/ysize.')\n print()\n \n # Print calculated checkbox center, and sum within checkbox centroid\n\n # Find the checkbox region half-width in x and y\n xhw = xwidth / 2\n yhw = ywidth / 2\n \n if xpeak < xhw or xpeak > xsize - xhw or ypeak < yhw or ypeak > ysize - yhw:\n print('(checkbox_2D): WARNING - Peak too close to edge of image.')\n print()\n \n# NOTE: Use this section of the input image is a subset of a larger image\n# Not currently needed for this analysis\n# # Determine the center of the brightest checkbox, in extracted\n# # image coordinates\n# xpeak = xpeak + xhw\n# ypeak = ypeak + yhw\n\n # Debug messages\n if debug:\n print('(checkbox_2D): chw = ', chw)\n print('(checkbox_2D): xsize, ysize = {}, {}'.format(xsize, ysize))\n print('(checkbox_2D): xwidth, ywidth = {}, {}'.format(xwidth, ywidth))\n print('(checkbox_2D): xpeak, ypeak = {}, {}'.format(xpeak, ypeak))\n print('(checkbox_2D): sumpeak = ', sumpeak)\n print('(checkbox_2D): xhw, yhw = {}, {}'.format(xhw, yhw))\n print()\n \n checkbox_ctr = np.array((xpeak, ypeak))\n checkbox_hfw = np.array((xhw, yhw))\n\n return checkbox_ctr, checkbox_hfw",
"def estimate_centroid(self):\r\n\t\tstrain = self.strain_distribution_compr(self.max_pure_compresive_strain,\\\r\n\t\t\tself.max_pure_compresive_strain)\r\n\t\tself.geometric_centrod = (self.depth/2) \r\n\t\tself.plastic_centroid = (self.depth/2)+\\\r\n\t\t\t(self.sectional_moment(strain, self.depth/2)/\\\r\n\t\t\tself.sectional_force(strain))",
"def calculate_cluster_center(self, threshold):\n gamma = self.gamma\n self.cluster_center = np.where(gamma >= threshold)[0]",
"def calculate_laser_center(self):\n image = np.copy(self.camera_fiber.temp_image)\n brightest = np.unravel_index(image.argmax(), image.shape)\n self.laser_center = self.calculate_gaussian_centroid(image, brightest[0], brightest[1], crop_size=25)",
"def centroid(image, threshold=0, binarize=False):\n\n signal = np.where(image > threshold)\n sy, sx = image.shape[0], image.shape[1]\n\n temp = np.zeros((sy, sx))\n\n if binarize is True:\n temp[signal] = 1.0\n else:\n temp[signal] = image[signal]\n\n profx = 1.0 * temp.sum(axis=0)\n profy = 1.0 * temp.sum(axis=1)\n profx -= np.min(profx)\n profy -= np.min(profy)\n\n x0 = (profx * np.arange(sx)).sum() / profx.sum()\n y0 = (profy * np.arange(sy)).sum() / profy.sum()\n\n return (x0, y0)",
"def center(self):\n return np.sum(self.bbox, 0) / 2",
"def center(self, obj):\n mn0 = self.master.xy >= obj.center\n mn1 = self.master.xy <= obj.center\n\n point_list = [self.master.xy[mn0], self.master.xy[mn1], self.master.xy[mn0[0], mn1[1]], self.master.xy[mn1[0], mn0[1]]] # 4 physical points near the center coordinate.\n dist_list = []\n idx = 0\n for point in point_list:\n dist_list.append([idx, np.linalg.norm(point - obj.center)]) # Calculate Euclidean distances.\n idx += 1\n dist_sorted = sorted(dist_list, key=lambda distance : distance[1]) # Sort distances in ascending order.\n return self.master.mn(point_list[dist_sorted[0][0]]) # Convert the closest point to abstract coordinate and then return.",
"def test_centroids_mask():\n data = np.ones((2, 2)).astype(np.float)\n mask = [[False, False], [True, True]]\n centroid = centroid_com(data, mask=None)\n centroid_mask = centroid_com(data, mask=mask)\n assert_allclose([0.5, 0.5], centroid, rtol=0, atol=1.e-6)\n assert_allclose([0.5, 0.0], centroid_mask, rtol=0, atol=1.e-6)",
"def centroid(self): # -> BaseGeometry:\n ...",
"def recenter(self):\n self.centroid = self.consensus()\n return self.centroid",
"def _calculate_binary_center(_binary_label):\n _pixel_inds = np.indices(np.shape(_binary_label)).astype(np.uint16)\n _coord = []\n for _l in (_pixel_inds * _binary_label):\n _coord.append(np.mean(_l[_l>0]))\n return np.array(_coord)",
"def getBeliefsCentroid(self, idx):\n x = 0.0\n y = 0.0\n total = 0.0\n for p in self.beliefs[idx]:\n x += p[0]\n y += p[1]\n total += 1.0\n return (round(x / total), round(y / total))",
"def calculateCentroid(self,image):\n\t\tim=cv2.imread(image,0) #reads it in greyscale\n\t\tret,thresh = cv2.threshold(img_copy,128,255,cv2.THRESH_OTSU)\n\t\tim2,contours,hierarchy = cv2.findContours(thresh, 1, 2)\n\t\tcnt = contours[0]\n\t\tM = cv2.moments(cnt)\n\t\tcx = int(M['m10']/M['m00'])\n\t\tcy = int(M['m01']/M['m00'])\n\t\tcentroid=(cx,cy)\n\t\treturn centroid",
"def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)",
"def get_object_centroid(labelmask, id):\n # Get coordinates \n coords = np.where(labelmask == id)\n # Find mean of each coordinate, remove negatives, make int.\n return tuple([int(np.mean(x)) for x in coords])",
"def get_object_centroid(labelmask, id):\n # Get coordinates \n coords = np.where(labelmask == id)\n # Find mean of each coordinate, remove negatives, make int.\n return tuple([int(np.mean(x)) for x in coords])",
"def find_centroid(event_file):\n \n print('Finding the centroid of the event file...\\n')\n \n make_img(event_file,clobber=True)\n \n fits = pyfits.open('temp.fits')\n \n #Previously used the RA and DEC headers to find the centre, now trying a more nuanced\n #max pixel value method\n \n #source_ra = fits[1].header['RA_TARG']\n #source_dec = fits[1].header['DEC_TARG']\n \n #return source_ra,source_dec\n \n data = fits[0].data\n \n #As the data from make_img is 1024x1024 based on the centre of the image, use modulo\n #arithmetic to find the physical x and y coordinates\n \n argmax = np.argmax(data)\n \n x = argmax%1024 + 3584\n y = int(argmax/1024) + 3584\n \n return x,y",
"def mcentroid(xarr, yarr, kern=default_kernal, xc=None, xdiff=None):\n\n if xdiff is None:\n xdiff = len(kern)\n\n if xdiff < len(kern):\n xdiff = len(kern)\n\n\n if xc is not None and xdiff:\n mask = (abs(xarr - xc) < xdiff)\n else:\n mask = np.ones(len(xarr), dtype=bool)\n\n # convle the input array with the default kernal\n warr = np.convolve(yarr[mask], kern, mode='same')\n\n # interpolate the results\n # imask is used to make sure we are only gettin the\n # center pixels\n imask = (abs(xarr[mask]-xarr[mask].mean()) < 3)\n cx = np.interp(0, warr[imask], xarr[mask][imask])\n return cx",
"def get_center(self):\n return center_points(np.expand_dims(self.best_box, axis=0))[0]",
"def centroid_1D(image, xpeak, xhw, debug=False):\n \n # Collapse input image unto x axis\n vector = np.sum(image, axis=0)\n \n c_sum = 0.0\n xcen = 0.0\n \n for ii in xrange(int(xpeak - xhw - 1), int(xpeak + xhw - 1)):\n c_sum = c_sum + vector[ii]\n xloc = ii + 1\n xcen += xloc * vector[ii]\n \n print('(centroid_1D): Sum = ', c_sum)\n \n \n if c_sum == 0:\n print('(centroid_1D): ERROR - divide by zero')\n else:\n xcen /= c_sum\n \n print('(centroid_1D): Centroid = ', xcen-1)\n \n # -1 on both axes, as Python is 0 major \n return xcen-1, c_sum",
"def centroid(arr):\n l = arr.shape[0]\n ixs = np.arange(l)\n arr = arr - np.median(arr)\n arr = np.where(arr < 0, 0, arr) \n ixs2 = ixs * ixs\n sumarr = arr.sum()\n cen = np.dot(arr, ixs)/sumarr\n return cen, math.sqrt(np.dot(arr, ixs2)/sumarr - cen * cen)",
"def findCentroid(img, file):\n \n print(file)\n \n # convert the image to grayscale\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n # find the number of rows and columns of the image \n img_lin = img.shape[0]\n img_col = img.shape[1]\n \n # to find the average of half of the image \n img_mean = np.uint8(np.mean(img[0:700,:]))\n \n threshold_img = np.zeros_like(img)\n \n # we set a threshold to detect the fly's body at 70% of the average\n for i in range(1,img_lin): \n for j in range(1,img_col):\n if img[i,j] <= img_mean*0.70:\n threshold_img[i,j] = 255\n \n clean_threshold = threshold_img\n\n # erosion applied to remove unwanted details, like the lanes borders \n kernel = np.ones((3,3), np.uint8)\n img_erosion = cv2.erode(threshold_img, kernel, iterations=5)\n\n clean_erosion = img_erosion\n \n wings_img = np.zeros_like(img)\n\n # thresholding to detect the fly's body with wings \n for i in range(1,img_lin): \n for j in range(1,img_col):\n if img[i,j] <= img_mean*0.90 and img[i,j] >= img_mean*0.50:\n wings_img[i,j] = 255\n \n clean_wings = wings_img\n \n # erosion and dilation to the the fly's body and wings \n wings_erosion = cv2.erode(wings_img, kernel, iterations=2)\n clean_wings_erosion = wings_erosion\n wings_erode_dilate = cv2.dilate(clean_erosion, kernel, iterations=10)\n \n final_img = np.zeros_like(img)\n\n for i in range(1,img_lin): \n for j in range(1,img_col):\n if wings_erode_dilate[i,j] == 255 and clean_wings[i,j] == 255 :\n final_img[i,j] = 255\n \n # final image with only the fly's wings\n final_img = final_img - clean_threshold\n \n img_sample = img_erosion\n\n # the centroid detection by using connected components \n output = cv2.connectedComponentsWithStats(img_sample, 4, cv2.CV_32S) \n \n x_centroid = int(output[3][1][0])\n y_centroid = int(output[3][1][1])\n \n remove_value = False\n add_value = False\n \n # we segment the image in two, based on the location of the centroid\n # we take a small square of pixels, to have a more precise detection\n # this squre is 55 x 55 pixels around the centroid \n # (if the fly is not at the border)\n if x_centroid-55 > 0:\n part_left = final_img[30:100,(x_centroid-55):x_centroid]\n part_left_track = img_erosion[30:100,(x_centroid-55):x_centroid]\n remove_value = True\n else:\n part_left = final_img[30:100,:x_centroid]\n part_left_track = img_erosion[30:100,:x_centroid]\n \n if x_centroid+55 < len(final_img):\n part_right = final_img[30:100,x_centroid:x_centroid+55]\n part_right_track = img_erosion[30:100,x_centroid:x_centroid+55]\n add_value = True\n else:\n part_right = final_img[30:100,x_centroid:]\n part_right_track = img_erosion[30:100,x_centroid:]\n \n axis_left_x = part_left_track.shape[0] \n axis_left_y = part_left_track.shape[1]\n \n # we count the number of white pixels in the left part of the image\n white_left = 0\n \n for i in range(axis_left_x): \n for j in range(axis_left_y):\n if part_left[i,j] >= 50 :\n white_left = white_left + 1\n \n print('Part left scored : ' + str(white_left) + ' white pixels.')\n \n axis_right_x = part_right_track.shape[0]\n axis_right_y = part_right_track.shape[1]\n\n # we count the number of white pixels in the right part of the image\n white_right = 0\n \n for i in range(axis_right_x): \n for j in range(axis_right_y):\n if part_right[i,j] >= 50 :\n white_right = white_right + 1\n \n print('Part right scored : ' + str(white_right) + ' white pixels.')\n \n x_head = 0\n y_head = 0\n \n # the part having the smallest number of white pixels corresponds to the head\n \n if white_left < 
white_right:\n print('Head is in part left')\n for i in range(0,axis_left_x): \n for j in range(0,axis_left_y):\n if part_left_track[i,j] == 255 and x_head == 0:\n \n if remove_value:\n x_head = i+x_centroid-55\n else:\n x_head = i+x_centroid\n\n y_head = j\n print(\"head is in part left : \"+ str(np.array([i,j])))\n else:\n axis_x = part_right_track.shape[1]\n axis_y = part_right_track.shape[0]\n for i in reversed(range(0,axis_x)): \n for j in reversed(range(0,axis_y)):\n if part_right_track[j,i] == 255 and x_head == 0:\n \n if add_value:\n x_head = i+x_centroid+55\n else:\n x_head = i+x_centroid\n \n x_head = i+x_centroid\n y_head = j\n print(\"head is in part right : \"+ str(np.array([i,j])))\n \n return x_centroid, y_centroid, x_head, y_head",
"def calc_centroid(self):\n num = 0\n centroid = numpy.zeros(3, float)\n for atm in self:\n if atm.position is not None:\n centroid += atm.position\n num += 1\n return centroid / num",
"def calculate_fiber_center(self, x, y, crop_size=15):\n self.logger.info(f'Calculating fiber center using ({x}, {y})')\n image = np.copy(self.camera_fiber.temp_image)\n self.fiber_center_position = self.calculate_gaussian_centroid(image, x, y, crop_size)\n return [x,y] #m",
"def set_centroid_2(self):\n if not self.centroid_2_active:\n self.centroid_2_active = True\n \n self.centroid_2 = self.current_centroid",
"def compute_center(self, mole_object):\r\n if mole_object.plugin_type == \"PyMOL\":\r\n sel = PymolPlugin.PymolPlugin().get_model('all')\r\n cnt = len(sel.atom)\r\n\r\n else:\r\n sel = ChimeraPlugin.ChimeraPlugin().select()\r\n cnt = len(ChimeraPlugin.ChimeraPlugin().current_atoms())\r\n\r\n cent_x = 0\r\n cent_y = 0\r\n cent_z = 0\r\n\r\n if cnt == 0:\r\n return 0, 0, 0\r\n\r\n if mole_object.plugin_type == \"PyMOL\":\r\n\r\n for a in sel.atom:\r\n cent_x += a.coord[0]\r\n cent_y += a.coord[1]\r\n cent_z += a.coord[2]\r\n\r\n else:\r\n\r\n for a in ChimeraPlugin.ChimeraPlugin().current_atoms():\r\n cent_x += a.coord()[0]\r\n cent_y += a.coord()[1]\r\n cent_z += a.coord()[2]\r\n\r\n cent_x /= cnt\r\n cent_y /= cnt\r\n cent_z /= cnt\r\n\r\n self.point_x.component('entryfield').setentry(cent_x)\r\n self.point_y.component('entryfield').setentry(cent_y)\r\n self.point_z.component('entryfield').setentry(cent_z)\r\n\r\n self.show_crisscross(mole_object)",
"def set_centroid_1(self):\n if not self.centroid_1_active:\n self.centroid_1_active = True\n \n self.centroid_1 = self.current_centroid"
] | [
"0.63599515",
"0.63562965",
"0.62887686",
"0.62374955",
"0.6117501",
"0.60575074",
"0.60160685",
"0.59349656",
"0.5921528",
"0.59124064",
"0.5907163",
"0.5869538",
"0.58653986",
"0.57672626",
"0.57584727",
"0.5742122",
"0.57366914",
"0.57219857",
"0.57219857",
"0.5709737",
"0.57090646",
"0.568401",
"0.56743294",
"0.5651538",
"0.56481165",
"0.5625685",
"0.5619786",
"0.5612849",
"0.5610503",
"0.5592695"
] | 0.76711303 | 0 |
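
The checkbox searches above (checkbox_1D / checkbox_2D) scan every candidate window with explicit Python loops. Below is a minimal vectorized sketch of the same brightest-window search using NumPy cumulative sums; the function name brightest_checkbox and its 0-based return convention are illustrative assumptions, not part of the snippets above.

import numpy as np

def brightest_checkbox(image, checkbox):
    # Sketch only: locate the center of the brightest checkbox-sized window
    # with a 2D cumulative sum instead of nested loops (0-based coordinates,
    # unlike the 1-based peaks returned by checkbox_2D above).
    img = np.asarray(image, dtype=float)
    c = np.zeros((img.shape[0] + 1, img.shape[1] + 1))
    c[1:, 1:] = img.cumsum(axis=0).cumsum(axis=1)
    k = checkbox
    # Sum of every k x k window, shape (ysize - k + 1, xsize - k + 1).
    windows = c[k:, k:] - c[:-k, k:] - c[k:, :-k] + c[:-k, :-k]
    jj, ii = np.unravel_index(np.argmax(windows), windows.shape)
    chw = (k - 1) / 2
    return ii + chw, jj + chw

Each window total then costs a constant number of array lookups rather than a full k x k sum.
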
Calculate the higher moments of the object in the image. Find the normalized squared and cubed moments with reference to an origin at the centroid, using the centroid and sum values calculated previously. | def find2D_higher_moments(image, centroid, halfwidths, c_sum):
    # Unpack centroid to separate values
xcen, ycen = np.floor(centroid)
xhw, yhw = halfwidths
xmoment2 = 0
xmoment3 = 0
ymoment2 = 0
ymoment3 = 0
# Set up x and y centroid scanning ranges
x_range = np.array((np.floor(xcen - xhw) - 1, np.ceil(xcen + xhw) - 1))
y_range = np.array((np.floor(ycen - yhw) - 1, np.ceil(ycen + yhw) - 1))
    for ii in range(int(x_range[0]), int(x_range[1])):
        for jj in range(int(y_range[0]), int(y_range[1])):
xloc = ii - np.floor(xcen)
yloc = jj - np.floor(ycen)
xweight = 0
yweight = 0
xoff = np.abs(ii - xcen)
yoff = np.abs(jj - ycen)
if xoff <= xhw:
xweight = 1
elif xhw < xoff < (xhw + 1):
xweight = xhw + 1 - xoff
if yoff <= yhw:
yweight = 1
elif yhw < yoff < (yhw + 1):
yweight = yhw + 1 - yoff
weight = xweight * yweight
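            # Accumulate flux-weighted 2nd and 3rd central moments; the combined
            # weight tapers pixels that fall only partially inside the centroid box.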
xmoment2 += xloc ** 2 * image[jj, ii] * weight
xmoment3 += xloc ** 3 * image[jj, ii] * weight
ymoment2 += yloc ** 2 * image[jj, ii] * weight
ymoment3 += yloc ** 3 * image[jj, ii] * weight
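    # Normalize by the flux sum within the checkbox so the moments are
    # independent of the overall source brightness.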
xmoment2 = xmoment2 / c_sum
xmoment3 = xmoment3 / c_sum
ymoment2 = ymoment2 / c_sum
ymoment3 = ymoment3 / c_sum
# Pack the x and y moments to return to main program
x_moment = np.array((xmoment2, xmoment3))
y_moment = np.array((ymoment2, ymoment3))
return x_moment, y_moment | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find1D_higher_moments(image, xcen, xhw, c_sum):\n \n # Collapse input image unto x axis\n vector = np.sum(image, axis=0)\n \n xmoment2 = 0.0\n xmoment3 = 0.0\n \n # Set up x and y centroid scanning ranges\n x_range = np.array((np.floor(xcen - xhw) - 1, np.ceil(xcen + xhw) - 1))\n\n for ii in xrange(np.int(x_range[0]), np.int(x_range[1])):\n xloc = (ii + 1) - np.floor(xcen)\n \n xweight = 0\n xoff = np.abs(ii - xcen)\n \n if xoff <= xhw:\n xweight = 0\n elif xhw < xoff < xhw + 1:\n xweight = xhw + 1 - xoff\n\n xmoment2 += xloc ** 2 * vector[ii] * xweight\n xmoment3 += xloc ** 3 * vector[ii] * xweight\n \n xmoment2 = xmoment2 / c_sum\n xmoment3 = xmoment3 / c_sum\n \n # Pack moments for return to main program\n x_mom = np.array((xmoment2, xmoment3))\n \n return x_mom",
"def moments(data):\n# =============================================================================\n# total = data.sum()\n# X, Y = np.indices(data.shape)\n# x = (X*data).sum()/total\n# y = (Y*data).sum()/total\n# col = data[:, int(y)]\n# \n# width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n# \n# row = data[int(x), :]\n# width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n# height = data.max()\n# height1 = height\n# =============================================================================\n return(1, 15, 14, 3, 3, 1, 14, 16, 3, 2)",
"def measure_image_moments(image):\n data = image.quantity\n\n coords = image.geom.get_coord().skycoord\n x, y = coords.data.lon.wrap_at(\"180d\"), coords.data.lat\n\n A = data[np.isfinite(data)].sum()\n\n # Center of mass\n x_cms = (x * data)[np.isfinite(data)].sum() / A\n y_cms = (y * data)[np.isfinite(data)].sum() / A\n\n # Second moments\n x_var = ((x - x_cms) ** 2 * data)[np.isfinite(data)].sum() / A\n y_var = ((y - y_cms) ** 2 * data)[np.isfinite(data)].sum() / A\n x_sigma = np.sqrt(x_var)\n y_sigma = np.sqrt(y_var)\n\n return A, x_cms, y_cms, x_sigma, y_sigma, np.sqrt(x_sigma * y_sigma)",
"def calculateCentroid(self,image):\n\t\tim=cv2.imread(image,0) #reads it in greyscale\n\t\tret,thresh = cv2.threshold(img_copy,128,255,cv2.THRESH_OTSU)\n\t\tim2,contours,hierarchy = cv2.findContours(thresh, 1, 2)\n\t\tcnt = contours[0]\n\t\tM = cv2.moments(cnt)\n\t\tcx = int(M['m10']/M['m00'])\n\t\tcy = int(M['m01']/M['m00'])\n\t\tcentroid=(cx,cy)\n\t\treturn centroid",
"def moments(self):",
"def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = int((X*data).sum()/total)\n y = int((Y*data).sum()/total)\n col = data[:, int(y)]\n \n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n \n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return(height, x, y, width_x, width_y, 0.0)\n #return(1, 15, 15, 2, 2, 0.0)",
"def centroidFloat(cnt):\n M = cv2.moments(cnt)\n cx = M['m10']/M['m00']\n\tcy = M['m01']/M['m00']\n\treturn (cx, cy)",
"def moments(data):\n total = data.sum()\n if total != 0.:\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n else:\n height=0\n x=0\n y=0\n width_x=0\n width_y=0\n return height,np.sqrt(width_x**2 + width_y**2)",
"def computeX0 (self):\n self.m_x0 = np.sum(self.m_arr, axis=0)\n \"\"\" Subtract the point for which f(x) is max \"\"\"\n self.m_x0 -= self.m_arr[self.m_sorted[-1], :]\n \"\"\" Compute average \"\"\"\n self.m_x0 /= self.m_dim\n _debugPrint(\"Centroid: %s\" %self.m_x0)",
"def complex2ndMoments(data=None,sigma=None):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol))\n maxItr = 50\n EP = 0.0001\n for i in range(maxItr):\n wrmat = wr(ROW,COL,rowmean,colmean,sigma)\n IWmat = data*wrmat\n IWcol = IWmat.sum(axis=0)\n IWrow = IWmat.sum(axis=1)\n IWsum = IWmat.sum()\n drowmean = np.sum((rowgrid-rowmean)*IWrow)/IWsum\n dcolmean = np.sum((colgrid-colmean)*IWcol)/IWsum\n rowmean = rowmean+2.*drowmean\n colmean = colmean+2.*dcolmean\n if drowmean**2+dcolmean**2 <= EP:\n break\n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mr = np.sum(rowgrid*IWrow)/IWsum\n Mc = np.sum(colgrid*IWcol)/IWsum\n Mrr = np.sum(rowgrid**2*IWrow)/IWsum\n Mcc = np.sum(colgrid**2*IWcol)/IWsum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*IWmat)/IWsum\n Cm = np.matrix([[Mcc,Mrc],[Mrc,Mrr]])\n Cw = np.matrix([[sigma**2,0.],[0.,sigma**2]])\n Cimg = (Cm.I - Cw.I).I\n Mcc = Cimg[0,0]\n Mrr = Cimg[1,1]\n Mrc = Cimg[0,1]\n #M20 = Mrr + Mcc\n #M22 = complex(Mcc - Mrr,2*Mrc)\n return Mcc, Mrr, Mrc",
"def moments(cnt):\n\treturn cv2.moments(cnt)",
"def moments(cnt):\n\treturn cv2.moments(cnt)",
"def get_centroid(image, method=\"propio\"):\n # ---------Método Propio (directo de la definición)----------\n if method == \"propio\":\n # Dimensiones\n height, width = image.shape[:2]\n # Masa total\n total_mass = image.sum()\n\n # Si la masa total es cero, entonces el centro de masa \n # no existe\n if total_mass == 0:\n r = np.array([-1, -1])\n return r, None\n\n # Primera componente (suma por filas)\n row_sum = image.sum(axis=1)\n row_weight = np.arange(1, height+1)\n r_i = np.dot(row_sum, row_weight)\n r_i /= total_mass\n r_i = int(r_i)\n \n # Segunda componente (suma por columnas)\n column_sum = image.sum(axis=0)\n column_weight = np.arange(1, width+1)\n r_j = np.dot(column_sum, column_weight)\n r_j /= total_mass\n r_j = int(r_j)\n\n # Retorna el centroide en coordenadas de imagen\n r = np.array([r_j, r_i])\n return r, None\n\n # ---------Método con contornos-----------------\n else:\n # Obtener contorno imagen binaria (máscara)\n cnts = get_contours(image)\n \n # Para cada contorno, obtener el centroide y añadirlo a lista\n r = []\n for c in cnts:\n M = cv2.moments(c)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n r.append(np.array([cX, cY]))\n\n # Ahora se retorna una lista con centroides (según la \n # cantidad de contornos que se hayan encontrado)\n if len(r) == 0:\n r.append(np.array([-1, -1]))\n return r, cnts\n else:\n return r, cnts",
"def moments(self, data):\n total = data.sum()\n X, Y = indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = sqrt(abs((arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = sqrt(abs((arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return x, y, width_x, width_y, height",
"def adaptiveCentroid(data=None,sigma=None):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol))\n maxItr = 50\n EP = 0.0001\n for i in range(maxItr):\n wrmat = wr(ROW,COL,rowmean,colmean,sigma)\n IWmat = data*wrmat\n IWcol = IWmat.sum(axis=0)\n IWrow = IWmat.sum(axis=1)\n drowmean = np.sum((rowgrid-rowmean)*IWrow)/np.sum(IWrow)\n dcolmean = np.sum((colgrid-colmean)*IWcol)/np.sum(IWcol)\n rowmean = rowmean+2.*drowmean\n colmean = colmean+2.*dcolmean\n if drowmean**2+dcolmean**2 <= EP:\n break\n\n return rowmean,colmean",
"def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y",
"def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y",
"def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-x)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-y)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y",
"def centroid(arr):\n l = arr.shape[0]\n ixs = np.arange(l)\n arr = arr - np.median(arr)\n arr = np.where(arr < 0, 0, arr) \n ixs2 = ixs * ixs\n sumarr = arr.sum()\n cen = np.dot(arr, ixs)/sumarr\n return cen, math.sqrt(np.dot(arr, ixs2)/sumarr - cen * cen)",
"def complexMoments(data=None,sigma=None):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol))\n maxItr = 50\n EP = 0.0001\n for i in range(maxItr):\n wrmat = wr(ROW,COL,rowmean,colmean,sigma)\n IWmat = data*wrmat\n IWcol = IWmat.sum(axis=0)\n IWrow = IWmat.sum(axis=1)\n IWsum = IWmat.sum()\n drowmean = np.sum((rowgrid-rowmean)*IWrow)/IWsum\n dcolmean = np.sum((colgrid-colmean)*IWcol)/IWsum\n rowmean = rowmean+2.*drowmean\n colmean = colmean+2.*dcolmean\n if drowmean**2+dcolmean**2 <= EP:\n break\n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mr = np.sum(rowgrid*IWrow)/IWsum\n Mc = np.sum(colgrid*IWcol)/IWsum\n Mrr = np.sum(rowgrid**2*IWrow)/IWsum\n Mcc = np.sum(colgrid**2*IWcol)/IWsum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*IWmat)/IWsum\n Mrrr = np.sum(rowgrid**3*IWrow)/IWsum\n Mccc = np.sum(colgrid**3*IWcol)/IWsum\n Mrrc = np.sum(np.outer(rowgrid**2,colgrid)*IWmat)/IWsum\n Mrcc = np.sum(np.outer(rowgrid,colgrid**2)*IWmat)/IWsum\n M20 = Mrr + Mcc\n M22 = complex(Mcc - Mrr,2*Mrc)\n M31 = complex(3*Mc - (Mccc+Mrrc)/sigma**2, 3*Mr - (Mrcc + Mrrr)/sigma**2)\n M33 = complex(Mccc-3*Mrrc, 3.*Mrcc - Mrrr)\n return M20, M22, M31, M33",
"def calc_centroid(self):\n num = 0\n centroid = numpy.zeros(3, float)\n for atm in self:\n if atm.position is not None:\n centroid += atm.position\n num += 1\n return centroid / num",
"def compute_center(self, mole_object):\r\n if mole_object.plugin_type == \"PyMOL\":\r\n sel = PymolPlugin.PymolPlugin().get_model('all')\r\n cnt = len(sel.atom)\r\n\r\n else:\r\n sel = ChimeraPlugin.ChimeraPlugin().select()\r\n cnt = len(ChimeraPlugin.ChimeraPlugin().current_atoms())\r\n\r\n cent_x = 0\r\n cent_y = 0\r\n cent_z = 0\r\n\r\n if cnt == 0:\r\n return 0, 0, 0\r\n\r\n if mole_object.plugin_type == \"PyMOL\":\r\n\r\n for a in sel.atom:\r\n cent_x += a.coord[0]\r\n cent_y += a.coord[1]\r\n cent_z += a.coord[2]\r\n\r\n else:\r\n\r\n for a in ChimeraPlugin.ChimeraPlugin().current_atoms():\r\n cent_x += a.coord()[0]\r\n cent_y += a.coord()[1]\r\n cent_z += a.coord()[2]\r\n\r\n cent_x /= cnt\r\n cent_y /= cnt\r\n cent_z /= cnt\r\n\r\n self.point_x.component('entryfield').setentry(cent_x)\r\n self.point_y.component('entryfield').setentry(cent_y)\r\n self.point_z.component('entryfield').setentry(cent_z)\r\n\r\n self.show_crisscross(mole_object)",
"def moments3d(data):\n # Find total for all values in the data.\n total = data.sum()\n \n # Make index matrices.\n Z, X, Y = np.indices(data.shape)\n \n # Find mean positions in each dimension by weighted average (weight is intensity, index is position)\n z = (Z*data).sum()/total\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n \n # Estimate width in each dimension. Procedure is to fix the other two dimensions at their mean\n # and retrieve a single column in the dimension of interest through the peak. Visually, in a Z-\n # stack you would determine the X,Y position of the center of the peak, then we're drawing a line\n # (or a bar) in Z through that point. This becomes a simple 1D profile of intensity as a function\n # of Z position. Standard deviation of this 1D vector about z (mean Z position) is computed.\n z_col = data[:, int(x), int(y)] #single column through z with x and y fixed at their means.\n width_z = np.sqrt(np.abs((np.arange(z_col.size)-z)**2*z_col).sum()/z_col.sum())\n x_col = data[int(z), :, int(y)] #single column through x with z and y fixed at their means.\n width_x = np.sqrt(np.abs((np.arange(x_col.size)-x)**2*x_col).sum()/x_col.sum())\n y_col = data[int(z), int(x), :] #single column through y with z and x fixed at their means.\n width_y = np.sqrt(np.abs((np.arange(y_col.size)-y)**2*y_col).sum()/y_col.sum())\n \n # Estimator height from max value.\n height = data.max()\n return z, x, y, height, width_z, width_x, width_y",
"def __CalculateCentroid(self, contour):\r\n moments = cv2.moments(contour)\r\n\r\n centroid = (-1, -1)\r\n if moments[\"m00\"] != 0:\r\n centroid = (int(round(moments[\"m10\"] / moments[\"m00\"])),\r\n int(round(moments[\"m01\"] / moments[\"m00\"])))\r\n\r\n return centroid",
"def moments(data):\n total = data.sum()\n X, Y = indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = sqrt(abs((arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = sqrt(abs((arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y",
"def moments2nd(data):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol)) \n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mrr = np.sum(rowgrid**2*data)/Isum\n Mcc = np.sum(colgrid**2*data)/Isum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*data)/Isum \n return Mcc, Mrr, Mrc",
"def centroid(self, method: str = 'median') -> Vector:\n\n\t\toriginal_origins = deepcopy(self.origin)\n\t\tself.origin_to_centroid(method=method)\n\t\tcentroid = deepcopy(self.origin)\n\t\tself.origin = original_origins\n\n\t\tif len(self._meshes) > 1:\n\t\t\treturn sum(centroid) / len(self._meshes)\n\n\t\treturn centroid",
"def moments(data):\n height = data.max()\n background = data.min()\n data = data - np.min(data)\n total = data.sum()\n x, y = np.indices(data.shape)\n x = (x * data).sum() / total\n y = (y * data).sum() / total\n col = data[:, int(y)]\n width_x = np.sqrt(abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum())\n width_x /= gaussian_sigma_to_fwhm\n width_y /= gaussian_sigma_to_fwhm\n return {\n \"amplitude\": height,\n \"x\": x,\n \"y\": y,\n \"sigma_x\": width_x,\n \"sigma_y\": width_y,\n \"background\": background,\n \"theta\": 0.0,\n }",
"def moments(data):\n\n data = np.absolute(data)\n total = data.sum()\n X = np.indices(data.shape)\n x = (X*data).sum()/total\n width = np.sqrt((((X-x)**2)*data).sum()/data.sum())\n m_max = data.max()\n m_min = data.min()\n if np.absolute(m_max) >= np.absolute(m_min):\n height = m_max\n else:\n height = m_min\n return height, x, width",
"def centroid(sign, FS):\n\n time = compute_time(sign, FS)\n\n energy, time_energy=signal_energy(sign, time)\n\n total_energy = np.dot(np.array(time_energy),np.array(energy))\n energy_sum = np.sum(energy)\n\n if energy_sum == 0 or total_energy == 0:\n centroid = 0\n else:\n centroid = total_energy / energy_sum\n return centroid"
] | [
"0.6681578",
"0.600257",
"0.5924344",
"0.5806046",
"0.578322",
"0.5735648",
"0.5720438",
"0.57188725",
"0.5675963",
"0.56692505",
"0.56374645",
"0.56374645",
"0.5587799",
"0.5568614",
"0.5558758",
"0.55336404",
"0.55336404",
"0.55277026",
"0.5523867",
"0.5521471",
"0.5500428",
"0.5488313",
"0.5476658",
"0.54598796",
"0.54582715",
"0.5452019",
"0.54518193",
"0.54449433",
"0.54182684",
"0.5416011"
] | 0.74053806 | 0 |
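
For comparison with find2D_higher_moments, a compact NumPy formulation of the same flux-weighted second and third central moments is sketched below. It applies full weight to every pixel (no fractional tapering at the box edges) and uses the whole image, so it is an illustration rather than a drop-in replacement; the name central_moments_2d is an assumption.

import numpy as np

def central_moments_2d(image, xcen, ycen, c_sum):
    # Sketch: flux-weighted 2nd and 3rd central moments about (xcen, ycen),
    # normalized by the previously computed checkbox sum c_sum.
    ny, nx = image.shape
    X, Y = np.meshgrid(np.arange(nx) - xcen, np.arange(ny) - ycen)
    x_moment = np.array((np.sum(X ** 2 * image), np.sum(X ** 3 * image))) / c_sum
    y_moment = np.array((np.sum(Y ** 2 * image), np.sum(Y ** 3 * image))) / c_sum
    return x_moment, y_moment
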
Calculate the higher moments of the object in the image. Find the normalized squared and cubed moments with reference to an origin at the centroid, using the centroid and sum values calculated previously. | def find1D_higher_moments(image, xcen, xhw, c_sum):
    # Collapse the input image onto the x axis
vector = np.sum(image, axis=0)
xmoment2 = 0.0
xmoment3 = 0.0
# Set up x and y centroid scanning ranges
x_range = np.array((np.floor(xcen - xhw) - 1, np.ceil(xcen + xhw) - 1))
    for ii in range(int(x_range[0]), int(x_range[1])):
xloc = (ii + 1) - np.floor(xcen)
xweight = 0
xoff = np.abs(ii - xcen)
if xoff <= xhw:
            xweight = 1  # full weight for pixels wholly inside the half-width window
elif xhw < xoff < xhw + 1:
xweight = xhw + 1 - xoff
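        # Accumulate flux-weighted moments; xweight tapers pixels that fall
        # only partially inside the half-width window.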
xmoment2 += xloc ** 2 * vector[ii] * xweight
xmoment3 += xloc ** 3 * vector[ii] * xweight
xmoment2 = xmoment2 / c_sum
xmoment3 = xmoment3 / c_sum
# Pack moments for return to main program
x_mom = np.array((xmoment2, xmoment3))
return x_mom | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find2D_higher_moments(image, centroid, halfwidths, c_sum):\n \n # Unpack centroid to seperate values\n xcen, ycen = np.floor(centroid)\n xhw, yhw = halfwidths\n \n xmoment2 = 0\n xmoment3 = 0\n ymoment2 = 0\n ymoment3 = 0\n \n # Set up x and y centroid scanning ranges\n x_range = np.array((np.floor(xcen - xhw) - 1, np.ceil(xcen + xhw) - 1))\n y_range = np.array((np.floor(ycen - yhw) - 1, np.ceil(ycen + yhw) - 1))\n \n \n for ii in xrange(np.int(x_range[0]), np.int(x_range[1])):\n for jj in xrange(np.int(y_range[0]), np.int(y_range[1])):\n \n xloc = ii - np.floor(xcen)\n yloc = jj - np.floor(ycen)\n \n xweight = 0\n yweight = 0\n \n xoff = np.abs(ii - xcen)\n yoff = np.abs(jj - ycen)\n \n if xoff <= xhw:\n xweight = 1\n elif xhw < xoff < (xhw + 1):\n xweight = xhw + 1 - xoff\n \n if yoff <= yhw:\n yweight = 1\n elif yhw < yoff < (yhw + 1):\n yweight = yhw + 1 - yoff\n \n weight = xweight * yweight\n\n xmoment2 += xloc ** 2 * image[jj, ii] * weight\n xmoment3 += xloc ** 3 * image[jj, ii] * weight\n ymoment2 += yloc ** 2 * image[jj, ii] * weight\n ymoment3 += yloc ** 3 * image[jj, ii] * weight\n \n xmoment2 = xmoment2 / c_sum\n xmoment3 = xmoment3 / c_sum\n ymoment2 = ymoment2 / c_sum\n ymoment3 = ymoment3 / c_sum\n\n # Pack the x and y moments to return to main program\n x_moment = np.array((xmoment2, xmoment3))\n y_moment = np.array((ymoment2, ymoment3))\n \n return x_moment, y_moment",
"def moments(data):\n# =============================================================================\n# total = data.sum()\n# X, Y = np.indices(data.shape)\n# x = (X*data).sum()/total\n# y = (Y*data).sum()/total\n# col = data[:, int(y)]\n# \n# width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n# \n# row = data[int(x), :]\n# width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n# height = data.max()\n# height1 = height\n# =============================================================================\n return(1, 15, 14, 3, 3, 1, 14, 16, 3, 2)",
"def measure_image_moments(image):\n data = image.quantity\n\n coords = image.geom.get_coord().skycoord\n x, y = coords.data.lon.wrap_at(\"180d\"), coords.data.lat\n\n A = data[np.isfinite(data)].sum()\n\n # Center of mass\n x_cms = (x * data)[np.isfinite(data)].sum() / A\n y_cms = (y * data)[np.isfinite(data)].sum() / A\n\n # Second moments\n x_var = ((x - x_cms) ** 2 * data)[np.isfinite(data)].sum() / A\n y_var = ((y - y_cms) ** 2 * data)[np.isfinite(data)].sum() / A\n x_sigma = np.sqrt(x_var)\n y_sigma = np.sqrt(y_var)\n\n return A, x_cms, y_cms, x_sigma, y_sigma, np.sqrt(x_sigma * y_sigma)",
"def calculateCentroid(self,image):\n\t\tim=cv2.imread(image,0) #reads it in greyscale\n\t\tret,thresh = cv2.threshold(img_copy,128,255,cv2.THRESH_OTSU)\n\t\tim2,contours,hierarchy = cv2.findContours(thresh, 1, 2)\n\t\tcnt = contours[0]\n\t\tM = cv2.moments(cnt)\n\t\tcx = int(M['m10']/M['m00'])\n\t\tcy = int(M['m01']/M['m00'])\n\t\tcentroid=(cx,cy)\n\t\treturn centroid",
"def moments(self):",
"def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = int((X*data).sum()/total)\n y = int((Y*data).sum()/total)\n col = data[:, int(y)]\n \n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n \n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return(height, x, y, width_x, width_y, 0.0)\n #return(1, 15, 15, 2, 2, 0.0)",
"def centroidFloat(cnt):\n M = cv2.moments(cnt)\n cx = M['m10']/M['m00']\n\tcy = M['m01']/M['m00']\n\treturn (cx, cy)",
"def moments(data):\n total = data.sum()\n if total != 0.:\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n else:\n height=0\n x=0\n y=0\n width_x=0\n width_y=0\n return height,np.sqrt(width_x**2 + width_y**2)",
"def computeX0 (self):\n self.m_x0 = np.sum(self.m_arr, axis=0)\n \"\"\" Subtract the point for which f(x) is max \"\"\"\n self.m_x0 -= self.m_arr[self.m_sorted[-1], :]\n \"\"\" Compute average \"\"\"\n self.m_x0 /= self.m_dim\n _debugPrint(\"Centroid: %s\" %self.m_x0)",
"def complex2ndMoments(data=None,sigma=None):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol))\n maxItr = 50\n EP = 0.0001\n for i in range(maxItr):\n wrmat = wr(ROW,COL,rowmean,colmean,sigma)\n IWmat = data*wrmat\n IWcol = IWmat.sum(axis=0)\n IWrow = IWmat.sum(axis=1)\n IWsum = IWmat.sum()\n drowmean = np.sum((rowgrid-rowmean)*IWrow)/IWsum\n dcolmean = np.sum((colgrid-colmean)*IWcol)/IWsum\n rowmean = rowmean+2.*drowmean\n colmean = colmean+2.*dcolmean\n if drowmean**2+dcolmean**2 <= EP:\n break\n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mr = np.sum(rowgrid*IWrow)/IWsum\n Mc = np.sum(colgrid*IWcol)/IWsum\n Mrr = np.sum(rowgrid**2*IWrow)/IWsum\n Mcc = np.sum(colgrid**2*IWcol)/IWsum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*IWmat)/IWsum\n Cm = np.matrix([[Mcc,Mrc],[Mrc,Mrr]])\n Cw = np.matrix([[sigma**2,0.],[0.,sigma**2]])\n Cimg = (Cm.I - Cw.I).I\n Mcc = Cimg[0,0]\n Mrr = Cimg[1,1]\n Mrc = Cimg[0,1]\n #M20 = Mrr + Mcc\n #M22 = complex(Mcc - Mrr,2*Mrc)\n return Mcc, Mrr, Mrc",
"def moments(cnt):\n\treturn cv2.moments(cnt)",
"def moments(cnt):\n\treturn cv2.moments(cnt)",
"def get_centroid(image, method=\"propio\"):\n # ---------Método Propio (directo de la definición)----------\n if method == \"propio\":\n # Dimensiones\n height, width = image.shape[:2]\n # Masa total\n total_mass = image.sum()\n\n # Si la masa total es cero, entonces el centro de masa \n # no existe\n if total_mass == 0:\n r = np.array([-1, -1])\n return r, None\n\n # Primera componente (suma por filas)\n row_sum = image.sum(axis=1)\n row_weight = np.arange(1, height+1)\n r_i = np.dot(row_sum, row_weight)\n r_i /= total_mass\n r_i = int(r_i)\n \n # Segunda componente (suma por columnas)\n column_sum = image.sum(axis=0)\n column_weight = np.arange(1, width+1)\n r_j = np.dot(column_sum, column_weight)\n r_j /= total_mass\n r_j = int(r_j)\n\n # Retorna el centroide en coordenadas de imagen\n r = np.array([r_j, r_i])\n return r, None\n\n # ---------Método con contornos-----------------\n else:\n # Obtener contorno imagen binaria (máscara)\n cnts = get_contours(image)\n \n # Para cada contorno, obtener el centroide y añadirlo a lista\n r = []\n for c in cnts:\n M = cv2.moments(c)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n r.append(np.array([cX, cY]))\n\n # Ahora se retorna una lista con centroides (según la \n # cantidad de contornos que se hayan encontrado)\n if len(r) == 0:\n r.append(np.array([-1, -1]))\n return r, cnts\n else:\n return r, cnts",
"def moments(self, data):\n total = data.sum()\n X, Y = indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = sqrt(abs((arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = sqrt(abs((arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return x, y, width_x, width_y, height",
"def adaptiveCentroid(data=None,sigma=None):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol))\n maxItr = 50\n EP = 0.0001\n for i in range(maxItr):\n wrmat = wr(ROW,COL,rowmean,colmean,sigma)\n IWmat = data*wrmat\n IWcol = IWmat.sum(axis=0)\n IWrow = IWmat.sum(axis=1)\n drowmean = np.sum((rowgrid-rowmean)*IWrow)/np.sum(IWrow)\n dcolmean = np.sum((colgrid-colmean)*IWcol)/np.sum(IWcol)\n rowmean = rowmean+2.*drowmean\n colmean = colmean+2.*dcolmean\n if drowmean**2+dcolmean**2 <= EP:\n break\n\n return rowmean,colmean",
"def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y",
"def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y",
"def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-x)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-y)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y",
"def centroid(arr):\n l = arr.shape[0]\n ixs = np.arange(l)\n arr = arr - np.median(arr)\n arr = np.where(arr < 0, 0, arr) \n ixs2 = ixs * ixs\n sumarr = arr.sum()\n cen = np.dot(arr, ixs)/sumarr\n return cen, math.sqrt(np.dot(arr, ixs2)/sumarr - cen * cen)",
"def complexMoments(data=None,sigma=None):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol))\n maxItr = 50\n EP = 0.0001\n for i in range(maxItr):\n wrmat = wr(ROW,COL,rowmean,colmean,sigma)\n IWmat = data*wrmat\n IWcol = IWmat.sum(axis=0)\n IWrow = IWmat.sum(axis=1)\n IWsum = IWmat.sum()\n drowmean = np.sum((rowgrid-rowmean)*IWrow)/IWsum\n dcolmean = np.sum((colgrid-colmean)*IWcol)/IWsum\n rowmean = rowmean+2.*drowmean\n colmean = colmean+2.*dcolmean\n if drowmean**2+dcolmean**2 <= EP:\n break\n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mr = np.sum(rowgrid*IWrow)/IWsum\n Mc = np.sum(colgrid*IWcol)/IWsum\n Mrr = np.sum(rowgrid**2*IWrow)/IWsum\n Mcc = np.sum(colgrid**2*IWcol)/IWsum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*IWmat)/IWsum\n Mrrr = np.sum(rowgrid**3*IWrow)/IWsum\n Mccc = np.sum(colgrid**3*IWcol)/IWsum\n Mrrc = np.sum(np.outer(rowgrid**2,colgrid)*IWmat)/IWsum\n Mrcc = np.sum(np.outer(rowgrid,colgrid**2)*IWmat)/IWsum\n M20 = Mrr + Mcc\n M22 = complex(Mcc - Mrr,2*Mrc)\n M31 = complex(3*Mc - (Mccc+Mrrc)/sigma**2, 3*Mr - (Mrcc + Mrrr)/sigma**2)\n M33 = complex(Mccc-3*Mrrc, 3.*Mrcc - Mrrr)\n return M20, M22, M31, M33",
"def calc_centroid(self):\n num = 0\n centroid = numpy.zeros(3, float)\n for atm in self:\n if atm.position is not None:\n centroid += atm.position\n num += 1\n return centroid / num",
"def compute_center(self, mole_object):\r\n if mole_object.plugin_type == \"PyMOL\":\r\n sel = PymolPlugin.PymolPlugin().get_model('all')\r\n cnt = len(sel.atom)\r\n\r\n else:\r\n sel = ChimeraPlugin.ChimeraPlugin().select()\r\n cnt = len(ChimeraPlugin.ChimeraPlugin().current_atoms())\r\n\r\n cent_x = 0\r\n cent_y = 0\r\n cent_z = 0\r\n\r\n if cnt == 0:\r\n return 0, 0, 0\r\n\r\n if mole_object.plugin_type == \"PyMOL\":\r\n\r\n for a in sel.atom:\r\n cent_x += a.coord[0]\r\n cent_y += a.coord[1]\r\n cent_z += a.coord[2]\r\n\r\n else:\r\n\r\n for a in ChimeraPlugin.ChimeraPlugin().current_atoms():\r\n cent_x += a.coord()[0]\r\n cent_y += a.coord()[1]\r\n cent_z += a.coord()[2]\r\n\r\n cent_x /= cnt\r\n cent_y /= cnt\r\n cent_z /= cnt\r\n\r\n self.point_x.component('entryfield').setentry(cent_x)\r\n self.point_y.component('entryfield').setentry(cent_y)\r\n self.point_z.component('entryfield').setentry(cent_z)\r\n\r\n self.show_crisscross(mole_object)",
"def moments3d(data):\n # Find total for all values in the data.\n total = data.sum()\n \n # Make index matrices.\n Z, X, Y = np.indices(data.shape)\n \n # Find mean positions in each dimension by weighted average (weight is intensity, index is position)\n z = (Z*data).sum()/total\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n \n # Estimate width in each dimension. Procedure is to fix the other two dimensions at their mean\n # and retrieve a single column in the dimension of interest through the peak. Visually, in a Z-\n # stack you would determine the X,Y position of the center of the peak, then we're drawing a line\n # (or a bar) in Z through that point. This becomes a simple 1D profile of intensity as a function\n # of Z position. Standard deviation of this 1D vector about z (mean Z position) is computed.\n z_col = data[:, int(x), int(y)] #single column through z with x and y fixed at their means.\n width_z = np.sqrt(np.abs((np.arange(z_col.size)-z)**2*z_col).sum()/z_col.sum())\n x_col = data[int(z), :, int(y)] #single column through x with z and y fixed at their means.\n width_x = np.sqrt(np.abs((np.arange(x_col.size)-x)**2*x_col).sum()/x_col.sum())\n y_col = data[int(z), int(x), :] #single column through y with z and x fixed at their means.\n width_y = np.sqrt(np.abs((np.arange(y_col.size)-y)**2*y_col).sum()/y_col.sum())\n \n # Estimator height from max value.\n height = data.max()\n return z, x, y, height, width_z, width_x, width_y",
"def __CalculateCentroid(self, contour):\r\n moments = cv2.moments(contour)\r\n\r\n centroid = (-1, -1)\r\n if moments[\"m00\"] != 0:\r\n centroid = (int(round(moments[\"m10\"] / moments[\"m00\"])),\r\n int(round(moments[\"m01\"] / moments[\"m00\"])))\r\n\r\n return centroid",
"def moments(data):\n total = data.sum()\n X, Y = indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = sqrt(abs((arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = sqrt(abs((arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y",
"def moments2nd(data):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol)) \n rowgrid = rowgrid - rowmean # centered\n colgrid = colgrid - colmean\n Mrr = np.sum(rowgrid**2*data)/Isum\n Mcc = np.sum(colgrid**2*data)/Isum\n Mrc = np.sum(np.outer(rowgrid,colgrid)*data)/Isum \n return Mcc, Mrr, Mrc",
"def centroid(self, method: str = 'median') -> Vector:\n\n\t\toriginal_origins = deepcopy(self.origin)\n\t\tself.origin_to_centroid(method=method)\n\t\tcentroid = deepcopy(self.origin)\n\t\tself.origin = original_origins\n\n\t\tif len(self._meshes) > 1:\n\t\t\treturn sum(centroid) / len(self._meshes)\n\n\t\treturn centroid",
"def moments(data):\n height = data.max()\n background = data.min()\n data = data - np.min(data)\n total = data.sum()\n x, y = np.indices(data.shape)\n x = (x * data).sum() / total\n y = (y * data).sum() / total\n col = data[:, int(y)]\n width_x = np.sqrt(abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum())\n width_x /= gaussian_sigma_to_fwhm\n width_y /= gaussian_sigma_to_fwhm\n return {\n \"amplitude\": height,\n \"x\": x,\n \"y\": y,\n \"sigma_x\": width_x,\n \"sigma_y\": width_y,\n \"background\": background,\n \"theta\": 0.0,\n }",
"def moments(data):\n\n data = np.absolute(data)\n total = data.sum()\n X = np.indices(data.shape)\n x = (X*data).sum()/total\n width = np.sqrt((((X-x)**2)*data).sum()/data.sum())\n m_max = data.max()\n m_min = data.min()\n if np.absolute(m_max) >= np.absolute(m_min):\n height = m_max\n else:\n height = m_min\n return height, x, width",
"def centroid(sign, FS):\n\n time = compute_time(sign, FS)\n\n energy, time_energy=signal_energy(sign, time)\n\n total_energy = np.dot(np.array(time_energy),np.array(energy))\n energy_sum = np.sum(energy)\n\n if energy_sum == 0 or total_energy == 0:\n centroid = 0\n else:\n centroid = total_energy / energy_sum\n return centroid"
] | [
"0.7405449",
"0.60013694",
"0.5921748",
"0.58050823",
"0.5781569",
"0.5734265",
"0.5720013",
"0.5717238",
"0.5676586",
"0.5669695",
"0.5635732",
"0.5635732",
"0.55874103",
"0.5567119",
"0.555904",
"0.5532004",
"0.5532004",
"0.5526069",
"0.5523735",
"0.5521724",
"0.54989535",
"0.54869103",
"0.54765713",
"0.5458823",
"0.5456567",
"0.5451338",
"0.5450756",
"0.54432404",
"0.5417538",
"0.5414794"
] | 0.6681457 | 1 |
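
The 1D case can be vectorized in the same way. The sketch below collapses the moment loop of find1D_higher_moments into array operations, with full weight inside the half-width window and no 1-based index shift, so small numerical differences from the original are expected; central_moments_1d is an illustrative name.

import numpy as np

def central_moments_1d(vector, xcen, xhw, c_sum):
    # Sketch: 2nd and 3rd central moments of a collapsed 1D profile about xcen,
    # restricted to the +/- xhw window and normalized by c_sum.
    x = np.arange(len(vector), dtype=float)
    inside = np.abs(x - xcen) <= xhw
    xloc = x[inside] - xcen
    xmoment2 = np.sum(xloc ** 2 * vector[inside]) / c_sum
    xmoment3 = np.sum(xloc ** 3 * vector[inside]) / c_sum
    return np.array((xmoment2, xmoment3))
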
Iterates over the unknown items in each term and checks whether they have become numeric, i.e., now have a value where they previously did not. If so, multiplies the term's constant factor by the newly determined value and removes the item from the unknowns. | def update(self):
terms_toRemove = []
for termIndex, [term_constantFactor, term_unknowns_attributeAddresses] in enumerate(self.LHS):
# Check if coefficient is 0 - then no need to process any of the unknowns since term will be 0 anyways
if term_constantFactor == 0:
terms_toRemove.append(termIndex)
continue # continue to next term, no need to resolve the unknowns of this term since the product will be 0 anyways
# Check if any unknowns became known
unknowns_toRemove = []
for unknown_attributeAddress in term_unknowns_attributeAddresses:
attribute = getattr_fromAddress(*unknown_attributeAddress)
if isNumeric(attribute):
# object.attribute which had previously been identified as unknown now has a value, add it to the constant factor product and remove from the unknowns
self.LHS[termIndex][0] *= attribute # multiply it with the constant factor product
unknowns_toRemove.append([termIndex, unknown_attributeAddress])
for termIndex, unknown_attributeAddress in unknowns_toRemove: # remove unknowns which have become known in the end
# removing in the end not to tamper with the iteration of the above loop
self.LHS[termIndex][1].remove(unknown_attributeAddress)
# Move constants to RHS
if self.LHS[termIndex][1] == []:
# if term has no unknowns, it is a constant, move to RHS
self.RHS -= self.LHS[termIndex][0]
self.LHS.pop(termIndex)
for termIndex in reversed(terms_toRemove): # reversed - otherwise would tamper with indices of items identified for removal
self.LHS.pop(termIndex)
self._gatherUnknowns() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle_unknowns(self):\n for label in self.words_labels_counts:\n for word in self.words_labels_counts[label]:\n if self.words_labels_counts[label][word] <= self.UNKNOWN_TOKEN_THRESHOLD:\n self.words_labels_counts[label][self.UNKNOWN_TOKEN] += self.words_labels_counts[label][word]\n self.words_labels_counts[label][word] = 0",
"def code_unknown_to_nan(data, attribute_values):\n attribute_values_unknown = attribute_values[attribute_values['Meaning'] == \"unknown\"]\n for i in range(len(attribute_values_unknown)):\n colname = attribute_values_unknown.iloc[i]['Attribute']\n unknown_values = eval('[' + str(attribute_values_unknown.iloc[i]['Value']) + ']')\n try:\n data[colname] = data[colname].replace(unknown_values, float('nan'))\n except:\n pass\n return data",
"def clean_dict(self,dict_to_clean):\n for i in dict_to_clean: \n try:\n float( dict_to_clean[i] ) \n except:\n dict_to_clean[ i ] = \"'%s'\"%( dict_to_clean[i ].replace(\"'\",\"\").replace('\"',\"\") )",
"def coerce_empty_numeric_values(self):\n if \"numeric\" in self.annot_types:\n numeric_columns = self.file.xs(\n \"numeric\", axis=1, level=1, drop_level=False\n ).columns.tolist()\n self.file[numeric_columns].replace(\"\", np.nan, inplace=True)",
"def mutate_fix_var_filter(item_counts):\n assert isinstance(item_counts, Counter)\n for i in list(item_counts.keys()):\n if isinstance(i, Literal):\n i_n3 = i.n3()\n if len(i_n3) > config.MAX_LITERAL_SIZE:\n logger.debug(\n 'excluding very long literal %d > %d from mutate_fix_var:\\n'\n '%s...',\n len(i_n3), config.MAX_LITERAL_SIZE, i_n3[:128]\n )\n del item_counts[i]\n elif i.datatype in (XSD['float'], XSD['double']) \\\n and six.text_type(i).lower() in ('nan', 'inf'):\n logger.debug('excluding %s due to Virtuoso Bug', i_n3)\n del item_counts[i]\n elif isinstance(i, URIRef):\n # noinspection PyBroadException\n try:\n i.n3()\n except Exception: # sadly RDFLib doesn't raise a more specific one\n # it seems some SPARQL endpoints (Virtuoso) are quite liberal\n # during their import process, so it can happen that we're\n # served broken URIs, which break when re-inserted into SPARQL\n # later by calling URIRef.n3()\n logger.warning(\n 'removed invalid URI from mutate_fix_var:\\n%r',\n i\n )\n del item_counts[i]\n elif isinstance(i, BNode):\n # make sure that BNodes stay variables\n logger.info('removed BNode from mutate_fix_var')\n del item_counts[i]\n else:\n logger.warning(\n 'exlcuding unknown result type from mutate_fix_var:\\n%r',\n i\n )\n del item_counts[i]",
"def _clean(self, dataset):\n # Replace missing values with numpy's NaN. The missing value is\n # usually 1e+20, but values can be like 1.0000002e+20, which is\n # different. Ergo the inequality.\n for var in dataset.data_vars.itervalues():\n if 'missing_value' in var.attrs:\n missing_data_value = var.missing_value\n try:\n var.values[var.values >= missing_data_value] = np.NaN\n except ValueError:\n print \"Encountered ValueError in {0}. Ignoring\".format(var.name)",
"def _remove_nan(parsed_dictionary):\n for key, value in parsed_dictionary.items():\n if isinstance(value, np.ndarray):\n non_nan_value = np.nan_to_num(value, nan=123456789, posinf=2e308, neginf=-2e308)\n parsed_dictionary.update({key: non_nan_value})\n\n return parsed_dictionary",
"def unsetNumericValue(self):\n return _libsbml.PossibleSpeciesFeatureValue_unsetNumericValue(self)",
"def _fix_surprising_number(val, s):\n if (\n isinstance(val, (int, float)) and \"!!\" not in s\n and _contains_non_numeric_chars(s)\n ):\n return s\n return val",
"def _nuclear_factor(self,Tp):\n sigmaRpp = 10 * np.pi * 1e-27\n sigmainel = self._sigma_inel(Tp)\n sigmainel0 = self._sigma_inel(1e3) # at 1e3 GeV\n f = sigmainel / sigmainel0\n f2 = np.where(f > 1, f, 1.0)\n G = 1.0 + np.log(f2)\n # epsilon factors computed from Eqs 21 to 23 with local ISM abundances\n epsC = 1.37\n eps1 = 0.29\n eps2 = 0.1\n\n epstotal = np.where(Tp > self._Tth,\n epsC + (eps1 + eps2) * sigmaRpp * G / sigmainel,\n 0.0)\n\n if np.any(Tp < 1.0):\n # nuclear enhancement factor diverges towards Tp = Tth, fix Tp<1 to eps(1.0) = 1.91\n loE=np.where((Tp > self._Tth) * (Tp < 1.0))\n epstotal[loE] = 1.9141\n\n return epstotal",
"def fix_initial_nan_learning_rate(dict_list):\n if len(dict_list) > 1:\n dict_list[0]['LearningRate'] = dict_list[1]['LearningRate']",
"def try_all_derived(SIunits, derived):\r\n res = []\r\n for d in derived:\r\n for sign in [-1, 1]: # 1 means in numerator, -1 in denominator\r\n improvement = -30\r\n if derived[d] * sign < 0:\r\n continue # don't take away derived units\r\n for old, used_in_derived in zip(SIunits, unitquant[d].units):\r\n improvement += 20 * (abs(old) - abs(old - sign * used_in_derived))\r\n if old and not (old - sign * used_in_derived):\r\n improvement += 6\r\n res.append( (improvement, d, sign) )\r\n return max(iter(res))",
"def non_specificity(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result += value * math.log(focal.cardinal, 2)\n return round(result, 6)",
"def _diagnose_doubles(data):\n _, idx = np.unique(data, return_index=True)\n missing = np.array([i for i in np.arange(len(data)) if i not in idx])\n if len(missing) > 0:\n missing_values = data[missing]\n print(f\"Missing values Indicies[{missing}]/ Values[{missing_values}]\")",
"def numeric(self, values):\n x = values[0].copy()\n x[x > 0] = 1.0\n x[x <= 0] = -1.0\n return x",
"def nominal_to_float(self, x, missing_values=['nan', '?']):\n new_x = np.empty_like(x, dtype=float)\n x = np.squeeze(x)\n names = np.unique(x)\n substract = 0\n for i, name in enumerate(names):\n if name in missing_values:\n new_x[x==name] = np.nan\n substract += 1\n else:\n new_x[x==name] = i - substract\n return new_x",
"def eliminateRedundantInfo(self):\n\n allEliminated = False\n edep = self.energyDependentWidths\n for colId in range(edep.nColumns)[::-1]:\n column = edep.columns[colId]\n columnData = edep.getColumn( column.name, units='eV' )\n if len(set( columnData ) ) == 1:\n setattr( self.constantWidths, column.name, PQU.PQU( PQU.pqu_float.surmiseSignificantDigits( columnData[0] ), column.units ) )\n [d.pop(colId) for d in edep.data]\n edep.columns.pop(colId)\n for idx, col in enumerate( edep.columns ): col.index = idx #re-number\n #if edep.nColumns == 1 and edep.columns[0].name == 'energy':\n # edep.columns, edep.data = [],[] # all widths are constant\n # allEliminated = True\n return allEliminated",
"def fix_special_floats(value, _inf=INFINITY, _neginf=-INFINITY):\n\n try:\n value = convert_tensor_to_numpy(value)\n\n # Check if the value is Nan, equivalent of math.isnan\n if math.isnan(value):\n return \"NaN\"\n\n elif value == _inf:\n return \"Infinity\"\n\n elif value == _neginf:\n return \"-Infinity\"\n\n except Exception:\n # Value cannot be compared\n return value\n\n return value",
"def _fix_array_item_vals(self):\n for m in self.masks():\n if self._has_categorical_data(m):\n lib_vals = 'lib@values@{}'.format(m)\n self._meta['masks'][m]['values'] = lib_vals\n for s in self.sources(m):\n self._meta['columns'][s]['values'] = lib_vals\n return None",
"def removeFixedEffect(self, index=None):\n if self._n_terms==0:\n pass\n if index is None or index==(self._n_terms-1):\n\n self._n_terms-=1\n F = self._F.pop() #= self.F[:-1]\n A = self._A.pop() #= self.A[:-1]\n self._A_identity.pop() #= self.A_identity[:-1]\n REML_term = self._REML_term.pop()# = self.REML_term[:-1]\n self._B.pop()# = self.B[:-1]\n self._n_fixed_effs-=F.shape[1]*A.shape[0]\n if REML_term:\n self._n_fixed_effs_REML-=F.shape[1]*A.shape[0]\n\n pass\n elif index >= self.n_terms:\n raise Exception(\"index exceeds max index of terms\")\n else:\n raise NotImplementedError(\"currently only last term can be removed\")\n pass\n self._rebuild_indicator()\n self.clear_cache('Fstar','Astar','Xstar','Xhat',\n 'Areml','Areml_eigh','Areml_chol','Areml_inv','beta_hat','B_hat',\n 'LRLdiag_Xhat_tens','Areml_grad',\n 'beta_grad','Xstar_beta_grad','Zstar','DLZ')",
"def chao1_uncorrected(observed, singles, doubles):\n return observed + singles**2/float(doubles*2)",
"def collect_like_terms(term_matrix):\n t = [term[:] for term in term_matrix]\n for i, term in enumerate(t, start=1):\n if i < len(t) - 1:\n for j in range(i+1, len(t)):\n if t[i][1:] == t[j][1:]:\n t[i] = [t[i][0] + t[j][0]] + t[i][1:]\n t[j][0] = 0\n # get rid of 0 terms\n t = [u for u in t if u[0] != 0]\n # get rid of extra variables\n if len(t[0]) > 0:\n for i in reversed(range(len(t[0]))):\n # in reverse so deletion doesn't affect index of subsequent variables\n extra = True\n if len(t) > 0:\n for term in t[1:]:\n try:\n if term[i] != 0:\n extra = False\n except IndexError:\n extra = True\n if extra:\n for term in t:\n try:\n del term[i]\n except IndexError:\n pass\n if t == [[]]:\n return [['constant']]\n return t",
"def replace_invalids(data, threshold = 50, print_vals = False, med=True):\n \n # prints a description of the features if print_vals True\n means, stds, medians, mins, maxs = summarize_features(data, print_vals=print_vals)\n \n if(print_vals):\n print(\"Number of invalid values: \" + str(len(data[data == -999.0])))\n print(\"Number of Nan values: \" + str(np.count_nonzero(np.isnan(data))))\n print(\"Shape: \" + str(data.shape))\n print()\n \n # gets the number of invalid values for each feature\n percent_invalid = find_percentage_of_invalid(data, mins, print_vals=print_vals)\n \n # stores the indices of the features to delete because the number of invalid values is above the threshold\n to_delete = [k for k,v in percent_invalid.items() if v > threshold]\n \n # stores the indices of the features where the invalid values need to be replaces\n change_to_mean = [k for k in percent_invalid.keys() if k not in to_delete]\n \n for idx in change_to_mean:\n # puts the value np.NaN in place of the invalid values in the features to modify to make the measurements on the features\n # without the invalid values\n data[:,idx] = np.where(data[:,idx]==-999, np.NaN, data[:,idx])\n \n \n # calculates the means, medians and means without the invalid values and NaNs\n n_means, _, n_medians, n_mins, _ = summarize_features(data, include_nan=False, print_vals=print_vals)\n \n \n if med:\n for idx in change_to_mean:\n # replacing the invalid values by the median if med is True\n data[:,idx] = np.where(np.isnan(data[:,idx]), n_medians[idx], data[:,idx])\n else :\n for idx in change_to_mean:\n # replacing the invalid values by the mean if med is False\n data[:,idx] = np.where(np.isnan(data[:,idx]), n_means[idx], data[:,idx])\n\n \n n_percent_invalid = find_percentage_of_invalid(data, n_mins, print_vals=print_vals)\n \n # deletes the features to discard\n data = np.delete(data,to_delete,axis=1)\n \n if(print_vals):\n print()\n print(\"Number of invalid values: \" + str(len(data[data == -999.0])))\n print(\"Number of Nan values: \" + str(np.count_nonzero(np.isnan(data))))\n print(\"New shape: \" + str(data.shape))\n \n return data",
"def preferred_rep(self):\n # reducing coefficients mod torsion\n if self.torsion != 'free':\n for key, value in self.items():\n self[key] = value % self.torsion\n\n # removing key:value pairs with value = 0\n zeros = [k for k, v in self.items() if not v]\n for key in zeros:\n del self[key]",
"def normalize_quantities(self):\n return (\n pynini.cdrewrite(self.units_map, \"\", \"\", self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.singularize_map, \"1 \", \"\", self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.thousands_map, \"\", self.triple_digits, self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.hundreds_map, \"\", self.double_digits, self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.tens_map, \"\", self.digits, self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.teens_map, \"\", \"\", self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.ones_map, \"\", \"\", self.sigma_star, direction=\"ltr\") *\n pynini.cdrewrite(self.zero_del, \"\", \"\", self.sigma_star, direction=\"ltr\")\n )",
"def correct_abnormal_price_variations(ls_ls_prices, var_lim):\n # TODO: Inspect with pandas\n # TODO: Check mistakes with sp95 and/or input forward vs. backward\n dict_suspects_opposit = {}\n dict_suspects_single = {}\n for indiv_ind, ls_prices in enumerate(ls_ls_prices):\n last_price_var = np.nan # check that\n last_price_day_ind = 0\n for day_ind, price in enumerate(ls_prices[1:], start=1):\n if np.abs(price - ls_ls_prices[indiv_ind][day_ind-1]) > var_lim:\n if (np.abs(last_price_var) > var_lim) and\\\n (last_price_var*(price - ls_ls_prices[indiv_ind][day_ind-1]) < 0):\n for i in range(last_price_day_ind, day_ind):\n ls_ls_prices[indiv_ind][i] = ls_ls_prices[indiv_ind][last_price_day_ind - 1]\n dict_suspects_opposit.setdefault(indiv_ind, []).append((last_price_day_ind, day_ind))\n else:\n if np.isnan(last_price_var):\n for i in range(last_price_day_ind, day_ind):\n ls_ls_prices[indiv_ind][i] = np.nan\n dict_suspects_single.setdefault(indiv_ind, []).append((last_price_day_ind, day_ind))\n else:\n j = 0\n while (ls_ls_prices[indiv_ind][day_ind + j] == ls_ls_prices[indiv_ind][day_ind]) and\\\n day_ind + j < len(ls_ls_prices[indiv_ind]) - 1:\n j += 1\n if np.isnan(ls_ls_prices[indiv_ind][day_ind + j]):\n for i in range(day_ind, day_ind + j):\n ls_ls_prices[indiv_ind][i] = np.nan\n dict_suspects_single.setdefault(indiv_ind, []).append((day_ind, day_ind + j))\n if price != ls_ls_prices[indiv_ind][day_ind-1]:\n last_price_var = price -ls_ls_prices[indiv_ind][day_ind-1]\n last_price_day_ind = day_ind\n return dict_suspects_opposit, dict_suspects_single, ls_ls_prices",
"def _add_units_uncertainties(self):\n if \"Energy Level\" in self.keys():\n self._convert_column(\n \"Energy Level\", lambda x: _parse_float_uncertainty(x, \"\")\n )\n self.df.rename(columns={\"Energy Level\": \"Energy Level (MeV)\"}, inplace=True)\n\n if \"Parent Energy Level\" in self.keys():\n self._convert_column_uncertainty(\"Parent Energy Level\")\n self.df.rename(\n columns={\"Parent Energy Level\": \"Energy Level (MeV)\"}, inplace=True\n )\n self.df[\"Energy Level (MeV)\"] *= 0.001\n\n if \"Mass Excess\" in self.keys():\n self._convert_column_uncertainty(\"Mass Excess\")\n self.df.rename(columns={\"Mass Excess\": \"Mass Excess (MeV)\"}, inplace=True)\n\n self._convert_column(\"T1/2 (s)\", float)\n\n if \"Abundance (%)\" in self.keys():\n self._convert_column_uncertainty(\"Abundance (%)\")\n\n if \"Branching (%)\" in self.keys():\n self._convert_column(\n \"Branching (%)\", lambda x: _parse_float_uncertainty(x, \"\")\n )\n\n if \"Radiation Energy\" in self.keys():\n self._convert_column_uncertainty(\"Radiation Energy\")\n self.df.rename(\n columns={\"Radiation Energy\": \"Radiation Energy (keV)\"}, inplace=True\n )\n\n if \"Endpoint Energy\" in self.keys():\n self._convert_column_uncertainty(\"Endpoint Energy\")\n self.df.rename(\n columns={\"Endpoint Energy\": \"Endpoint Energy (keV)\"}, inplace=True\n )\n\n if \"Radiation Intensity (%)\" in self.keys():\n self._convert_column_uncertainty(\"Radiation Intensity (%)\")\n\n if \"Dose\" in self.keys():\n self._convert_column_uncertainty(\"Dose\")\n self.df.rename(columns={\"Dose\": \"Dose (MeV / Bq / s)\"}, inplace=True)",
"def _replace_units(original_units, values_by_name):\n q = 1\n for arg_name, exponent in original_units.items():\n q = q * values_by_name[arg_name] ** exponent\n\n return getattr(q, \"_units\", UnitsContainer({}))",
"def change_nan(dict):\n\n for k,v in dict.items():\n if np.isnan(v):\n dict[k] = 0.0\n else:\n dict[k] = v",
"def replace_nan_num(filename, columns, value_dic):\n\th = pyfits.open(filename, mode='update')\n\tfor col in columns:\n\t\tif value_dic.has_key(col):\n\t\t\tval = value_dic[col]\n\t\telse:\n\t\t\tval = 0\n\t\tdata = h[1].data.field(col)\n\t\th[1].data.field(col)[:] = where(isnan(data), val, data)\n\th.flush()\n\th.close()"
] | [
"0.5276544",
"0.5141085",
"0.5104796",
"0.5103461",
"0.50599724",
"0.50108856",
"0.5003846",
"0.49739787",
"0.49336976",
"0.49155074",
"0.49132082",
"0.48991868",
"0.48374784",
"0.4828213",
"0.48211414",
"0.4781902",
"0.47690916",
"0.475525",
"0.47551218",
"0.474309",
"0.47317553",
"0.47299966",
"0.47294876",
"0.4725902",
"0.47196725",
"0.4719595",
"0.4718855",
"0.47112542",
"0.46919164",
"0.46872476"
] | 0.61236215 | 0 |
Returns a dictionary representation of the equation. Each unknown is a key, and the matching values are their coefficients. RHS (i.e. all constant terms) are tagged with the 'RHS' key. | def get_asDict(self):
dictRepresentation = {}
for [term_constantFactor, term_unknowns_attributeAddresses] in self.LHS:
term_unknowns_attributeAddresses_key = tuple(term_unknowns_attributeAddresses)
assert term_unknowns_attributeAddresses_key not in dictRepresentation, 'PrgError: Same unknowns encountered multiple times in the equation LHS, unknowns must have been gathered by now. (i.e. coefficients must have been combined as a single coefficient for the variable)'
dictRepresentation[term_unknowns_attributeAddresses_key] = term_constantFactor
dictRepresentation['RHS'] = self.RHS
return dictRepresentation | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def linearize(self, params, unknowns, resids):\n\n x = params['x']\n a = self.a\n b = self.b\n J = {}\n\n J['y', 'x'] = 2.0*a*x + b\n return J",
"def linearize(self, params, unknowns, resids):\n\n x = hash(params['x'])\n y = params['y']\n J = {}\n\n J['f_xy', 'x'] = 2.0*x - 6.0 + y\n J['f_xy', 'y'] = 2.0*y + 8.0 + x\n return J",
"def _get_graph_based_ic_dictionary(self):\n\n\t\t# TODO find the literature reference or presentation where this equation is from instead of just the presentation.\n\n\t\t#ic_dict = {}\n\t\t#num_terms_in_ontology = len(self)\n\t\t#for term in self.terms():\n\t\t#\tdepth = self._depth_dict[term.id]\n\t\t#\tnum_descendants = len(list(term.subclasses(with_self=False)))\n\t\t#\tic_value = float(depth)*(1-(math.log(num_descendants+1)/math.log(num_terms_in_ontology)))\n\t\t#\tic_dict[term.id] = ic_value\n\t\t#return(ic_dict)\n\n\n\t\t# Getting the information content of each term in the ontology based on graph structure.\n\t\tic_dict = {}\n\t\tnum_terms_in_ontology = len(self)\n\t\tfor term in self.terms():\n\t\t\tdepth = self._depth_dict[term.id]\n\t\t\tnum_descendants = len(list(term.subclasses(with_self=False)))\n\t\t\tic_value = float(depth)*(1-(math.log(num_descendants+1)/math.log(num_terms_in_ontology)))\n\t\t\tic_dict[term.id] = ic_value\n\n\n\t\t# Converting to weights based on information content rather than raw value.\n\t\tic_dict_as_weights = {}\n\t\tic_values = ic_dict.values()\n\t\tmin_ic = min(ic_values)\n\t\tmax_ic = max(ic_values)\n\t\tnew_max = 1.00\n\t\tnew_min = 0.00\n\t\tfor k,v in ic_dict.items():\n\t\t\told_range = max_ic-min_ic\n\t\t\tnew_range = new_max-new_min\n\t\t\tnew_value = (((v - min_ic) * new_range) / old_range) + new_min\n\t\t\tic_dict_as_weights[k] = new_value\n\n\t\treturn(ic_dict, ic_dict_as_weights)",
"def linearize(self, params, unknowns, resids):\n\n J = {}\n J['x', 'y1'] = 1.0\n J['x', 'y2'] = -1.0\n return J",
"def __init__(self, equation_dict):\n self.equation = equation_dict['equation']\n self.variables = equation_dict['variables']\n self.dict = equation_dict\n self.x = list(self.variables)[-1]['variable'] # The variable to solve for",
"def equation(self):\n mat = np.zeros((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs[0:self.nlayers - 1] = 0.0\n rhs[self.nlayers - 1] = self.Qc\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n head = e.potinflayers(self.xc, self.yc, self.layers) / self.aq.Tcol[self.layers, :]\n mat[0:self.nlayers - 1, ieq:ieq + e.nunknowns] = head[:-1] - head[1:]\n if e == self:\n for i in range(self.nlayers - 1):\n mat[i, ieq + i] -= self.resfac[i]\n mat[i, ieq + i + 1] += self.resfac[i + 1]\n mat[self.nlayers - 1, ieq:ieq + self.nlayers] = 1.0\n ieq += e.nunknowns\n else:\n head = e.potentiallayers(self.xc, self.yc, self.layers) / self.aq.T[self.layers]\n rhs[0:self.nlayers - 1] -= head[:-1] - head[1:]\n return mat, rhs",
"def solve_equations(equations):\n # variables in the system of equations\n var_list = list(reduce(set.union, (set(var for coeff, var in eqn if var)\n for eqn in equations)))\n # number of variables\n num_vars = len(var_list)\n # the index of each variable in |var_list|\n var_index = dict(zip(var_list, range(num_vars)))\n # matrices to solve system (Ax = b)\n A, b = [], []\n # populate matrices\n for equation in equations:\n coeffs, const = [0] * num_vars, 0\n for coeff, var in equation:\n if var:\n coeffs[var_index[var]] += coeff\n else:\n const -= coeff\n A.append(coeffs)\n b.append([const])\n try:\n # solve system\n x = solve(matrix(A), matrix(b))\n return dict(zip(var_list, [x[i, 0] for i in xrange(num_vars)]))\n except:\n raise Exception('Could not solve system of equations')",
"def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n # rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs = self.hc.copy()\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n # rhs[istart:istart+self.nlayers] = self.pc[]\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xc[icp], self.yc[icp], self.layers) / self.aq.Tcol[self.layers]\n if e == self:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] -= self.resfac[icp]\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xc[icp], self.yc[icp], self.layers) / self.aq.T[\n self.layers] # Pretty cool that this works, really\n return mat, rhs",
"def get_subs_dict(self, qnodes=None):\n #d = self.qparams.copy()\n d = self.qparams\n d.update(self.optimize_params(qnodes=qnodes))\n # clean null values\n subs_dict = {k: v for k, v in d.items() if v is not None}\n #print(\"subs_dict:\", subs_dict)\n return subs_dict",
"def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n rhs[istart:istart + self.nlayers] = self.pc\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xc[icp], self.yc[icp], self.layers)\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xc[icp], self.yc[icp],\n self.layers) # Pretty cool that this works, really\n return mat, rhs",
"def formula_to_dictionary(formula='', thickness=np.NaN, density=np.NaN, database='ENDF_VIII'):\n _formula_parsed = re.findall(r'([A-Z][a-z]*)(\\d*)', formula)\n\n _dictionary = {}\n _elements_array = []\n _atomic_ratio_array = []\n for _element in _formula_parsed:\n [_single_element, _atomic_ratio] = list(_element)\n if not is_element_in_database(element=_single_element, database=database):\n raise ValueError(\"element {} not found in database!\".format(_single_element))\n\n if _atomic_ratio == '':\n _atomic_ratio = 1\n\n _atomic_ratio_array.append(int(_atomic_ratio))\n _elements_array.append(_single_element)\n\n return {formula: {'elements': _elements_array,\n 'stoichiometric_ratio': _atomic_ratio_array,\n 'thickness': {'value': thickness,\n 'units': 'mm'},\n 'density': {'value': density,\n 'units': 'g/cm3',\n }\n },\n }",
"def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aqin) / self.aqin.Tcol - \\\n e.potinflayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aqout) / self.aqout.Tcol\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aqin) / self.aqin.T - \\\n e.potentiallayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aqout) / self.aqout.T\n return mat, rhs",
"def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n # rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs = self.pc.copy()\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n # rhs[istart:istart+self.nlayers] = self.pc[]\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xc[icp], self.yc[icp], self.layers)\n if e == self:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] -= self.resfac[icp]\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xc[icp], self.yc[icp],\n self.layers) # Pretty cool that this works, really\n return mat, rhs",
"def cexpr_operands(self):\n\n if self.op >= cot_comma and self.op <= cot_asgumod or \\\n self.op >= cot_lor and self.op <= cot_fdiv or \\\n self.op == cot_idx:\n return {'x': self.x, 'y': self.y}\n\n elif self.op == cot_tern:\n return {'x': self.x, 'y': self.y, 'z': self.z}\n\n elif self.op in [cot_fneg, cot_neg, cot_sizeof] or \\\n self.op >= cot_lnot and self.op <= cot_predec:\n return {'x': self.x}\n\n elif self.op == cot_cast:\n return {'type': self.type, 'x': self.x}\n\n elif self.op == cot_call:\n return {'x': self.x, 'a': self.a}\n\n elif self.op in [cot_memref, cot_memptr]:\n return {'x': self.x, 'm': self.m}\n\n elif self.op == cot_num:\n return {'n': self.n}\n\n elif self.op == cot_fnum:\n return {'fpc': self.fpc}\n\n elif self.op == cot_str:\n return {'string': self.string}\n\n elif self.op == cot_obj:\n return {'obj_ea': self.obj_ea}\n\n elif self.op == cot_var:\n return {'v': self.v}\n\n elif self.op == cot_helper:\n return {'helper': self.helper}\n\n raise RuntimeError('unknown op type %s' % self.opname)",
"def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n qx, qy = e.disvecinflayers(self.xc[icp], self.yc[icp], self.layers)\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n qx * self.cosnorm[icp] + qy * self.sinnorm[icp]\n ieq += e.nunknowns\n else:\n qx, qy = e.disveclayers(self.xc[icp], self.yc[icp], self.layers)\n rhs[istart:istart + self.nlayers] -= qx * self.cosnorm[icp] + qy * self.sinnorm[icp]\n return mat, rhs",
"def coefficients(self, force_characters = False) :\n if len(self.__coefficients) == 0 :\n return dict()\n elif not force_characters and len(self.__coefficients) == 1 :\n return self.__coefficients.values()[0] \n else :\n return self.__coefficients",
"def get_equation_matrix(obj_dict, node_dict):\n\n v_src_count = len(obj_dict['V'])\n v_src_map = {}\n for ind in range(len(obj_dict['V'])):\n v_src_map[obj_dict['V'][ind].name] = ind \n \n node_count = len(node_dict.keys())-1 # excluded GND\n \n A = np.zeros((node_count, node_count), dtype='complex') # top left quad\n B = np.zeros((node_count, v_src_count), dtype='complex') # top right quad\n aux = np.zeros((v_src_count, node_count), dtype='complex') # bottom left quad\n zero_quad = np.zeros((v_src_count,v_src_count), dtype='complex') # bottom right quad\n const_vec_nodes = np.zeros((node_count), dtype='complex')\n const_vec_aux = np.zeros((v_src_count), dtype='complex')\n\n for k in obj_dict.keys():\n lst = obj_dict[k]\n for obj in lst:\n n1 = node_dict[obj.n1]-1\n n2 = node_dict[obj.n2]-1\n value = obj.value\n\n if obj.el_type in 'RLC':\n if obj.imp == None: # dc\n if obj.el_type == 'L':\n value = EPS\n elif obj.el_type == 'C':\n value = INF\n else:\n value = obj.imp + EPS\n\n if n1 != -1 and n2 != -1:\n A[n1,n1] += 1/value\n A[n1,n2] += -1/value\n A[n2,n1] += -1/value\n A[n2,n2] += 1/value\n\n elif n1 == -1:\n A[n2,n2] += 1/value\n\n elif n2 == -1:\n A[n1,n1] += 1/value\n\n elif obj.el_type == 'V':\n nv = v_src_map[obj.name]\n if n1 != -1 and n2 != -1:\n B[n1,nv] += 1\n B[n2,nv] += -1\n aux[nv,n1] += 1\n aux[nv,n2] += -1\n \n elif n1 == -1:\n B[n2,nv] += -1\n aux[nv,n2] += -1\n \n elif n2 == -1:\n B[n1,nv] += 1\n aux[nv,n1] += 1\n\n const_vec_aux[nv] += value\n\n elif obj.el_type == 'I':\n if n1 != -1 and n2 != -1:\n const_vec_nodes[n1] += obj.value\n const_vec_nodes[n2] += -obj.value\n elif n1 == -1:\n const_vec_nodes[n2] += -obj.value\n elif n2 == -1:\n const_vec_nodes[n1] += obj.value\n\n elif obj.el_type in 'EGHF':\n print(\"ERR: Controlled sources not supported yet\")\n return None\n\n top_half = np.concatenate((A, B), axis=1) \n bot_half = np.concatenate((aux, zero_quad), axis=1)\n coeff_mat = np.concatenate((top_half, bot_half), axis=0)\n const_vec = np.concatenate((const_vec_nodes, const_vec_aux), axis=0)\n\n return [coeff_mat, const_vec]",
"def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n qx, qy = e.disvecinflayers(self.xcout[icp], self.ycout[icp], self.layers)\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n qx * self.cosnorm[icp] + qy * self.sinnorm[icp]\n ieq += e.nunknowns\n else:\n qx, qy = e.disveclayers(self.xcout[icp], self.ycout[icp], self.layers)\n rhs[istart:istart + self.nlayers] -= qx * self.cosnorm[icp] + qy * self.sinnorm[icp]\n return mat, rhs",
"def _coeff_to_dict(self):\n idx = list(np.nonzero(np.reshape(self.model_.coef_, (1, -1)))[1])\n dct = dict(\n zip(\n [self.X_train_.columns.tolist()[i] for i in idx],\n [\n self.model_.coef_.reshape(-1, self.model_.coef_.shape[-1])[0][i]\n for i in idx\n ],\n )\n )\n\n return dct",
"def find_solution(formula):\n #if formula is empty or if there is a contradiction between clauses\n if not formula or disqualifier(formula):\n return {}\n \n solution = get_one_unit_clause(formula)\n #if there are no unit clauses, move on to non-unit clauses\n if not solution:\n solution = get_non_unit_clause(formula)\n #if there are contradictions with literals on non-unit clauses, backtrack, get rid of that contradicting literal, and try again\n if disqualifier(reduce_expression(formula, solution)):\n solution = get_non_unit_clause(formula, True)\n updatedForm = reduce_expression(formula, solution)\n #double asterisks allow any number of keywords to be passed as an argument\n return {**find_solution(updatedForm), **{solution[0]: solution[1]}}",
"def _symbolic_equations(self):\n return [self.model.matching.mu_prime, self.model.matching.theta_prime]",
"def symbolic_objective(ingredients) -> Tuple[List[sp.Symbol], sp.Eq]:\n assignments = sp.symbols(' '.join(ingredients.keys()))\n\n # Skip negative logic due to differentiability requirements\n objective = 1\n for attribute in filter(lambda x: x != \"calories\", next(iter(ingredients.values())).keys()):\n objective *= sum(ingredients[str(x)][attribute] * x for x in assignments)\n\n return assignments, objective",
"def operator_dict(self, index, vars, **kw):\n out = defaultdict(int)\n # Freeze arg1 metadata for caching ncc matrices\n frozen_arg1_basis_meta = freeze_meta(self.args[1].meta)[-1]\n op0 = self.args[0].as_ncc_operator(frozen_arg1_basis_meta, **kw)\n op1 = self.args[1].operator_dict(index, vars, **kw)\n for var in op1:\n out[var] = op0 * op1[var]\n return out",
"def substituteEquation(equation, substitutedVariable, substitutionEquation):\n# print(equation)\n# print(substitutionEquation)\n if substitutedVariable not in equation.keys():\n return equation\n \n sub_var_coefficient = substitutionEquation[substitutedVariable]\n coeff = equation[substitutedVariable]\n\n for i in substitutionEquation.keys():\n substitutionEquation[i] = substitutionEquation[i]/(sub_var_coefficient)\n# print(substitutionEquation)\n for i in substitutionEquation.keys() :\n\n if i not in equation.keys():\n equation[i] = 0\n equation[i] += -coeff*substitutionEquation[i]\n if equation[i] == 0:\n del equation[i]\n return equation",
"def get_equation(self):\n self.polynomials = dict(sorted(self.polynomials.items(), reverse=True))\n string = \"\"\n\n for index, polynomial in self.polynomials.items():\n polynomial = int(polynomial)\n index = int(index)\n\n if polynomial != 0:\n if polynomial < 0:\n string_pre = \" - \"\n else:\n string_pre = \" + \"\n\n if index != 0:\n string_append = \"x\"\n elif polynomial == 1 or polynomial == -1:\n string_append = str(abs(polynomial))\n else:\n string_append = \"\"\n\n if polynomial < 0:\n polynomial = abs(polynomial)\n\n if polynomial != 1:\n string_append = str(polynomial) + string_append\n\n if index != 0 and index != 1:\n string_append += \"^\" + str(index)\n\n string += string_pre + string_append\n\n if len(string) > 0:\n string = string[3:]\n else:\n string = \"0\"\n\n return string",
"def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n headin = self.intpot(e.potinflayers, self.xcin[icp], self.ycin[icp],\n self.xcin[icp + 1], self.ycin[icp + 1], self.layers,\n aq=self.aqin) / self.aqin.Tcol[self.layers]\n headout = self.intpot(e.potinflayers, self.xcout[icp], self.ycout[icp],\n self.xcout[icp + 1], self.ycout[icp + 1], self.layers,\n aq=self.aqout) / self.aqout.Tcol[self.layers]\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = headin - headout\n ieq += e.nunknowns\n else:\n headin = self.intpot(e.potentiallayers, self.xcin[icp], self.ycin[icp],\n self.xcin[icp + 1], self.ycin[icp + 1], self.layers,\n aq=self.aqin) / self.aqin.T[self.layers]\n headout = self.intpot(e.potentiallayers, self.xcout[icp], self.ycout[icp],\n self.xcout[icp + 1], self.ycout[icp + 1], self.layers,\n aq=self.aqout) / self.aqout.T[self.layers]\n rhs[istart:istart + self.nlayers] -= headin - headout\n return mat, rhs",
"def _coefficients(regression_df):\n coeff_names = ('mindist', 'x_j', 'f_is', 'v_is')\n coefficients = {x: _get_coefficient(regression_df, x) for x in coeff_names}\n return coefficients",
"def model(self, m):\n eqs = {}\n vals = {}\n for d in m:\n if isinstance(m[d], z3.FuncInterp):\n interp = m[d].as_list()[:-1]\n interp = [[self.translate(p) for p in r] for r in interp]\n d_eqs = interp_to_eqns(self.translate(d), interp)\n for i, e in enumerate(d_eqs):\n eqs[str(d) + 'def' + str(i)] = e\n vals[str(d)] = Value(z3_to_val(m[d]))\n else:\n eqs[str(d) + 'def'] = \\\n self.translate(d()) == self.translate(m[d])\n vals[str(d)] = Value(z3_to_val(m[d]))\n return value.Model(eqs, vals)",
"def get_constraints(self):\n return ({'type': 'ineq', 'fun': lambda x: x[1] - x[2]},\n {'type': 'ineq', 'fun': lambda x: x[3] - x[4]})",
"def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n qx, qy = e.disvecinflayers(self.xc[icp], self.yc[icp], self.layers)\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n qx * self.cosnorm[icp] + qy * self.sinnorm[icp] - self.resfac[:, np.newaxis] * \\\n (e.potinflayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aq) / self.aq.Tcol[\n self.layers] - \\\n e.potinflayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aq) / self.aq.Tcol[\n self.layers])\n ieq += e.nunknowns\n else:\n qx, qy = e.disveclayers(self.xc[icp], self.yc[icp], self.layers)\n rhs[istart:istart + self.nlayers] -= qx * self.cosnorm[icp] + qy * self.sinnorm[icp] + self.resfac * \\\n (e.potentiallayers(self.xcin[icp], self.ycin[icp], self.layers,\n aq=self.aq) / self.aq.T[self.layers] -\n e.potentiallayers(self.xcout[icp], self.ycout[icp],\n self.layers, aq=self.aq) / self.aq.T[\n self.layers])\n return mat, rhs"
] | [
"0.586238",
"0.5818951",
"0.5657222",
"0.56080526",
"0.55192053",
"0.5433741",
"0.54323214",
"0.5415554",
"0.5337335",
"0.5325749",
"0.531823",
"0.5238016",
"0.5237405",
"0.5228737",
"0.521135",
"0.5207039",
"0.5191282",
"0.5167047",
"0.5164668",
"0.51615524",
"0.51598066",
"0.5137693",
"0.51185197",
"0.5112362",
"0.5091293",
"0.5072504",
"0.5062474",
"0.50620544",
"0.5055558",
"0.5046065"
] | 0.75358605 | 0 |
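The get_asDict record above maps each gathered unknown (keyed by a tuple of its attribute addresses) to its coefficient and stores the constant terms under 'RHS'. A minimal standalone sketch of that representation follows; the term layout and the (object, attribute) address tuples are illustrative assumptions, not taken from the dataset.

```python
# Illustrative sketch only: rebuilds the dictionary form described by the get_asDict
# record, assuming LHS terms are stored as [coefficient, unknown_address_list] pairs.
def as_dict(lhs_terms, rhs):
    representation = {}
    for coefficient, unknown_addresses in lhs_terms:
        key = tuple(unknown_addresses)
        # coefficients are assumed to be gathered already, so each key appears once
        assert key not in representation
        representation[key] = coefficient
    representation['RHS'] = rhs
    return representation

# 2*x + 3*y = 7, with unknowns identified by hypothetical (object, attribute) addresses
print(as_dict([[2.0, [('state', 'x')]], [3.0, [('state', 'y')]]], 7.0))
# {(('state', 'x'),): 2.0, (('state', 'y'),): 3.0, 'RHS': 7.0}
```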
Updates the LinearEquations in the list equations if equation contains an unknown from the list updatedUnknowns. Updates all equations if updateAll. | def updateEquations(equations: List, updatedUnknowns: Set, updateAll: bool = False):
for equation in equations:
if updateAll or any(unknown in equation.get_unknowns() for unknown in updatedUnknowns):
equation.update()
updatedUnknowns = set() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solve_solvableEquations(equations: List):\n solvedEquations = []\n updatedUnknowns = set()\n\n for equation in equations:\n equation.update()\n if equation.isSolvable():\n solution = equation.solve()\n unknownAddress = list(solution.keys())[0]\n setattr_fromAddress(object=unknownAddress[0], attributeName=unknownAddress[1], value=solution[unknownAddress])\n updatedUnknowns.add(unknownAddress)\n solvedEquations.append(equation)\n\n for equation in solvedEquations:\n equations.remove(equation)\n\n return updatedUnknowns",
"def solve_combination_ofEquations(equations: List, number_ofEquations: int) -> Set:\n updatedUnknowns = set()\n\n for equationCombination in combinations(equations, number_ofEquations):\n\n # If any of the equations got solved in a previous iteration and got removed from _equations, skip this combination\n # Combinations are generated beforehand at the beginning of the main for loop.\n if any(equation not in equations for equation in equationCombination):\n continue\n\n if (system := System_ofLinearEquations(list(equationCombination))).isSolvable():\n solution = system.solve()\n unknownAddresses = list(solution.keys())\n for unknownAddress in unknownAddresses:\n setattr_fromAddress(object=unknownAddress[0], attributeName=unknownAddress[1], value=solution[unknownAddress])\n updatedUnknowns.add(unknownAddress)\n\n # If system is solved, all equations in the combination is solved. Remove them from equations pool.\n for equation in equationCombination:\n equations.remove(equation)\n\n return updatedUnknowns",
"def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n a = self.a\n b = self.b\n c = self.c\n\n unknowns['y'] = a*x**2 + b*x + c",
"def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n m = self.slope\n b = self.intercept\n\n unknowns['y'] = m*x + b",
"def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n y = params['y']\n\n unknowns['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0",
"def solve_nonlinear(self, params, unknowns, resids):\n\n x = hash(params['x'])\n y = params['y']\n\n unknowns['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0",
"def variational_update(self):\n with self.elbo_check('update_p_allele_swap'):\n self.model.update_p_allele_swap()\n\n with self.elbo_check('p_cn'):\n self.model.update_p_cn()\n\n with self.elbo_check('p_breakpoint'):\n self.model.update_p_breakpoint()\n\n with self.elbo_check('p_outlier_total'):\n self.model.update_p_outlier_total()\n\n with self.elbo_check('p_outlier_allele'):\n self.model.update_p_outlier_allele()",
"def test_solution_usage(self, test_data):\n for finput in test_data[\"EKFSLAM.EKFSLAM.update\"][:1]:\n params = finput\n\n solution.used[\"EKFSLAM.EKFSLAM.update\"] = False\n\n EKFSLAM.EKFSLAM.update(**params)\n\n assert not solution.used[\"EKFSLAM.EKFSLAM.update\"], \"The function uses the solution\"",
"def solve_nonlinear(self, params, unknowns, resids):\n pass",
"def solve_nonlinear(self, params, unknowns, resids):\n pass",
"def eq_in_evaluate_y_derivative(self, eq, used_equations):\n eq.setdefault('in_evaluate_y_derivative', []).append(eq['sympy_lhs'] in [eq[0] for eq in used_equations])",
"def equations(self):\n k = 0\n ######################################################################\n # equations for fluid balance\n self.residual[k:k + self.num_nw_fluids] = self.fluid_func()\n k += self.num_nw_fluids\n\n ######################################################################\n # equations for mass flow balance\n self.residual[k] = self.mass_flow_func()\n k += 1\n\n ######################################################################\n # equations for specified heta transfer\n if self.Q.is_set:\n self.residual[k] = self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) - self.Q.val\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio\n if self.pr.is_set:\n self.residual[k] = (\n self.inl[0].p.val_SI * self.pr.val - self.outl[0].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified zeta\n if self.zeta.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(zeta='zeta')\n k += 1\n\n ######################################################################\n # equation for specified hydro-group paremeters\n if self.hydro_group.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n # hazen williams equation\n if self.hydro_group.method == 'HW':\n func = self.hw_func\n # darcy friction factor\n else:\n func = self.darcy_func\n self.residual[k] = func()\n k += 1\n\n ######################################################################\n # additional equations\n self.additional_equations(k)",
"def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n z = unknowns['z']\n znew = z\n\n iter = 0\n eps = 1.0e99\n while iter < self.maxiter and abs(eps) > self.atol:\n z = znew\n znew = 4.0 - x*z\n\n eps = x*znew + znew - 4.0\n\n unknowns['z'] = znew\n unknowns['y'] = x + 2.0*znew\n\n resids['z'] = eps\n #print(unknowns['y'], unknowns['z'])",
"def apply_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n z = unknowns['z']\n resids['z'] = x*z + z - 4.0\n\n # Output equations need to evaluate a residual just like an explicit comp.\n resids['y'] = x + 2.0*z - unknowns['y']\n #print(x, unknowns['y'], z, resids['z'], resids['y'])",
"def _old_linearize(self):\n\n if (self._fr is None) or (self._frstar is None):\n raise ValueError('Need to compute Fr, Fr* first.')\n\n # Note that this is now unneccessary, and it should never be\n # encountered; I still think it should be in here in case the user\n # manually sets these matrices incorrectly.\n for i in self.q:\n if self._k_kqdot.diff(i) != 0 * self._k_kqdot:\n raise ValueError('Matrix K_kqdot must not depend on any q.')\n\n t = dynamicsymbols._t\n uaux = self._uaux\n uauxdot = [diff(i, t) for i in uaux]\n # dictionary of auxiliary speeds & derivatives which are equal to zero\n subdict = dict(zip(uaux[:] + uauxdot[:],\n [0] * (len(uaux) + len(uauxdot))))\n\n # Checking for dynamic symbols outside the dynamic differential\n # equations; throws error if there is.\n insyms = set(self.q[:] + self._qdot[:] + self.u[:] + self._udot[:] +\n uaux[:] + uauxdot)\n if any(find_dynamicsymbols(i, insyms) for i in [self._k_kqdot,\n self._k_ku, self._f_k, self._k_dnh, self._f_dnh, self._k_d]):\n raise ValueError('Cannot have dynamicsymbols outside dynamic \\\n forcing vector.')\n other_dyns = list(find_dynamicsymbols(msubs(self._f_d, subdict), insyms))\n\n # make it canonically ordered so the jacobian is canonical\n other_dyns.sort(key=default_sort_key)\n\n for i in other_dyns:\n if diff(i, dynamicsymbols._t) in other_dyns:\n raise ValueError('Cannot have derivatives of specified '\n 'quantities when linearizing forcing terms.')\n\n o = len(self.u) # number of speeds\n n = len(self.q) # number of coordinates\n l = len(self._qdep) # number of configuration constraints\n m = len(self._udep) # number of motion constraints\n qi = Matrix(self.q[: n - l]) # independent coords\n qd = Matrix(self.q[n - l: n]) # dependent coords; could be empty\n ui = Matrix(self.u[: o - m]) # independent speeds\n ud = Matrix(self.u[o - m: o]) # dependent speeds; could be empty\n qdot = Matrix(self._qdot) # time derivatives of coordinates\n\n # with equations in the form MM udot = forcing, expand that to:\n # MM_full [q,u].T = forcing_full. This combines coordinates and\n # speeds together for the linearization, which is necessary for the\n # linearization process, due to dependent coordinates. f1 is the rows\n # from the kinematic differential equations, f2 is the rows from the\n # dynamic differential equations (and differentiated non-holonomic\n # constraints).\n f1 = self._k_ku * Matrix(self.u) + self._f_k\n f2 = self._f_d\n # Only want to do this if these matrices have been filled in, which\n # occurs when there are dependent speeds\n if m != 0:\n f2 = self._f_d.col_join(self._f_dnh)\n fnh = self._f_nh + self._k_nh * Matrix(self.u)\n f1 = msubs(f1, subdict)\n f2 = msubs(f2, subdict)\n fh = msubs(self._f_h, subdict)\n fku = msubs(self._k_ku * Matrix(self.u), subdict)\n fkf = msubs(self._f_k, subdict)\n\n # In the code below, we are applying the chain rule by hand on these\n # things. All the matrices have been changed into vectors (by\n # multiplying the dynamic symbols which it is paired with), so we can\n # take the jacobian of them. The basic operation is take the jacobian\n # of the f1, f2 vectors wrt all of the q's and u's. f1 is a function of\n # q, u, and t; f2 is a function of q, qdot, u, and t. In the code\n # below, we are not considering perturbations in t. 
So if f1 is a\n # function of the q's, u's but some of the q's or u's could be\n # dependent on other q's or u's (qd's might be dependent on qi's, ud's\n # might be dependent on ui's or qi's), so what we do is take the\n # jacobian of the f1 term wrt qi's and qd's, the jacobian wrt the qd's\n # gets multiplied by the jacobian of qd wrt qi, this is extended for\n # the ud's as well. dqd_dqi is computed by taking a taylor expansion of\n # the holonomic constraint equations about q*, treating q* - q as dq,\n # separating into dqd (depedent q's) and dqi (independent q's) and the\n # rearranging for dqd/dqi. This is again extended for the speeds.\n\n # First case: configuration and motion constraints\n if (l != 0) and (m != 0):\n fh_jac_qi = fh.jacobian(qi)\n fh_jac_qd = fh.jacobian(qd)\n fnh_jac_qi = fnh.jacobian(qi)\n fnh_jac_qd = fnh.jacobian(qd)\n fnh_jac_ui = fnh.jacobian(ui)\n fnh_jac_ud = fnh.jacobian(ud)\n fku_jac_qi = fku.jacobian(qi)\n fku_jac_qd = fku.jacobian(qd)\n fku_jac_ui = fku.jacobian(ui)\n fku_jac_ud = fku.jacobian(ud)\n fkf_jac_qi = fkf.jacobian(qi)\n fkf_jac_qd = fkf.jacobian(qd)\n f1_jac_qi = f1.jacobian(qi)\n f1_jac_qd = f1.jacobian(qd)\n f1_jac_ui = f1.jacobian(ui)\n f1_jac_ud = f1.jacobian(ud)\n f2_jac_qi = f2.jacobian(qi)\n f2_jac_qd = f2.jacobian(qd)\n f2_jac_ui = f2.jacobian(ui)\n f2_jac_ud = f2.jacobian(ud)\n f2_jac_qdot = f2.jacobian(qdot)\n\n dqd_dqi = - fh_jac_qd.LUsolve(fh_jac_qi)\n dud_dqi = fnh_jac_ud.LUsolve(fnh_jac_qd * dqd_dqi - fnh_jac_qi)\n dud_dui = - fnh_jac_ud.LUsolve(fnh_jac_ui)\n dqdot_dui = - self._k_kqdot.inv() * (fku_jac_ui +\n fku_jac_ud * dud_dui)\n dqdot_dqi = - self._k_kqdot.inv() * (fku_jac_qi + fkf_jac_qi +\n (fku_jac_qd + fkf_jac_qd) * dqd_dqi + fku_jac_ud * dud_dqi)\n f1_q = f1_jac_qi + f1_jac_qd * dqd_dqi + f1_jac_ud * dud_dqi\n f1_u = f1_jac_ui + f1_jac_ud * dud_dui\n f2_q = (f2_jac_qi + f2_jac_qd * dqd_dqi + f2_jac_qdot * dqdot_dqi +\n f2_jac_ud * dud_dqi)\n f2_u = f2_jac_ui + f2_jac_ud * dud_dui + f2_jac_qdot * dqdot_dui\n # Second case: configuration constraints only\n elif l != 0:\n dqd_dqi = - fh.jacobian(qd).LUsolve(fh.jacobian(qi))\n dqdot_dui = - self._k_kqdot.inv() * fku.jacobian(ui)\n dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +\n fkf.jacobian(qi) + (fku.jacobian(qd) + fkf.jacobian(qd)) *\n dqd_dqi)\n f1_q = (f1.jacobian(qi) + f1.jacobian(qd) * dqd_dqi)\n f1_u = f1.jacobian(ui)\n f2_jac_qdot = f2.jacobian(qdot)\n f2_q = (f2.jacobian(qi) + f2.jacobian(qd) * dqd_dqi +\n f2.jac_qdot * dqdot_dqi)\n f2_u = f2.jacobian(ui) + f2_jac_qdot * dqdot_dui\n # Third case: motion constraints only\n elif m != 0:\n dud_dqi = fnh.jacobian(ud).LUsolve(- fnh.jacobian(qi))\n dud_dui = - fnh.jacobian(ud).LUsolve(fnh.jacobian(ui))\n dqdot_dui = - self._k_kqdot.inv() * (fku.jacobian(ui) +\n fku.jacobian(ud) * dud_dui)\n dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +\n fkf.jacobian(qi) + fku.jacobian(ud) * dud_dqi)\n f1_jac_ud = f1.jacobian(ud)\n f2_jac_qdot = f2.jacobian(qdot)\n f2_jac_ud = f2.jacobian(ud)\n f1_q = f1.jacobian(qi) + f1_jac_ud * dud_dqi\n f1_u = f1.jacobian(ui) + f1_jac_ud * dud_dui\n f2_q = (f2.jacobian(qi) + f2_jac_qdot * dqdot_dqi + f2_jac_ud\n * dud_dqi)\n f2_u = (f2.jacobian(ui) + f2_jac_ud * dud_dui + f2_jac_qdot *\n dqdot_dui)\n # Fourth case: No constraints\n else:\n dqdot_dui = - self._k_kqdot.inv() * fku.jacobian(ui)\n dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +\n fkf.jacobian(qi))\n f1_q = f1.jacobian(qi)\n f1_u = f1.jacobian(ui)\n f2_jac_qdot = f2.jacobian(qdot)\n f2_q = f2.jacobian(qi) + 
f2_jac_qdot * dqdot_dqi\n f2_u = f2.jacobian(ui) + f2_jac_qdot * dqdot_dui\n f_lin_A = -(f1_q.row_join(f1_u)).col_join(f2_q.row_join(f2_u))\n if other_dyns:\n f1_oths = f1.jacobian(other_dyns)\n f2_oths = f2.jacobian(other_dyns)\n f_lin_B = -f1_oths.col_join(f2_oths)\n else:\n f_lin_B = Matrix()\n return (f_lin_A, f_lin_B, Matrix(other_dyns))",
"def update(self):\n self.tick()\n\n self.ball.update(self)\n for c in self.balls:\n print(\"updated ball\")\n c.update(self)\n for c in self.balls:\n for z in self.blocks:\n print(\"updated ball/block\")\n collideWithBlock(z,c)\n for c in self.balls:\n for d in self.balls:\n if not c == d:\n print(\"updated ball/ball\")\n liquidInfluence(c, d)",
"def updateall(self, params):\n for gauge in self.gauges:\n self.safexec(gauge.updateall, params)",
"def update_all_layers(self,t1=0.0,t2=0.0,phi=0.0,m=0.0,t31=0.0, t32=0.0, randomly=False,sigma=0.03):\n if randomly:\n for i in range(self.NL):\n self.layers[i].update_values( t1*(1.+np.random.randn(1)*sigma) ,t2*(1.+np.random.randn(1)*sigma) ,phi*(1.+np.random.randn(1)*sigma) ,m*(1.+np.random.randn(1)*sigma) ,t31*(1.+np.random.randn(1)*sigma), t32*(1.+np.random.randn(1)*sigma) )\n else:\n for i in range(self.NL):\n self.layers[i].update_values( t1 ,t2 ,phi ,m ,t31 ,t32 )",
"def solve(self,notifications = False):\n if notifications:\n print('[info]: Solving differential equations for '+self.name+' model. ')\n \n # getting the time values\n self.days_list = np.linspace(self.tbeg,self.tend,self.npoints)\n\n # calling the odeint method to solve the diff. equations\n self.x = odeint(self.diff_eq,self.x0,self.days_list,args = (self.par,))\n \n #setting the variables\n self.confirmed_list = self.x[:,1] + self.x[:,2] + self.x[:,3]\n self.recovered_list = self.x[:,2]\n self.death_list = self.x[:,3]",
"def solve(self,notifications = False):\n if notifications:\n print('[info]: Solving differential equations for '+self.name+' model. ')\n \n # getting the time values\n self.days_list = np.linspace(self.tbeg,self.tend,self.npoints)\n\n # calling the odeint method to solve the diff. equations\n self.x = odeint(self.diff_eq,self.x0,self.days_list,args = (self.par,))\n \n #setting the variables\n self.confirmed_list = self.x[:,1] + self.x[:,2] + self.x[:,3]\n self.recovered_list = self.x[:,2]\n self.death_list = self.x[:,3]",
"def update_(self):\n for function in self.functions:\n try:\n function()\n except Exception as err:\n self.log.info(\n \"While updating the framework an error happened in function {} with error: {}\".format(\n function, err\n )\n )\n raise",
"def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n graph = collections.defaultdict(list)\n n = len(values)\n for i in range(n):\n dd, d = equations[i]\n graph[dd].append((d, values[i]))\n graph[d].append((dd, 1/values[i]))\n \n res = []\n for dd, d in queries:\n res.append(self.dfs(graph, dd, d, set()))\n \n return res",
"def _map_state_vars_and_eqs(self):\n\n def get_used_eqs_and_state_vars(eq_to_expand, equations):\n \"\"\" Returns used equations and state vars for a given equation\n\n :param eq_to_expand: list containing equations to recurse over and expand definitions for\n note: expecting equations in [(lhs, rhs)] form.\n :param equations: set of equations to look for definitions in.\n :return: set of equations and set of used state vars.\n \"\"\"\n used_state_vars = set()\n for eq in eq_to_expand:\n for v in eq[1].atoms(Derivative) | eq[1].free_symbols:\n if v in self._model.state_vars:\n used_state_vars.add(v)\n elif v not in [e[0] for e in eq_to_expand]:\n eq_to_expand.extend(filter(lambda e: e[0] == v, equations))\n return set(eq_to_expand), used_state_vars\n\n for i, deriv in enumerate(self._model.y_derivatives):\n equations, used_state_vars = \\\n get_used_eqs_and_state_vars([(d.lhs, d.rhs) for d in self._derivative_equations if d.lhs == deriv],\n set(map(lambda e: (e.lhs, e.rhs), self._derivative_equations)))\n\n # get all the variables used in jacobian matrix entry and all variables used to define them\n used_jacobian_vars, used_jacobian_state_vars = \\\n get_used_eqs_and_state_vars([(None, self._jacobian_matrix[i, i])], set(self._jacobian_equations))\n\n for sv in self._formatted_state_vars:\n sv.setdefault('in_evaluate_y_derivative', []).append(sv['sympy_var'] in used_state_vars)\n sv.setdefault('in_evaluate_partial_derivative', []).append(sv['sympy_var'] in used_jacobian_state_vars)\n\n for eq in self._vars_for_template['y_derivative_equations']:\n self.eq_in_evaluate_y_derivative(eq, equations)\n\n for je in self._vars_for_template['jacobian_equations']:\n self.eq_in_evaluate_partial_derivative(je, used_jacobian_vars)",
"def solve(self, niter):\n E = self.list_PDE[0]\n I = self.list_PDE[1]\n\n # ...\n un = E.unknown\n unew = I.unknown\n\n# un.set(E.rhs)\n # ...\n\n # ...\n for i in range(0,niter):\n\n rhs = E.dot(un)\n I.solve(rhs)\n\n un.set(unew)\n # ...",
"def apply_updates(\n self, unused_unit, unused_updated_unit_dict, unused_errors):\n raise Exception('Not implemented')",
"def note_update(self, upd_note_handle_list):\n for handle in upd_note_handle_list :\n if handle in self.data:\n self.rebuild()\n break",
"def solve_equations(equations):\n # variables in the system of equations\n var_list = list(reduce(set.union, (set(var for coeff, var in eqn if var)\n for eqn in equations)))\n # number of variables\n num_vars = len(var_list)\n # the index of each variable in |var_list|\n var_index = dict(zip(var_list, range(num_vars)))\n # matrices to solve system (Ax = b)\n A, b = [], []\n # populate matrices\n for equation in equations:\n coeffs, const = [0] * num_vars, 0\n for coeff, var in equation:\n if var:\n coeffs[var_index[var]] += coeff\n else:\n const -= coeff\n A.append(coeffs)\n b.append([const])\n try:\n # solve system\n x = solve(matrix(A), matrix(b))\n return dict(zip(var_list, [x[i, 0] for i in xrange(num_vars)]))\n except:\n raise Exception('Could not solve system of equations')",
"def updateAllParams(self):\n try:\n self.sigTreeStateChanged.disconnect(self.updateSystem)\n reconnect = True\n except TypeError:\n reconnect = False\n try:\n with self.treeChangeBlocker():\n for param in self:\n constraints = self.system._vars[param.name()][3]\n if 'f' in constraints:\n fixed = param['fixed']\n else:\n fixed = None\n\n\n if fixed is True:\n self.updateParam(param, 'fixed')\n else:\n try: # value is auto-generated\n val = getattr(self.system, param.name())\n if param.type() == 'str':\n param.setValue(repr(val))\n else:\n param.setValue(val)\n param.setReadonly(True)\n if fixed is False:\n self.updateParam(param, 'autoFixable')\n else:\n self.updateParam(param, 'auto')\n\n except RuntimeError: \n if fixed is not None: # no value, fixable\n self.updateParam(param, 'incomplete')\n else:\n self.updateParam(param, 'unconstrained')\n\n finally:\n if reconnect:\n self.sigTreeStateChanged.connect(self.updateSystem)",
"def equations(self):\n k = 0\n ######################################################################\n # equations for fluid balance\n self.residual[k:k + self.num_nw_fluids * 2] = self.fluid_func()\n k += self.num_nw_fluids * 2\n\n ######################################################################\n # equations for mass flow balance\n self.residual[k:k + 2] = self.mass_flow_func()\n k += 2\n\n ######################################################################\n # equations for energy balance\n self.residual[k] = self.energy_func()\n k += 1\n\n ######################################################################\n # equations for specified heat transfer\n if self.Q.is_set:\n self.residual[k] = (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) - self.Q.val)\n k += 1\n\n ######################################################################\n # equations for specified heat transfer coefficient\n if self.kA.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.kA_func()\n k += 1\n\n ######################################################################\n # equations for specified heat transfer coefficient characteristic\n if self.kA_char.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.kA_char_func()\n k += 1\n\n ######################################################################\n # equations for specified upper terminal temperature difference\n if self.ttd_u.is_set:\n self.residual[k] = self.ttd_u_func()\n k += 1\n\n ######################################################################\n # equations for specified lower terminal temperature difference\n if self.ttd_l.is_set:\n self.residual[k] = self.ttd_l_func()\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio at hot side\n if self.pr1.is_set:\n self.residual[k] = (\n self.pr1.val * self.inl[0].p.val_SI - self.outl[0].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio at cold side\n if self.pr2.is_set:\n self.residual[k] = (\n self.pr2.val * self.inl[1].p.val_SI - self.outl[1].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified zeta at hot side\n if self.zeta1.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(\n zeta='zeta1', inconn=0, outconn=0)\n k += 1\n\n ######################################################################\n # equations for specified zeta at cold side\n if self.zeta2.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(\n zeta='zeta2', inconn=1, outconn=1)\n k += 1\n\n ######################################################################\n # additional equations\n self.additional_equations(k)",
"def update_inputs(self, compname, exprs):\n expr_info = []\n invalids = []\n\n if compname is not None:\n pred = self._exprmapper._exprgraph.pred\n if exprs:\n ex = ['.'.join([compname, n]) for n in exprs]\n exprs = []\n for e in ex:\n exprs.extend([expr for expr in self._exprmapper.find_referring_exprs(e)\n if expr in pred])\n else:\n exprs = [expr for expr in self._exprmapper.find_referring_exprs(compname)\n if expr in pred]\n for expr in exprs:\n srctxt = self._exprmapper.get_source(expr)\n if srctxt:\n srcexpr = self._exprmapper.get_expr(srctxt)\n invalids.extend(srcexpr.invalid_refs())\n expr_info.append((srcexpr, self._exprmapper.get_expr(expr)))\n\n # if source exprs reference invalid vars, request an update\n if invalids:\n for cname, vnames in partition_names_by_comp(invalids).items():\n if cname is None:\n if self.parent:\n self.parent.update_inputs(self.name, vnames)\n \n # If our source component is in a loop with us, don't\n # run it. Otherwise you have infinite recursion. It is\n # the responsibility of the solver to properly execute\n # the comps in its loop.\n elif self._graph_loops:\n for loop in self._graph_loops:\n if compname in loop and cname in loop:\n break\n else:\n getattr(self, cname).update_outputs(vnames)\n \n else:\n getattr(self, cname).update_outputs(vnames)\n #self.set_valid(vnames, True)\n\n for srcexpr, destexpr in expr_info:\n try:\n destexpr.set(srcexpr.evaluate(), src=srcexpr.text)\n except Exception as err:\n self.raise_exception(\"cannot set '%s' from '%s': %s\" %\n (destexpr.text, srcexpr.text, str(err)), type(err))"
] | [
"0.71400636",
"0.5981026",
"0.5746874",
"0.57122636",
"0.56178534",
"0.5491521",
"0.53655064",
"0.53652704",
"0.53611565",
"0.53611565",
"0.5353377",
"0.52511984",
"0.524344",
"0.5169635",
"0.51264143",
"0.5035766",
"0.5034198",
"0.49286285",
"0.49235213",
"0.49235213",
"0.4893059",
"0.4888762",
"0.48761746",
"0.4872401",
"0.48390514",
"0.48367348",
"0.48211896",
"0.4765582",
"0.47548652",
"0.47531077"
] | 0.861441 | 0 |
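The updateEquations record above refreshes only those equations that reference a freshly solved unknown (or all of them when updateAll is set). Below is a self-contained sketch of that selective-update pattern; the Eq stand-in class and its names are assumptions for illustration, not the dataset's own classes.

```python
# Illustrative sketch only: update equations whose unknowns intersect the set of
# recently solved unknowns, mirroring the updateEquations record above.
class Eq:
    def __init__(self, unknowns):
        self._unknowns = set(unknowns)
        self.updated = False

    def get_unknowns(self):
        return self._unknowns

    def update(self):
        self.updated = True

def update_equations(equations, updated_unknowns, update_all=False):
    for eq in equations:
        if update_all or any(u in eq.get_unknowns() for u in updated_unknowns):
            eq.update()

eqs = [Eq({'x', 'y'}), Eq({'z'})]
update_equations(eqs, updated_unknowns={'x'})
print([e.updated for e in eqs])  # [True, False]
```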
Solves the solvable LinearEquations in equations and returns the newly solved unknowns in the updatedUnknowns set. | def solve_solvableEquations(equations: List):
solvedEquations = []
updatedUnknowns = set()
for equation in equations:
equation.update()
if equation.isSolvable():
solution = equation.solve()
unknownAddress = list(solution.keys())[0]
setattr_fromAddress(object=unknownAddress[0], attributeName=unknownAddress[1], value=solution[unknownAddress])
updatedUnknowns.add(unknownAddress)
solvedEquations.append(equation)
for equation in solvedEquations:
equations.remove(equation)
return updatedUnknowns | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solve_combination_ofEquations(equations: List, number_ofEquations: int) -> Set:\n updatedUnknowns = set()\n\n for equationCombination in combinations(equations, number_ofEquations):\n\n # If any of the equations got solved in a previous iteration and got removed from _equations, skip this combination\n # Combinations are generated beforehand at the beginning of the main for loop.\n if any(equation not in equations for equation in equationCombination):\n continue\n\n if (system := System_ofLinearEquations(list(equationCombination))).isSolvable():\n solution = system.solve()\n unknownAddresses = list(solution.keys())\n for unknownAddress in unknownAddresses:\n setattr_fromAddress(object=unknownAddress[0], attributeName=unknownAddress[1], value=solution[unknownAddress])\n updatedUnknowns.add(unknownAddress)\n\n # If system is solved, all equations in the combination is solved. Remove them from equations pool.\n for equation in equationCombination:\n equations.remove(equation)\n\n return updatedUnknowns",
"def solve_nonlinear(self, params, unknowns, resids):\n pass",
"def solve_nonlinear(self, params, unknowns, resids):\n pass",
"def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n a = self.a\n b = self.b\n c = self.c\n\n unknowns['y'] = a*x**2 + b*x + c",
"def updateEquations(equations: List, updatedUnknowns: Set, updateAll: bool = False):\n for equation in equations:\n if updateAll or any(unknown in equation.get_unknowns() for unknown in updatedUnknowns):\n equation.update()\n updatedUnknowns = set()",
"def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n m = self.slope\n b = self.intercept\n\n unknowns['y'] = m*x + b",
"def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n y = params['y']\n\n unknowns['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0",
"def solve_nonlinear(self, params, unknowns, resids):\n\n x = hash(params['x'])\n y = params['y']\n\n unknowns['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0",
"def solve_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n z = unknowns['z']\n znew = z\n\n iter = 0\n eps = 1.0e99\n while iter < self.maxiter and abs(eps) > self.atol:\n z = znew\n znew = 4.0 - x*z\n\n eps = x*znew + znew - 4.0\n\n unknowns['z'] = znew\n unknowns['y'] = x + 2.0*znew\n\n resids['z'] = eps\n #print(unknowns['y'], unknowns['z'])",
"def solve_equations(equations):\n # variables in the system of equations\n var_list = list(reduce(set.union, (set(var for coeff, var in eqn if var)\n for eqn in equations)))\n # number of variables\n num_vars = len(var_list)\n # the index of each variable in |var_list|\n var_index = dict(zip(var_list, range(num_vars)))\n # matrices to solve system (Ax = b)\n A, b = [], []\n # populate matrices\n for equation in equations:\n coeffs, const = [0] * num_vars, 0\n for coeff, var in equation:\n if var:\n coeffs[var_index[var]] += coeff\n else:\n const -= coeff\n A.append(coeffs)\n b.append([const])\n try:\n # solve system\n x = solve(matrix(A), matrix(b))\n return dict(zip(var_list, [x[i, 0] for i in xrange(num_vars)]))\n except:\n raise Exception('Could not solve system of equations')",
"def solveAll(self) :\n return [g for g in self if not g.solveAll()]",
"def solve(self, **kwargs):\n return self.system.solve(**kwargs)",
"def solve(self, solver):\n solver.solve()",
"def solve(self,notifications = False):\n if notifications:\n print('[info]: Solving differential equations for '+self.name+' model. ')\n \n # getting the time values\n self.days_list = np.linspace(self.tbeg,self.tend,self.npoints)\n\n # calling the odeint method to solve the diff. equations\n self.x = odeint(self.diff_eq,self.x0,self.days_list,args = (self.par,))\n \n #setting the variables\n self.confirmed_list = self.x[:,1] + self.x[:,2] + self.x[:,3]\n self.recovered_list = self.x[:,2]\n self.death_list = self.x[:,3]",
"def solve(self,notifications = False):\n if notifications:\n print('[info]: Solving differential equations for '+self.name+' model. ')\n \n # getting the time values\n self.days_list = np.linspace(self.tbeg,self.tend,self.npoints)\n\n # calling the odeint method to solve the diff. equations\n self.x = odeint(self.diff_eq,self.x0,self.days_list,args = (self.par,))\n \n #setting the variables\n self.confirmed_list = self.x[:,1] + self.x[:,2] + self.x[:,3]\n self.recovered_list = self.x[:,2]\n self.death_list = self.x[:,3]",
"def solve(self):\n pass",
"def solve(self):\n pass",
"def _solve_linear_system(self):\n\n # Solve the linear system\n centered_list_y, mean = self._center_data(self.list_y)\n y = np.linalg.solve(self.cov_matrix, centered_list_y)\n # Assert the resolution of the linear system went well\n assert np.allclose(np.array(centered_list_y), self.cov_matrix @ y)\n\n return y, mean",
"def apply_nonlinear(self, params, unknowns, resids):\n\n x = params['x']\n z = unknowns['z']\n resids['z'] = x*z + z - 4.0\n\n # Output equations need to evaluate a residual just like an explicit comp.\n resids['y'] = x + 2.0*z - unknowns['y']\n #print(x, unknowns['y'], z, resids['z'], resids['y'])",
"def solve(self):\n for step in self.run.values():\n step.solve()",
"def solve(self, X,missing_mask):\n raise ValueError(\"%s.solve not yet implemented!\" % (\n self.__class__.__name__,))",
"def solve(self,\n notifications = False\n ):\n\n if notifications:\n print('[info]: Solving differential equations for '+self.name+' model. ')\n \n\n \n # getting the time values\n self.days_list = np.linspace(self.tbeg,self.tend,self.npoints)\n\n # calling the odeint method to solve the diff. equations\n self.x = odeint(self.diff_eq,self.x0,self.days_list,args = (self.par,))\n '''\n Its important to note that (par_est,) is the way to define a tuple\n with just one element. When we put (par_est), the parenteses won't\n indicate a tuple\n '''\n \n #setting the variables\n self.confirmed_list = self.x[:,1] + self.x[:,2] + self.x[:,3]\n self.recovered_list = self.x[:,2]\n self.death_list = self.x[:,3]",
"def solve(self,\n notifications = False\n ):\n\n if notifications:\n print('[info]: Solving differential equations for '+self.name+' model. ')\n \n\n \n # getting the time values\n self.days_list = np.linspace(self.tbeg,self.tend,self.npoints)\n\n # calling the odeint method to solve the diff. equations\n self.x = odeint(self.diff_eq,self.x0,self.days_list,args = (self.par,))\n '''\n Its important to note that (par_est,) is the way to define a tuple\n with just one element. When we put (par_est), the parenteses won't\n indicate a tuple\n '''\n \n #setting the variables\n self.confirmed_list = self.x[:,1] + self.x[:,2] + self.x[:,3]\n self.recovered_list = self.x[:,2]\n self.death_list = self.x[:,3]",
"def solve(self, niter):\n E = self.list_PDE[0]\n I = self.list_PDE[1]\n\n # ...\n un = E.unknown\n unew = I.unknown\n\n# un.set(E.rhs)\n # ...\n\n # ...\n for i in range(0,niter):\n\n rhs = E.dot(un)\n I.solve(rhs)\n\n un.set(unew)\n # ...",
"def solve(self) :\n for g in self :\n if not g.solve() :\n return g\n return None",
"def solve(self):\n \n raise NotImplementedError(\"not implemented!\")",
"def solve(self):\n \n # getting the time values\n self.days_list = np.linspace(self.tbeg,self.tend,self.npoints)\n\n # calling the odeint method to solve the diff. equations\n self.x = odeint(self.diff_eq,self.x0,self.days_list,args = (self.par,))\n '''\n Its important to note that (par_est,) is the way to define a tuple\n with just ode element. When we put (par_est), the parenteses won't\n indicate a typle\n '''\n \n #setting the variables\n self.confirmed_list = self.x[:,1] + self.x[:,2] + self.x[:,3]\n self.recovered_list = self.x[:,2]\n self.death_list = self.x[:,3]",
"def solve(self):\n\n constrains, bounds = self.init_constraint_list()\n result = minimize(self.objective_function,\n x0=self.init_guess,\n constraints=constrains,\n bounds=bounds,\n options={'disp': False})\n\n return result",
"def solve(self):",
"def solve(self):\n ..."
] | [
"0.70187455",
"0.69558996",
"0.69558996",
"0.6913716",
"0.6836692",
"0.6721397",
"0.668759",
"0.6643494",
"0.6503861",
"0.6311069",
"0.6079616",
"0.6071727",
"0.60672903",
"0.5963581",
"0.5963581",
"0.59088594",
"0.59088594",
"0.58770156",
"0.5818542",
"0.5798993",
"0.5780101",
"0.57768184",
"0.57768184",
"0.5763496",
"0.5741845",
"0.5741254",
"0.5685601",
"0.56787664",
"0.56759226",
"0.5669172"
] | 0.78649527 | 0 |
Iterates through combinations of equations (from the equations pool) with the specified number_ofEquations. For each combination, checks whether the system is solvable; if so, solves it, assigns the unknowns their solution values, and removes the solved equations from the _equations pool. | def solve_combination_ofEquations(equations: List, number_ofEquations: int) -> Set:
updatedUnknowns = set()
for equationCombination in combinations(equations, number_ofEquations):
# If any of the equations got solved in a previous iteration and got removed from _equations, skip this combination
# Combinations are generated beforehand at the beginning of the main for loop.
if any(equation not in equations for equation in equationCombination):
continue
if (system := System_ofLinearEquations(list(equationCombination))).isSolvable():
solution = system.solve()
unknownAddresses = list(solution.keys())
for unknownAddress in unknownAddresses:
setattr_fromAddress(object=unknownAddress[0], attributeName=unknownAddress[1], value=solution[unknownAddress])
updatedUnknowns.add(unknownAddress)
        # If the system is solved, all equations in the combination are solved. Remove them from the equations pool.
for equation in equationCombination:
equations.remove(equation)
return updatedUnknowns | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solve_solvableEquations(equations: List):\n solvedEquations = []\n updatedUnknowns = set()\n\n for equation in equations:\n equation.update()\n if equation.isSolvable():\n solution = equation.solve()\n unknownAddress = list(solution.keys())[0]\n setattr_fromAddress(object=unknownAddress[0], attributeName=unknownAddress[1], value=solution[unknownAddress])\n updatedUnknowns.add(unknownAddress)\n solvedEquations.append(equation)\n\n for equation in solvedEquations:\n equations.remove(equation)\n\n return updatedUnknowns",
"def solve(self, niter):\n E = self.list_PDE[0]\n I = self.list_PDE[1]\n\n # ...\n un = E.unknown\n unew = I.unknown\n\n# un.set(E.rhs)\n # ...\n\n # ...\n for i in range(0,niter):\n\n rhs = E.dot(un)\n I.solve(rhs)\n\n un.set(unew)\n # ...",
"def solve_equations(equations):\n # variables in the system of equations\n var_list = list(reduce(set.union, (set(var for coeff, var in eqn if var)\n for eqn in equations)))\n # number of variables\n num_vars = len(var_list)\n # the index of each variable in |var_list|\n var_index = dict(zip(var_list, range(num_vars)))\n # matrices to solve system (Ax = b)\n A, b = [], []\n # populate matrices\n for equation in equations:\n coeffs, const = [0] * num_vars, 0\n for coeff, var in equation:\n if var:\n coeffs[var_index[var]] += coeff\n else:\n const -= coeff\n A.append(coeffs)\n b.append([const])\n try:\n # solve system\n x = solve(matrix(A), matrix(b))\n return dict(zip(var_list, [x[i, 0] for i in xrange(num_vars)]))\n except:\n raise Exception('Could not solve system of equations')",
"def create_solutions(self, num_of_solutions):\r\n solutions = []\r\n for _ in range(num_of_solutions):\r\n polygons = self.create_polygons()\r\n solution = Solution(genetic_units=polygons, fitting_function=self.fitting_function)\r\n solutions.append(solution)\r\n return solutions",
"def updateEquations(equations: List, updatedUnknowns: Set, updateAll: bool = False):\n for equation in equations:\n if updateAll or any(unknown in equation.get_unknowns() for unknown in updatedUnknowns):\n equation.update()\n updatedUnknowns = set()",
"def write_problems(self, valid_combos):\n\n # To be used when converting the equation to sympy\n transformations = standard_transformations + (implicit_multiplication_application,)\n\n # To preserve the order of the expression, may need to sub each arg separately and then reconstruct the whole equation (sympy can identify each term as an arg)\n for combo in valid_combos:\n\n # Sympy doesn't like equations, so this allows it to evaluate\n # the left and right side independently\n subbed_left_side = self.equation[:self.equation.find('=')]\n subbed_right_side = self.equation[self.equation.find('=')+1:]\n\n for i, var in enumerate(self.variables):\n variable = var['variable']\n # Replace non-answer variables with values\n if i < len(self.variables)-1:\n subbed_left_side = subbed_left_side.replace(variable, str(combo['values'][variable]))\n subbed_right_side = subbed_right_side.replace(variable, str(combo['values'][variable]))\n\n # Store the answer(s)\n else:\n if len(combo['values'][variable]) == 1:\n combo['answer'] = f\"{variable} = {combo['values'][variable][0]}\"\n else:\n combo['answer'] = f\"{variable} = {combo['values'][variable]}\"\n\n # Latexify each side of the equation, then concatenate\n latex_left_side = latex(parse_expr(subbed_left_side,\n transformations=transformations,\n evaluate=False))\n latex_right_side = latex(parse_expr(subbed_right_side,\n transformations=transformations,\n evaluate=False))\n latex_problem = str(latex_left_side) + ' = ' + str(latex_right_side)\n\n combo['problem'] = str(latex_problem)",
"def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)",
"def solve(self):\r\n while not self.done():\r\n self.no_open_cells()\r\n self.all_cells_are_mines()\r\n self.no_mines()\r\n if not self.done():\r\n self.obvious_cells()\r\n if not self.done():\r\n made_progress = self.safe_neighbour_difference()\r\n if made_progress:\r\n continue\r\n if not self.done():\r\n made_progress = self.adjacent_combinations()\r\n if made_progress:\r\n continue\r\n return",
"def get_n_solutions(self, n):\n return [self.get_solution() for _ in range(n)]",
"def solve(num_wizards, num_constraints, wizards, constraints): \n global wiz_const\n wiz_const = mapConstraints(wizards, constraints)\n partial_soltns = []\n\n # counter for priority queue since it doesn't allow \n # identical priorities\n k = 0\n\n # list of wizards sorted by lowest to highest degree\n sorted_wiz = sortWizByConsts(wiz_const)\n wiz_rankings = {wiz: i for i, wiz in enumerate(sorted_wiz)}\n\n const_set = set(map(tuple, constraints))\n for i in range(4) : \n heapq.heappush(partial_soltns, (0, k, nx.DiGraph(), const_set.copy()))\n k += 1\n\n print(\"setup done, commencing solving\")\n\n while len(partial_soltns) : \n\n # for partial_soltn, const_set in partial_soltns : \n# partial_soltns.remove(partial_soltn)\n num_seen, _, partial_soltn, const_set = heapq.heappop(partial_soltns)\n const = findNextConst(partial_soltn, const_set, wiz_rankings)\n print(\"seen \" + str(len(partial_soltn)) + \"\\t num partial_solutions\\t\" + str(len(partial_soltns)))\n try : \n const_set.remove(const)\n except KeyError : \n print(\"BAD SHIT\")\n pass\n possible_arrangements = [(const[0], const[1], const[2]),\n (const[2], const[0], const[1]), \n (const[2], const[1], const[0]),\n (const[1], const[0], const[2])]\n for arr in possible_arrangements:\n soltn = partial_soltn.copy()\n a, b, c = arr\n if not (soltn.has_node(a) and soltn.has_node(b) and nx.has_path(soltn, a, b)) : \n soltn.add_edge(a, b)\n if not (soltn.has_node(b) and soltn.has_node(c) and nx.has_path(soltn, b, c)) : \n soltn.add_edge(b, c)\n # see if we violated any other constraints (seen or not seen)\n is_valid, num_wiz = validNumWiz(soltn, const_set)\n\n if is_valid and len(list(nx.simple_cycles(soltn))) == 0 :\n heapq.heappush(partial_soltns, (-len(soltn), k, soltn, const_set.copy()))\n k += 1\n # are we done?\n if num_wiz == num_wizards :\n print(\"FINAL SOLUTION (found without processing all constraints but validating against them)\")\n ordering = list(nx.topological_sort(soltn))\n finishEverything(ordering, constraints)\n return ordering\n if foundCompleteOrdering(heapq.heappop(partial_soltns)) : \n print(\"FINAL SOLUTION\")\n ordering = list(nx.topological_sort(soltn))\n finishEverything(ordering, constraints)\n return ordering\n print(\"NO SOLUTION FOUND\")\n return \"\"",
"def test_create_satifiable_problems(self):\n # runs 10 attempts to generate satisfiable problems\n for _ in range(10):\n clauses = create_sat_problem(True, 'kcolor', 3, 'gnp', 20, 0.2)\n with Solver(name='Glucose3', bootstrap_with=clauses) as solver:\n self.assertTrue(solver.solve())",
"def generate_problems(self):\n\n prepped_equation = self.prep_equation()\n var_ranges = self.generate_var_ranges()\n input_array = self.generate_input_array(var_ranges)\n valid_combos = self.generate_valid_combos(prepped_equation, var_ranges, input_array)\n self.write_problems(valid_combos)\n self.dict['problems'] = valid_combos\n logging.info(f\"Generated {len(self.dict['problems'])} valid problems.\")\n logging.info(self.dict['problems'])",
"def solveAll(self) :\n return [g for g in self if not g.solveAll()]",
"def generate_valid_combos(self, prepped_equation, var_ranges, input_array):\n\n valid_combos = []\n\n # Generate the set of valid answer values as a FiniteSet. The\n # FiniteSet is necessary because sympy returns a FiniteSet when\n # it solves equations.\n solution_set = FiniteSet(*var_ranges[str(self.x)])\n\n # For every variable combination, substitute the values for each\n # input variable into the final_equation so sympy can solve for the\n # remaining variable.\n\n for var_values in input_array:\n final_equation = prepped_equation\n for i, var in enumerate(self.variables):\n if i < len(self.variables)-1:\n final_equation = final_equation.subs(var['variable'], var_values[i])\n\n # Solve for self.x.\n answer = solveset(final_equation, self.x)\n\n #### Currently, this is just rigged to capture when we have a single integer solution\n if self.dict['positive_only'] == True:\n answer = answer.intersection(ConditionSet(x, x > 0))\n\n # Add valid combinations to valid_combos list, with each valid combo as a dict\n if answer.issubset(solution_set) and answer != set():\n valid_combo = {}\n valid_combo['values'] = {}\n\n # Add variable values to dict\n for i, var in enumerate(self.variables):\n if i < len(self.variables)-1:\n valid_combo['values'][var['variable']] = int(var_values[i]) ### Forces int, which needs to be updated\n\n # Add answer value(s) to dict\n valid_combo['values'][self.x] = [int(i) for i in answer]\n\n valid_combos.append(valid_combo)\n\n return valid_combos",
"def compute(self):\n\n self.setd = []\n self.satc = [False for cl in self.soft] # satisfied clauses\n self.solution = None\n self.bb_assumps = [] # backbone assumptions\n self.ss_assumps = [] # satisfied soft clause assumptions\n\n if self.oracle.solve():\n # hard part is satisfiable => there is a solution\n self._filter_satisfied(update_setd=True)\n self._compute()\n\n self.solution = list(map(lambda i: i + 1, filter(lambda i: not self.satc[i], range(len(self.soft)))))\n\n return self.solution",
"def gen_all_solutions(self, n_candidats):\n all_solutions = []\n for i in range(self.n_ants):\n # Positionner la fourmis sur un objets de départ aleatoirement\n n = rn.randint(0, self.n_objets - 1)\n # generation de la solution par la fourmis en utilisant n_candidats\n solution = self.gen_sol(n, n_candidats)\n\n # ajouter la solution a la liste de toute les solutions\n all_solutions.append((solution[0], solution[1], solution[2]))\n return all_solutions",
"def solve_with_oil(self, items):\n\t\tresults = self.solve_all(items)\n\t\tresults, further_inputs = self.solve_oil(results)\n\t\tmerge_processes_into(results, self.solve_all(further_inputs))\n\t\treturn results",
"def assess_all_solutions_clasically(self):\n all_possible_solutions = list(itertools.product([0, 1], repeat = len(self.adj_matrix)))\n for solution in all_possible_solutions:\n print(solution, self._calculate_cost_once(solution))",
"def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n graph = collections.defaultdict(list)\n n = len(values)\n for i in range(n):\n dd, d = equations[i]\n graph[dd].append((d, values[i]))\n graph[d].append((dd, 1/values[i]))\n \n res = []\n for dd, d in queries:\n res.append(self.dfs(graph, dd, d, set()))\n \n return res",
"def solve(self):\n for step in self.run.values():\n step.solve()",
"def validate_solutions(self, solutions: List[List['Package']], needed_packages: Sequence['Package']) -> List[\n Tuple['System', List['Package']]]:\n\n # calculate new systems\n new_systems = [self.hypothetical_append_packages_to_system(solution) for solution in solutions]\n valid_systems_tuples = []\n # find valid systems\n for i, new_system in enumerate(new_systems):\n for package in needed_packages:\n if package.name not in new_system.all_packages_dict:\n break\n else:\n valid_systems_tuples.append((new_system, solutions[i]))\n\n # no valid solutions\n if not valid_systems_tuples:\n return []\n\n # calculate the differences between the resulting systems for the valid solutions\n systems_differences = self.differences_between_systems(\n [valid_systems_tuple[0] for valid_systems_tuple in valid_systems_tuples])\n\n # delete duplicate resulting systems\n return_list = []\n already_seen_differences = set()\n for i, valid_systems_tuple in enumerate(valid_systems_tuples):\n difference_set = frozenset(set.union(systems_differences[1][i][0], systems_differences[1][i][1]))\n if difference_set not in already_seen_differences:\n already_seen_differences.add(difference_set)\n return_list.append(valid_systems_tuple)\n\n return return_list",
"def solve_all_parallel(self, use_cache=True):\n self.generate_test_instances()\n\n # workers = multiprocessing.cpu_count()/2\n workers = 8\n\n # create two queues: one for files, one for results\n work_queue = multiprocessing.Queue()\n done_queue = multiprocessing.Queue()\n processes = []\n\n # add filepaths to work queue\n # format is (problemID, configID)\n # start processes\n if use_cache:\n cachedResults = {}\n try:\n with open(self.cacheFile, \"rb\") as f:\n cachedResults = pkl.load(f)\n except: # pragma: no cover\n print(\"Creating new cache file: {}\".format(self.cacheFile))\n with open(self.cacheFile, \"wb\") as f:\n for instance in self.instances:\n instancehash = hash(instance)\n if instancehash in cachedResults:\n # Retrieve TestResult from the results dictionary:\n self.results.append(cachedResults[instancehash])\n else:\n # Add this result to the cache\n work_queue.put((instance.testproblem.problemID, instance.solverconfig.configID))\n\n else:\n for instance in self.instances:\n work_queue.put((instance.testproblem.problemID, instance.solverconfig.configID))\n\n for w in range(workers):\n p = multiprocessing.Process(target=worker,\n args=(self.problemDir,\n self.configDir,\n work_queue,\n done_queue))\n p.start()\n processes.append(p)\n work_queue.put((STOP,STOP))\n\n # Poll done_queue and empty it right away.\n # keep track of the number of poison pills we get-\n # once it's equal to the number of workers, stop.\n processes_left = workers\n while processes_left:\n\n if not done_queue.empty():\n result = done_queue.get()\n if result == STOP:\n processes_left -= 1\n print(\"Processes left: {}\".format(str(processes_left)))\n else:\n self.results.append(result)\n if use_cache: # Add new cached result to the cache.\n with open(self.cacheFile, \"wb\") as f:\n cachedResults[result.instancehash] = result\n pkl.dump(cachedResults, f)\n time.sleep(0.5) # Wait for processes to run.\n\n for p in processes:\n print(\"process {} exited with code {}\".format(p,p.exitcode))\n return",
"def solve(self):\n\n # Assign variables to each quantity being solved.\n r_lookup, lookup, num = {}, {}, 0\n for element in self.elements:\n if is_wire(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n elif not is_cs(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n\n # Set up the linear algebraic equation Ax=b\n A = np.zeros((num, num))\n b = np.zeros(num)\n for row, element in lookup.items():\n if is_wire(element) and element is not self.ground:\n for two_sided in element.attached:\n if is_cs(two_sided):\n if two_sided.pos is element:\n b[row] += -1 * two_sided.current\n else:\n b[row] += two_sided.current\n else:\n if two_sided.pos is element:\n flow = 1\n else:\n flow = -1\n A[row, r_lookup[two_sided]] = flow\n elif is_vs(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n b[row] = element.voltage\n elif is_resistor(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n A[row, r_lookup[element]] = -1 * element.resistance\n\n b = b.reshape((num, 1))\n try:\n x = np.linalg.solve(A, b)\n except np.linalg.LinAlgError:\n raise CircuitError('Insufficient information to solve circuit')\n\n # Assign values to all circuit components\n for i in range(num):\n item = lookup[i]\n if is_wire(item):\n item.potential = x[i, 0]\n elif isinstance(item, DualSided):\n item.current = x[i, 0]\n\n # Mark circuit as solved\n self.been_solved = True",
"def generate_solvated_ensemble(orig_mol, mol_id, solvent_mol, solvent_id, n_solvent, ensemble_size):\n \n #joblib fails, multiprocessing + pool.map fails\n # maybe need to try using partial + pool.map?\n # e.g. http://stackoverflow.com/questions/16542261/python-multiprocessing-pool-with-map-async\n\n #from joblib import Parallel, delayed\n #if we are in a pbs job parallelise loop\n #ncpus = os.environ.get('NCPUS', 1)\n #ensemble = Parallel(n_jobs=ncpus)( \\\n # delayed(solvate)(orig_mol, mol_id, solvent_mol, solvent_id, n_solvent) for i in range(ensemble_size) \\\n # )\n \n #p_solvate = lambda e: solvate(orig_mol, mol_id, solvent_mol, solvent_id, n_solvent)\n #ncpus = os.environ.get('NCPUS', 1)\n #pool = Pool(processes=ncpus)\n #ensemble = pool.map(p_solvate, range(ensemble_size)) \n #ensemble_dfs = [delayed(solvate)(orig_mol, mol_id, solvent_mol, solvent_id, n_solvent) for i in range(ensemble_size)]\n #ensemble = Parallel(n_jobs=ncpus)(ensemble_dfs)\n #ensemble = [df[0](*df[1]) for df in ensemble_dfs] \n \n ensemble = (solvate(orig_mol, mol_id, solvent_mol, solvent_id, n_solvent) for i in range(ensemble_size))\n \n return ensemble",
"def forqs_parallel(configs):\n pool = Pool(21)\n pool.map(forqs_sim, configs)\n pool.close()\n pool.join()",
"def solve_all(grids, name='', showif=0.0):\n\n def time_solve(grid):\n start = time.clock()\n values = solve(grid)\n t = time.clock() - start\n ## Display puzzles that take long enough\n if showif is not None and t > showif:\n display(grid_values(grid))\n if values: display(values)\n print('(%.2f seconds)\\n' % t)\n return (t, solved(values))\n\n times, results = zip(*[time_solve(grid) for grid in grids])\n N = len(grids)\n if N > 1:\n print(\"Solved %d of %d %s puzzles (avg %.2f secs (%d Hz), max %.2f secs).\" % (\n sum(results), N, name, sum(times) / N, N / sum(times), max(times)))",
"def _check_determinancy(self, values, errors, combo):\n val, err = self.used_vars(values, errors, combo)\n n, m = len(val), len(self.get_equations(combo))\n\n if n != m:\n if m > n:\n s = '>'\n t = 'remove'\n v = err\n else:\n s = '<'\n t = 'add'\n v = val\n\n a = abs(n - m)\n\n raise ValueError('Indeterminant system:: Number of equations ({}) '\n '{} number of unknowns ({}). To correct, {} ({}) errors in {} '\n 'or adjust the input equations.'.format(m, s, n, t, a, v))",
"def select(individuals_list, energy_system_solutions_dict, population_size, optimization_tracker=None):\n # create a dictionary associating the connectivity 'str'-expression to the corresponding objects\n individual_dict = {ind.as_str(): ind for ind in individuals_list}\n\n # combine all energy system solutions (i.e. supply system combinations) in one large list\n all_supsys_combinations = sum(energy_system_solutions_dict.values(), start=[])\n nbr_solutions = len(all_supsys_combinations)\n\n # perform non-dominated sorting on the list of energy system solutions and identify which connectivity-vector's\n # solutions appear in which front (i.e. order of non-dominated front)\n supsys_combination_solution_fronts = tools.emo.sortLogNondominated(all_supsys_combinations, nbr_solutions)\n connectivity_vectors_by_front = {front: list(set([supsys_combination.encoding[0]\n for supsys_combination in solutions_in_front]))\n for front, solutions_in_front in enumerate(supsys_combination_solution_fronts)}\n\n # select a new population of 'best' connectivity vectors\n new_population = []\n for front in connectivity_vectors_by_front.values():\n if len(new_population) >= population_size:\n break\n\n for connectivity_vector in front:\n if len(new_population) >= population_size:\n break\n\n if connectivity_vector in individual_dict.keys():\n new_population += [individual_dict[connectivity_vector]]\n del individual_dict[connectivity_vector]\n\n if optimization_tracker:\n optimization_tracker.update_current_non_dominated_fronts(new_population, supsys_combination_solution_fronts)\n\n return new_population",
"def solve_nonlinear(self, params, unknowns, resids):\n pass",
"def solve_nonlinear(self, params, unknowns, resids):\n pass"
] | [
"0.6905477",
"0.6124301",
"0.6108061",
"0.591164",
"0.5895522",
"0.5372257",
"0.536119",
"0.5250545",
"0.5245906",
"0.52004254",
"0.51897514",
"0.51829034",
"0.5087571",
"0.50711465",
"0.50709516",
"0.50574046",
"0.5053622",
"0.50343215",
"0.50048316",
"0.4997917",
"0.49872226",
"0.4985751",
"0.4969882",
"0.4964029",
"0.49561352",
"0.4943972",
"0.49379322",
"0.49353972",
"0.49186707",
"0.49186707"
] | 0.8106124 | 0 |
Get content from an image URL so it can be downloaded next. | def get_content(url):
img=requests.get(url).content
return img | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def get_url_images(session, url):\n content = await get_page(session, url)\n if not content:\n return []\n soup = BeautifulSoup(content, features=\"html.parser\")\n image_sources = [img['src'] for img in soup.find_all('img')]\n image_sources_fixed = [f'https:{source}' if 'https:' not in source else source for source in image_sources]\n images = []\n for source in image_sources_fixed:\n image = await get_image(session, source)\n if image:\n images.append((source, image))\n\n return images",
"def read_from_server(url_base=\"http://10.200.102.18/\", url_dir=\"G179-dataset/\"):\n\n all_images = urllib2.urlopen(url_base + url_dir).read()\n\n parser = ImagesHTMLParser()\n parser.feed(all_images)\n data = parser.data\n imgs = []\n\n print(\"Found %d images!\" % len(data))\n print(\"Started Download!\")\n i = 1\n\n for d in data:\n print(\"\\rProgress: %d/%d \" % (i, len(data)), end='')\n dl_img = urllib2.urlopen(url_base + url_dir + d).read()\n asd = cStringIO.StringIO(dl_img)\n img = Image.open(asd)\n imgs.append(np.array(img))\n i = i + 1\n\n return imgs",
"def download_image(urls):\r\n image_paths = []\r\n\r\n base_url = \"https://classifieds.castanet.net\"\r\n image_directory = os.path.join('C:\\\\', 'users', 'ccholon', 'my documents', 'castanet images')\r\n\r\n for url in urls:\r\n listing_url = base_url + url\r\n image_page = requests.get(listing_url)\r\n image_soup = BeautifulSoup(image_page.text, 'html.parser')\r\n\r\n # find the URL for the listing image\r\n image_element = image_soup.find(name='div', class_='image_container')\r\n image_element = image_element.find(name='img')\r\n image_url = image_element.get('src')\r\n\r\n # download the image\r\n #image = requests.get(image_url, stream=True)\r\n\r\n # save to local directory\r\n #image_file = open(os.path.join(image_directory, os.path.basename(image_url)), 'wb')\r\n #for bytes in image.iter_content(100000):\r\n #image_file.write(bytes)\r\n #image_file.close()\r\n\r\n image_paths.append(os.path.join(image_directory, os.path.basename(image_url)))\r\n\r\n return image_paths",
"def downloadImages(self):\n\t\ti = 0\n\t\tfor im in self.images:\n\t\t\t# Let's get the file extension and file name and make the final file path. \n\t\t\t# We need to do this to slugify the file name and avoid errors when loading images\n\t\t\tfile_name, file_extension = os.path.splitext(im['url'])\n\t\t\tfile_name = file_name.split(\"/\")[-1]\n\n\t\t\tfile_path = self.data_path + self.dataset + \"/\" + im['slug'] + '/' + str(im['id']) + '_' + slugify(file_name) + file_extension\n\n\t\t\t# If file is not in the file path, then download from the url\n\t\t\tif not os.path.exists(file_path):\n\t\t\t\ttry:\n\t\t\t\t\turllib.urlretrieve(im['url'], file_path )\n\t\t\t\t\tprint \"i:{} url:{}\".format(i,im['url'])\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tprint e\n\t\t\ti += 1",
"def download_pics(pics_links):\n\n for link in range(len(pics_links)):\n r = requests.get(pics_links[link][0])\n with open(os.path.join(\"tmp\", f\"{link}.jpg\"), \"wb\") as dl:\n dl.write(r.content)",
"def download_urls(urls, path):\n count = 0\n if urls:\n for url in urls:\n try:\n res = requests.get(url, verify=False, stream=True)\n rawdata = res.raw.read()\n with open(os.path.join(path, 'img_' + str(count) + '.jpg'), 'wb') as f:\n f.write(rawdata)\n count += 1\n except Exception as e:\n print('Failed to write rawdata.')\n print(e)",
"def extract_images_url(url, source):\n if source == \"mangaseeonline\":\n r = s.post(\n \"http://playwright:5000/scrape\",\n json={\n \"url\": url.replace(\"-page-1\", \"\"), \"wait\": 1}\n )\n tree = html.fromstring(r.text)\n return tree.xpath('//*[@id=\"TopPage\"]/descendant::img/@src')\n if source == \"nettruyen\":\n r = s.get(\n settings.SPLASH_URL, params={\n \"url\": url.replace(\"-page-1\", \"\"), \"wait\": 1}\n )\n tree = html.fromstring(r.text)\n return tree.xpath('//*[@class=\"reading-detail box_doc\"]/div/img/@src')\n if source == \"doctruyen3q\":\n r = s.get(\n settings.SPLASH_URL, params={\"url\": url, \"wait\": 1}\n )\n tree = html.fromstring(r.text)\n return tree.xpath('//*[contains(@id, \"page_\")]/img/@src')\n if source == \"truyenkinhdien\":\n r = s.get(\n settings.SPLASH_URL.replace(\"render.html\", \"execute\"),\n params={\"url\": url, \"lua_source\": lua_script, \"wait\": 1},\n )\n tree = html.fromstring(r.json()[\"html\"])\n return tree.xpath(\n '//*[@class=\"sgdg-gallery\"]/a[not(contains(@style,\"display:none\"))]/img/@src'\n )",
"def download_engine(fcsd): #fcsd = first comic strip date\n\n url_list = get_comic_strip_url(fcsd)\n\n for url in url_list:\n session = requests.Session()\n response = session.get(url)\n download_url = get_image_comic_url(session, response)\n# download_dilbert(session, download_url)\n return download_url",
"def download_content(content_link, output_dir):\n if content_link is None: return None\n res = requests.get(content_link, stream=True)\n try:\n res.raise_for_status()\n except requests.exceptions.HTTPError:\n return None\n img_name, img_format = parse_image_url(res.url)\n filepath = '{}/{}.{}'.format(output_dir, img_name, img_format)\n\n with open(filepath, mode='wb') as image_file:\n for chunk in res.iter_content(chunk_size=chunk_size):\n image_file.write(chunk)\n\n return abspath(filepath)",
"def get_images(outputdir, parent_key, key, searchurl, maximum, json_path):\n body, browser = build_browser(searchurl)\n\n urls = []\n\n while len(urls) < maximum:\n try:\n page_source = browser.page_source\n\n soup = BeautifulSoup(page_source, 'lxml')\n\n search_result_soup = get_div_child(soup.body, \"islrg\")\n images = search_result_soup.find_all('img')\n urls = get_url_from_images(images)\n print(urls)\n\n for i in range(50):\n scroll_down(body)\n # browser.find_element_by_xpath('//*[@id=\"islmp\"]/div/div/div/div')\n browser.find_element_by_class_name(\"mye4qd\").click()\n print(len(urls) < maximum)\n except ElementNotInteractableException as e: # There is no next page\n print(e)\n break\n\n\n\n if not os.path.exists(outputdir):\n os.makedirs(outputdir)\n\n write_urls(json_path, parent_key, key, urls)\n\n # download_urls(urls, outputdir)\n browser.close()",
"def get_images(url):\n \n # =============================================================================\n # Selenium.\n # =============================================================================\n\n chrome_options = Options()\n #chrome_options.add_argument('--incognito')\n #chrome_options.add_argument('--headless')\n #chrome_options.add_argument('--no-sandbox')\n \n driver = webdriver.Chrome(options=chrome_options,executable_path='/usr/local/bin/chromedriver') # Optional argument, if not specified will search path.\n driver.get('https://' + url)\n \n #scrolling to bottom to load all images on the page\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n #sleep to make sure everything loads\n time.sleep(5)\n \n \n html_source = driver.page_source\n \n img_alt_src(html_source)\n \n driver.close()\n driver.quit()",
"def get_img_from_url(index, url):\n try:\n with urllib.request.urlopen(url) as response:\n if response.headers.get_content_maintype() == 'image':\n image_filename = image_filename_prefix.format(name=image_class_name,\n counter=index,\n ext=response.headers.get_content_subtype())\n image_filepath = os.path.join(target_folder, image_filename)\n with open(image_filepath, 'wb') as image_file:\n image_file.write(response.read())\n\n print('Fetched URL {}'.format(index))\n\n except urllib.request.HTTPError:\n pass\n except Exception:\n pass",
"def downloadLocal(url_list,path):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n print(filename)\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n shutil.move(filename,path)\n print('Done!')",
"def get_image_qm(html_src, todir):\n #print url\n\n img_url, title = img_details(html_src)\n \n r = requests.get(img_url)\n with open(todir+title+'.jpg','wb') as f:\n f.write(r.content)",
"def download_images(links):\n\n for link in links:\n print(\"Processing\", link)\n try:\n response = requests.get(link,\n timeout=METADATA_REQUEST_TIMEOUT, stream=True)\n except requests.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n artist_name = link.rsplit('/', 2)[1]\n image_name = link.rsplit('/', 2)[2]\n image_name = artist_name + image_name\n\n file_location = ASSET_PATH.joinpath(image_name)\n\n with open(str(file_location), 'wb') as outfile:\n shutil.copyfileobj(response.raw, outfile)",
"def get_images(self):\n # test\n for it in self.xml.iterfind('image'):\n print(it)\n\n elements = []\n els = self.xml.findall('image')\n for el in els:\n elements.push(el.find('src')[0])\n els = self.xml.findall('full_picture')\n elements = elements + els\n self.__download_(elements)",
"def extract_image(page_html, family_url, folder):\n image_extractor = Extractor(page_html, family_url)\n for url in image_extractor.get_image_table():\n image_page_url = urljoin(family_url, url)\n # print(image_page_url)\n imres = requests.get(image_page_url)\n image_page_extractor = Extractor(imres.text, image_page_url)\n image_src, image_name = image_page_extractor.get_image_link()\n\n image_link = urljoin(image_page_url, image_src)\n\n print(image_link, image_name)\n # Download image\n fetch(image_link, image_name, folder)",
"def _get_image_content(image_url):\n response = requests.get(image_url)\n return response.content",
"def download_photos(urls, folder=''):\n folder_path = os.path.join('photos', folder)\n if not os.path.exists(folder_path):\n os.mkdir(folder_path)\n for url in urls:\n image = requests.get(url)\n filename = os.path.join(folder_path, url.split('/')[-1])\n with open(filename, 'wb') as f:\n f.write(image.content)",
"def GET(self, url):\n try:\n f = open(url, 'r')\n image = f.read()\n f.close()\n except:\n\n db_module.resave_img(url[5:])\n\n f = open(url, 'r')\n image = f.read()\n f.close()\n\n return image",
"def get_image(url, path):\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n with open(path, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"[>] get\", url, \">>\", path)\n f.close()",
"def _download_images(self, url_file, destination_dir, log_file):\n logger = self.setup_log(log_file)\n logger.info(config.LOG_INITIAL_MESSAGE % (url_file, destination_dir))\n\n with open(url_file) as urls:\n for i, l in enumerate(urls):\n pass\n bar = progressbar.ProgressBar(i + 1)\n\n download_count = 0\n\n # opening the url file and reading the urls\n with open(url_file, 'r') as urls:\n for i, url in enumerate(urls):\n bar.set(i)\n\n url = url.strip()\n components = urllib.parse.urlparse(url)\n if not (components.scheme and components.netloc and components.path):\n logger.error('%s: \"%s\"' % (config.LOG_URL_INVALID, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # check whether the robots.txt allows us to crawl this URL\n try:\n can_fetch = self.download_allowed(url, components.scheme, components.netloc)\n except (AttributeError, urllib.error.URLError, ValueError):\n logger.error('%s: %s' % (config.LOG_ERROR_ROBOTS, self.truncate_middle(url, config.MAX_URL)))\n continue\n\n # log that image download is disallowed\n if not can_fetch:\n logger.error('%s: %s' % (config.LOG_DISALLOWED, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # open image url\n try:\n url_response = urllib.request.urlopen(url)\n except urllib.error.URLError as error:\n logger.error('%s: %s' % (config.LOG_ERROR_OPENING, self.truncate_middle(url, config.MAX_URL)))\n continue\n\n # check whether the URL content is an image \n if url_response.info().get_content_maintype().lower() != config.IMAGE_MIMETYPE:\n logger.error('%s: %s' % (config.LOG_NOT_AN_IMAGE, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # retrieve the content and store in the destination directory\n os.makedirs(destination_dir, exist_ok=True) \n image_name = '%s_%s' % (download_count + 1, os.path.basename(url))\n with open(os.path.join(destination_dir, image_name), 'wb') as image_file:\n try:\n image_file.write(url_response.read())\n except urllib.error.URLError as error:\n logger.error('%s: %s' % (config.LOG_ERROR_DOWNLOADING, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # log download and increment the counter\n logger.info('%s %s, url: %s' % (config.LOG_DOWNLOADED, self.truncate_middle(image_name, config.MAX_FILE_NAME), self.truncate_middle(url, config.MAX_URL)))\n download_count += 1\n\n # set the progress bar to 100 percent and print a comment and new line for the returning prompt\n bar.complete('completed')\n\n # release the logger handles\n self.shutdown_log(logger)",
"async def dl_image(url, filename):\n\ttry:\n\t\twith aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(url) as resp:\n\t\t\t\ttest = await resp.read()\n\t\t\t\twith open('data/tmp/'+filename.lower(), \"wb\") as f:\n\t\t\t\t\tf.write(test)\n\t\t\t\treturn 0\n\texcept Exception as e:\n\t\tprint('[!ERROR!] in Get image')\n\t\tprint(e)\n\t\treturn -1",
"def dl_image(img_name, img_url):\n path = os.path.join(base_path, img_name)\n res = requests.get(img_url)\n with open(path, 'wb') as fout:\n fout.write(res.content)",
"def fetch_image(img_url):\n\n r = requests.get(img_url)\n return r.content",
"def _download_img_from_url(self, img_url):\r\n response = requests.get(img_url)\r\n img = Image.open(BytesIO(response.content))\r\n print(\"Downloaded image from url\")\r\n return img",
"def download_images(urls: List[str] = None):\n are_images = [is_url_image(url) for url in urls]\n if not are_images[: sum(are_images)]:\n raise NotImplementedError('Only images are supported')\n downloads = [requests.get(url) for url in urls]\n images = [load_image(io.BytesIO(download.content)) for download in downloads]\n return images",
"def download_images(image_urls):\n fetched = []\n count = 0\n for img_url in image_urls:\n if not db.is_image_in_db(img_url):\n filename = os.path.basename(img_url)\n if not os.path.exists(cfg.PHOTO_DIR + filename):\n referer_string = web.get_referrer_string(img_url) # to trick 4walled.org\n cmd = \"wget -t {retry_count} -T {timeout} {ref} {url} -O {save}\".format(url=img_url,\n save=os.path.join(cfg.PHOTO_DIR, filename),\n ref=referer_string,\n retry_count=cfg.WGET_RET,\n timeout=cfg.WGET_TIMEOUT)\n print cmd\n os.system(cmd)\n fetched.append(img_url)\n count += 1\n else:\n print(\"# {0} was already fetched once...\".format(img_url))\n\n print(\"# new imgage(s): {0}\".format(count))\n return fetched",
"def _download_images(self, image_urls: typing.List[str], save_dir: str) -> typing.List[str]:\n\n\t\timage_paths = []\n\n\t\tfor i, url in enumerate(image_urls):\n\t\t\timage = self.send_request_image(url)\n\n\t\t\timage_ext = url.split(\".\")[-1]\n\n\t\t\timage_dst_path = os.path.join(save_dir, f\"{i}.{image_ext}\")\n\n\t\t\tif image is not None:\n\t\t\t\twith open(image_dst_path, \"wb\") as fh:\n\n\t\t\t\t\t# Magic boolean which makes it work\n\t\t\t\t\timage.raw.decode_content = True\n\n\t\t\t\t\t# noinspection PyBroadException\n\n\t\t\t\t\t# Attempt to download the image from the URL\n\t\t\t\t\ttry:\n\t\t\t\t\t\tshutil.copyfileobj(image.raw, fh)\n\n\t\t\t\t\t# We should reduce the scope\n\t\t\t\t\texcept Exception:\n\t\t\t\t\t\tpass\n\n\t\t\t\t\t# We downloaded the image without any errors\n\t\t\t\t\telse:\n\t\t\t\t\t\timage_paths.append(image_dst_path)\n\n\t\treturn image_paths",
"def download_images(keyword, limit = 1):\n #creating list of arguments\n arguments = {\"keywords\": keyword ,\n \"limit\": limit , \n \"print_urls\": False,\n \"output_directory\": OUT_DIR} \n\n # Pass the arguments to above function and download images\n paths = response.download(arguments)"
] | [
"0.74554664",
"0.7305587",
"0.72142935",
"0.7059712",
"0.7036332",
"0.70010024",
"0.7000979",
"0.69411886",
"0.6839603",
"0.6806571",
"0.6789045",
"0.6769762",
"0.67669344",
"0.67415214",
"0.6739915",
"0.67326695",
"0.6728846",
"0.6691945",
"0.66686773",
"0.6667633",
"0.66651917",
"0.6655087",
"0.6646566",
"0.6630848",
"0.6607262",
"0.6598642",
"0.6580078",
"0.6578263",
"0.6576536",
"0.65291935"
] | 0.7497507 | 0 |
Test copy and pickle. | def test_copy_pickle(self):
# Test that we can pickle and unpickle
# We force a pattern that contains all custom types:
# `Selector`, `NullSelector`, `SelectorTag`, `SelectorAttribute`,
# `SelectorNth`, `SelectorLang`, `SelectorList`, and `Namespaces`
p1 = sv.compile(
'p.class#id[id]:nth-child(2):lang(en):focus', {'html': 'http://www.w3.org/TR/html4/'}
)
sp1 = pickle.dumps(p1)
pp1 = pickle.loads(sp1)
self.assertTrue(pp1 == p1)
# Test that we pull the same one from cache
p2 = sv.compile(
'p.class#id[id]:nth-child(2):lang(en):focus', {'html': 'http://www.w3.org/TR/html4/'}
)
self.assertTrue(p1 is p2)
# Test that we compile a new one when providing a different flags
p3 = sv.compile(
'p.class#id[id]:nth-child(2):lang(en):focus', {'html': 'http://www.w3.org/TR/html4/'}, flags=0x10
)
self.assertTrue(p1 is not p3)
self.assertTrue(p1 != p3)
# Test that the copy is equivalent, but not same.
p4 = copy.copy(p1)
self.assertTrue(p4 is not p1)
self.assertTrue(p4 == p1)
p5 = copy.copy(p3)
self.assertTrue(p5 is not p3)
self.assertTrue(p5 == p3)
self.assertTrue(p5 is not p4) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test__pickle_unpickle(self):\n pass",
"def test_clone_scenario(self):\n pass",
"def test_self_write(self):\n self.assertFalse(os.path.exists(self.f1))\n self.assertFalse(os.path.exists(self.f2))\n self.sync.pickle_write()\n self.assertTrue(os.path.exists(self.f1))\n self.assertTrue(os.path.exists(self.f2))",
"def test_pickle_serialization(self, molecule):\n serialized = pickle.dumps(molecule)\n molecule_copy = pickle.loads(serialized)\n assert molecule == molecule_copy",
"def test_pickle():\n dictionary = {\n 'a': 0,\n 'b': 1,\n 'c': 2,\n 'd': 3\n }\n file = 'pickle_test'\n # write\n write_pickle(dictionary, file)\n\n # read back\n dictionary_read = read_pickle(file)\n print(dictionary_read)\n print(dictionary == dictionary_read)",
"def test_clone_system(self):\n pass",
"def test_can_pickle(self):\n settings = UploadSettings(None, FakeDataServiceApi(), None, ProjectNameOrId.create_from_name('mouse'), None)\n params = ('one', 'two', 'three')\n context = UploadContext(settings, params, multiprocessing.Manager().Queue(), 12)\n pickle.dumps(context)",
"def test_restore_backup():",
"def test_pickle(\n config: Config\n) -> None:\n assert isinstance(config, Config)\n other: Config = pickle.loads(pickle.dumps(config))\n assert config == other",
"def test_copy(self):\n data = [[0, 1], [1, 0]]\n b1 = Board(data)\n b2 = b1.copy()\n # test if proper copy\n self.assertListEqual(b1.data, b2.data)\n # teset if not just a shallow copy\n b1.data[0][0] = 1\n self.assertNotEqual(b1.data[0][0], b2.data[0][0])",
"def test_pickle_load(self):\n l = [1, 2, 3, 4, 5]\n self.plugin.save_data(l)\n\n l = self.plugin.load_data()\n self.assertIn(4, l)",
"def test_copy(self):\n p = hw.create_tile_puzzle(3, 3)\n p2 = p.copy()\n self.assertTrue(p.get_board() == p2.get_board())\n p2.perform_move('up')\n self.assertFalse(p.get_board() == p2.get_board())",
"def test_pickle(self):\n X = self.generate_X()\n task = mmRDTR()\n task.fit(X)\n with tempfile.TemporaryFile(mode='w+b') as tf:\n cPickle.dump(task, tf)",
"def testBackupAndRestore(self):\n try:\n # -- Backup/Restore cache ---\n pcdcP = PubChemDataCacheProvider(self.__cfgOb, self.__cachePath)\n ok, failList = pcdcP.load(self.__cidList, exportPath=None)\n self.assertEqual(len(failList), 0)\n self.assertTrue(ok)\n ok = pcdcP.dump(fmt=\"json\")\n self.assertTrue(ok)\n #\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()",
"def test_pickle_save(self):\n l = [1, 2, 3, 4, 5]\n self.plugin.save_data(l)",
"def test_clone_deployment(self):\n pass",
"def run_copy(self, src, dst):\n pass",
"def test_deepcopy_RiakDisabledForTest(self):\n rdft = RiakDisabledForTest()\n self.assertEqual(rdft, deepcopy(rdft))",
"def test_copy(self):\n\n # Copy the 'orig' data pipe to the 'new' data pipe.\n pipes.copy('orig', 'new')\n\n # Test that the new data pipe exists.\n self.assert_('new' in ds)\n\n # Test that the new data pipe has the object 'x' and that its value is 1.\n self.assertEqual(ds['new'].x, 1)\n\n # Change the value of x.\n ds['new'].x = 2\n\n # Test that the two values are different.\n self.assert_(ds['orig'].x != ds['new'].x)\n\n # Test that the new data pipe has the object 'mol[0].res[0].spin[0].num' and that its value is 1.\n self.assertEqual(ds['new'].mol[0].res[0].spin[0].num, 1)\n\n # Change the spin system number.\n ds['new'].mol[0].res[0].spin[0].num = 2\n\n # Test that the original spin system number hasn't changed.\n self.assertEqual(ds['orig'].mol[0].res[0].spin[0].num, 1)",
"def test_pickle_limit_continue(self):\n l = []\n for i in range(0, 30):\n l.append(i)\n self.plugin.save_data(l, 10)\n l = self.plugin.load_data()\n self.assertEqual(20, l[0])",
"def test_save_load(self):\n lib = archive([])\n f = io.StringIO()\n lib.save(f)\n f2 = io.StringIO(f.getvalue())\n lib2 = get_archive(f2)\n self.assertTrue(lib2)",
"def test_copied_models_are_equal(dbdiskrepo):\n original = fit_model()\n\n shallow = copy(original)\n assert original.artifact.id == shallow.artifact.id\n assert original.artifact.value_id == shallow.artifact.value_id\n assert hash(original) == hash(shallow)\n\n deep = deepcopy(original)\n assert original.artifact.id == deep.artifact.id\n assert original.artifact.value_id == deep.artifact.value_id\n assert hash(original) == hash(deep)",
"def run_pickle(data):\n log.info(\"\\n\\n====\")\n log.info('Step 1: Demonstrate persistence with pickle')\n log.info('Write a pickle file with the product data')\n\n pickle.dump(data, open('../data/data.pkl', 'wb'))\n\n log.info('Step 2: Now read it back from the pickle file')\n read_data = pickle.load(open('../data/data.pkl', 'rb'))\n log.info('Step 3: Show that the write and read were successful')\n assert read_data == data\n log.info(\"and print the data\")\n pprint.pprint(read_data)",
"def test_storage_copy(tmp_path):\n grid = UnitGrid([2])\n field = ScalarField(grid)\n\n storage_classes = {\"None\": None, \"MemoryStorage\": MemoryStorage}\n if module_available(\"h5py\"):\n file_path = tmp_path / \"test_storage_apply.hdf5\"\n storage_classes[\"FileStorage\"] = functools.partial(FileStorage, file_path)\n\n s1 = MemoryStorage()\n s1.start_writing(field, info={\"b\": 2})\n field.data = np.array([0, 1])\n s1.append(field, 0)\n field.data = np.array([1, 2])\n s1.append(field, 1)\n s1.end_writing()\n\n for name, storage_cls in storage_classes.items():\n out = None if storage_cls is None else storage_cls()\n s2 = s1.copy(out=out)\n assert storage_cls is None or s2 is out\n assert len(s2) == 2\n np.testing.assert_allclose(s2.times, s1.times)\n assert s2[0] == s1[0], name\n assert s2[1] == s1[1], name\n\n # test empty storage\n s1 = MemoryStorage()\n s2 = s1.copy()\n assert len(s2) == 0",
"def testCheckSourceCopyOperation_Pass(self):\n payload_checker = checker.PayloadChecker(self.MockPayload())\n self.assertIsNone(\n payload_checker._CheckSourceCopyOperation(None, 134, 134, 'foo'))",
"def test_pickle(self):\n X,Y,Z = self.generate_data(nrows=200)\n task = mmSCHPOLY()\n task.fit(X,Y,Z)\n with tempfile.TemporaryFile(mode='w+b') as tf:\n cPickle.dump(task, tf)\n\n X,Y,Z = self.generate_data(nrows=200)\n task = mmSCH2W()\n task.fit(X,Y,Z)\n with tempfile.TemporaryFile(mode='w+b') as tf:\n cPickle.dump(task, tf)",
"def test_pickle_persistence():\n tree = STRtree([Point(i, i).buffer(0.1) for i in range(3)], range(3))\n\n pickled_strtree = pickle.dumps(tree)\n unpickle_script_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"unpickle-strtree.py\")\n proc = subprocess.Popen(\n [sys.executable, str(unpickle_script_file_path)],\n stdin=subprocess.PIPE,\n )\n proc.communicate(input=pickled_strtree)\n proc.wait()\n assert proc.returncode == 0",
"def test_11_clone(self):\n # Test basic operation of cloning repo which contains one\n # publisher to repo which contains same publisher\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that packages in dst which are not in src get removed.\n self.pkgsend_bulk(self.durl2, (self.amber30))\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone reports publishers not in the dest repo.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2), exit=1)\n\n # Test that clone adds new publishers if requested.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1,\n self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone removes all packages if source is empty\n self.pkgrecv(self.durl3, \"--clone -d {0}\".format(self.dpath2))\n self.pkgrepo(\"-s {0} list -H -p test2\".format(self.dpath2))\n self.assertEqualDiff(\"\", self.output)\n\n # Test that clone works fine with mulitple publishers\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n\n path = os.path.join(self.dpath2, \"publisher/test1\")\n shutil.rmtree(path)\n path = os.path.join(self.dpath2, \"publisher/test2\")\n shutil.rmtree(path)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2 -p test1\".format(\n self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\",\n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone fails if --raw is specified.\n self.pkgrecv(self.durl1, \"--raw --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -c is specified.\n self.pkgrecv(self.durl1, \"-c /tmp/ --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -a is specified.\n self.pkgrecv(self.durl1, \"-a --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if --newest is specified.\n self.pkgrecv(self.durl1, \"--newest --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)",
"def test_simple_backup(self):\n with TemporaryDirectory() as temporary_directory:\n source = os.path.join(temporary_directory, 'source')\n destination = os.path.join(temporary_directory, 'destination')\n latest_directory = os.path.join(destination, 'latest')\n # Create a source for testing.\n self.create_source(source)\n # Run the program through the command line interface.\n exit_code, output = run_cli(\n '--no-sudo', '--ionice=idle',\n '--disable-notifications',\n source, latest_directory,\n )\n assert exit_code == 0\n # Make sure the backup was created.\n self.verify_destination(latest_directory)\n # Make sure a snapshot was created.\n assert len(find_snapshots(destination)) == 1",
"def test_15_copyto(self):\n with mock.patch(BUILTINS + '.open', mock.mock_open()):\n status = udocker.FileUtil(\"source\").copyto(\"dest\")\n self.assertTrue(status)\n status = udocker.FileUtil(\"source\").copyto(\"dest\", \"w\")\n self.assertTrue(status)\n status = udocker.FileUtil(\"source\").copyto(\"dest\", \"a\")\n self.assertTrue(status)"
] | [
"0.74684554",
"0.6731694",
"0.6722892",
"0.67018497",
"0.6625926",
"0.649761",
"0.64614326",
"0.64460385",
"0.6438617",
"0.6370284",
"0.6349577",
"0.633919",
"0.6325789",
"0.6300349",
"0.62820846",
"0.62548345",
"0.6244965",
"0.621606",
"0.612267",
"0.607975",
"0.60563904",
"0.60280764",
"0.60274726",
"0.60202384",
"0.600574",
"0.59866965",
"0.59805727",
"0.5980373",
"0.5979018",
"0.5970547"
] | 0.73475975 | 1 |
Test invalid pseudo class. | def test_invalid_pseudo(self):
with self.assertRaises(NotImplementedError):
sv.compile(':before')
with self.assertRaises(SyntaxError):
sv.compile(':nth-child(a)') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_invalid_pseudo_open(self):\n\n with self.assertRaises(SyntaxError):\n sv.compile(':is(div')",
"def test_invalid_pseudo_close(self):\n\n with self.assertRaises(SyntaxError):\n sv.compile('div)')\n\n with self.assertRaises(SyntaxError):\n sv.compile(':is(div,)')",
"def _is_valid_css_class(string):\n exp = re.compile('^-?[_a-zA-Z][_a-zA-Z0-9-]+$')\n if exp.match(string):\n return True\n return False",
"def test_invalid_rule(self):\n html = '<div class=\"pink\">test</div>'\n css = '.pink { opacity: 0.8; }'\n expected = '<div class=\"pink\">test</div>'\n result = inline_css(html, css, pretty_print=False)\n self.assertEqual(expected, result)",
"def test_invalid_tag(self):\n\n with self.assertRaises(SyntaxError):\n sv.compile(':is(div)p')",
"def test_invalid_tokens(self):\n self.assertTrue(1 + 1)",
"def test_invalid_input_tag(self):\r\n with self.assertRaisesRegexp(Exception, \"Error in xml\"):\r\n self.check_group('checkboxtextgroup', 'invalid', 'checkbox')",
"def test_find_break_points_invalid_num_classes(self):\r\n self.assertRaises(ValueError, self.mc._find_break_points, 0, 1, 0)\r\n self.assertRaises(ValueError, self.mc._find_break_points, 0, 1, -1)",
"def test_invalid_rule_not_stripped(self):\n html = '<div class=\"pink\">test</div>'\n css = '.pink { opacity: 0.8; }'\n expected = '<div class=\"pink\" style=\"opacity: 0.8;\">test</div>'\n result = inline_css(html, css, strip_unsupported_css=False, pretty_print=False)\n self.assertEqual(expected, result)",
"def test_class_errored(self, cls, exception):",
"def test_invalid_scalene():\n assert 'invalid' == classify_triangle(1,2,3)",
"def test_color__sequence_arg_invalid_value_without_alpha(self):\n cls = pygame.Color\n for seq_type in (tuple, list):\n self.assertRaises(ValueError, cls, seq_type((256, 90, 80)))\n self.assertRaises(ValueError, cls, seq_type((100, 256, 80)))\n self.assertRaises(ValueError, cls, seq_type((100, 90, 256)))",
"def is_invalid():\n print(colored('Invalid input\\n', 'red', attrs=['bold']))",
"def test_invalid_isosceles():\n assert 'invalid' == classify_triangle(1,1,3)",
"def test_false_lower(self):\n self.assertRaises(ParseException, self.flag.parseString, 'n')",
"def test_classes(self):\r\n css_classes = [\r\n ('unsubmitted', 'unanswered'),\r\n ('incomplete', 'incorrect'),\r\n ('queued', 'processing'),\r\n ('correct', 'correct'),\r\n ('test', 'test'),\r\n ]\r\n for status, classname in css_classes:\r\n statobj = inputtypes.Status(status)\r\n self.assertEqual(statobj.classname, classname)",
"def test_invalid_tag(tag: str) -> None:\n with pytest.raises(Exception):\n class InvalidClass1(Base, yaml_tag=tag):\n ...",
"def test_wrong_inputs(self):\r\n with self.assertRaises(ValueError):\r\n triangle_classification('hello', 1, 1)\r\n triangle_classification('A', 'B', 'C')",
"def test_invalid_input(self):\n self.assertEqual(classify_triangle(-4, -6, -7), 'InvalidInput')\n self.assertEqual(classify_triangle(200, 200, -300), 'InvalidInput')\n self.assertEqual(classify_triangle(3, 'x', 4), 'InvalidInput')\n self.assertEqual(classify_triangle('x', 'y', 'z'), 'InvalidInput')\n self.assertEqual(classify_triangle('x', 1, -1), 'InvalidInput')\n self.assertEqual(classify_triangle(-4, 'y', 'z'), 'InvalidInput')",
"def test_custom_class_fail_import(self):\n conf = Configuration(Path(self.conf_dir, \"custom_class_doesnt_exists.yaml\"))\n self.test_survey = Survey.objects.get(name=\"Test survëy\")\n fail_import = str(Survey2Tex(self.test_survey, conf))\n should_contain = [\n \"could not render\",\n \"not a standard type\",\n \"importable valid Question2Tex child class\",\n \"'raw'\",\n \"'sankey'\",\n \"'pie'\",\n \"'cloud'\",\n \"'square'\",\n \"'polar'\",\n ]\n for text in should_contain:\n self.assertIn(text, fail_import)",
"def test_pauli_error_raise_invalid(self):\n self.assertRaises(NoiseError, lambda: pauli_error([('S', 1)]))",
"def test_invalid_type_input(self):\n\n with self.assertRaises(TypeError):\n sv.match('div', \"not a tag\")\n\n with self.assertRaises(TypeError):\n sv.select('div', \"not a tag\")\n\n with self.assertRaises(TypeError):\n sv.filter('div', \"not a tag\")\n\n with self.assertRaises(TypeError):\n sv.comments('div', \"not a tag\")",
"def test_check_x_ValueError(self):\n self.assertRaisesRegex(\n ValueError,\n 'x must be >= 0',\n Rectangle,\n 4, 2, -1, 0, 12\n )",
"def test_color__sequence_arg_invalid_value(self):\n cls = pygame.Color\n for seq_type in (tuple, list):\n self.assertRaises(ValueError, cls, seq_type((256, 90, 80, 70)))\n self.assertRaises(ValueError, cls, seq_type((100, 256, 80, 70)))\n self.assertRaises(ValueError, cls, seq_type((100, 90, 256, 70)))\n self.assertRaises(ValueError, cls, seq_type((100, 90, 80, 256)))",
"def test_invalid_tag(self):\r\n with self.assertRaises(Exception):\r\n self.check_group('invalid', 'choice', 'checkbox')",
"def skip_if_invalid(self, descriptor_cls):\r\n pass",
"def test_color__sequence_arg_invalid_format(self):\n cls = pygame.Color\n for seq_type in (tuple, list):\n self.assertRaises(ValueError, cls, seq_type((100,)))\n self.assertRaises(ValueError, cls, seq_type((100, 90)))\n self.assertRaises(ValueError, cls, seq_type((100, 90, 80, 70, 60)))",
"def _not_valid_(s) :\n return not s.valid()",
"def test_raises_useful_exception(self):\n exp = Expression(r'inalid (\\d]', {}, [], lambda x: x)\n with self.assertRaises(exp.InvalidPattern):\n assert not exp.pattern",
"def testinvalidelement(self):\n self.assertRaises(AbilityError, \n AmuletAbility, 'Proof', element='Invalid')\n self.assertRaises(AbilityError, AmuletAbility, 'Proof', element='')\n self.assertRaises(AbilityError, \n AmuletAbility, 'Control NPC', element='Fire')"
] | [
"0.68971527",
"0.6783288",
"0.64454436",
"0.6112505",
"0.6026437",
"0.5795469",
"0.57921946",
"0.57608944",
"0.57498455",
"0.5723173",
"0.5720089",
"0.56820285",
"0.5644386",
"0.56317127",
"0.5618471",
"0.5617376",
"0.5591648",
"0.5563214",
"0.55580693",
"0.55521137",
"0.55478865",
"0.55401087",
"0.55166405",
"0.5499597",
"0.5473252",
"0.5449702",
"0.54392874",
"0.54219496",
"0.540122",
"0.5393083"
] | 0.7125582 | 0 |
Test invalid pseudo close. | def test_invalid_pseudo_close(self):
with self.assertRaises(SyntaxError):
sv.compile('div)')
with self.assertRaises(SyntaxError):
sv.compile(':is(div,)') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testCloseFail(t, env):\n c = env.c1\n c.init_connection()\n fh, stateid = c.create_confirm(t.code)\n ops = c.use_obj(fh)\n ops += [c.close_op(c.get_seqid(t.code)+1, stateid)]\n _replay(c, ops, NFS4ERR_BAD_SEQID)",
"def test_ignore_close():\n try:\n yield\n except GeneratorExit:\n yield",
"def is_closed(self) -> bool:",
"def _isclose(self):\n return self.dp.state()==PyTango.DevState.CLOSE",
"def test_invalid_pseudo_open(self):\n\n with self.assertRaises(SyntaxError):\n sv.compile(':is(div')",
"def is_close(self) -> bool:\n return not self.open",
"def test_datachannel_stop_already_closed(testchannel):\n with testchannel.open():\n pass\n\n with pytest.raises(ChannelClosedError):\n testchannel.stop()",
"def test_module_closed(self):\r\n closed = self.peer_grading.closed()\r\n self.assertFalse(closed)",
"def test_pause_already_closed(testchannel):\n with pytest.raises(ChannelClosedError):\n testchannel.pause()",
"def test_close():\n while True:\n yield",
"def _basicClose(self):\n raise NotImplementedError()",
"def _basicClose(self):\n raise NotImplementedError()",
"def test_disconnect_closed(self):\n self.sock.close()\n self.inverter.sock.close()\n self.inverter.sock_file.close()\n self.inverter.disconnect() # Should not raise exception",
"def test_file_closed(self):\n try:\n with get_temp_file() as (fd, name):\n os.close(fd)\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))\n else:\n file_exists = os.access(name, os.F_OK)\n self.assertFalse(file_exists)",
"def is_closed(self): # -> bool | Any:\n ...",
"def is_closed(self) -> bool:\n raise NotImplementedError() # pragma: nocover",
"def test_closed(self):\n server, client = loopback()\n server.sock_shutdown(2)\n with pytest.raises(SysCallError) as err:\n server.sendall(b\"hello, world\")\n if platform == \"win32\":\n assert err.value.args[0] == ESHUTDOWN\n else:\n assert err.value.args[0] == EPIPE",
"def is_closed(self):\n raise NotImplementedError",
"def close_pseudocode(self, *args):\n return _ida_hexrays.Hexrays_Hooks_close_pseudocode(self, *args)",
"def testClose(t, env):\n c = env.c1\n c.init_connection()\n fh, stateid = c.create_confirm(t.code)\n ops = c.use_obj(fh)\n ops += [c.close_op(c.get_seqid(t.code), stateid)]\n _replay(c, ops)",
"def isClosed(self):\n pass",
"def is_closed(self):\n return None",
"def test_eof_on_remote_close(self):\n parent, child = create_psuedo_anonymous_duct_pair()\n child.close()\n self.assertRaises(EOFError, parent.recv)\n try:\n parent.send(\"test\")\n except IOError as e:\n assert getattr(e, 'errno') == errno.EPIPE\n except:\n raise AssertionError(\"Incorrect exception raised for parent.send() on a broken connection!\")\n parent.close()",
"def test_can_not_handle_brew_container_task_closed_event(self):\n event = self.get_event_from_msg(get_fedmsg('brew_container_task_closed'))\n self.assertFalse(self.handler.can_handle(event))",
"def _close(self):\n # TODO\n self.holding = False",
"def test_start_closed(testchannel):\n with pytest.raises(ChannelClosedError):\n testchannel.start()",
"def test_shutdown_closed(self):\n server, client = loopback()\n server.sock_shutdown(2)\n with pytest.raises(SysCallError) as exc:\n server.shutdown()\n if platform == \"win32\":\n assert exc.value.args[0] == ESHUTDOWN\n else:\n assert exc.value.args[0] == EPIPE",
"def test_validate_closed_wire(self, circuit):\n circuit._wires[0].closed = True\n with pytest.raises(ValueError, match=\"Wire 0 is closed.\"):\n circuit._validate_wire_ids(wire_ids=[0])",
"def close():",
"def close(self):\n # This is a NOOP by default"
] | [
"0.680997",
"0.6563538",
"0.65439314",
"0.64253104",
"0.62412703",
"0.6232089",
"0.61920905",
"0.6143005",
"0.61108303",
"0.606794",
"0.6032187",
"0.6032187",
"0.59965396",
"0.5996407",
"0.5931704",
"0.59279484",
"0.5927667",
"0.5872422",
"0.5868556",
"0.5859582",
"0.5855965",
"0.58016396",
"0.57939565",
"0.57491493",
"0.5738147",
"0.5735313",
"0.5732486",
"0.57319397",
"0.57048416",
"0.57033247"
] | 0.6680395 | 1 |
Send command at address to HL2, cmd may be bytes or number. Returns a response. | def command(self,addr,cmd):
if isinstance(cmd,int):
cmd = struct.pack('!L',cmd)
res = self._send(bytes([0xef,0xfe,0x05,addr<<1])+cmd)
if res:
self.wrcache[addr] = cmd
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_telnet_command(self, cmd):\n data = bytes(cmd)\n self.send_to_client(data)",
"def send_command(self, cmd):\n\n\t\tself.eyetribe._connection.request(cmd)",
"def SEND_cmd(self, cmd):\n\n # check for pending command results to be retrieved\n if self.recwaiting != 0: \n #print \"ARGH! can't send cmd before result of last command is retrieved!!!\"\n raise ErrorRtrvBeforeSend(\"ARGH! can't send cmd before result of last command is retrieved!!!\")\n\n # send command\n if self.State != 1:\n print \"Can't send OBD2 command, device not connected\"\n raise self.ErrorNotConnected(\"Can't send OBD2 command\")\n elif self.Type == \"SERIAL\":\n if self.Device == \"ELM327\":\n #self.ELM327_SEND_cmd(cmd)\n self.SERIAL_SEND_cmd(cmd)\n # mark that there is now a record waiting to be retrieved\n self.recwaiting = 1\n else:\n raise self.ErrorReaderNotRecognized(\"Unknown OBD2 Reader device\")\n elif self.Type == \"FILE\":\n # Cant Send commands to a trace\n pass\n else:\n # unknown self.Type \n pass\n\n return",
"def sendCmd(self,cmd):\n self.ser.write(cmd.encode()+END.encode())\n out = self.ser.readline()\n return out",
"def send_command(self, cmd):\n # encrypt command\n encrypted_cmd = self.encrypt_command(cmd, self.nonce_w)\n \n # send data size header\n self.sock.send(struct.pack(\"!I\", len(encrypted_cmd)))\n\n # send encrypted command\n self.sock.send(encrypted_cmd)\n \n # waiting to receive data size\n data = self.sock.recv(4)\n if not data:\n raise Exception(\"no response size received\")\n \n # unpack response size\n (response_size,) = struct.unpack(\"!I\", data)\n\n # waiting to response response according to the response size\n data = self.sock.recv(response_size)\n while len(data) < response_size:\n data += self.sock.recv(response_size - len(data))\n\n # decrypt data\n r = self.decrypt_response(data, self.nonce_r)\n\n # incremente nonce for next command\n self.nonce_r = self.incremente_nonce(nonce=self.nonce_r)\n self.nonce_w = self.incremente_nonce(nonce=self.nonce_w)\n\n # return response output\n return r",
"def issue(self, cmd):\n self.send([cmd])\n return self.read_until_prompt()[1:] # drop the echo",
"def send_command(command):\n if connection_type == USE_I2C:\n cmd = \"\"\n cmd += chr( SSD1306_ADDRESS )\n cmd += chr( SELECT_CONTROL_BYTE )\n cmd += chr( command )\n i2cWrite(cmd, 10, False)\n else:\n print \"Not implemented for that connection type yet.\"",
"def command(self, cmd):\n cmd = cmd.encode(encoding='UTF-8')\n if cmd[-1] != b'\\n':\n # make sure the command ends with \\n, otherwise the client will\n # block\n cmd += b'\\n'\n yield from self._cmds.put(cmd)\n resp = yield from self._get_response()\n return resp",
"def send_cmd(self,cmd,timeout=5):\n if not self.checkConnected():\n self.connect()\n \n while True:\n try:\n self.sock.recv(10000)\n except:\n break\n \n tic = time.time()\n try:\n self._send(cmd)\n except Exception,e:\n corelog.exception(\"Couldn't send command to XANT\")\n return\n tstart = time.time()\n resp = ''\n while (time.time()-tstart < timeout):\n try:\n resp = resp + self.sock.recv(1000)\n if resp.find('completed') >= 0:\n corelog.debug(\"Command %s accepted\" % cmd)\n break\n elif resp.find('rejected') >= 0:\n corelog.warning(\"Command %s rejected with response: %s\" % (cmd,resp))\n break\n \n except:\n corelog.exception(\"Problem waiting for response from XANT\")\n pass\n corelog.debug(\"Xant response time: %.2f ms\" %((time.time()-tic)*1000))\n time.sleep(0.1) #sleep a moment \n return resp",
"def send_cmd(self, cmd: bytes):\n self._send_command(cmd)\n return self._get_multiline()",
"def send_cmd(self, cmd):\n # print(\"... sending: {}\".format(' '.join(\"0x{0:02X}\".format(x) for x in cmd)))\n self.x10g_rdma.uart_tx(cmd)",
"def command(self, *cmd):\n assert(len(cmd) <= 32)\n self.bus.write_i2c_block_data(self.addr, self.cmd_mode, list(cmd))",
"def send_simple_command(self, cmd):\n pkt = MqttPkt()\n \n pkt.command = cmd\n pkt.remaining_length = 0\n \n ret = pkt.alloc()\n if ret != NC.ERR_SUCCESS:\n return ret\n \n return self.packet_queue(pkt)",
"def _send_cmd(self, cmd):\n # Get length and checksum\n length = self._get_cmd_length(cmd, 2) # checksum + length = 2 bytes\n cmd.insert(0, length)\n checksum = self._get_checksum(cmd)\n cmd.append(checksum)\n\n self._send_raw_cmd(cmd)",
"def do_command(command):\n send_command(command)\n # time.sleep(0.1) # may be required on slow machines\n response = get_response()\n print(\"Rcvd: <<< \" + response)\n return response",
"def SERIAL_SEND_cmd(self, cmd):\n # Must be connected & operational\n if self.State == 0:\n # a slightly more informative result might help\n return \n\n # SEND\n if self.Port.writable():\n #print \"\\nwriting \" + cmd + \" to port...\"\n for c in str(cmd):\n self.Port.write(c)\n self.Port.write(\"\\r\\n\")\n\n return",
"def send_command(self, cmd):\n self.mgen_pipe.Send(cmd)",
"def send_cmd(self, cmd=\"\"):\n start = time.time()\n\n try:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.connect((self.host, self.port))\n logger.info(\"Sending command: %(cmd)s\", {'cmd': cmd})\n\n cmd = cmd.encode('utf-8')\n self.socket.send(b\"%s\\r\" % cmd)\n logger.info(\"Command set\")\n time.sleep(.1)\n self.socket.send(b\"logout\\r\")\n data = self.socket.recv(2048)\n logger.info(\"Recieved: %s\", data)\n self.socket.close()\n logger.info(\"Socket closed\")\n return {'elaptime': time.time()-start,\n 'data': data}\n except Exception as e:\n logger.error(\"Error sending command\", exc_info=True)\n return {'elaptime': time.time() - start,\n 'error': str(e)}",
"def do_command(command):\n send_command(command)\n response = get_response()\n print(\"Rcvd: <<< \\n\" + response)\n return response",
"def send_command(self, command):\r\n print (\">> send cmd: {}\".format(command))\r\n self.abort_flag = False\r\n timer = threading.Timer(self.command_timeout, self.set_abort_flag)\r\n\r\n self.socket.sendto(command.encode('utf-8'), self.tello_address)\r\n\r\n timer.start()\r\n while self.response is None:\r\n if self.abort_flag is True:\r\n break\r\n timer.cancel()\r\n \r\n if self.response is None:\r\n response = 'none_response'\r\n else:\r\n response = self.response.decode('utf-8')\r\n\r\n self.response = None\r\n\r\n return response",
"def sendCmd( self, *cmd, **kwargs ):\n kwargs.setdefault( 'printPid', False )\n if not self.execed:\n return Node.sendCmd( self, *cmd, **kwargs )\n else:\n error( '*** Error: %s has execed and cannot accept commands' %\n self.name )",
"def send_command(self, command):\n question = jbus.jbus_generator_data_write(self.node, 0x15b0, bytes([0x00,command]))\n answer = self.send_request(question)\n #print(\"Question: [\", question, \"]\")\n #print(\"Answer: [\",answer,\"] LEN: \",len(answer))\n return self.verify_response(question, answer)",
"def send_command(self, command):\r\n\r\n connection = self.establish_connection()\r\n connection.send(command + '\\n')\r\n print command\r\n output = connection.recv(1000)\r\n return output",
"def send(self,cmd):\n bit_list = '{:b}'.format(int(cmd,16))\n self._lead()\n for i in bit_list:\n self.ir_pin.duty(512)\n time.sleep_us(_Const.NEC_BIT_MARK)\n self.ir_pin.duty(0)\n if i == '0':\n time.sleep_us(_Const.NEC_ZERO_SPACE)\n else:\n time.sleep_us(_Const.NEC_ONE_SPACE)\n self._end()",
"def query(self, cmd, force=False):\n\n # send command and retrieve message\n cmd_string = self.__build_command_string(cmd)\n\n messages = self.send_and_parse(cmd_string)\n\n\n ot=cmd(messages) # compute a response object\n print(ot)\n ot=str(ot)\n return ot",
"def send(self, cmd, data):\n return self.client_provider.send(cmd, data)",
"def do(self, command):\r\n command += xsct_line_end\r\n logger.info('Sending command: %s ...', repr(command))\r\n self.send(command)\r\n ans = self.recv()\r\n if ans.startswith('okay'):\r\n return ans[5:]\r\n if ans.startswith('error'):\r\n raise PyXilException(ans[6:])\r\n raise PyXilException('Illegal start-string in protocol. Answer is: ' + ans)",
"def do_cmd(cmd,sock):\n\n buffer = ''\n \n # Write the command and wait one second.\n print 'writing command '+cmd \n sock.send(cmd+SBE37_NEWLINE)\n time.sleep(1)\n \n # Block to receive all data.\n # Continue reading if the received data does not include a prompt.\n # Break out when the received data ends in a prompt.\n while True:\n try:\n data = ''\n data = sock.recv(1024)\n buffer += data\n except:\n raise\n else:\n #print 'received '+str(len(data))+' bytes' \n if buffer.endswith(SBE37Prompt.COMMAND):\n break\n elif buffer.endswith(SBE37Prompt.AUTOSAMPLE):\n break\n elif buffer.endswith(SBE37Prompt.BAD_COMMAND):\n break\n\n return buffer",
"def run_command(self, server_id, cmd):\n status, data, errors, messages = self._make_post_request(MCAPIRoutes.SEND_CMD, extra_params={'id': server_id}, body={'command':cmd})\n \n if status == 200:\n return data\n elif status == 500:\n self._check_errors(errors, messages)",
"def send_cmd(self, cmd):\n logger.info(\"sending cmd %s to fuse\", cmd)\n with open(self.ipc + \".\" + str(threading.current_thread().ident), 'w', 0) as f:\n #with open(self.ipc, 'a+') as f:\n f.write(cmd)\n #f.flush()\n logger.info(\"writing to fuse returned\")"
] | [
"0.73290044",
"0.687813",
"0.6716656",
"0.67163163",
"0.6493919",
"0.6450437",
"0.6449417",
"0.6435877",
"0.63993514",
"0.63926345",
"0.6376549",
"0.6374566",
"0.6362477",
"0.63120687",
"0.6233574",
"0.62273973",
"0.6221301",
"0.6218935",
"0.6216378",
"0.6206575",
"0.62015694",
"0.6173464",
"0.6138872",
"0.6117247",
"0.6108032",
"0.6071564",
"0.6068021",
"0.60447806",
"0.6035948",
"0.60246396"
] | 0.7104647 | 1 |
Set buffer latency and ptt hang time in ms. | def config_txbuffer(self,latency=10,ptt_hang=4):
cmd = bytes([0x00,0x00,int(ptt_hang)&0x1f,int(latency)&0x7f])
return self.command(0x17,cmd) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inc_latency(self, *_, **__): # pylint: disable=arguments-differ\n pass",
"async def async_set_latency(self, latency):\n await self._client.set_latency(latency)\n self.async_write_ha_state()",
"def _init_timeouts(self):\n cur_time = time()\n self._chunk_time = cur_time\n self._total_time = cur_time",
"def _latency(self):\n\n return\n time.sleep(0.005 + random.random() / 30.)",
"def set_to_slow(self):\n self.set_remote_status(1)\n logging.info(__name__ + ' : Setting Helium Probe in SLOW rate')\n self._execute('S1')\n self.set_remote_status(3)",
"def timing(self, stat, time, sample_rate=1):\n stats = {stat: \"%f|ms\" % time}\n self.send(stats, sample_rate)",
"async def handle_set_latency(entity, service_call):\n if not isinstance(entity, SnapcastClientDevice):\n raise TypeError(\"Latency can only be set for a Snapcast client.\")\n await entity.async_set_latency(service_call.data[ATTR_LATENCY])",
"def __init__(self, t):\n\t\tself.delay = math.ceil(t / config.time_resolution)",
"def set_speed():\n pass",
"def setTimepoint(self, tp):\n\t\tpass",
"def send_buffer_times(self):\n return self._send_buffer_times",
"def update_link_statistics(self):\n if (self.track):\n key = self.id + \":\" + self.source + \"->\" + self.destination + \":\" \\\n + globals.BUFFEROCCUPANCY\n globals.statistics[key][globals.systime] = self.buffersize",
"def set_time(self, timestamp):\n\n\t\tdata = pack(\"!bL\", 2, timestamp)\n\t\tself._send_message(\"TIME\", data)",
"def set_times(self, p, f):\n self._dot_print_time = p\n self._dot_feed_time = f",
"def setup(bot: Bot) -> None:\n bot.add_cog(Latency(bot))",
"def _fill_buffer(buff, in_data, frame_count, time_info, status_flags):\n buff.put(in_data)\n return None, pyaudio.paContinue",
"def timing(self, timing):\n\n self._timing = timing",
"def set_custom_speed(self, bytes_per_second):\n self._custom_speed = bytes_per_second",
"def setSampleTime(self, sample_time):\r\n self.sample_time = sample_time",
"def __init__(self, *args, **kwargs):\n self.total_timeout = kwargs.pop('total_timeout', None)\n self.chunk_timeout = kwargs.pop('chunk_timeout', None)\n super(TimeoutProc, self).__init__(*args, **kwargs)",
"def setTimeDelay(*args):\n args[0].TimeState.TimeDelay.time_delay = args[1]",
"def set_throttling(self, state: bool, value: int = 0):\r\n if state:\r\n self.msg_send_upr.data[0] = b\"\\x26\"[0]\r\n self.msg_send_upr.data[2] = value\r\n else:\r\n self.msg_send_upr.data[:3] = b\"\\x27\"[0]\r\n self.send_and_flush(self.msg_send_upr)",
"async def ping(self, ctx: commands.Context):\r\n await ctx.send(f'Bot\\' latency is {self.bot.latency*1000:.3f}ms')",
"def __init__(self, addr, mss=4000, err_prob=0.00):\n super(PFTPServer, self).__init__()\n self.addr = addr\n self.sock.bind(addr)\n self.mss = mss\n self.err_prob = err_prob",
"def setSampleTime(self, sample_time):\n\t\tself.sample_time = sample_time",
"def SetBuffered(self, buffered):\r\n\r\n self._buffered = buffered",
"def bufferManager(self):\r\n # intialize index to current processor time\r\n index = int(time.time()%self.nBins) \r\n # open arduino connection\r\n while(True):\r\n time.sleep(0.1)\r\n curr_time = int(time.time()%self.nBins)\r\n # if new bin is entered\r\n if (not(index == curr_time)): \r\n # fill count of last second in buffer\r\n self.wpm += self.counter - self.buffer[index]\r\n self.buffer[index] = self.counter\r\n # increment index to next bin\r\n index = curr_time \r\n # reset counter\r\n self.counter = 0\r\n # push to arduino\r\n start_new_thread(self.arduPusherWPM,())",
"def run(self):\n rate = WallRate(self.ping_frequency)\n while True:\n # In case of failure, this call will take approx 10s\n try:\n # Send 5 pings at an interval of 0.2s\n output = subprocess.check_output(\"ping -c 1 %s\" % self.ip,\n shell=True, stderr=subprocess.STDOUT)\n self.time_last_seen = time.time()\n try:\n parsed_output = \\\n output.splitlines()[-1].split(' ')[3].split('/')\n latency_stats = [float(x) for x in parsed_output]\n # Since this was a single ping, min = max = avg\n self.buffer[self.current_ring_counter] = latency_stats[1]\n self.values_available = self.values_available + 1 \\\n if self.values_available < self.buffer_size \\\n else self.buffer_size\n self.current_ring_counter = \\\n (self.current_ring_counter + 1) % self.buffer_size\n\n except (KeyError, ValueError) as e:\n # Had one occasion when something was wrong with ping output\n rospy.logwarn(\"Unable to update latency statistics from \" +\n self.ip + \". Error parsing ping output: \" +\n str(e))\n except subprocess.CalledProcessError:\n # Ping failed. Do not update time last seen\n pass\n rate.sleep()",
"def test_set_speed(self):\n\n with patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \\\n patch.object(socket.socket, 'bind', return_value=True) as mock_bind, \\\n patch.object(src.drivers.hyundai_robot, 'sendSetSpeed') as setSpeed, \\\n patch.object(UdpConnector, 'appendToQueue', mock_appendToQueue_setspeed):\n\n src.drivers.hyundai_robot.udp = UdpConnector(\"localhost\", 8000)\n \n req = SetSpeedRequest()\n req.value = 5\n src.drivers.hyundai_robot.set_speed(req)\n\n src.drivers.hyundai_robot.udp.stopConsumeThread()\n \n assert setSpeed.called\n assert setSpeed.call_count == 1",
"def update_state(self, elapsed_time):\n if self.state == INITIAL_BUFFERING_STATE:\n# config_pytomo.LOG.debug('State: INITIAL_BUFFERING_STATE')\n# config_pytomo.LOG.debug('current_time=%s; Total_bytes=%s' %\n# (self.current_time, self._total_bytes))\n if (self.flv_timestamp > config_pytomo.INITIAL_BUFFER):\n self.state = PLAYING_STATE\n if config_pytomo.DEMO:\n config_pytomo.LOG.info('\\n\\nStart\\n')\n self.start_playback = elapsed_time\n self.initial_data = self._total_bytes\n try:\n self.initial_rate = (self.initial_data * 8\n / self.current_time / 1000)\n except ZeroDivisionError:\n self.initial_rate = 0\n elif self.state == PLAYING_STATE:\n# config_pytomo.LOG.debug('State: PLAYING_STATE')\n# config_pytomo.LOG.debug('current_time=%s; Total_bytes=%s' %\n# (self.current_time, self._total_bytes))\n self.accumulated_playback = self.flv_timestamp\n video_playback_time = (self.current_time - self.start_playback -\n self.accumulated_buffer)\n #print (\"PLaying state\", self.flv_timestamp, video_playback_time,\n #self.accumulated_buffer)\n if ((self.flv_timestamp - video_playback_time)\n < config_pytomo.MIN_PLAYOUT_BUFFER):\n self.state = BUFFERING_STATE\n self.interruptions += 1\n if config_pytomo.DEMO:\n config_pytomo.LOG.info('\\n\\nInterruption\\n')\n #import pdb; pdb.\n elif self.state == BUFFERING_STATE:\n# config_pytomo.LOG.debug('State: BUFFERING_STATE')\n# config_pytomo.LOG.debug('current_time=%s; Total_bytes=%s' %\n# (self.current_time, self._total_bytes))\n self.accumulated_buffer += elapsed_time\n video_playback_time = (self.current_time - self.start_playback -\n self.accumulated_buffer)\n #print \"BUFFERING_STATE \", self.flv_timestamp, video_playback_time\n if (self.flv_timestamp - video_playback_time\n > config_pytomo.MIN_PLAYOUT_RESTART):\n self.state = PLAYING_STATE\n if config_pytomo.DEMO:\n config_pytomo.LOG.info('\\n\\nRestart\\n')"
] | [
"0.6061889",
"0.5883974",
"0.5868308",
"0.5705283",
"0.56552017",
"0.5610477",
"0.54945177",
"0.54630864",
"0.5459672",
"0.54183984",
"0.53861696",
"0.5385074",
"0.53232306",
"0.53059715",
"0.52688515",
"0.5237016",
"0.52125376",
"0.51865286",
"0.5185032",
"0.5175649",
"0.51576847",
"0.5119954",
"0.5117611",
"0.5117505",
"0.5114095",
"0.5111343",
"0.51109385",
"0.51081496",
"0.5106508",
"0.5103627"
] | 0.7190556 | 0 |
Enable CL2 output, copy of clock to AD9866. | def enable_cl2_copy_ad9866(self):
self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V
self.write_versa5(0x2c,0x01) ## Enable aux output on clock 1
self.write_versa5(0x31,0x0c) ## Use clock1 aux output as input for clock2
self.write_versa5(0x63,0x01) ## Enable clock2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def disable_cl2(self):\n self.write_versa5(0x31,0x80) ## Disable divider output for clock2\n self.write_versa5(0x63,0x00) ## Disable clock2 output",
"def enable_cl2_61p44(self):\n self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V\n self.write_versa5(0x2c,0x00) ## Disable aux output on clock 1\n self.write_versa5(0x31,0x81) ## Use divider for clock2\n ## VCO multiplier is shared for all outputs, set to 68 by firmware\n ## VCO = 38.4*68 = 2611.2 MHz\n ## There is a hardwired divide by 2 in the Versa 5 at the VCO output\n ## VCO to Dividers = 2611.2 MHZ/2 = 1305.6\n ## Target frequency of 61.44 requires dividers of 1305.6/61.44 = 21.25\n ## Frational dividers are supported\n ## Set integer portion of divider 21 = 0x15, 12 bits split across 2 registers\n self.write_versa5(0x3d,0x01)\n self.write_versa5(0x3e,0x50)\n ## Set fractional portion, 30 bits, 2**24 * .25 = 0x400000\n self.write_versa5(0x32,0x01) ## [29:22]\n self.write_versa5(0x33,0x00) ## [21:14]\n self.write_versa5(0x34,0x00) ## [13:6]\n self.write_versa5(0x35,0x00) ## [5:0] and disable ss\n self.write_versa5(0x63,0x01) ## Enable clock2",
"def enable_cl1_pll1(self):\n self.write_versa5(0x17,0x02) ## Change top multiplier to 0x22\n self.write_versa5(0x18,0x20)\n self.write_versa5(0x10,0xc0) ## Enable xtal and clock\n self.write_versa5(0x13,0x03) ## Switch to clock\n self.write_versa5(0x10,0x40) ## Enable clock input only, won't lock to master",
"def enable_cl1_direct(self):\n self.write_versa5(0x17,0x02) ## Change top multiplier to 0x22\n self.write_versa5(0x18,0x20)\n self.write_versa5(0x10,0xc0) ## Enable xtal and clock\n self.write_versa5(0x13,0x03) ## Switch to clock\n self.write_versa5(0x10,0x44) ## Enable clock input only and refmode\n self.write_versa5(0x21,0x0c) ## Use previous channel, direct input, may have skew",
"def enable_cl2_sync_76p8(self,iskw=0,fskw=31):\n iskw = iskw & 0x0f\n iskw = iskw << 4\n fskw = fskw & 0x3f\n self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V\n self.write_versa5(0x3d,0x01) ## Set divide by 0x0110\n self.write_versa5(0x3e,0x10)\n self.write_versa5(0x31,0x81) ## Enable divider output for clock2\n self.write_versa5(0x3c,iskw) ## Write integer portion of skew\n self.write_versa5(0x3f,fskw) ## Write fractional portion of skew\n self.write_versa5(0x63,0x01) ## Enable clock2 output\n self.reset_versa5()",
"def disable_cl1(self):\n self.write_versa5(0x10,0xc4) ## Enable xtal and clock\n self.write_versa5(0x21,0x81) ## Use and enable divider\n self.write_versa5(0x13,0x00) ## Use CL1 input instead of xtal\n self.write_versa5(0x10,0x80) ## Enable xtal input only\n self.write_versa5(0x17,0x04) ## Change top multiplier to 0x44\n self.write_versa5(0x18,0x40)",
"def enable_output(self):\n\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 7, 1)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 4, 1)\n self.__bus.write_byte_data(\n self.__rtcaddress, self.CONTROL, self.__rtcconfig)\n return",
"def turn_output_on(self):\n self.instr.write('RF1')\n time.sleep(self.sleep_time)",
"def d2out():\n\tsetState(\"D2\", \"-DI-PHDGN-02:CON\", CON_OUT)",
"def disable_output(self):\n\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 7, 0)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 4, 0)\n self.__bus.write_byte_data(\n self.__rtcaddress, self.CONTROL, self.__rtcconfig)\n return",
"def turn_on(self):\n self.write(\"OUT1\\n\")",
"def r2_on_off():\n \n r2_cmd_packet = b'\\x04\\x14\\x02\\x00\\x00\\xe6\\x0f'\n ser_relay.write(r2_cmd_packet)",
"def enable_amplitude_modulation(self):\n self.write(\":SOUR:AM:STAT ON\")",
"def EnableI2c(self):\n\n try:\n\n if os.path.exists('/sys/bus/i2c/devices/i2c-0/0-0060'):\n result = \" - I2C device already enabled!\"\n\n else:\n\n with open('/sys/bus/i2c/devices/i2c-0/new_device', 'a') as f:\n # 'echo '+i2c_device.driver+' '+i2c_device.addr+ '\n f.write('mpl3115 0x60')\n result = \" - I2C device enabled!\"\n\n LOG.info(result)\n\n except Exception as err:\n LOG.error(\"Error enabling I2C (device1): \" + str(err))",
"def enable(self):\n self.write(\":OUTPUT ON;\")",
"def use_i2c():\n _LIB.oled_click_use_i2c()",
"def turnOn(self):\n self.write('E;O1;E;')\n return self.output()",
"def set_write_cycle_time(self, osc_freq=32000000):\n self.SPItrans([0xac, 0x5d, 0x00, int((0.000025 * osc_freq) / 64)])\n self._wrt_defined = True",
"def set_clock_config(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_set_clock_config(self, *args, **kwargs)",
"def d1out():\n\tsetState(\"D1\", \"-DI-PHDGN-01:CON\", CON_OUT)",
"def sm_output_on(self):\n self.sm.output_on()\n #self.sm_restore_display()",
"def calibrate_power_ADC(self):\n self.send_packet('\\x61')",
"def set_clock_config(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr_set_clock_config(self, *args, **kwargs)",
"def SPIwriteenable(self):\n data=[0x06];\n self.SPItrans(data);",
"def write_reg2(self, value: int) -> None:\n self.timer_low = value\n\n self.output.setFreq(1789773 / (((value | self.timer_high) + 1) << 4))",
"def start_clock(self):\n pass",
"def d2in():\n\tsetState(\"D2\", \"-DI-PHDGN-02:CON\", CON_IN)",
"def write_reg2(self, value: int) -> None:\n self.timer_low = value\n\n self.output.setFreq(1789773 / (((value | self.timer_high) + 1) << 5))",
"def enableDebugLoadOutput(self):\n key = \"NatlinkmainDebugLoad\"\n self.userregnl.set(key, 1)",
"def set_output(self, on=False):\r\n #Note that, for SMA, switiching off the output set the automatically the mode to Fixed.... !!\r\n if on: self.write(\"OUTP1:STAT ON\")\r\n else: self.write(\"OUTP1:STAT OFF\")"
] | [
"0.7833633",
"0.74575394",
"0.7012131",
"0.6946339",
"0.66515845",
"0.66254556",
"0.6489273",
"0.6157492",
"0.60730433",
"0.595916",
"0.59293306",
"0.5682851",
"0.56691897",
"0.5629112",
"0.56212586",
"0.55156344",
"0.54842496",
"0.5464774",
"0.5436226",
"0.54299486",
"0.539665",
"0.538434",
"0.5367002",
"0.5364885",
"0.5361745",
"0.5356423",
"0.53536785",
"0.5350287",
"0.52992564",
"0.52822787"
] | 0.83531237 | 0 |
Disable CL2 clock output | def disable_cl2(self):
self.write_versa5(0x31,0x80) ## Disable divider output for clock2
self.write_versa5(0x63,0x00) ## Disable clock2 output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def disable_output(self):\n\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 7, 0)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 4, 0)\n self.__bus.write_byte_data(\n self.__rtcaddress, self.CONTROL, self.__rtcconfig)\n return",
"def disable_cl1(self):\n self.write_versa5(0x10,0xc4) ## Enable xtal and clock\n self.write_versa5(0x21,0x81) ## Use and enable divider\n self.write_versa5(0x13,0x00) ## Use CL1 input instead of xtal\n self.write_versa5(0x10,0x80) ## Enable xtal input only\n self.write_versa5(0x17,0x04) ## Change top multiplier to 0x44\n self.write_versa5(0x18,0x40)",
"def enable_cl2_copy_ad9866(self):\n self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V\n self.write_versa5(0x2c,0x01) ## Enable aux output on clock 1\n self.write_versa5(0x31,0x0c) ## Use clock1 aux output as input for clock2\n self.write_versa5(0x63,0x01) ## Enable clock2",
"def disable(self):\n self.write(\":OUTPUT OFF;\")",
"def turn_output_off(self):\n self.instr.write('RF0')\n time.sleep(self.sleep_time)",
"def turn_off(self):\n self.write(\"OUT0\\n\")",
"def sm_output_off(self):\n self.sm.output_off()",
"def noDisplay(self):\n self.displaycontrol &= ~self.LCD_DISPLAYON\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)",
"def systemOff():\n # Updated 11/19/16\n I2C.write_byte_data(Valve_bus, pinOut_O, 0x00 )\n I2C.write_byte_data(Pump_Mag_bus, pinOut_O, 0x00)",
"def disable_low_freq_out(self):\n self.write(\":SOUR:LFO:STAT OFF\")",
"def disable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT OFF\")",
"def enable_cl2_61p44(self):\n self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V\n self.write_versa5(0x2c,0x00) ## Disable aux output on clock 1\n self.write_versa5(0x31,0x81) ## Use divider for clock2\n ## VCO multiplier is shared for all outputs, set to 68 by firmware\n ## VCO = 38.4*68 = 2611.2 MHz\n ## There is a hardwired divide by 2 in the Versa 5 at the VCO output\n ## VCO to Dividers = 2611.2 MHZ/2 = 1305.6\n ## Target frequency of 61.44 requires dividers of 1305.6/61.44 = 21.25\n ## Frational dividers are supported\n ## Set integer portion of divider 21 = 0x15, 12 bits split across 2 registers\n self.write_versa5(0x3d,0x01)\n self.write_versa5(0x3e,0x50)\n ## Set fractional portion, 30 bits, 2**24 * .25 = 0x400000\n self.write_versa5(0x32,0x01) ## [29:22]\n self.write_versa5(0x33,0x00) ## [21:14]\n self.write_versa5(0x34,0x00) ## [13:6]\n self.write_versa5(0x35,0x00) ## [5:0] and disable ss\n self.write_versa5(0x63,0x01) ## Enable clock2",
"def noBlink(self):\n self.displaycontrol &= ~self.LCD_BLINKON\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)",
"def enable_cl1_pll1(self):\n self.write_versa5(0x17,0x02) ## Change top multiplier to 0x22\n self.write_versa5(0x18,0x20)\n self.write_versa5(0x10,0xc0) ## Enable xtal and clock\n self.write_versa5(0x13,0x03) ## Switch to clock\n self.write_versa5(0x10,0x40) ## Enable clock input only, won't lock to master",
"def disable_amplitude_modulation(self):\n self.write(\":SOUR:AM:STAT OFF\")",
"def disable(self):\n self.out.close()\n sys.stdout = self._stdout",
"def disable_modulation(self):\n self.write(\":OUTPUT:MOD OFF;\")\n self.write(\":lfo:stat off;\")",
"def disableDebugLoadOutput(self):\n key = \"NatlinkmainDebugLoad\"\n self.userregnl.delete(key)",
"def stop_timing_no_callback(self) -> None:\n self._is_timing = False",
"def rtsOff():\n pass",
"def disable_relays(self):\n #ensure clock low and data high\n self.e.clear_bit(7)\n self.e.set_bit(5)\n time.sleep(0.01)\n\n #pulse the clock line\n self.e.set_bit(7)\n time.sleep(0.01)\n self.e.clear_bit(7)\n\n #clear the data line\n self.e.clear_bit(5)",
"def disableDebugCallbackOutput(self):\n key = \"NatlinkmainDebugCallback\"\n self.userregnl.delete(key)",
"def postproc_disable(self):\n self.write(\":CALC:MATH:STATE OFF\")\n self.write(\":CALC2:LIM:STATE OFF\")\n self.write(\":CALC3:AVER:STATE OFF\")",
"def disable_reporting(self):\n self.reporting = False\n msg = chr(REPORT_DIGITAL + self.port_number)\n msg += chr(0)\n self.board.sp.write(msg)",
"def disable_gps(self):\n self.pass_command(b\"AT+CGPS=0,1\")\n return self.read_output()",
"def disable_idle_states(self):\n # Disable C1 (cluster shutdown).\n self.set_sysfile_value('/sys/devices/system/cpu/cpu0/cpuidle/state1/disable', 1, verify=False)\n # Disable C0.\n self.set_sysfile_value('/sys/kernel/debug/idle_debug/enable_idle', 0xFF, verify=False)",
"def turnOff(self):\n self.write(\"E;O0;E;\")\n return self.output()",
"def start_clock(self):\n pass",
"def enable_cl1_direct(self):\n self.write_versa5(0x17,0x02) ## Change top multiplier to 0x22\n self.write_versa5(0x18,0x20)\n self.write_versa5(0x10,0xc0) ## Enable xtal and clock\n self.write_versa5(0x13,0x03) ## Switch to clock\n self.write_versa5(0x10,0x44) ## Enable clock input only and refmode\n self.write_versa5(0x21,0x0c) ## Use previous channel, direct input, may have skew",
"def turnLightingSystemOff():\n dislin.light('OFF')"
] | [
"0.6932594",
"0.69154507",
"0.66963387",
"0.6469269",
"0.6422932",
"0.62550884",
"0.6240097",
"0.6142875",
"0.6009278",
"0.5959486",
"0.59415823",
"0.5938613",
"0.5936487",
"0.5925635",
"0.5921237",
"0.5806834",
"0.5805442",
"0.580352",
"0.5793692",
"0.5790586",
"0.57899404",
"0.57535046",
"0.57204235",
"0.569011",
"0.568266",
"0.5678271",
"0.56585914",
"0.56507885",
"0.56464636",
"0.5622362"
] | 0.8451922 | 0 |
Enable CL2 output at 61.44MHz | def enable_cl2_61p44(self):
self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V
self.write_versa5(0x2c,0x00) ## Disable aux output on clock 1
self.write_versa5(0x31,0x81) ## Use divider for clock2
## VCO multiplier is shared for all outputs, set to 68 by firmware
## VCO = 38.4*68 = 2611.2 MHz
## There is a hardwired divide by 2 in the Versa 5 at the VCO output
## VCO to Dividers = 2611.2 MHZ/2 = 1305.6
## Target frequency of 61.44 requires dividers of 1305.6/61.44 = 21.25
## Frational dividers are supported
## Set integer portion of divider 21 = 0x15, 12 bits split across 2 registers
self.write_versa5(0x3d,0x01)
self.write_versa5(0x3e,0x50)
## Set fractional portion, 30 bits, 2**24 * .25 = 0x400000
self.write_versa5(0x32,0x01) ## [29:22]
self.write_versa5(0x33,0x00) ## [21:14]
self.write_versa5(0x34,0x00) ## [13:6]
self.write_versa5(0x35,0x00) ## [5:0] and disable ss
self.write_versa5(0x63,0x01) ## Enable clock2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enable_cl2_copy_ad9866(self):\n self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V\n self.write_versa5(0x2c,0x01) ## Enable aux output on clock 1\n self.write_versa5(0x31,0x0c) ## Use clock1 aux output as input for clock2\n self.write_versa5(0x63,0x01) ## Enable clock2",
"def disable_cl2(self):\n self.write_versa5(0x31,0x80) ## Disable divider output for clock2\n self.write_versa5(0x63,0x00) ## Disable clock2 output",
"def disable_cl1(self):\n self.write_versa5(0x10,0xc4) ## Enable xtal and clock\n self.write_versa5(0x21,0x81) ## Use and enable divider\n self.write_versa5(0x13,0x00) ## Use CL1 input instead of xtal\n self.write_versa5(0x10,0x80) ## Enable xtal input only\n self.write_versa5(0x17,0x04) ## Change top multiplier to 0x44\n self.write_versa5(0x18,0x40)",
"def enable_cl1_direct(self):\n self.write_versa5(0x17,0x02) ## Change top multiplier to 0x22\n self.write_versa5(0x18,0x20)\n self.write_versa5(0x10,0xc0) ## Enable xtal and clock\n self.write_versa5(0x13,0x03) ## Switch to clock\n self.write_versa5(0x10,0x44) ## Enable clock input only and refmode\n self.write_versa5(0x21,0x0c) ## Use previous channel, direct input, may have skew",
"def enable_cl1_pll1(self):\n self.write_versa5(0x17,0x02) ## Change top multiplier to 0x22\n self.write_versa5(0x18,0x20)\n self.write_versa5(0x10,0xc0) ## Enable xtal and clock\n self.write_versa5(0x13,0x03) ## Switch to clock\n self.write_versa5(0x10,0x40) ## Enable clock input only, won't lock to master",
"def d2out():\n\tsetState(\"D2\", \"-DI-PHDGN-02:CON\", CON_OUT)",
"def sm_output_on(self):\n self.sm.output_on()\n #self.sm_restore_display()",
"def enable(self):\n self.write(\":OUTPUT ON;\")",
"def enable_cl2_sync_76p8(self,iskw=0,fskw=31):\n iskw = iskw & 0x0f\n iskw = iskw << 4\n fskw = fskw & 0x3f\n self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V\n self.write_versa5(0x3d,0x01) ## Set divide by 0x0110\n self.write_versa5(0x3e,0x10)\n self.write_versa5(0x31,0x81) ## Enable divider output for clock2\n self.write_versa5(0x3c,iskw) ## Write integer portion of skew\n self.write_versa5(0x3f,fskw) ## Write fractional portion of skew\n self.write_versa5(0x63,0x01) ## Enable clock2 output\n self.reset_versa5()",
"def on_L2(self):\r\n self.log()",
"def turn_output_on(self):\n self.instr.write('RF1')\n time.sleep(self.sleep_time)",
"def enableDebugLoadOutput(self):\n key = \"NatlinkmainDebugLoad\"\n self.userregnl.set(key, 1)",
"def warmup():\n print camera.CoolerOFF()\n camera.status.update()",
"def use_i2c():\n _LIB.oled_click_use_i2c()",
"def enable_output(self):\n\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 7, 1)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 4, 1)\n self.__bus.write_byte_data(\n self.__rtcaddress, self.CONTROL, self.__rtcconfig)\n return",
"def enable_low_freq_out(self):\n self.write(\":SOUR:LFO:STAT ON\")",
"def turn_on(self):\n self.write(\"OUT1\\n\")",
"def enable_amplitude_modulation(self):\n self.write(\":SOUR:AM:STAT ON\")",
"def sm_output_off(self):\n self.sm.output_off()",
"def set_output(self, on=False):\r\n #Note that, for SMA, switiching off the output set the automatically the mode to Fixed.... !!\r\n if on: self.write(\"OUTP1:STAT ON\")\r\n else: self.write(\"OUTP1:STAT OFF\")",
"def set_output(self, on=False):\r\n #Note that, for SMA, switiching off the output set the automatically the mode to Fixed.... !!\r\n if on: self.write(\"OUTP1:STAT ON\")\r\n else: self.write(\"OUTP1:STAT OFF\")",
"def set_output(self, on=False):\r\n if on: self.write(\"OUTP ON\")\r\n else: self.write(\"OUTP OFF\")",
"def turnOn(self):\n self.write('E;O1;E;')\n return self.output()",
"def d1out():\n\tsetState(\"D1\", \"-DI-PHDGN-01:CON\", CON_OUT)",
"def turnLightingSystemOn():\n dislin.light('ON')",
"def enableDebugCallbackOutput(self):\n key = \"NatlinkmainDebugCallback\"\n self.userregnl.set(key, 1)",
"def set_output(self, on=False):\r\n _debug('simq03b_api.set_output')\r\n \r\n #Note that, for SMA, switiching off the output set the automatically the mode to Fixed.... !!\r\n if on: self.write(\"OUTP:STAT ON\")\r\n else: self.write(\"OUTP:STAT OFF\")",
"def postproc_disable(self):\n self.write(\":CALC:MATH:STATE OFF\")\n self.write(\":CALC2:LIM:STATE OFF\")\n self.write(\":CALC3:AVER:STATE OFF\")",
"def disable(self):\n self.write(\":OUTPUT OFF;\")",
"def power_on(self):\n return self.inst.write(':OUTP ON')"
] | [
"0.7363293",
"0.7133375",
"0.6799634",
"0.6786987",
"0.66550547",
"0.59399265",
"0.5891924",
"0.5827261",
"0.5815432",
"0.57599264",
"0.574008",
"0.56148237",
"0.5605408",
"0.5511515",
"0.5497506",
"0.54570615",
"0.54495126",
"0.54291975",
"0.53969467",
"0.5371264",
"0.5371264",
"0.53554004",
"0.5347559",
"0.53099567",
"0.5308036",
"0.5298026",
"0.5274557",
"0.5242264",
"0.5240393",
"0.5227488"
] | 0.7136829 | 1 |
Pass CL1 input directly with buffering to AD9866 | def enable_cl1_direct(self):
self.write_versa5(0x17,0x02) ## Change top multiplier to 0x22
self.write_versa5(0x18,0x20)
self.write_versa5(0x10,0xc0) ## Enable xtal and clock
self.write_versa5(0x13,0x03) ## Switch to clock
self.write_versa5(0x10,0x44) ## Enable clock input only and refmode
self.write_versa5(0x21,0x0c) ## Use previous channel, direct input, may have skew | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sendBuffer():\n dislin.sendbf()",
"def clear_buffer(self):\r\n self.cmd = \"TRAC:CLE\"\r\n self.I_source.write(self.cmd)\r\n self.in_buffer = int(self.I_source.query(\"TRAC:POIN:ACT?\"))",
"def copeWithInput(self, s):\n \n if self.debug > 5:\n CPL.log('TCCShell.copeWithInput', \"Nub %s read: %r, with buf=%r\" % (self.name, s, self.inputBuffer))\n\n while 1:\n # Connections to the TCC's tccuser captive account return lines\n # terminated by CRLF, but with the LF coming at the start of the \"next\n # line\". Odd, and to be investigated. In the meanwhile, strip leading LFs\n #\n if len(self.inputBuffer) > 0 and self.inputBuffer[0] == '\\n':\n self.inputBuffer = self.inputBuffer[1:]\n \n reply, leftover = self.decoder.decode(self.inputBuffer, s)\n s = None\n if self.debug > 5:\n CPL.log('TCCShell.copeWithInput', \"decoded: %s, yielding buf=%r\" % (reply, leftover))\n\n self.inputBuffer = leftover\n if not reply:\n break\n\n if self.log:\n try:\n txt = reply['RawText']\n except:\n txt = \"UNKNOWN INPUT\"\n self.log.log(txt, note='<')\n \n # Here's the special TCC bit: search for YourUserNum, \n if self.cid == None:\n newCID = self.findUserNum(reply['KVs'])\n if newCID != None:\n self.cid = newCID\n CPL.log('TCCShell.copeWithInput', \"setting CID=%s\" % (self.cid))\n self.connected()\n \n cmd = self.getCmdForReply(reply)\n r = Hub.Reply.Reply(cmd, reply['flag'], reply['KVs'])\n cmd.reply(r)",
"def Start(self):\n self.CallClient(standard.ReadBuffer, next_state=\"WrongProcess\")",
"def Buffer(self) -> _n_0_t_7[_n_0_t_6]:",
"def Buffer(self) -> _n_0_t_7[_n_0_t_6]:",
"def Buffer(self) -> _n_0_t_7[_n_0_t_6]:",
"def enable_cl2_copy_ad9866(self):\n self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V\n self.write_versa5(0x2c,0x01) ## Enable aux output on clock 1\n self.write_versa5(0x31,0x0c) ## Use clock1 aux output as input for clock2\n self.write_versa5(0x63,0x01) ## Enable clock2",
"def __init__ (self, input) :\r\n ReaderA.__init__(self) # call parent\r\n # print '************************* input = ', input, type(input)\r\n self.buffer_ = input # this is any thing that can be indexed\r\n self.current_ = 0",
"def d1in():\n\tsetState(\"D1\", \"-DI-PHDGN-01:CON\", CON_IN)",
"def pc_input_buffers_full(self, *args):\n return _spacegrant_swig.binary_sink_sptr_pc_input_buffers_full(self, *args)",
"def read(self):\n try:\n cmd = 'SAMP:COUN 1' \n self.handle.write(cmd) #one sample per trigger\n self.handle.write('TRIG:SOUR BUS') #triggered by command\n self.handle.write('TRIG:COUN 1') #one trigger to return to wait for trg\n self.handle.write('INIT:IMM') #DVM to \"wait for trigger\" \n self.handle.write('*TRG')\n startTime = time.time()\n while True: #wait until measuring flag goes to 0\n try:\n measured = self.handle.ask(\"DATA:POIN?\")\n measured = measured.strip() #remove CR \n measured = int(measured) #convert to number\n if measured == 1: #final number of samples achieved\n break;\n except Exception:\n print('Dvm34411:read() polling failed !')\n raise\n \n if time.time() - startTime > self.timeout:\n print('Dvm34411:read() timeout !')\n return False\n \n time.sleep(1) \n reading = self.handle.ask('R? 1;') #definite-Length block format\n except Exception:\n print('Dvm34411.read() failed !')\n raise\n if reading[0] != '#':\n print('Dvm34411.read() DLB format error - # expected !')\n return False\n digits = int(reading[1])\n reading = reading[2 + digits:]\n rdg = float(reading)\n return rdg",
"def has_buffered_inputs(self):",
"def disable_cl1(self):\n self.write_versa5(0x10,0xc4) ## Enable xtal and clock\n self.write_versa5(0x21,0x81) ## Use and enable divider\n self.write_versa5(0x13,0x00) ## Use CL1 input instead of xtal\n self.write_versa5(0x10,0x80) ## Enable xtal input only\n self.write_versa5(0x17,0x04) ## Change top multiplier to 0x44\n self.write_versa5(0x18,0x40)",
"def do_input(self, line):\n cmd_args = io.parse_cmd_args(line, io.input_cmd_pattern)\n if cmd_args:\n success = self.manager.input(\n cmd_args.get('target'), \n cmd_args.get('cslist'), \n mode=cmd_args.get('mode')\n )\n if success:\n self.console_print(\"Yippee! input successfull!\", settings.INFO_FORMAT)\n else:\n self.console_print(\"Sorry, something kinda went wrong! You can try again.\", settings.ERROR_FORMAT)\n else:\n self.console_print(settings.COMMMAND_ARGS_ERROR_MSG, settings.ERROR_FORMAT)",
"def proxy_input(self, src, dest, buf, extra):\n self.route.proxy_input(src, dest, buf=buf, extra=extra)",
"def pc_input_buffers_full(self, *args):\n return _spacegrant_swig.general_burster_2_sptr_pc_input_buffers_full(self, *args)",
"def pc_input_buffers_full(self, *args):\n return _uhd_swig.usrp_sink_sptr_pc_input_buffers_full(self, *args)",
"def d1out():\n\tsetState(\"D1\", \"-DI-PHDGN-01:CON\", CON_OUT)",
"def pc_input_buffers_full(self, *args):\n return _spacegrant_swig.hdlc_framer_sptr_pc_input_buffers_full(self, *args)",
"def pc_input_buffers_full(self, *args):\n return _TestA_swig.cleanslate_sptr_pc_input_buffers_full(self, *args)",
"def pc_input_buffers_full(self, *args):\n return _spacegrant_swig.message_debug_sptr_pc_input_buffers_full(self, *args)",
"def clear_input_buffer(ser):\n sys.stderr.write(\"\\n\")\n LOGGER.warning(\"***** Unprocessed input buffer content *****\")\n sys.stderr.write(\"\\n\")\n capture = \"\"\n rx = 1\n while rx:\n rx = ser.read(ser.in_waiting or 1)\n if rx:\n capture += rx.decode(errors=\"replace\")\n if capture != \"\":\n LOGGER.info(capture.strip())\n sys.stderr.write(\"\\n\")\n LOGGER.warning(\"*\" * 44)\n sys.stderr.write(\"\\n\")\n ser.reset_input_buffer()",
"def pc_input_buffers_full(self, *args):\n return _spacegrant_swig.udp_debug_sptr_pc_input_buffers_full(self, *args)",
"def pc_input_buffers_full(self, *args):\n return _spacegrant_swig.G3RUH_descramble_sptr_pc_input_buffers_full(self, *args)",
"def fillBuffer():\n buff[bufferCounter].next = dataIn",
"def pc_input_buffers_full(self, *args):\n return _uhd_swig.usrp_source_sptr_pc_input_buffers_full(self, *args)",
"def cstringio_buf(self):\r\n pass",
"def test23b(self):\n self.spawn(\"./binary\").stdin(\"2\").stdin(\"-1\").stdin(\"322\").stdin(\"23\").stdout(\"00010111\\n\").exit(0)",
"def _initialize_buffers(self) -> None:"
] | [
"0.55064315",
"0.5361335",
"0.5239564",
"0.523027",
"0.5139434",
"0.5139434",
"0.5139434",
"0.5129984",
"0.5062465",
"0.50183797",
"0.5008681",
"0.49903777",
"0.4955058",
"0.4947918",
"0.49448824",
"0.4941468",
"0.49169698",
"0.4904956",
"0.49041283",
"0.489346",
"0.48889497",
"0.4888259",
"0.48775533",
"0.48775032",
"0.48678175",
"0.4848223",
"0.483244",
"0.48278004",
"0.48254445",
"0.48124602"
] | 0.57670486 | 0 |
Use CL1 as input to PLL1 and then to AD9866 | def enable_cl1_pll1(self):
self.write_versa5(0x17,0x02) ## Change top multiplier to 0x22
self.write_versa5(0x18,0x20)
self.write_versa5(0x10,0xc0) ## Enable xtal and clock
self.write_versa5(0x13,0x03) ## Switch to clock
self.write_versa5(0x10,0x40) ## Enable clock input only, won't lock to master | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enable_cl1_direct(self):\n self.write_versa5(0x17,0x02) ## Change top multiplier to 0x22\n self.write_versa5(0x18,0x20)\n self.write_versa5(0x10,0xc0) ## Enable xtal and clock\n self.write_versa5(0x13,0x03) ## Switch to clock\n self.write_versa5(0x10,0x44) ## Enable clock input only and refmode\n self.write_versa5(0x21,0x0c) ## Use previous channel, direct input, may have skew",
"def enable_cl2_61p44(self):\n self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V\n self.write_versa5(0x2c,0x00) ## Disable aux output on clock 1\n self.write_versa5(0x31,0x81) ## Use divider for clock2\n ## VCO multiplier is shared for all outputs, set to 68 by firmware\n ## VCO = 38.4*68 = 2611.2 MHz\n ## There is a hardwired divide by 2 in the Versa 5 at the VCO output\n ## VCO to Dividers = 2611.2 MHZ/2 = 1305.6\n ## Target frequency of 61.44 requires dividers of 1305.6/61.44 = 21.25\n ## Frational dividers are supported\n ## Set integer portion of divider 21 = 0x15, 12 bits split across 2 registers\n self.write_versa5(0x3d,0x01)\n self.write_versa5(0x3e,0x50)\n ## Set fractional portion, 30 bits, 2**24 * .25 = 0x400000\n self.write_versa5(0x32,0x01) ## [29:22]\n self.write_versa5(0x33,0x00) ## [21:14]\n self.write_versa5(0x34,0x00) ## [13:6]\n self.write_versa5(0x35,0x00) ## [5:0] and disable ss\n self.write_versa5(0x63,0x01) ## Enable clock2",
"def enable_cl2_copy_ad9866(self):\n self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V\n self.write_versa5(0x2c,0x01) ## Enable aux output on clock 1\n self.write_versa5(0x31,0x0c) ## Use clock1 aux output as input for clock2\n self.write_versa5(0x63,0x01) ## Enable clock2",
"def disable_cl1(self):\n self.write_versa5(0x10,0xc4) ## Enable xtal and clock\n self.write_versa5(0x21,0x81) ## Use and enable divider\n self.write_versa5(0x13,0x00) ## Use CL1 input instead of xtal\n self.write_versa5(0x10,0x80) ## Enable xtal input only\n self.write_versa5(0x17,0x04) ## Change top multiplier to 0x44\n self.write_versa5(0x18,0x40)",
"def d1in():\n\tsetState(\"D1\", \"-DI-PHDGN-01:CON\", CON_IN)",
"def d1out():\n\tsetState(\"D1\", \"-DI-PHDGN-01:CON\", CON_OUT)",
"def generate_l1ca_codes(self, prn):\n output_taps = self.l1_code_phase_assignments.loc[prn, 'CA_Phase_Select']\n g1 = self.generate_mls(10, self.g1_feedback_taps, [10])\n g2 = self.generate_mls(10, self.g2_feedback_taps, output_taps)\n ca_code = []\n for index, bit in enumerate(g1):\n ca_code.append(int((bit + g2[index]) % 2))\n return ca_code",
"def d2in():\n\tsetState(\"D2\", \"-DI-PHDGN-02:CON\", CON_IN)",
"def set_lo1b(self):\n freq_units = 1\n power_units = 1\n intpol_units = 1\n power = 10\n mindist = 10000000\n if self.rcvr_name == \"RcvrArray18_26\":\n lo1b_value = freq_calc.get_lo1b()\n self.seq.add_param(self.mng_name, \"testToneFreq\",\n str(lo1b_value), 1)\n self.seq.add_param(self.mng_name, \"testTonePowerLevel\",\n str(power), 4)\n self.seq.add_param(self.mng_name, \"S8\", \"5\", 1)\n # if only type 2 beams are being used then completely disconnect\n # the Lo1B signal to stop signals from the unused\n # idm type 1 kfpa beam atrributing signal strength so that it is\n # possible to balance this path\n # Note: The setting of loConfig must match the S10 setting below.\n # The LO1 coordinator has an S10 dependency method which sets S10\n # based on loConfig. (Hence the setting of S10 is redundant.)\n if 1 not in rcvr.get_idmtypes():\n self.seq.add_param(self.mng_name, \"S10\", \"2\")\n self.seq.add_param(self.mng_name, \"loConfig\", \"TrackA_BNotUsed\", 1)\n else:\n self.seq.add_param(self.mng_name, \"S10\", \"1\")\n self.seq.add_param(self.mng_name, \"loConfig\", \"TrackA_TToneB\", 1)\n\n elif self.rcvr_name in (\"Rcvr26_40\", \"Rcvr68_92\"):\n # reset switch for LO1B because these rcvrs use it\n self.seq.add_param(self.mng_name, \"loConfig\", \"TrackA_TToneB\", 1)\n self.seq.add_param(self.mng_name, \"S10\", \"1\")\n # mmc_sw = freq_calc.get_filter()\n # if mmc_sw != \"F2_8\":\n # MMConvFile = (getConfigValue(\"/home/gbt\", \"YGOR_TELESCOPE\") +\n # \"/etc/config/MMConverter.conf\")\n # self.read_config_file(MMConvFile)\n # lo1b_value = freq_calc.get_lo1b()\n # self.seq.add_param(self.mng_name, \"testToneFreq\",\n # str(lo1b_value), 1)\n # self.seq.add_param(self.mng_name, \"loConfig\",\n # \"TrackA_TToneB\", 1)\n # if lo1b_value >= self.LO1_freq_pwr[self.num_pwr_readings - 1]:\n # power = self.LO1_pwr[self.num_pwr_readings - 1]\n # elif lo1b_value <= self.LO1_freq_pwr[0]:\n # power = self.LO1_pwr[0]\n # else:\n # for i in range(0, self.num_pwr_readings):\n # freq_diff = abs((lo1b_value) - self.LO1_freq_pwr[i])\n # if freq_diff < mindist:\n # ind = i\n # mindist = freq_diff\n # power = self.LO1_pwr[i]\n # if ind != self.num_pwr_readings:\n # if lo1b_value - self.LO1_freq_pwr[ind] > 0:\n # freq_diff = (self.LO1_freq_pwr[ind + 1] -\n # self.LO1_freq_pwr[ind])\n # freq_units = (self.LO1_freq_pwr[ind + 1] -\n # self.LO1_freq_pwr[ind])\n # power_units = (self.LO1_pwr[ind + 1] -\n # self.LO1_pwr[ind])\n # if power_units != 0:\n # intpol_units = power_units / freq_units\n # power = (self.LO1_pwr[ind] +\n # intpol_units * freq_diff)\n # elif lo1b_value - self.LO1_freq_pwr[ind] < 0:\n # power_units = (self.LO1_pwr[ind] -\n # self.LO1_pwr[ind - 1])\n # if power_units != 0:\n # intpol_units = power_units / freq_units\n # power = (self.LO1_pwr[ind] -\n # intpol_units * freq_units)\n\n # self.seq.add_param(self.mng_name, \"testTonePowerLevel\",\n # str(power), 3)\n # self.seq.add_param(self.mng_name, \"S8\", \"3\", 1)\n # elif self.rcvr_name in (\"Rcvr68_92\"):\n # self.seq.add_param(self.mng_name, \"loConfig\",\n # \"TrackA_BNotUsed\", 1)\n # self.seq.add_param(self.mng_name, \"testTonePowerLevel\",\n # \"-110\", 4)\n # self.seq.add_param(self.mng_name, \"testToneFreq\", \"17000\", 1)\n if self.rcvr_name in (\"Rcvr26_40\"):\n self.seq.add_param(\"LO1,MMConverterCrd\", \"sw1\", \"2\", 1)\n else:\n self.seq.add_param(\"LO1,MMConverterCrd\", \"sw1\", \"1\", 1)\n # self.seq.add_param(\"LO1,MMConverterCrd\", \"filter\", mmc_sw, 1)\n\n else: # disconnect lo1b thru s10 for rcvrs that do not use it\n 
self.seq.add_param(self.mng_name, \"loConfig\", \"TrackA_BNotUsed\", 1)\n self.seq.add_param(self.mng_name, \"S10\", \"2\")",
"def calibrate_power_ADC(self):\n self.send_packet('\\x61')",
"def d2out():\n\tsetState(\"D2\", \"-DI-PHDGN-02:CON\", CON_OUT)",
"def JC(lcsc1c2, Pc1, Pc2):\n\n JC = 1/(2*lcsc1c2 - (Pc1 + Pc2))\n return JC",
"def get_dc_offset(self):\n self.dev.write(1, 'A1')\n digit1, digit2 = self.dev.read(0x81, 64)[:2]\n # Save the data as voltage between 0.0 and 5.0\n self.data1.append((digit1 + 256*digit2)*5.0/1024)",
"def l2_from_l1c_dataset(self, datasetl1c, flags):\n if self.context.get_config_value(\"network\").lower() == \"w\":\n l2a_dim_sizes_dict = {\n \"wavelength\": len(datasetl1c[\"wavelength\"]),\n \"series\": len(np.unique(datasetl1c[\"series_id\"])),\n }\n dataset_l2a = self.hdsb.create_ds_template(\n l2a_dim_sizes_dict, \"W_L2A\", propagate_ds=datasetl1c, ds=datasetl1c\n )\n dataset_l2a = dataset_l2a.assign_coords(wavelength=datasetl1c.wavelength)\n\n series_id = np.unique(datasetl1c[\"series_id\"])\n dataset_l2a[\"series_id\"].values = series_id\n for variablestring in [\n \"acquisition_time\",\n \"viewing_azimuth_angle\",\n \"viewing_zenith_angle\",\n \"solar_azimuth_angle\",\n \"solar_zenith_angle\",\n \"epsilon\",\n \"rhof\",\n ]:\n temp_arr = np.empty(len(series_id))\n for i in range(len(series_id)):\n flagged = np.any(\n [\n DatasetUtil.unpack_flags(datasetl1c[\"quality_flag\"])[x]\n for x in flags\n ],\n axis=0,\n )\n ids = np.where(\n (datasetl1c[\"series_id\"] == series_id[i]) & (flagged == False)\n )\n # ids = np.where((datasetl1c['series_id'] == series_id[i]) & (\n # datasetl1c['quality_flag'] == 0))\n temp_arr[i] = np.mean(datasetl1c[variablestring].values[ids])\n dataset_l2a[variablestring].values = temp_arr\n\n if self.context.get_config_value(\"network\").lower() == \"l\":\n l2a_dim_sizes_dict = {\n \"wavelength\": len(datasetl1c[\"wavelength\"]),\n \"series\": len(datasetl1c[\"series_id\"]),\n }\n dataset_l2a = self.hdsb.create_ds_template(\n l2a_dim_sizes_dict, \"L_L2A\", propagate_ds=datasetl1c, ds=datasetl1c\n )\n dataset_l2a = dataset_l2a.assign_coords(wavelength=datasetl1c.wavelength)\n\n return dataset_l2a",
"def ab2_cd(CFL, uold1, uold2, unew):\n\n unew[1:-1] = uold1[1:-1] - 0.75 * CFL * (uold1[2:] - uold1[:-2]) +\\\n 0.25 * CFL * (uold2[2:] - uold2[:-2])\n unew[-1] = uold1[-1] - 1.5 * CFL * (uold1[-1] - uold1[-2]) +\\\n 0.5 * CFL * (uold2[-1] - uold2[-2])\n\n return unew",
"def disable_cl2(self):\n self.write_versa5(0x31,0x80) ## Disable divider output for clock2\n self.write_versa5(0x63,0x00) ## Disable clock2 output",
"def setdevs():\n\n #q1,q2,q3,q4 = (epics.caget(\"QUAD:LTU1:620:BCTRL\"),epics.caget(\"QUAD:LTU1:640:BCTRL\"),epics.caget(\"QUAD:LTU1:660:BCTRL\"),epics.caget(\"QUAD:LTU1:680:BCTRL\"))\n\n q1 = -79.7640302\n q2 = 79.67962984\n q3 = -83.36844826\n q4 = 68.4844249\n\n print q1,q2,q3,q4\n\n epics.caput(\"SIOC:SYS0:ML00:CALCOUT997\",q1) # QUAD:LTU1:620:BCTRL\n epics.caput(\"SIOC:SYS0:ML00:CALCOUT998\",q2) # QUAD:LTU1:640:BCTRL\n epics.caput(\"SIOC:SYS0:ML00:CALCOUT999\",q3) # QUAD:LTU1:660:BCTRL\n epics.caput(\"SIOC:SYS0:ML00:CALCOUT000\",q4) # QUAD:LTU1:680:BCTRL",
"def hsdpa_physical_downlink_settings(self):\r\r\n\r\r\n config_list = []\r\r\n\r\r\n config_list.append (\"\")\r\r\n\r\r\n config_list.append ( \"%-24s %-18s\" % (\"Channel( Carrier 1)\", \"Level\"))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"==================\", \"=====\"))\r\r\n\r\r\n pcpich_level = -10.2\r\r\n self.set_pcpich_code_level(carrier=1, leveldB=pcpich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-CPICH\", pcpich_level))\r\r\n\r\r\n psch_level = -15.2\r\r\n ssch_level = psch_level\r\r\n pccpch_level = -12.2\r\r\n self.write('CONFigure:WCDMa:SIGN:DL:LEVel:PSCH %s' %psch_level)\r\r\n self.write('CONFigure:WCDMa:SIGN:DL:LEVel:SSCH %s' %ssch_level)\r\r\n self.write('CONFigure:WCDMa:SIGN:DL:LEVel:PCCPch %s' %pccpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-SCH\", psch_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"S-SCH\", ssch_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-CCPCH\", pccpch_level))\r\r\n\r\r\n\r\r\n # SCCPH power level and channelisation code\r\r\n sccpch_level = -12.2\r\r\n self.set_dl_chan_code_level(dl_chan='SCCPch', code=2, level_dB=sccpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"S-CCPCH\", sccpch_level))\r\r\n\r\r\n # PICH power level and channelisation code\r\r\n pich_level = -15.2\r\r\n self.set_dl_chan_code_level(dl_chan='PICH', code=2, level_dB=pich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"PICH\", pich_level))\r\r\n\r\r\n # AICH power level and channelisation code\r\r\n aich_level = -15.2\r\r\n self.set_dl_chan_code_level(dl_chan='AICH', code=3, level_dB=aich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"AICH\", aich_level))\r\r\n\r\r\n # DPCH power and channelisation code\r\r\n dpch_level = -18.2\r\r\n self.set_dl_chan_code_level(dl_chan='DPCH', code=3, level_dB=dpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"DPCH\", dpch_level))\r\r\n\r\r\n # F-DPCH power and channelisation ocde\r\r\n fdpch_level = -18.2\r\r\n self.set_dl_chan_code_level(dl_chan='FDPCh', code=6, level_dB=fdpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"F-DPCH\", fdpch_level))\r\r\n\r\r\n # DPCH enhanced settings\r\r\n self.configure_enhanced_dl_dpch()\r\r\n\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure 2 HS-SCCH: level, channelization code, UE ID and dummy UE ID\r\r\n # *****************************************************************************\r\r\n hssch_level_1 = -20.2\r\r\n hssch_level_2 = -20.2\r\r\n self.set_hssch_level(hssch_num=1, carrier=1, leveldB=hssch_level_1)\r\r\n self.set_hssch_level(hssch_num=2, carrier=1, leveldB=hssch_level_2)\r\r\n self.set_hssch_code(hssch_num=1, carrier=1, codeNum=2)\r\r\n self.set_hssch_code(hssch_num=2, carrier=1, codeNum=7)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #1\", hssch_level_1))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #2\", hssch_level_2))\r\r\n\r\r\n self.set_default_ue_id_hssch(carrier=1)\r\r\n\r\r\n # HS-PDSCH Enhanced Settings\r\r\n self.set_hsdsch_mpo(carrier=1, control=\"AUTO\", pwrOffsetManual=\"\")\r\r\n # unscheduled frame type for HSDPA\r\r\n # possible types are 'DUMMy', 'DTX'\r\r\n self.hsdsch_unsched_frames(carrier=1, usFrameType='DUMMY')\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure HS-PDSCH: level and first channelization code number\r\r\n # *****************************************************************************\r\r\n\r\r\n hsdsch_level = -1.2\r\r\n 
self.set_hsdsch_level(carrier=1, leveldB = hsdsch_level)\r\r\n self.set_hsdsch_chanelisation_code(code=1, carrier=1)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-PDSCH\", hsdsch_level))\r\r\n\r\r\n\r\r\n # // *****************************************************************************\r\r\n # Set level and channelization code of E-AGCH, E-HICH and E-RGCH.\r\r\n # *****************************************************************************\r\r\n eagch_level = -20.2\r\r\n ehich_level = -20.2\r\r\n ergch_level = -20.2\r\r\n self.set_dl_chan_code_level(dl_chan='EAGCh', code=3, level_dB=eagch_level)\r\r\n self.set_dl_chan_code_level(dl_chan='EHICh', code=6, level_dB=ehich_level)\r\r\n self.set_dl_chan_code_level(dl_chan='ERGCh', code=6, level_dB=ergch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"E-AGCH\", eagch_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"E-HICH\", ehich_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"E-RGCH\", ergch_level))\r\r\n\r\r\n config_list.append (\"\")\r\r\n\r\r\n for line in config_list:\r\r\n print line\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n\r\r\n self.hsdpa_physical_downlink_settings_carrier2()",
"def configure_adc(self, rng, res, unit = 'A'):\n self.write_to_serial(':conf:curr:dc ' + str(rng) + ',' + str(res))# + unit)",
"def LSM_acquisition(add):\n # control register\n CTRL0 = 0x1f # p.34, accelerator\n CTRL1 = 0x20\n CTRL2 = 0x21\n CTRL5 = 0x24 # p.36, magnetic\n CTRL6 = 0x25\n CTRL7 = 0x26\n FIFO_CTRL = 0x2e # p.40\n # accelerater\n OUT_X_L_A = 0x28\n OUT_X_H_A = 0x29\n OUT_Y_L_A = 0x2a\n OUT_Y_H_A = 0x2b\n OUT_Z_L_A = 0x2c\n OUT_Z_H_A = 0x2d\n # magentic\n OUT_X_L_M = 0x08\n OUT_X_H_M = 0x09\n OUT_Y_L_M = 0x0a\n OUT_Y_H_M = 0x0b\n OUT_Z_L_M = 0x0c\n OUT_Z_H_M = 0x0d\n\n # follow lsm303D arduino library\n # AFS = 0, +-2g scale\n bus.write_byte_data(add, CTRL2, 0x00)\n # 50 Hz AODR, all axis enable\n bus.write_byte_data(add, CTRL1, 0x57)\n # high resolution, 6.25Hz MODR\n bus.write_byte_data(add, CTRL5, 0x64)\n # +-4 gauss scale\n bus.write_byte_data(add, CTRL6, 0x20)\n # low power mode off, continuous conversion mode\n bus.write_byte_data(add, CTRL7, 0x00)\n # # FIFO mode\n # bus.write_byte_data(add, CTRL0, 0b01000000)\n # bus.write_byte_data(add, FIFO_CTRL, 0b01000000)\n # # accelerator with 12.5Hz, all axis enable\n # bus.write_byte_data(add, CTRL1, 0b00110111)\n # # magnetic 12.5Hz, high resolutn, temp en\n # bus.write_byte_data(add, CTRL5, 0b11100000)\n # # full scale range \\pm 12 gauss\n # bus.write_byte_data(add, CTRL6, 0b01101000)\n # # enable magnetic\n # bus.write_byte_data(add, CTRL7, 0x00)\n\n # accelerator accumulate\n while True:\n uint16_ax = (bus.read_byte_data(add, OUT_X_H_A) << 8) + \\\n bus.read_byte_data(add, OUT_X_L_A)\n uint16_ay = (bus.read_byte_data(add, OUT_Y_H_A) << 8) + \\\n bus.read_byte_data(add, OUT_Y_L_A)\n uint16_az = (bus.read_byte_data(add, OUT_Z_H_A) << 8) + \\\n bus.read_byte_data(add, OUT_Z_L_A)\n\n uint16_mx = (bus.read_byte_data(add, OUT_X_H_M) << 8) + \\\n bus.read_byte_data(add, OUT_X_L_M)\n uint16_my = (bus.read_byte_data(add, OUT_Y_H_M) << 8) + \\\n bus.read_byte_data(add, OUT_Y_L_M)\n uint16_mz = (bus.read_byte_data(add, OUT_Z_H_M) << 8) + \\\n bus.read_byte_data(add, OUT_Z_L_M)\n # accelerometer 12 bit left aligned\n # ax = twos_comp(uint16_ax>>4, 12)\n # ay = twos_comp(uint16_ay>>4, 12)\n # az = twos_comp(uint16_az>>4, 12)\n ax = twos_comp(uint16_ax, 16)\n ay = twos_comp(uint16_ay, 16)\n az = twos_comp(uint16_az, 16)\n\n mx = twos_comp(uint16_mx, 16)\n my = twos_comp(uint16_my, 16)\n mz = twos_comp(uint16_mz, 16)\n\n yield [ax, ay, az, mx, my, mz]",
"def testMAPLtoADL(self):\n \n dom, prob = self.load(\"testdata/logistics.domain.mapl\", \"testdata/logistics.p1.mapl\")\n\n t = translators.ADLCompiler()\n dom2 = t.translate(dom)\n prob2 = t.translate(prob)\n \n self.roundtrip(dom2, prob2)",
"def hsdpa_physical_downlink_settings_carrier2(self):\r\r\n carrier = 2\r\r\n\r\r\n config_list = []\r\r\n\r\r\n config_list.append ( \"%-24s %-18s\" % (\"Channel( Carrier 2)\", \"Level\"))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"==================\", \"=====\"))\r\r\n\r\r\n pcpich_level = -11\r\r\n self.set_pcpich_code_level(carrier=carrier, leveldB=pcpich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-CPICH\", pcpich_level))\r\r\n\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure 2 HS-SCCH: level, channelization code, UE ID and dummy UE ID\r\r\n # *****************************************************************************\r\r\n hssch_level_1 = -18.0\r\r\n hssch_level_2 = -18.0\r\r\n self.set_hssch_level(hssch_num=1, carrier=carrier, leveldB=hssch_level_1)\r\r\n self.set_hssch_level(hssch_num=2, carrier=carrier, leveldB=hssch_level_2)\r\r\n self.set_hssch_code(hssch_num=1, carrier=carrier, codeNum=2)\r\r\n self.set_hssch_code(hssch_num=2, carrier=carrier, codeNum=7)\r\r\n\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #1\", hssch_level_1))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #2\", hssch_level_2))\r\r\n\r\r\n self.set_default_ue_id_hssch(carrier=carrier)\r\r\n\r\r\n # HS-PDSCH Enhanced Settings\r\r\n self.set_hsdsch_mpo(carrier=carrier, control=\"AUTO\", pwrOffsetManual=\"\")\r\r\n self.hsdsch_unsched_frames(carrier=carrier, usFrameType='DUMMY')\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure HS-PDSCH: level and first channelization code number\r\r\n # *****************************************************************************\r\r\n hsdsch_level = -1.6\r\r\n self.set_hsdsch_level(carrier=carrier, leveldB = hsdsch_level)\r\r\n self.set_hsdsch_chanelisation_code(carrier=carrier, code=1)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-PDSCH\", hsdsch_level))\r\r\n\r\r\n config_list.append (\"\")\r\r\n\r\r\n for line in config_list:\r\r\n print line",
"def CL(self):",
"def __init__(self, config_table, tuning_freq, center_freq, velocity, vdef, s12_value=None):\n # need\n # tuning_freq, velocity, vdef, center_freq\n\n super(LO1, self).__init__(config_table)\n\n self.mng_name = \"LO1\"\n # self.seq = MySeq()\n self.delta = 0\n self.velframe = None\n self.vdef = config_table[\"vdef\"]\n self.swtype = config_table[\"swtype\"]\n if \"CCB\" not in config_table[\"backend\"]:\n self.rcvr_name = config_table['receiver'] #rcvr.mng\n self.number_switching_periods = 0\n self.set_freq_params(config_table[\"DOPPLERTRACKFREQ\"])\n self.be = config_table[\"backend\"]\n # self.set_velocity_params(config_table, freq_calc)\n self.set_velocity_params(config_table, velocity, vdef)\n self.set_switching_params(config_table)\n # self.tuning_freq = freq_calc.get_tuning_freq()\n self.tuning_freq = tuning_freq\n self.center_freq = \"0\"\n # self.set_if_center_freq(freq_calc)\n self.set_if_center_freq(center_freq)\n # self.set_sw(ifpath, config_table[\"phasecal\"])\n self.set_sw(config_table[\"phasecal\"], s12_value=s12_value)\n # self.set_lo1b(freq_calc, ifpath, rcvr)\n self.set_lo1b()\n # self.set_lo1b_off()\n\n if \"RcvrArray18_26\" in config_table[\"receiver\"]:\n self.seq.remove_param((\"LO1\", \"autoSetLOPowerLevel\", \"1\"))\n self.seq.add_param(self.mng_name, \"testTonePowerLevel\",\n \"10\", 4)\n self.seq.add_param(self.mng_name, \"loPowerLevel\", \"12\", 4)\n self.seq.add_param(self.mng_name, \"autoSetLOPowerLevel\",\n \"0\", 3)\n else:\n if \"RcvrArray18_26\" not in config_table[\"receiver\"]:\n self.set_lo1ab_off()",
"def croscor(self):\n l=self.l\n if len(self.inputs['L0'])>1: \n title ='Align L0 inputs'\n chans0=self.chansl0\n cmddata='take1SSM(1,'\n CorellateGroup(self.vb,title,'1',chans0,'0','0','0','0','15',cmddata)\n if len(self.inputs['L1'])>1: \n title ='Align L1 inputs'\n chans1=self.chansl1\n cmddata='take1SSM(2,'\n CorellateGroup(self.vb,title,'1','0',chans1,'0','0','0','15',cmddata)\n if len(self.inputs['L2'])>1: \n title ='Align L2 inputs'\n chans2=self.chansl2\n cmddata='take1SSM(3,'\n CorellateGroup(self.vb,title,'1','0','0',chans2,'0','0','15',cmddata)\n if len(self.inputs['H0'])>1: \n title ='Align H0 inputs'\n chansh=self.chansh0\n cmddata='takerbSSM('\n CorellateGroup(self.vb,title,'1','0','0','0',chansh,'0','15',cmddata)\n if l==3: \n print \"Correlatin L0-L1 ----------------\"\n title ='Find L0-L1 inputs delay'\n chans0=self.l0anyinp\n chans1=self.l1anyinp\n cmddata='take2SSM(1,2,'\n CorellateGroup(self.vb,title,'1',chans0,chans1,'0','0','250','15',cmddata)\n if l==5: \n title ='Find L0-L2 inputs delay'\n chans0=self.l0anyinp\n chans2=self.l1anyinp\n cmddata='take2SSM(1,3,'\n CorellateGroup(self.vb,title,'1',chans0,'0',chans2,'0','3520','15',cmddata)\n if l==6: \n title ='Find L1-L2 inputs delay'\n chans1=self.l1anyinp\n chans2=self.l2anyinp\n cmddata='take2SSM(2,3,'\n CorellateGroup(self.vb,title,'1','0',chans1,chans2,'0','3312','15',cmddata)\n if l==7: \n chans0=self.l0anyinp\n chans1=self.l1anyinp\n chans2=self.l2anyinp\n title ='Find L0-L1 inputs delay'\n cmddata='take2SSM(1,2,'\n CorellateGroup(self.vb,title,'1',chans0,chans1,'0','0','250','15',cmddata)\n title ='Find L1-L2 inputs delay'\n cmddata='take2SSM(2,3,'\n CorellateGroup(self.vb,title,'1','0',chans1,chans2,'0','3312','15',cmddata)",
"def enable_cl2_sync_76p8(self,iskw=0,fskw=31):\n iskw = iskw & 0x0f\n iskw = iskw << 4\n fskw = fskw & 0x3f\n self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V\n self.write_versa5(0x3d,0x01) ## Set divide by 0x0110\n self.write_versa5(0x3e,0x10)\n self.write_versa5(0x31,0x81) ## Enable divider output for clock2\n self.write_versa5(0x3c,iskw) ## Write integer portion of skew\n self.write_versa5(0x3f,fskw) ## Write fractional portion of skew\n self.write_versa5(0x63,0x01) ## Enable clock2 output\n self.reset_versa5()",
"def readaccl(self):\r\n\t\tdata0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_X_L_A)\r\n\t\tdata1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_X_H_A)\r\n\t\t\r\n\t\txAccl = data1 * 256 + data0\r\n\t\tif xAccl > 32767 :\r\n\t\t\txAccl -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from LSM330_OUT_Y_L_M(0x2A), 2 bytes\r\n\t\tY-Axis Mag LSB, Y-Axis Mag MSB\"\"\"\r\n\t\tdata0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Y_L_A)\r\n\t\tdata1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Y_H_A)\r\n\t\t\r\n\t\tyAccl = data1 * 256 + data0\r\n\t\tif yAccl > 32767 :\r\n\t\t\tyAccl -= 65536\r\n\t\t\r\n\t\t\"\"\"Read data back from LSM330_OUT_Z_L_M(0x2C), 2 bytes\r\n\t\tZ-Axis Mag LSB, Z-Axis Mag MSB\"\"\"\r\n\t\tdata0 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Z_L_A)\r\n\t\tdata1 = bus.read_byte_data(LSM330_ACCL_ADDRESS, LSM330_OUT_Z_H_A)\r\n\t\t\r\n\t\tzAccl = data1 * 256 + data0\r\n\t\tif zAccl > 32767 :\r\n\t\t\tzAccl -= 65536\r\n\t\t\r\n\t\treturn {'x' : xAccl, 'y' : yAccl, 'z' : zAccl}",
"def init(self, scl: Pin, sda: Pin, *, freq: int = 400000) -> None:\n ...",
"def testMAPLtoPDDL(self):\n \n dom, prob = self.load(\"testdata/logistics.domain.mapl\", \"testdata/logistics.p1.mapl\")\n\n t = translators.MAPLCompiler()\n dom2 = t.translate(dom)\n prob2 = t.translate(prob)\n\n self.roundtrip(dom2, prob2)",
"def update(self, dacA, dacB):\n binaryA = int(dacA*self.slopeA + self.offsetA)\n self.device.i2c(LJTickDAC.DAC_ADDRESS,\n [48, binaryA // 256, binaryA % 256],\n SDAPinNum=self.sdaPin, SCLPinNum=self.sclPin)\n binaryB = int(dacB*self.slopeB + self.offsetB)\n self.device.i2c(LJTickDAC.DAC_ADDRESS,\n [49, binaryB // 256, binaryB % 256],\n SDAPinNum=self.sdaPin, SCLPinNum=self.sclPin)"
] | [
"0.6483886",
"0.6363984",
"0.63225305",
"0.61263555",
"0.5904488",
"0.58352244",
"0.56169915",
"0.5590398",
"0.54821926",
"0.52404267",
"0.51905435",
"0.5168966",
"0.51326644",
"0.509936",
"0.50831527",
"0.50602263",
"0.505986",
"0.505492",
"0.50499403",
"0.5048724",
"0.5006374",
"0.49470004",
"0.49363905",
"0.4915958",
"0.49031198",
"0.49006134",
"0.48817623",
"0.48669982",
"0.48652297",
"0.4825648"
] | 0.674299 | 0 |
Stop using CL1 and revert to default xtal oscillator input | def disable_cl1(self):
self.write_versa5(0x10,0xc4) ## Enable xtal and clock
self.write_versa5(0x21,0x81) ## Use and enable divider
self.write_versa5(0x13,0x00) ## Switch back to xtal instead of CL1 input
self.write_versa5(0x10,0x80) ## Enable xtal input only
self.write_versa5(0x17,0x04) ## Change top multiplier to 0x44
self.write_versa5(0x18,0x40) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wave_tx_stop():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVHLT, 0, 0))",
"def stop():\n set_power(0)",
"def stop(self):\n self.change_power(0)",
"def stop():\n global running\n global reading\n global zeroed\n if zeroed == False:\n time.sleep(1)\n xy_stage.reset_input_buffer();\n changeStatus('Zeroing')\n\n sendSerial(xy_stage, \"0lo0;0or;\\r\\n\");\n time.sleep(com_sleep);\n print(recSerial(xy_stage));\n time.sleep(com_sleep);\n sendSerial(xy_stage,\"0pr\"+str(start_x)+\";1pr\"+str(start_y)+\";\\r\\n\");\n time.sleep(com_sleep);\n print(recSerial(xy_stage));\n\n running = False\n reading = False\n zeroed = True\n changeStatus('Ready')",
"def stop_step_sweep(self):\n self.write(\":SOUR:SWE:CONT:STAT OFF\")",
"def stop(self):\n self.change_x = 0",
"def stop(self):\n self.change_x = 0",
"def stop(self):\n self.change_x = 0",
"def stop(self):\n self.change_x = 0",
"def stop_motor(self):\n self.output(self.steering_pin, 0)\n self.pi.set_servo_pulsewidth(self.steering_pin, 0)",
"def stop( self ):\n self.data_source.si.daqStop()\n self.timer.stop()\n \n #re-enable the play button\n self.play_button.setEnabled(True)\n self.stop_button.setEnabled(False)\n self.spinbox_timestep.setEnabled(True)",
"def disable_cl2(self):\n self.write_versa5(0x31,0x80) ## Disable divider output for clock2\n self.write_versa5(0x63,0x00) ## Disable clock2 output",
"def stop_station(self):\n if self._sense_hat:\n self._sense_hat.clear()\n\n if self._log_timer:\n self._log_timer.cancel()\n\n if self._upload_timer:\n self._upload_timer.cancel()\n\n if self._update_timer:\n self._update_timer.cancel()",
"def rc_off(self):\n # reset control values\n channels = [1500] * 8\n controlout = OverrideRCIn(channels=channels)\n self.contolp.publish(controlout)\n self.rate.sleep()\n # send twice to make sure\n controlout = OverrideRCIn(channels=channels)\n self.contolp.publish(controlout)",
"def deinit(self):\n self._serial.write('spi.deinit()\\r\\n'.encode('utf-8'))\n sleep(2)\n self._serial.reset_input_buffer()",
"def rtsOff():\n pass",
"def enable_cl1_pll1(self):\n self.write_versa5(0x17,0x02) ## Change top multiplier to 0x22\n self.write_versa5(0x18,0x20)\n self.write_versa5(0x10,0xc0) ## Enable xtal and clock\n self.write_versa5(0x13,0x03) ## Switch to clock\n self.write_versa5(0x10,0x40) ## Enable clock input only, won't lock to master",
"def reset(self):\n self.t = 0\n # two outputs: the thrusters, u_r and u_l and stop neuron\n self.action = [0.0, 0.0, 0.0]\n # x, vx, y, vy, theta, omega\n # self.state = [2.0, 0.0, 2.0, 0.0, 0.0, 0.0]\n self.state = self.start_cnd\n x, vx, y, vy, theta, omega = self.state\n# print x, self.state\n self.init_distance = self.getDistance()\n \n self.solver = ode(self.dX)\n self.solver.set_integrator('dopri5') \n self.solver.set_initial_value(self.state, self.t)",
"def reset(self):\n self._timestep = np.array([0])",
"def interrupt(self):\n self.interrupt_tick_tocking = True",
"def poweron(self) -> None:\n self.servo_reset()",
"def reset(self):\n self.rst.value(0) # RST on\n self.sleep_us(100) # reset impulse has to be >100 ns and <100 ms\n self.rst.value(1) # RST off\n # Defaults after reset:\n self.power = self.POWER_DOWN\n self.addressing = self.ADDRESSING_HORIZ\n self.instr = self.INSTR_BASIC\n self.display_mode = self.DISPLAY_BLANK\n self.temp_coeff = self.TEMP_COEFF_0\n self.bias = self.BIAS_1_11\n self.voltage = 3060",
"def force(self, osc):\n pass",
"def reset_target(scope) -> None:\n if globals.cw_platform == \"CW303\" or globals.cw_platform == \"CWLITEXMEGA\":\n scope.io.pdic = 'low'\n time.sleep(0.1)\n scope.io.pdic = 'high_z' #XMEGA doesn't like pdic driven high\n time.sleep(0.1) #xmega needs more startup time\n else: \n scope.io.nrst = 'low'\n time.sleep(0.05)\n scope.io.nrst = 'high_z'\n time.sleep(0.05)",
"def pibooth_reset(cfg, hard):",
"def stop_calibration(self):\n self.socket.send_string('c')\n return self.socket.recv_string()",
"def stop(self):\n self.running = False\n self.hop_channel(\"auto\")",
"def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'): \n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":0 }', 5)",
"def _reset(self) -> ts.TimeStep:",
"def stop(self):\n\n self.active = False\n angle_pwm = self.angle_to_pmw(const.Driving.NEUTRAL_STEERING_ANGLE)\n self.pwm.set_pwm(0, 0, angle_pwm)"
] | [
"0.60477453",
"0.59301674",
"0.5764608",
"0.5732112",
"0.57145166",
"0.5615371",
"0.5615371",
"0.5615371",
"0.5615371",
"0.559175",
"0.5590022",
"0.55622673",
"0.5554824",
"0.55445796",
"0.547578",
"0.5452735",
"0.54222983",
"0.54042745",
"0.54038775",
"0.5397799",
"0.539359",
"0.5379931",
"0.5373616",
"0.5373399",
"0.53679174",
"0.5356445",
"0.5355645",
"0.53442705",
"0.5327531",
"0.5323606"
] | 0.686816 | 0 |
Play a round Arguments ========= | def play(self, tround, context): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def RunTurn( lobound=1, hibound=20 ):\n\tpass",
"def newRound():\r\n pass",
"def strategy(self, game, args=()):",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def __round__(self, *args, **kwargs): # real signature unknown\n pass",
"def play_game():\n pass",
"def sample(self, *args, **kwargs):",
"def main():\n play_game(progression)",
"def game_play(self):",
"def test_play(self):\n\t\tRockAI, PaperAI = self.DummyAI('r'), self.DummyAI('p')\n\t\tresult = rps_main.play(RockAI, PaperAI, rounds=58, verbosity=0)\n\n\t\tself.assertTrue(len(result) == 2, \"Check that the result is a 2-tuple, \\\n\t\t\t\t\t\t\t\tor *some* kind of length 2 container, anyway.\")\n\t\tself.assertTrue(result[0] == 0, \"First AI should score 0 points.\")\n\t\tself.assertTrue(result[1] == 58, \"Second AI should score 58 points.\")",
"def play_round(self):\n move1 = self.p1.move()\n move2 = self.p2.move()\n print(f\"P1: {move1} P2: {move2}\")\n self.p1.learn(move1, move2)\n self.p2.learn(move2, move1)\n \"\"\"Proneround_score and ptworound_score resets\n to 0 at beginning of every round.\"\"\"\n poneround_score = 0\n ptworound_score = 0\n if self.beats(move1, move2):\n print(\"Player 1 Wins This Round\")\n poneround_score = 1\n self.pone_score += 1\n elif self.beats(move2, move1):\n print(\"Player 2 Wins This Round\")\n ptworound_score = 1\n self.ptwo_score += 1\n else:\n print(\"Tie! No Points.\")\n print(f\"Round Points - P1: {poneround_score} | P2: {ptworound_score}\")",
"def play(self, rumble, **kwargs) -> Any:\n pass # abstract methods does not provides an implementation in Python",
"def play_round(self):\n print('='*10) # Round separation display\n print(f'Round {self.round_num}:')\n for player in self.players:\n\n # Player separation display:\n if player != self.players[0]:\n print('-' * 5)\n\n self.play_turn(player)\n \n # Return if exit conditions are met\n if (self.exit_flag) or (self.winner is not None) or (self.board.full()):\n return\n self.round_num += 1",
"def simulate(self):\n self.round += 1",
"def test_round(doctest):",
"def play_round(self):\r\n move1 = self.p1.move()\r\n move2 = self.p2.move()\r\n # Checks if User Wants to Quit Game:\r\n if move1 == \"quit\" or move2 == \"quit\":\r\n self.game_over(True)\r\n print(f\"Player One: {move1.upper()}\\nPlayer Two: {move2.upper()}\")\r\n self.keep_score(move1, move2)\r\n self.p1.learn(move1, move2)\r\n self.p2.learn(move2, move1)"
] | [
"0.6288704",
"0.5847064",
"0.57366675",
"0.5717588",
"0.5717588",
"0.5717588",
"0.5717588",
"0.5717588",
"0.5717588",
"0.5717588",
"0.5717588",
"0.5717588",
"0.5717588",
"0.5717588",
"0.5717588",
"0.5717588",
"0.5717588",
"0.5717588",
"0.5717588",
"0.56073916",
"0.56053",
"0.5558059",
"0.55553985",
"0.55322886",
"0.5512544",
"0.548411",
"0.5469658",
"0.5434947",
"0.54336536",
"0.54315853"
] | 0.6492613 | 0 |
Read evaluation dataset containing arms played, rewards observed, and contexts presented to the arms Arguments ========= | def readData(path):
try:
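# open() is called first only so a missing file raises FileNotFoundError before np.loadtxt parses it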
open(path)
dataset = np.loadtxt(path)
# arms played by uniformly-random policy as recorded in dataset
arms = dataset[:, 0].astype(int)
# rewards received by playing arms using a uniformly-random policy as
# recorded in dataset
rewards = dataset[:, 1]
# context vector
contexts = dataset[:, 2:]
except FileNotFoundError:
raise
return(arms, rewards, contexts) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _read_analogies(self):\n questions = []\n questions_skipped = 0\n with open(self._options.eval_data, \"rb\") as analogy_f:\n for line in analogy_f:\n if line.startswith(\":\"): # Skip comments.\n continue\n words = line.strip().lower().split(\" \")\n # print words\n ids = [self._cate2id.get(w.strip()) for w in words]\n # print ids\n if None in ids or len(ids) != 4:\n questions_skipped += 1\n else:\n questions.append(np.array(ids))\n print(\"Eval analogy file: \", self._options.eval_data)\n print(\"Questions: \", len(questions))\n print(\"Skipped: \", questions_skipped)\n questions = np.array(questions, dtype=np.int32)\n self._analogy_questions = questions\n self._target_field = np.array(\n list(set(questions[:, 3])), dtype=np.int32)\n np.random.shuffle(self._analogy_questions)",
"def read_analogies_file(eval_file='questions-words.txt', word2id={}):\n questions = []\n questions_skipped = 0\n with open(eval_file, \"rb\") as analogy_f:\n for line in analogy_f:\n if line.startswith(b\":\"): # Skip comments.\n continue\n words = line.strip().lower().split(b\" \") # lowercase\n ids = [word2id.get(w.strip()) for w in words]\n if None in ids or len(ids) != 4:\n questions_skipped += 1\n else:\n questions.append(np.array(ids))\n print(\"Eval analogy file: \", eval_file)\n print(\"Questions: \", len(questions))\n print(\"Skipped: \", questions_skipped)\n analogy_questions = np.array(questions, dtype=np.int32)\n return analogy_questions",
"def offlineEvaluate(mab, arms, rewards, contexts, nrounds=None): \n \n assert isinstance(nrounds, int), \"'nrounds' argument should be an integer.\"\n assert nrounds > 0, \"'nrounds' argument should be a positive integer.\"\n\n assert (not (np.isscalar(arms) or isinstance(arms, list))) and\\\n all([isinstance(np.asscalar(element), int) for element in arms]),\\\n \"'arms' argument should be an array of integers.\" \n assert (not (np.isscalar(rewards) or isinstance(rewards, list))) and\\\n all([isinstance(element, float) for element in rewards]),\\\n \"'rewards' argument should be an array of float.\" \n assert (not (np.isscalar(contexts) or isinstance(contexts, list))) and\\\n all([[isinstance(element, float) for element in elements] for elements in contexts]),\\\n \"'contexts' argument should be an array of integer.\" \n\n assert np.size(contexts, 0) == len(rewards) == len(arms), \\\n \"'contexts', 'rewards', and 'arms' arguments should have the same number of events.\"\n assert (np.size(contexts, 1) / mab.narms).is_integer() ,\\\n \"'contexts' argument should have same dimensions for each of the arms.\" \n \n arm_history = []\n per_round_rewards = []\n tround = 1\n for i in range(len(arms)):\n # Play arm\n played_arm = mab.play(tround, contexts[i]) \n # If played arm equals arm played by a uniformly-random policy\n if played_arm == arms[i]: \n reward = rewards[i]\n # Update MAB state\n mab.update(played_arm-1, reward, contexts[i]) \n # Store arm and rewards history\n arm_history.append([played_arm, reward])\n # Store observed reward per round \n per_round_rewards.append(reward) \n # if the desired number of matching arms are found, stop and \n # return per round rewards \n if tround == nrounds: \n return(per_round_rewards)\n # Increment tround (only if arm playe by bandit equals arm in the \n # dataset)\n tround += 1 \n \n return(per_round_rewards)",
"def load_data():\n print (\"Loading the arxiv.\")\n arxiv = datasets.load_arxiv(depth=2)\n\n print (\"Loading the RNN training data.\")\n print (\"Select depth (2/3/5):\")\n depth = int(input())\n rnn_training_data = datasets.training_data(\"rnn\", depth=depth)\n\n n_inputs = rnn_training_data.training.X.shape[2] # dimension of w2v model\n n_outputs = rnn_training_data.training.dimY\n\n return arxiv, rnn_training_data, n_inputs, n_outputs",
"def read_data():\n ADV_MAT = np.load('ADV.npy');\n ADJ_MAT = np.load('ADJ.npy');\n PR_MAT = np.load('PR.npy'); \n NN_MAT = np.load('NN.npy');\n for i in range(ADV_MAT.shape[0]):RUNNING_DATA['ADV___'+str(i)] = ADV_MAT[i];\n for i in range(ADJ_MAT.shape[0]):RUNNING_DATA['ADJ___'+str(i)] = ADJ_MAT[i];\n for i in range(PR_MAT.shape[0]):RUNNING_DATA['PR___'+str(i)] = PR_MAT[i];\n for i in range(NN_MAT.shape[0]):RUNNING_DATA['NN___'+str(i)] = NN_MAT[i];",
"def get_eval_data() -> GraphDataset:\n _load_data_if_needed()\n return eval_data",
"def get_data(self):\n\n self.read_expression()\n self.read_tfs()\n self.read_metadata()\n self.set_gold_standard_and_priors()",
"def evaluate(self):\n\n rew_rl = []\n for ckpt_path in self._config.eval_ckpt_paths:\n self._load_ckpt(ckpt_path, self._config.ckpt_num)\n\n logger.info(\n \"Run %d evaluations for %d environments\",\n self._config.num_eval,\n len(self._envs),\n )\n rollouts, info = self._evaluate(record_video=self._config.record_video)\n\n info_stat = info.get_stat()\n\n rew_rl.append(np.mean(info[\"rew_rl\"]))\n\n logger.info(\"All Eval Rew Values: %s\", rew_rl)\n logger.info(\"Eval Rew RL Average: %f, Std: %f\", np.mean(rew_rl), np.std(rew_rl))\n\n os.makedirs(\"result\", exist_ok=True)\n with h5py.File(\"result/{}.hdf5\".format(self._config.run_name), \"w\") as hf:\n for k, v in info.items():\n hf.create_dataset(k, data=info[k])\n with open(\"result/{}.txt\".format(self._config.run_name), \"w\") as f:\n for k, v in info_stat.items():\n f.write(\"{}\\t{:.03f} $\\\\pm$ {:.03f}\\n\".format(k, v[0], v[1]))",
"def test_run_experiment_lr_eval_with_dictionary(self):\n source = \"lr-eval-dict\"\n experiment_id = \"lr_eval_dict\"\n\n # set up a temporary directory since\n # we will be using getcwd\n temp_dir = tempfile.TemporaryDirectory(prefix=getcwd())\n\n old_file_dict = {\"pred\": \"data/files/predictions_scaled_with_subgroups.csv\"}\n\n new_file_dict = copy_data_files(temp_dir.name, old_file_dict, rsmtool_test_dir)\n\n config_dict = {\n \"predictions_file\": new_file_dict[\"pred\"],\n \"system_score_column\": \"score\",\n \"description\": \"An evaluation of LinearRegression predictions.\",\n \"human_score_column\": \"h1\",\n \"id_column\": \"id\",\n \"experiment_id\": \"lr_eval_dict\",\n \"subgroups\": \"QUESTION\",\n \"scale_with\": \"asis\",\n \"trim_min\": 1,\n \"trim_max\": 6,\n }\n\n check_run_evaluation(source, experiment_id, config_obj_or_dict=config_dict)",
"def load_eval_datasets(cfg):\n # Temporarily change dataset type to be frame_by_frame\n cur_dataset_type = cfg.dataset_type\n if cfg.dataset_type == 'graph_net':\n cfg.dataset_type = 'single_frame_graph_net'\n else:\n cfg.dataset_type = 'frame_by_frame'\n\n # Get the evaluation (frame by frame) datasets\n train_set, val_set, test_set = get_split_datasets(cfg.dataset)\n\n # Restore dataset type\n cfg.dataset_type = cur_dataset_type\n return train_set, val_set, test_set",
"def ml_evaluate_samples_path(self) -> str:\n return join(self.machine_learning_path, 'evaluate')",
"def evaluate(self, data, category, dims=None, overall=True):\n n_data = len(data)\n eval_scores = [{} for _ in range(n_data)]\n\n if dims == None:\n eval_dims = self.dimensions\n else:\n assert isinstance(dims, list)\n eval_dims = dims\n\n for dim in eval_dims:\n # Calculate average sentence-level scores for 'consistency' and 'fluency'\n if dim == 'consistency' or dim == 'fluency':\n src_list, output_list = [], []\n n_sents = [] # the number of sentences in each generated summary\n for i in range(n_data):\n source = data[i]['source']\n system_outputs = sent_tokenize(data[i]['system_output'])\n n_sents.append(len(system_outputs))\n for j in range(len(system_outputs)):\n src_list.append(source)\n output_list.append(system_outputs[j])\n input_list = add_question(dimension=dim, output=output_list, src=src_list, task=self.task)\n sent_score = self.scorer.score(input_list, self.task, category, dim)\n\n # Get average score for each sample\n start_idx = 0\n score = []\n for cur_n_sent in n_sents:\n # prevent denominator from being 0\n score.append(sum(sent_score[start_idx:start_idx + cur_n_sent]) / (cur_n_sent + 1e-6))\n start_idx += cur_n_sent\n\n # Calculate summary-level score for 'coherence' and 'relevance'\n elif dim == 'coherence' or dim == 'relevance':\n src_list, output_list, ref_list = [], [], []\n for i in range(n_data):\n src_list.append(data[i]['source'])\n output_list.append(data[i]['system_output'])\n if dim == 'relevance':\n ref_list.append(data[i]['reference'])\n input_list = add_question(dimension=dim, output=output_list, src=src_list, ref=ref_list, task=self.task)\n score = self.scorer.score(input_list, self.task, category, dim)\n\n # Please customize other dimensions here for summarization\n else:\n raise NotImplementedError('The input format for this dimension is still undefined. \\\n Please customize it first.')\n\n for i in range(n_data):\n eval_scores[i][dim] = score[i]\n\n # Customize your overall score here.\n if overall == True:\n for i in range(n_data):\n eval_scores[i]['overall'] = np.mean(list(eval_scores[i].values()))\n\n return eval_scores",
"def main(argv):\n # 0: Initial steps\n print_configuration_info() \n # fix random seed for reproducibility\n np.random.seed(7)\n # Make an instance of the class Utils\n utils = Utils()\n\n # Obtain the file number\n maxnumber = utils.find_file_maxnumber(RESULTS + DATASET + '/')\n filenumber = maxnumber + 1\n print('file number: ', filenumber)\n \n # 1: Load data (X and y_emb)\n print('Loading data')\n\n # Load activity_dict where every activity name has its associated word embedding\n with open(ACTIVITY_EMBEDDINGS) as f:\n activity_dict = json.load(f)\n \n # Load the activity indices\n with open(ACTIVITY_TO_INT) as f:\n activity_to_int_dict = json.load(f)\n \n # Load the index to activity relations \n with open(INT_TO_ACTIVITY) as f:\n int_to_activity = json.load(f)\n\n # Load embedding matrix, X and y sequences (for y, load both, the embedding and index version)\n embedding_matrix = np.load(EMBEDDING_WEIGHTS) \n X = np.load(X_FILE)\n #y_emb = np.load(Y_EMB_FILE)\n # We need the following two lines for StratifiedKFold\n y_index_one_hot = np.load(Y_INDEX_FILE) \n y_index = np.argmax(y_index_one_hot, axis=1)\n\n # To use oversampling methods in imbalance-learn, we need an activity_index:embedding relation\n # Build it using INT_TO_ACTIVITY and ACTIVITY_EMBEDDINGS files\n activity_index_to_embedding = {}\n for key in int_to_activity:\n activity_index_to_embedding[key] = activity_dict[int_to_activity[key]]\n\n\n max_sequence_length = X.shape[1] # TODO: change this to fit the maximum sequence length of all the datasets\n #total_activities = y_train.shape[1]\n ACTION_MAX_LENGTH = embedding_matrix.shape[1]\n TOTAL_ACTIVITIES = y_index_one_hot.shape[1]\n \n print('X shape:', X.shape) \n print('y index shape:', y_index.shape)\n \n print('max sequence length:', max_sequence_length)\n print('features per action:', embedding_matrix.shape[0])\n print('Action max length:', ACTION_MAX_LENGTH) \n print('Total activities:', TOTAL_ACTIVITIES)\n \n\n # 2: Generate K partitions of the dataset (KFold cross-validation) \n # TODO: Decide between KFold or StratifiedKFold\n # if StratifiedKFold \n skf = StratifiedKFold(n_splits=FOLDS)\n \n # if KFold\n #kf = KFold(n_splits = FOLDS)\n\n fold = 0\n # 4: For each partition (train, test):\n metrics_per_fold = utils.init_metrics_per_fold()\n \n #for train, test in kf.split(X):\n for train, test in skf.split(X, y_index):\n print(\"%d Train: %s, test: %s\" % (fold, len(train), len(test))) \n X_train = X[train]\n y_train = y_index_one_hot[train]\n y_train_index = y_index[train]\n X_val = X[test]\n y_val = y_index_one_hot[test] \n print('Activity distribution %s' % Counter(y_index)) \n\n # 3.1: Build the LSTM model\n print('Building model...')\n sys.stdout.flush()\n \n model = Sequential()\n \n model.add(Embedding(input_dim=embedding_matrix.shape[0], output_dim=embedding_matrix.shape[1], weights=[embedding_matrix], input_length=max_sequence_length, trainable=False))\n # Change input shape when using embeddings\n model.add(LSTM(512, return_sequences=False, recurrent_dropout=DROPOUT, dropout=DROPOUT, input_shape=(max_sequence_length, embedding_matrix.shape[1]))) \n # For regression use a linear dense layer with embedding_matrix.shape[1] size (300 in this case)\n # TODO: consider the need of normalization before calculating the loss (we may use a Lambda layer with L2 norm) \n model.add(Dense(TOTAL_ACTIVITIES))\n model.add(Activation('softmax'))\n # TODO: check different regression losses; cosine_proximity could be the best one for us? 
\n #model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse', 'mae'])\n model.compile(loss=LOSS, optimizer='adam', metrics=['accuracy', 'mse', 'mae'])\n print('Model built')\n print(model.summary())\n sys.stdout.flush()\n \n # 3.2: Manage imbalanced data in the training set (SMOTE?) -> Conf option TREAT_IMBALANCE\n # NOTE: We may have a problem with SMOTE, since there are some classes with only 1-3 samples and SMOTE needs n_samples < k_neighbors (~5)\n # NOTE: RandomOverSampler could do the trick, however it generates just copies of current samples\n # TODO: Think about a combination between RandomOverSampler for n_samples < 5 and SMOTE?\n # TODO: First attempt without imbalance management\n if(TREAT_IMBALANCE == True):\n ros = RandomOverSampler(random_state=42) # sampling_strategy={4:10, 12:10, 14:10, 8:10, 13:10}\n print('Original dataset samples for training %s' % len(y_train_index))\n print('Original dataset shape for training %s' % Counter(y_train_index))\n X_train_res, y_train_index_res = ros.fit_resample(X_train, y_train_index)\n print('Resampled dataset samples for training %s' % len(y_train_index_res))\n print('Resampled dataset shape for training %s' % Counter(y_train_index_res))\n y_train_res = np_utils.to_categorical(y_train_index_res)\n \n print(\"y_train_res shape: \", y_train_res.shape)\n else:\n X_train_res = X_train\n y_train_res = y_train\n \n # 3.3: Train the model with the imbalance-corrected training set and use the test set to validate\n print('Training...') \n sys.stdout.flush()\n # Define the callbacks to be used (EarlyStopping and ModelCheckpoint)\n # TODO: Do we need EarlyStopping here?\n #earlystopping = EarlyStopping(monitor='val_loss', patience=100, verbose=0) \n # TODO: improve file naming for multiple architectures\n weights_file = WEIGHTS + DATASET + '/' + str(filenumber).zfill(2) + '-' + EXPERIMENT_ID + '-fold' + str(fold) + WEIGHTS_FILE_ROOT\n modelcheckpoint = ModelCheckpoint(weights_file, monitor='val_loss', save_best_only=True, verbose=0)\n callbacks = [modelcheckpoint]\n history = model.fit(X_train_res, y_train_res, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(X_val, y_val), shuffle=True, callbacks=callbacks)\n # 3.4: Store the generated learning curves and metrics with the best model (ModelCheckpoint?) 
-> Conf option SAVE\n plot_filename = PLOTS + DATASET + '/' + str(filenumber).zfill(2) + '-' + EXPERIMENT_ID + '-fold' + str(fold)\n #plot_training_info(['loss'], True, history.history, plot_filename)\n if SAVE == True:\n utils.plot_training_info(['loss'], True, history.history, plot_filename)\n print(\"Plots saved in \" + PLOTS + DATASET + '/')\n print(\"Training finished\")\n \n # Print the best val_loss\n min_val_loss = min(history.history['val_loss'])\n print(\"Validation loss: \" + str(min_val_loss)+ \" (epoch \" + str(history.history['val_loss'].index(min_val_loss))+\")\") \n model.load_weights(weights_file)\n yp = model.predict(X_val, batch_size=BATCH_SIZE, verbose=1)\n # yp has the activity predictions (one-hot vectors) \n ypreds = np.argmax(yp, axis=1)\n\n # Calculate the metrics \n ytrue = np.argmax(y_val, axis=1)\n print(\"ytrue shape: \", ytrue.shape)\n print(\"ypreds shape: \", ypreds.shape)\n \n # Use scikit-learn metrics to calculate confusion matrix, accuracy, precision, recall and F-Measure\n \"\"\"\n cm = confusion_matrix(ytrue, ypreds)\n \n # Normalize the confusion matrix by row (i.e by the number of samples\n # in each class)\n cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n np.set_printoptions(precision=3, linewidth=1000, suppress=True)\n \n # Save also the cm to a txt file\n results_file_root = RESULTS + DATASET + '/' + str(filenumber).zfill(2) + '-' + EXPERIMENT_ID + '-fold' + str(fold)\n np.savetxt(results_file_root + '-cm.txt', cm, fmt='%.0f') \n \n np.savetxt(results_file_root+'-cm-normalized.txt', cm_normalized, fmt='%.3f')\n print(\"Confusion matrices saved in \" + RESULTS + DATASET + '/')\n \"\"\"\n # Plot non-normalized confusion matrix -> Conf option SAVE\n if SAVE == True:\n results_file_root = RESULTS + DATASET + '/' + str(filenumber).zfill(2) + '-' + EXPERIMENT_ID + '-fold' + str(fold)\n utils.plot_heatmap(ytrue, ypreds, classes=activity_to_int_dict.keys(),\n title='Confusion matrix, without normalization, fold ' + str(fold),\n path=results_file_root + '-cm.png')\n\n # Plot normalized confusion matrix\n utils.plot_heatmap(ytrue, ypreds, classes=activity_to_int_dict.keys(), normalize=True,\n title='Normalized confusion matrix, fold ' + str(fold),\n path=results_file_root + '-cm-normalized.png')\n\n \n #Dictionary with the values for the metrics (precision, recall and f1)\n metrics = utils.calculate_evaluation_metrics(ytrue, ypreds) \n metrics_per_fold = utils.update_metrics_per_fold(metrics_per_fold, metrics)\n # Update fold counter\n fold += 1\n\n # 5: Calculate the mean and std for the metrics obtained for each partition and store (always) \n metrics_per_fold = utils.calculate_aggregate_metrics_per_fold(metrics_per_fold) \n metrics_filename = RESULTS + DATASET + '/' + str(filenumber).zfill(2) + '-' + EXPERIMENT_ID + '-complete-metrics.json'\n with open(metrics_filename, 'w') as fp:\n json.dump(metrics_per_fold, fp, indent=4)\n print(\"Metrics saved in \" + metrics_filename)\n #print(metrics_per_fold)",
"def _read_in(config):\n # specify needed genes\n need_genes = _need_genes(config)\n idc = need_genes+['sample', 'project_id', 'sample_type', 'sampleType',\n 'OS', '_PATIENT', 'OS.time']\n\n if config['fpath']:\n # user gives file path\n mat = _read_in_file(config['fpath'], idc)\n elif config['dpath']:\n # user gives foder path where file was saved by cancer type\n mat = _read_in_folder(config['dpath'], config['cancer'], idc)\n else:\n info('Please set -i or -d')\n sys.exit(1)\n info('read in exp successfully')\n # check mat\n if mat.shape[0] == 0:\n info('No expression data loaded, please check reference files and given gene names')\n sys.exit(1)\n # check CTL\n if 'adj_gene' in config.keys() and config['adj_gene'] == 'CTL':\n mat['CTL'] = mat[['CD8A', 'CD8B', 'GZMB', 'GZMA', 'PRF1']].T.mean()\n return(mat)",
"def evaluate(self, data, category, dims=None, overall=True):\n n_data = len(data)\n eval_scores = [{} for _ in range(n_data)]\n\n if dims == None:\n eval_dims = self.dimensions\n else:\n assert isinstance(dims, list)\n eval_dims = dims\n\n for dim in eval_dims:\n # Calculate summation score for 'engagingness'\n if dim == 'engagingness':\n src_list, output_list, context_list = [], [], []\n n_sents = [] # the number of sentences in each generated response\n for i in range(n_data):\n source = data[i]['source']\n context = data[i]['context']\n system_outputs = sent_tokenize(data[i]['system_output'])\n n_sents.append(len(system_outputs))\n for j in range(len(system_outputs)):\n src_list.append(source)\n context_list.append(context)\n output_list.append(system_outputs[j])\n input_list = add_question(dimension=dim,\n output=output_list,\n src=src_list,\n context=context_list,\n task=self.task)\n sent_score = self.scorer.score(input_list, self.task, category, dim)\n\n # Get the summation score for each sample\n start_idx = 0\n score = []\n for cur_n_sent in n_sents:\n score.append(sum(sent_score[start_idx:start_idx + cur_n_sent]))\n start_idx += cur_n_sent\n\n # Calculate turn-level score for other dimensions\n elif dim in ['naturalness', 'coherence', 'groundedness', 'understandability']:\n src_list, output_list, context_list = [], [], []\n for i in range(n_data):\n src_list.append(data[i]['source'])\n output_list.append(data[i]['system_output'])\n context_list.append(data[i]['context'])\n input_list = add_question(dimension=dim,\n output=output_list,\n src=src_list,\n context=context_list,\n task=self.task)\n score = self.scorer.score(input_list, self.task, category, dim)\n\n # Please customize other dimensions here for summarization\n else:\n raise NotImplementedError('The input format for this dimension is still undefined. \\\n Please customize it first.')\n\n for i in range(n_data):\n eval_scores[i][dim] = score[i]\n\n # Customize your overall score here.\n if overall == True:\n for i in range(n_data):\n eval_scores[i]['overall'] = np.mean(list(eval_scores[i].values()))\n\n return eval_scores",
"def parse_evaluation_args(parser):\n parser.add_argument('--model-checkpoint', type=str,\n help='The model to evaluate.')\n parser.add_argument('--data-dir', type=str,\n default='data/pred-vrd/test/',\n help='Location of the data to evluate with.')",
"def run(self):\n data_provider = DataProvider(self.config)\n hex_attr_df = data_provider.read_hex_bin_attributes()\n hex_distance_df = data_provider.read_hex_bin_distances()\n city_states = data_provider.read_city_states(self.city_states_filename)\n neighborhood = data_provider.read_neighborhood_data()\n popular_bins = data_provider.read_popular_hex_bins()\n num_episodes = self.config['RL_parameters']['num_episodes']\n ind_episodes = self.config['RL_parameters']['ind_episodes']\n exp_decay_multiplier = self.config['RL_parameters']['exp_decay_multiplier']\n\n q_ind = None\n r_table = None\n xi_matrix = None\n\n best_episode = None\n best_model = {}\n\n progress_bar = tqdm(xrange(num_episodes))\n for episode_id in progress_bar:\n progress_bar.set_description(\"Episode: {}\".format(episode_id))\n current_best = -1000000\n\n # Create episode\n ind_exploration_factor = np.e ** (-1 * episode_id * exp_decay_multiplier / ind_episodes)\n\n episode = Episode(self.config,\n episode_id,\n ind_exploration_factor,\n hex_attr_df,\n hex_distance_df,\n city_states,\n neighborhood,\n popular_bins,\n q_ind,\n r_table,\n xi_matrix)\n\n # Run episode\n tables = episode.run()\n q_ind = tables['q_ind']\n r_table = tables['r_table']\n xi_matrix = tables['xi_matrix']\n episode_tracker = tables['episode_tracker']\n\n # Uncomment for logging if running a job, comment during experiments\n # otherwise it leads to insanely huge logging output which is useless\n\n # self.logger.info(\"\"\"\n # Expt: {} Episode: {} Earnings: {}\n # Pax rides: {} Relocation rides: {} Unmet demand: {}\n # \"\"\".format(self.expt_name, episode_id,\n # episode_tracker.gross_earnings,\n # episode_tracker.successful_waits,\n # episode_tracker.relocation_rides,\n # episode_tracker.unmet_demand))\n # self.logger.info(\"----------------------------------\")\n\n self.training_tracker.update_RL_tracker(\n episode_id, episode_tracker.gross_earnings,\n episode_tracker.successful_waits, episode_tracker.unsuccessful_waits,\n episode_tracker.unmet_demand, episode_tracker.relocation_rides,\n episode_tracker.DET, episode_tracker.DPRT, episode_tracker.DWT,\n episode_tracker.DRT, episode_tracker.DCT)\n\n # Keep track of the best episode\n if self.objective == 'revenue':\n if episode_tracker.gross_earnings >= current_best:\n best_episode = episode_tracker\n current_best = best_episode.gross_earnings\n else: # self.objective == 'pickups':\n if episode_tracker.successful_waits >= current_best:\n best_episode = episode_tracker\n current_best = episode_tracker.successful_waits\n\n # Keep track of the best model\n best_model['ind_exploration_factor'] = ind_exploration_factor\n best_model['config'] = self.config\n best_model['q_ind'] = q_ind\n best_model['r_table'] = r_table\n best_model['xi_matrix'] = xi_matrix\n best_model['training_tracker'] = self.training_tracker\n\n # After finishing training\n self.logger.info(\"Expt: {} Earnings: {} Met Demand: {} Unmet Demand: {}\".format(self.expt_name,\n best_episode.gross_earnings,\n best_episode.successful_waits,\n best_episode.unmet_demand))\n return best_episode, best_model, self.training_tracker",
"def load_eval_dataset(self):\n dict_path = get_eval_data(self.eval_path, self.src_lang, self.tgt_lang)\n\n pairs = []\n not_found_all = 0\n not_found_L1 = 0\n not_found_L2 = 0\n\n # Open the file and check if src and tgt word exists in the vocab\n with open(dict_path, \"r\") as f:\n for _, line in enumerate(f):\n word1, word2 = line.rstrip().split()\n if word1 in self.src_dico and word2 in self.tgt_dico:\n pairs.append((self.src_dico.index(word1), self.tgt_dico.index(word2)))\n else:\n not_found_all += 1\n not_found_L1 += int(word1 not in self.src_dico)\n not_found_L2 += int(word2 not in self.tgt_dico)\n print(\n \"Found %i pairs of words in the dictionary (%i unique). \"\n \" %i other pairs contained at least one unknown word \"\n \" (%i in src_lang, %i in tgt_lang)\"\n % (\n len(pairs),\n len(set([x for x, _ in pairs])),\n not_found_all,\n not_found_L1,\n not_found_L2,\n )\n )\n src_ind = [pairs[x][0] for x in range(len(pairs))]\n tgt_ind = [pairs[x][1] for x in range(len(pairs))]\n self.src_ind = np.asarray(src_ind)\n self.tgt_ind = np.asarray(tgt_ind)",
"def load_processed_data(animal, name, arguments):\r\n\tpath = os.path.join(paths.path2Output, animal, str(arguments['Function']), \r\n\t\t\t\t\t\tstr(arguments['Init']), str(arguments['Rank']), name)\r\n\t\r\n\tmeta_df = pd.read_csv(os.path.join(path,'meta_df.csv'))\r\n\troi_tensor = np.load(os.path.join(path,'roi_tensor.npy'))\r\n\tacti = np.load(os.path.join(path,'acti.npy'))\r\n\tnorm_acti = np.load(os.path.join(path,'norm_acti.npy'))\r\n\tsmoothed_acti = np.load(os.path.join(path,'smoothed_acti.npy'))\r\n\r\n\r\n\treturn meta_df, roi_tensor, acti, norm_acti, smoothed_acti",
"def read_data(\n self, path: str = \"src/data/data_aspects_tokens.csv\"\n ) -> Tuple[list, list]:\n data = pd.read_csv(path)\n data = self.summarize_review(data)\n self.dataset = data\n x = data[\"review_polarity\"].to_list()\n y = data[\"true_label\"].astype(int)\n\n return x, y",
"def main():\n\trelations = [json.loads(x) for x in open('tutorial/pdtb_trial_data.json')]\n\toutput_relations = [convert_to_output(x) for x in relations]\n\toutput_relations[1]['Connective']['TokenList'] = [0]\n\toutput_relations[3]['Arg1']['TokenList'].pop(4)\n\toutput_relations[4]['Arg2']['TokenList'].pop(4)\n\toutput_relations[5]['Arg2']['TokenList'].pop(4)\n\toutput_relations[6]['Sense'] = [u'Contingency.Condition'] # This will hurt sense recall\n\toutput_relations.pop(0) # This will hurt all precision\n\tscorer.evaluate(relations, output_relations)\n\treturn output_relations",
"def read_experiment_metrics(TRAIN_CONFIGS):\n metric_dir = TRAIN_CONFIGS.get(\"metrics_dir\")\n mpre = _metric_file_prefixes(TRAIN_CONFIGS)\n\n mtrain = pd.concat([read_metric_frame(f, train=True, metric_dir=metric_dir) for f in mpre],axis=1)\n mval = pd.concat([read_metric_frame(f, train=False, metric_dir=metric_dir) for f in mpre],axis=1)\n\n mtrain = mtrain.sort_index(axis=1,level=0)\n mval = mval.sort_index(axis=1,level=0)\n\n return mtrain, mval",
"def evaluate(self, data, category):\n n_data = len(data)\n eval_scores = [{} for _ in range(n_data)]\n\n # Calculate average sentence-level scores for factual consistency\n src_list, output_list = [], []\n n_sents = [] # the number of sentences in the claim\n for i in range(n_data):\n source = data[i]['source']\n system_outputs = sent_tokenize(data[i]['system_output'])\n n_sents.append(len(system_outputs))\n for j in range(len(system_outputs)):\n src_list.append(source)\n output_list.append(system_outputs[j])\n input_list = add_question(dimension=self.dim, output=output_list, src=src_list, task=self.task)\n sent_score = self.scorer.score(input_list, self.task, category, self.dim)\n\n # Get average score for each sample\n start_idx = 0\n score = []\n for cur_n_sent in n_sents:\n score.append(sum(sent_score[start_idx:start_idx + cur_n_sent]) / cur_n_sent)\n start_idx += cur_n_sent\n\n for i in range(n_data):\n eval_scores[i][self.dim] = score[i]\n\n return eval_scores",
"def read_data(filename_queue):\n\n reader = tf.RecordReader()\n _, serialized_sample = reader.read(filename_queue)\n context_features, sequences = tf.parse_single_sequence_example(\n serialized_sample,\n sequence_features={\n 'inp_seq': tf.FixedLenSequenceFeature((1,), tf.int64,\n allow_missing=False),\n 'out_seq': tf.FixedLenSequenceFeature((1,), tf.int64,\n allow_missing=False),\n }\n )\n\n # since the SequenceExample is stored in at least a 2D array, the first\n # dimension being the length of the sequence and the second being the feature\n # size for each item in the sequence, we need to flatten it. This is because\n # in our case, the items are merely token ids.\n inp_seq = tf.reshape(sequences['inp_seq'], [-1])\n out_seq = tf.reshape(sequences['out_seq'], [-1])\n\n return inp_seq, out_seq",
"def testData(self, ):\n count = 0\n while count < len(self.RAD_sequences_test):\n RAD_filename = self.RAD_sequences_test[count] \n RAD_complex = loader.readRAD(RAD_filename)\n if RAD_complex is None:\n raise ValueError(\"RAD file not found, please double check the path\")\n ### NOTE: Gloabl Normalization ###\n RAD_data = helper.complexTo2Channels(RAD_complex)\n RAD_data = (RAD_data - self.config_data[\"global_mean_log\"]) / \\\n self.config_data[\"global_variance_log\"]\n ### load ground truth instances ###\n gt_filename = loader.gtfileFromRADfile(RAD_filename, \\\n self.config_data[\"test_set_dir\"])\n gt_instances = loader.readRadarInstances(gt_filename)\n if gt_instances is None:\n raise ValueError(\"gt file not found, please double check the path\")\n\n ### NOTE: decode ground truth boxes to YOLO format ###\n gt_labels, has_label, raw_boxes = self.encodeToLabels(gt_instances)\n\n if has_label:\n yield (RAD_data, gt_labels, raw_boxes)\n count += 1",
"def test_run_experiment_lr_eval_with_object(self):\n source = \"lr-eval-object\"\n experiment_id = \"lr_eval_object\"\n\n configdir = join(rsmtool_test_dir, \"data\", \"experiments\", source)\n\n config_dict = {\n \"predictions_file\": \"../../files/predictions_scaled_with_subgroups.csv\",\n \"system_score_column\": \"score\",\n \"description\": \"An evaluation of LinearRegression predictions.\",\n \"human_score_column\": \"h1\",\n \"id_column\": \"id\",\n \"experiment_id\": \"lr_eval_object\",\n \"subgroups\": \"QUESTION\",\n \"scale_with\": \"asis\",\n \"trim_min\": 1,\n \"trim_max\": 6,\n }\n\n config_obj = Configuration(config_dict, context=\"rsmeval\", configdir=configdir)\n\n check_run_evaluation(source, experiment_id, config_obj_or_dict=config_obj)",
"def test_make_amr(self):\n basic_test_runner(self, 'amrs', nrows=0)",
"def load_expressions():\n with open('../data/BotCycle/expressions.json') as expressions_file:\n return json.load(expressions_file)",
"def load_data(self, batch_idx, args, mode=\"train\"):\n dat = self.dat\n if mode == \"train\":\n pfx = \"train\"\n elif mode == \"test\":\n pfx = \"test\"\n else:\n pfx = \"valid\"\n #pfx = \"train\" if train else \"valid\"\n loc = dat[\"%s_location\" % pfx] # nexamples x 3\n bsz = min(args.bsz, loc.size(0)-batch_idx)\n max_ctx_len = min(args.maxseqlen, loc[batch_idx:batch_idx+bsz, 1].max())\n self.word_ctx.resize_(max_ctx_len, bsz).zero_()\n self.answers.resize_(bsz).zero_()\n self.linenos.resize_(bsz).zero_()\n\n if args.std_feats or self.mt_loss != \"\":\n self.feats.resize_(max_ctx_len, bsz, 3).zero_()\n self.extr.resize_(max_ctx_len, bsz, self.extra_size).zero_()\n if args.speaker_feats:\n self.spee_feats.resize_(max_ctx_len, bsz, 2).zero_()\n\n if args.use_choices or (args.use_test_choices and mode != \"train\"):\n self.choicemask.resize_(bsz, max_ctx_len).zero_()\n\n if self.use_qidx:\n self.query_pos.resize_(bsz).fill_(-1) # assuming these always go together\n\n for b in xrange(bsz):\n ex_idx = batch_idx + b\n offset, ctx_len, self.linenos[b] = loc[ex_idx]\n capped_len = min(args.maxseqlen, ctx_len)\n answer_idx = offset + ctx_len\n self.answers[b] = dat[\"%s_data\" % pfx][answer_idx]\n\n self.word_ctx[-capped_len:, b].copy_(\n dat[\"%s_data\" % pfx][answer_idx-capped_len:answer_idx])\n if args.std_feats or self.mt_loss != \"\":\n self.feats[-capped_len:, b, 0].copy_(\n dat[\"%s_post\" % pfx][answer_idx-capped_len:answer_idx])\n self.feats[-capped_len:, b, 1].copy_(\n dat[\"%s_ner\" % pfx][answer_idx-capped_len:answer_idx])\n self.feats[-capped_len:, b, 2].copy_(\n dat[\"%s_sentence\" % pfx][answer_idx-capped_len:answer_idx])\n self.extr[-capped_len:, b, :].copy_(\n dat[\"%s_extr\" % pfx][answer_idx-capped_len:answer_idx])\n if args.speaker_feats:\n self.spee_feats[-capped_len:, b, 0].copy_(\n dat[\"%s_speech\" % pfx][answer_idx-capped_len:answer_idx])\n self.spee_feats[-capped_len:, b, 1].copy_(\n dat[\"%s_sid\" % pfx][answer_idx-capped_len:answer_idx])\n\n if args.use_choices or (args.use_test_choices and mode != \"train\"):\n bchoices = set(dat[\"%s_choices\" % pfx][ex_idx])\n [self.choicemask[b].__setitem__(jj, 1) for jj in xrange(max_ctx_len)\n if self.word_ctx[jj, b] in bchoices]\n\n if self.use_qidx:\n qpos = torch.nonzero(self.word_ctx[:, b] == self.query_idx)[0][0]\n self.query_pos[b] = qpos*bsz + b\n\n # if args.use_choices:\n # # get bsz x 2 tensor of idxs (need to transpose below to be right)\n # poss = torch.nonzero(self.word_ctx.t() == self.query_idx)\n # self.query_pos.copy_(poss[:, 1]) # 2nd col has nz col in transpose\n\n batch = {\"words\": self.word_ctx, \"answers\": self.answers}\n if args.std_feats or self.mt_loss != \"\":\n batch[\"feats\"] = self.feats\n batch[\"extr\"] = self.extr\n if args.speaker_feats:\n batch[\"spee_feats\"] = self.spee_feats\n if args.use_choices or (args.use_test_choices and mode != \"train\"):\n batch[\"choicemask\"] = self.choicemask\n if self.use_qidx:\n batch[\"qpos\"] = self.query_pos\n\n if self.mt_loss == \"idx-loss\":\n if batch_idx not in self.cache:\n targs = make_mt1_targ_idxs(batch, args.max_entities,\n args.max_mentions, self.per_idx)\n self.cache[batch_idx] = targs\n batch[\"mt1_targs\"] = self.cache[batch_idx]\n elif self.mt_loss == \"ant-loss\":\n if batch_idx not in self.cache:\n targs = make_mt2_targs(batch, args.max_entities,\n args.max_mentions, self.per_idx)\n self.cache[batch_idx] = targs\n batch[\"mt2_targs\"] = self.cache[batch_idx]\n\n return batch",
"def main(args):\n try:\n rec_path = project_path + \"/\" + args.rec\n test_data_path = project_path + \"/\" + args.test\n output_data_path = project_path + \"/\" + args.output\n\n rec = read_csv(rec_path)\n test = read_csv(test_data_path)\n\n accuracy = accuracy_calculator(rec, test)\n # Write to output file\n save_csv(accuracy, output_data_path)\n except Exception as e:\n logger.error(\"Unexpected error occurred when evaluation: \" + str(e))"
] | [
"0.5838571",
"0.5668012",
"0.55053437",
"0.54466045",
"0.5328532",
"0.532784",
"0.52090394",
"0.517614",
"0.5124689",
"0.5050656",
"0.5040259",
"0.50264454",
"0.4981156",
"0.49793994",
"0.49773458",
"0.4972295",
"0.4961568",
"0.49559405",
"0.49510598",
"0.49364892",
"0.49266294",
"0.49246383",
"0.4899822",
"0.4875574",
"0.48635274",
"0.4847717",
"0.48471606",
"0.48452654",
"0.48436108",
"0.4836397"
] | 0.6414555 | 0 |
Plot running per round cumulative reward for results on evaluation dataset for EpsilonGreedy, Upper Confidence Bound (UCB), and contextual LinUCB multiarmed bandits Arguments ========= | def plot(t):
assert isinstance(t, int), "'t' argument should be an integer."
assert t > 0, "'t' argument should be a positive integer."
    # Initialize arrays with zeros to store mean cumulative rewards up to t
# rounds for each of the three implemented bandit algorithms
EpsGreedy_rewards = np.zeros(t)
UCB_rewards = np.zeros(t)
LinUCB_rewards = np.zeros(t)
    # For each round, store the mean cumulative rewards up to that round
for i in range(1,t):
EpsGreedy_rewards[i] = np.sum(results_EpsGreedy[0:i]) / t
UCB_rewards[i] = np.sum(results_UCB[0:i]) / t
LinUCB_rewards[i] = np.sum(results_LinUCB[0:i]) / t
# Plot running per round cumulative reward
plt.plot(range(0,t), EpsGreedy_rewards, color='b', label='e-Greedy')
plt.plot(range(0,t), UCB_rewards, color='g', label='UCB')
plt.plot(range(0,t), LinUCB_rewards, color='orange', label='LinUCB')
plt.xlabel('Round')
plt.ylabel('Mean Cumulative Reward')
plt.title('Running Per Round Cumulative Reward')
plt.legend()
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_cumreward_normalized(reward_cache_qlearning, reward_cache_SARSA):\n cum_rewards_q = []\n rewards_mean = np.array(reward_cache_qlearning).mean()\n rewards_std = np.array(reward_cache_qlearning).std()\n count = 0 # used to determine the batches\n cur_reward = 0 # accumulate reward for the batch\n for cache in reward_cache_qlearning:\n count = count + 1\n cur_reward += cache\n if(count == 10):\n # normalize the sample\n normalized_reward = (cur_reward - rewards_mean)/rewards_std\n cum_rewards_q.append(normalized_reward)\n cur_reward = 0\n count = 0\n \n cum_rewards_SARSA = []\n rewards_mean = np.array(reward_cache_SARSA).mean()\n rewards_std = np.array(reward_cache_SARSA).std()\n count = 0 # used to determine the batches\n cur_reward = 0 # accumulate reward for the batch\n for cache in reward_cache_SARSA:\n count = count + 1\n cur_reward += cache\n if(count == 10):\n # normalize the sample\n normalized_reward = (cur_reward - rewards_mean)/rewards_std\n cum_rewards_SARSA.append(normalized_reward)\n cur_reward = 0\n count = 0 \n # prepare the graph \n plt.plot(cum_rewards_q, label = \"q_learning\")\n plt.plot(cum_rewards_SARSA, label = \"SARSA\")\n plt.ylabel('Cumulative Rewards')\n plt.xlabel('Batches of Episodes (sample size 10) ')\n plt.title(\"Q-Learning/SARSA Convergence of Cumulative Reward\")\n plt.legend(loc='lower right', ncol=2, mode=\"expand\", borderaxespad=0.)\n plt.show()\n plt.savefig('cumulative_reward.png')",
"def plot_intermediate_model_callback(env):\n # Compute y_pred = prediction using the intermediate model, at current boosting iteration\n y_pred = env.model.predict(dmat)\n # \"Accuracy\" = the number of data points whose ranged label (y_lower, y_upper) includes\n # the corresponding predicted label (y_pred)\n acc = np.sum(np.logical_and(y_pred >= y_lower, y_pred <= y_upper)/len(X) * 100)\n accuracy_history.append(acc)\n \n # Plot ranged labels as well as predictions by the model\n plt.subplot(5, 3, env.iteration + 1)\n plot_censored_labels(X, y_lower, y_upper)\n y_pred_grid_pts = env.model.predict(xgb.DMatrix(grid_pts))\n plt.plot(grid_pts, y_pred_grid_pts, 'r-', label='XGBoost AFT model', linewidth=4)\n plt.title('Iteration {}'.format(env.iteration), x=0.5, y=0.8)\n plt.xlim((0.8, 5.2))\n plt.ylim((1 if np.min(y_pred) < 6 else 6, 200))\n plt.yscale('log')",
"def main():\n df = prof_conv_bwd_filter()\n df.to_csv(\"prof.cudnnConvBwdFilter.csv\")\n\n \"\"\"visualization, Roofline model\"\"\"\n df = pd.read_csv('prof.cudnnConvBwdFilter.csv', header=0, index_col=0)\n fig = plt.figure(figsize=(10,5))\n ax = fig.add_subplot(1, 1, 1)\n plot_rooline (ax, MACHINE_SPEC, PEAK_PERF, BAND_WIDTH)\n plot_result (ax, df)\n # fig.subplots_adjust(right=0.8)\n plt.subplots_adjust(left=0.1, right=0.6)\n plt.savefig('roofline.png')\n return",
"def visualize_data(dqn_rewards, ddqn_rewards):\n \n fig, ax = plt.subplots()\n x_values = list(range(1, dqn_rewards.size + 1))\n ax.plot(x_values, dqn_rewards, label='dqn rewards')\n ax.plot(x_values, ddqn_rewards, label='ddqn rewards')\n plt.xlabel('episodes')\n plt.title('Cumulative Reward per Game')\n plt.legend()\n plt.show()",
"def visualise_dataset_balancer_results(results, range=(-0.5, 0.5),\n colors=(\"#64B3DE\", \"#1f78b4\", \"#B9B914\", \"#FBAC44\", \"#bc1659\", \"#33a02c\", \"grey\", \"#b15928\", \"#6a3d9a\", \"#e31a1c\", \"#6ABF20\", \"#ff7f00\", \"#6a3d9a\"),\n exclude=(\"SVM (linear)\", \"Logistic regression\", \"Random forest\")):\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n file_name = \"raw_dump_{0}.txt\".format(current_time)\n with open(os.path.dirname(os.path.realpath(__file__)) + \"/../results/\" + file_name, \"wb\") as output_file:\n output_file.write(str(results))\n sns.set(style='ticks')\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(1, 1, 1)\n markers = [\"s\", \"d\", \"o\", \"^\", \"*\"]\n size = [150, 200, 200, 200, 250]\n hatches = [None, \"////\", \"..\"]\n\n # Move left y-axis and bottom x-axis to centre, passing through (0,0)\n ax.spines['left'].set_position('center')\n ax.spines['bottom'].set_position((\"axes\", 0.5))\n\n # Eliminate upper and right axes\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n\n # Show ticks in the left and lower axes only\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.set_axis_on()\n ax.spines['left'].set_color('black')\n ax.spines['bottom'].set_color('black')\n plt.xlabel(\"Change in TPR\")\n plt.ylabel(\"Change in TNR\")\n\n ax.xaxis.set_label_coords(0.1, 0.53)\n ax.yaxis.set_label_coords(0.53, 0.9)\n\n plt.ylim(range[0], range[1])\n plt.xlim(range[0], range[1])\n balancer_labels = ([], [])\n classifier_labels = ([], [])\n data_set_index = 0\n for (data_set, dataset_result) in results:\n\n none_true_pos_per_classifier = {}\n none_true_neg_per_classifier = {}\n\n for (classifier_description, result_arr) in dataset_result:\n for (balancer_description, results) in result_arr:\n if balancer_description == \"None\":\n none_true_pos_per_classifier[classifier_description] = results[3]\n none_true_neg_per_classifier[classifier_description] = results[4]\n break\n\n i = 0\n for (classifier_description, result_arr) in dataset_result:\n if classifier_description in exclude:\n continue\n balancer_index = 0\n for (balancer_description, results) in result_arr:\n if balancer_description != \"None\":\n if data_set_index == 0 and balancer_index == 0:\n classifier_labels[0].append(mpatches.Patch(color=colors[i], label=classifier_description, alpha=0.8))\n classifier_labels[1].append(classifier_description)\n ax.scatter(results[3] - none_true_pos_per_classifier[classifier_description], results[4] - none_true_neg_per_classifier[classifier_description],\n marker=markers[balancer_index % len(markers)], hatch=hatches[balancer_index % len(hatches)], s=size[balancer_index % len(markers)], alpha=0.8, color=colors[i],\n edgecolor=\"black\" if colors[i] != \"black\" else \"grey\", zorder=balancer_index % len(markers), lw=0.8)\n # Work around to get legend entries correct\n pt = ax.scatter(-99999999999, -9999999999, marker=markers[balancer_index % len(markers)], hatch=hatches[balancer_index % len(hatches)], s=200, alpha=0.8, color=\"white\",\n edgecolor=\"black\", zorder=data_set_index, lw=0.8)\n if i == 0:\n balancer_labels[0].append(pt)\n balancer_labels[1].append(balancer_description)\n balancer_index += 1\n i += 1\n data_set_index += 1\n legend = plt.legend(balancer_labels[0] + classifier_labels[0], balancer_labels[1] + classifier_labels[1], loc='lower center', bbox_to_anchor=(0.5, -0.2), fancybox=False, frameon=False, ncol=7)\n legend.get_frame().set_facecolor('#ffffff')\n\n 
sns.despine()\n current_time = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n plt.savefig(os.path.dirname(os.path.realpath(__file__)) + \"/../results/classifier_dataset_plt_{0}.png\".format(current_time), bbox_extra_artists=((legend,)), bbox_inches='tight')\n plt.close(fig)",
"def main():\n\n # choose number of data-points and sample a pair of vectors: the input\n # values and the corresponding target values\n N = 500\n inputs, targets = sample_data(N, arbitrary_function_2, seed=1)\n\n # specify the centres and scale of some rbf basis functions\n default_centres = np.linspace(0,1,21)\n default_scale = 0.03\n default_reg_param = 0.08\n\n # get the cross-validation folds\n num_folds = 4\n folds = create_cv_folds(N, num_folds)\n\n # evaluate then plot the performance of different reg params\n evaluate_reg_param(inputs, targets, folds, default_centres, default_scale)\n # evaluate then plot the performance of different scales\n evaluate_scale(inputs, targets, folds, default_centres, default_reg_param)\n # evaluate then plot the performance of different numbers of basis\n # function centres.\n evaluate_num_centres(\n inputs, targets, folds, default_scale, default_reg_param)\n\n plt.show()",
"def plot_results(sgd_train_acc, sgd_train_std, sgd_heldout_acc, sgd_heldout_std, sgd_test_acc,\n dt_train_acc, dt_train_std, dt_heldout_acc, dt_heldout_std, dt_test_acc,\n dt4_train_acc, dt4_train_std, dt4_heldout_acc, dt4_heldout_std, dt4_test_acc,\n stumps_train_acc, stumps_train_std, stumps_heldout_acc, stumps_heldout_std, stumps_test_acc):\n train_x_pos = [0, 4, 8, 12]\n cv_x_pos = [1, 5, 9, 13]\n test_x_pos = [2, 6, 10, 14]\n ticks = cv_x_pos\n\n labels = ['sgd', 'dt', 'dt4', 'stumps (4 x 50)']\n\n train_accs = [sgd_train_acc, dt_train_acc, dt4_train_acc, stumps_train_acc]\n train_errors = [sgd_train_std, dt_train_std, dt4_train_std, stumps_train_std]\n\n cv_accs = [sgd_heldout_acc, dt_heldout_acc, dt4_heldout_acc, stumps_heldout_acc]\n cv_errors = [sgd_heldout_std, dt_heldout_std, dt4_heldout_std, stumps_heldout_std]\n\n test_accs = [sgd_test_acc, dt_test_acc, dt4_test_acc, stumps_test_acc]\n\n fig, ax = plt.subplots()\n ax.bar(train_x_pos, train_accs, yerr=train_errors, align='center', alpha=0.5, ecolor='black', capsize=10, label='train')\n ax.bar(cv_x_pos, cv_accs, yerr=cv_errors, align='center', alpha=0.5, ecolor='black', capsize=10, label='held-out')\n ax.bar(test_x_pos, test_accs, align='center', alpha=0.5, capsize=10, label='test')\n ax.set_ylabel('Accuracy')\n ax.set_xticks(ticks)\n ax.set_xticklabels(labels)\n ax.set_title('Models')\n ax.yaxis.grid(True)\n ax.legend()\n plt.tight_layout()",
"def plot_envs_overall():\n AUC = []\n R_iter = []\n for i in range(len(envs)):\n env = envs[i]\n print(env)\n bestmodels, R_cum, aucurve = plot_rwds(envs_labels, cdict=color_dict,\n fdir=fdir + envs[i] + '-v1/',\n model_paths=model_paths, BEST=BEST, MAX_LEN=MAX_LEN(envs[i]),\n STEP=STEP, MAX_TRIALS=MAX_TRIALS,\n title=envs[i] + ' $R_{iter}$ ', step=max_lims[envs[i]][1],\n shape_dict=shape_dict,\n kfold=max_lims[envs[i]][1] * MAX_TRIALS)\n print('done print')\n R_iter.append(bestmodels)\n R = np.asarray(aucurve, dtype=float)\n AUC.extend(zip(np.mean(R, axis=1), np.std(R, axis=1) / np.sqrt(R.shape[1])))\n print('done. Computing AUC')\n for j in [1, 2]:\n for m in range(len(envs_labels) - 1):\n print_ttest(envs_labels, R, m, j)\n print('\\n')\n\n # Swimmer\n # t-test ( FM RPSP-VRPG )= [ -0.676586869332 , 0.507653821772 ]\n # t-test ( GRU RPSP-VRPG )= [ -11.4782215848 , 9.9484555701e-07 ]\n # t-test ( RPSP-Alt RPSP-VRPG )= [ -3.10454361915 , 0.00933872173406 ]\n # t-test ( FM RPSP-Alt )= [ 1.73508240067 , 0.111134237192 ]\n # t-test ( GRU RPSP-Alt )= [ -20.2985459977 , 2.4045809148e-09 ]\n # Hopper\n # t-test ( FM RPSP-VRPG )= [ -1.97496384693 , 0.0638921448438 ]\n # t-test ( GRU RPSP-VRPG )= [ -2.98367426585 , 0.0121047687853 ]\n # t-test ( RPSP-Alt RPSP-VRPG )= [ 2.25405603636 , 0.0382107798284 ]\n # t-test ( FM RPSP-Alt )= [ -3.92498523554 , 0.00123896806466 ]\n # t-test ( GRU RPSP-Alt )= [ -4.909161505 , 0.000577310309778 ]\n # Walker2d\n # t-test ( FM RPSP-VRPG )= [ -0.0793340586349 , 0.937667000948 ]\n # t-test ( GRU RPSP-VRPG )= [ -2.9657440937 , 0.00870691843556 ]\n # t-test ( RPSP-Alt RPSP-VRPG )= [ 2.23688724131 , 0.0412540309747 ]\n # t-test ( FM RPSP-Alt )= [ -2.05494189388 , 0.0601025184967 ]\n # t-test ( GRU RPSP-Alt )= [ -6.51606587482 , 5.3677973935e-06 ]\n # CartPole\n # t-test ( FM RPSP-VRPG )= [ -4.49013758712 , 0.00103201257316 ]\n # t-test ( GRU RPSP-VRPG )= [ 1.23597901022 , 0.232353991956 ]\n # t-test ( RPSP-Alt RPSP-VRPG )= [ -0.358340067592 , 0.726097854222 ]\n # t-test ( FM RPSP-Alt )= [ -8.06880999537 , 6.44596048562e-07 ]\n # t-test ( GRU RPSP-Alt )= [ 1.95090137725 , 0.0740240842398 ]\n # print (np.round(np.asarray(AUC)[[0, 1, 2, 3, 5, 9], :] / 1000.0, decimals=1))\n # print (reg_labels, filter_labels)\n return R_iter, AUC",
"def visualize_data(total_rewards):\n\n x_values = arange(0, len(total_rewards), 1)\n y_values = total_rewards\n plot(x_values, y_values)\n xlabel('episodes')\n ylabel('cumulative rewards')\n title('Reward by Episode')\n grid(True)\n show()",
"def plot_qlearning_smooth(reward_cache):\n mean_rev = (np.array(reward_cache[0:11]).sum())/10\n # initialize with cache mean\n cum_rewards = [mean_rev] * 10\n idx = 0\n for cache in reward_cache:\n cum_rewards[idx] = cache\n idx += 1\n smooth_reward = (np.array(cum_rewards).mean())\n cum_rewards.append(smooth_reward)\n if(idx == 10):\n idx = 0\n \n plt.plot(cum_rewards)\n plt.ylabel('Cumulative Rewards')\n plt.xlabel('Batches of Episodes (sample size 10) ')\n plt.title(\"Q-Learning Convergence of Cumulative Reward\")\n plt.legend(loc='lower left', ncol=2, mode=\"expand\", borderaxespad=0.)\n plt.show()",
"def main_paper(mode = 'q-diff'):\n mode_name = 'Shared Autonomy'\n if mode== 'override':\n mode_name = 'Overrider'\n\n #trains rl again and fills a q-table\n _, rl_agent = rl_alone(1000, fill_table = True)\n\n #prints the q-table\n for y in range(7):\n for x in range(7):\n state_ind = y*(7)+x\n q = rl_agent.get_q_values(state_ind)\n print('Q-table')\n print(state_ind, [x,y], np.around(q, decimals=5))\n\n in_li = []\n r_li = []\n rp_li=[]\n\n print(\"Begin looping through constraint values\")\n for i in np.arange(0.0, 1.025, 0.025):\n #cooperates and gets results\n rewards, actionsct, rewardsP = grid_human_co(coagent=rl_agent, threshold=i, verbose=False, mode=mode)\n print(\"Threshold: \", i)\n avgInter = np.mean(actionsct, axis=0)\n avgR = np.mean(rewards, axis=0)\n avgRP = np.mean(rewardsP, axis=0)\n in_li.append(avgInter)\n r_li.append(avgR)\n rp_li.append(avgRP)\n print('Avg num of interventions: ', avgInter)\n print('Avg reward: ', avgR)\n print('Avg reward penalized: ', avgRP)\n print()\n\n plt.figure()\n plt.plot(np.arange(0.0, 1.025, 0.025), in_li)\n plt.title(mode_name + ' Interventions')\n plt.xlabel(r\"$\\alpha$\")\n plt.ylabel('Average intervention count')\n plt.show()\n\n plt.figure()\n plt.title(mode_name + ' Returns', fontsize=14)\n plt.xlabel(r\"$\\alpha$\", fontsize=13)\n plt.ylabel('Average return', fontsize=13)\n plt.plot(np.arange(0.0, 1.025, 0.025), r_li,label='Average environment return')\n plt.plot(np.arange(0.0, 1.025, 0.025), rp_li,label=\"Average intervention return\")\n plt.legend(fontsize=9)\n plt.show()",
"def run(self):\n self.simulate_test_data()\n self.pipeline_test_data()\n self.plot_jump_flags_image()\n self.plot_groupdq_flags(pixel=[884, 550])\n self.plot_ramps_pre_post_correction(pixel=[884, 550])",
"def main(\n error_band_dir,\n output_dir,\n indep_var,\n ivar_start,\n ivar_stop,\n ivar_step,\n param_list,\n observable_list,\n Lambda_b,\n lambda_mult,\n p_decimal_list,\n orders,\n ignore_orders,\n interaction,\n X_ref_hash,\n prior_set,\n h,\n cbar_lower,\n cbar_upper,\n sigma,\n convention\n ):\n\n color_dict = {\n \"LOp\": plt.get_cmap(\"Greys\"),\n \"LO\": plt.get_cmap(\"Purples\"),\n \"NLO\": plt.get_cmap(\"Oranges\"),\n \"N2LO\": plt.get_cmap(\"Greens\"),\n \"N3LO\": plt.get_cmap(\"Blues\"),\n \"N4LO\": plt.get_cmap(\"Reds\")\n }\n\n fill_transparency = 1\n x = np.arange(ivar_start, ivar_stop, ivar_step)\n\n fig = plt.figure(figsize=(3.4, 3.4))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_zorder(15)\n ax.set_axisbelow(False)\n\n if indep_var == \"theta\":\n param_var = \"energy\"\n indep_var_label = r\"$\\theta$ (deg)\"\n param_var_label = r\"$E_{\\mathrm{lab}}\"\n param_var_units = r\"$ MeV\"\n xmin = 0\n xmax = 180\n else:\n param_var = \"theta\"\n indep_var_label = r\"$E$ (MeV)\"\n param_var_label = r\"$\\theta\"\n param_var_units = r\"^\\circ$\"\n xmin = 0\n xmax = 350\n\n for observable in observable_list:\n for param in param_list:\n if observable == [\"t\", \"t\", \"t\", \"t\"] or \\\n (observable == [\"0\", \"0\", \"0\", \"0\"] and indep_var == \"energy\"):\n ax.set_yscale(\"log\", nonposy='clip')\n\n ax.set_xlabel(indep_var_label)\n # ax.set_ylabel('')\n\n # if indep_var == \"theta\":\n # major_tick_spacing = 60\n # minor_tick_spacing = 20\n # ax.xaxis.set_major_locator(\n # ticker.MultipleLocator(major_tick_spacing))\n # ax.xaxis.set_minor_locator(\n # ticker.MultipleLocator(minor_tick_spacing))\n\n if indep_var == \"energy\":\n major_ticks = np.arange(0, 351, 100)\n # minor_ticks = np.arange(50, 351, 100)\n x_minor_locator = AutoMinorLocator(n=2)\n elif indep_var == \"theta\":\n major_ticks = np.arange(0, 181, 60)\n # minor_ticks = np.arange(0, 181, 20)\n x_minor_locator = AutoMinorLocator(n=3)\n\n ax.xaxis.set_minor_locator(x_minor_locator)\n ax.set_xticks(major_ticks)\n\n # Create the description box\n # text_str = r\"$C_{\" + observable[0] + observable[1] + \\\n # observable[2] + observable[3] + r\"}$\" + \", \" # + \"\\n\"\n text_str = indices_to_observable_name(observable)\n if observable == ['t', 't', 't', 't']:\n text_str += \" (mb)\"\n elif observable == ['0', '0', '0', '0']:\n text_str += \" (mb/sr)\"\n\n # Probably don't include this extra info. 
Leave for caption.\n #\n # text_str += \", \"\n # if observable != ['t', 't', 't', 't']:\n # text_str += \", \" + param_var_label + r\" = \" + str(param) + param_var_units + \", \" # + \"\\n\"\n # text_str += r\"$\\Lambda_b = \" + str(lambda_mult*Lambda_b) + r\"$ MeV\"\n\n # Don't put in a text box\n #\n # ax.text(.5, .96, text_str,\n # horizontalalignment='center',\n # verticalalignment='top',\n # multialignment='center',\n # transform=ax.transAxes,\n # bbox=dict(facecolor='white', alpha=1, boxstyle='square', pad=.5),\n # zorder=20)\n\n # Don't put in the title\n # plt.title(text_str, fontsize=10)\n\n # Instead use y axis\n ax.set_ylabel(text_str, fontsize=10)\n legend_patches = []\n\n try:\n npwa_name = npwa_filename(observable, param_var, param)\n npwa_file = DataFile().read(os.path.join(\"../npwa_data/\", npwa_name))\n npwa_plot, = ax.plot(npwa_file[0], npwa_file[1],\n color=\"black\", linewidth=1,\n label=\"NPWA\", zorder=10,\n linestyle=\"--\")\n except FileNotFoundError:\n npwa_plot = None\n\n # First get global min/max of all orders\n for i, order in enumerate(orders):\n # obs_name = observable_filename(\n # observable, indep_var, ivar_start, ivar_stop,\n # ivar_step, param_var, param, order)\n # dob_name = dob_filename(p_decimal_list[0], Lambda_b, obs_name)\n dob_name = dob_filename(\n observable, indep_var, ivar_start, ivar_stop,\n ivar_step, param_var, param, order, ignore_orders,\n Lambda_b, lambda_mult, X_ref_hash,\n p_decimal_list[0], prior_set, h, convention, None,\n cbar_lower, cbar_upper, sigma,\n potential_info=None)\n dob_file = DataFile().read(os.path.join(error_band_dir, dob_name))\n if i == 0:\n obs = dob_file[1]\n obs_min = np.min(obs)\n obs_max = np.max(obs)\n else:\n old_obs = obs\n obs = dob_file[1]\n # Probably the worst way to do this.\n obs_min = min(np.min(np.minimum(old_obs, obs)), obs_min)\n obs_max = max(np.max(np.maximum(old_obs, obs)), obs_max)\n\n # Decide the padding above/below the lines\n # This weights values far from 0 more heavily.\n # ymin = obs_min - .25 * abs(obs_min)\n # ymax = obs_max + .25 * abs(obs_max)\n ymin = -1\n ymax = 20\n ax.set_ylim([ymin, ymax])\n # ax.set_xlim([ivar_start, ivar_stop-1])\n ax.set_xlim([xmin, xmax])\n\n # Start layering the plots\n for i, order in enumerate(orders):\n # obs_name = observable_filename(\n # observable, indep_var, ivar_start, ivar_stop,\n # ivar_step, param_var, param, order)\n # dob_name = dob_filename(p_decimal_list[0], Lambda_b, obs_name)\n dob_name = dob_filename(\n observable, indep_var, ivar_start, ivar_stop,\n ivar_step, param_var, param, order, ignore_orders,\n Lambda_b, lambda_mult, X_ref_hash,\n p_decimal_list[0], prior_set, h, convention, None,\n cbar_lower, cbar_upper, sigma,\n potential_info=None)\n dob_file = DataFile().read(os.path.join(error_band_dir, dob_name))\n\n # Plot the lines\n obs = dob_file[1]\n ax.plot(x, obs, color=color_dict[order](.99), zorder=i)\n\n # Plot the error bands\n for band_num, p in enumerate(sorted(p_decimal_list, reverse=True)):\n # dob_name = dob_filename(p, Lambda_b, obs_name)\n dob_name = dob_filename(\n observable, indep_var, ivar_start, ivar_stop,\n ivar_step, param_var, param, order, ignore_orders,\n Lambda_b, lambda_mult, X_ref_hash,\n p, prior_set, h, convention, None,\n cbar_lower, cbar_upper, sigma,\n potential_info=None)\n dob_file = DataFile().read(os.path.join(error_band_dir, dob_name))\n obs_lower = dob_file[2]\n obs_upper = dob_file[3]\n ax.fill_between(\n x, obs_lower, obs_upper,\n facecolor=color_dict[order](\n (band_num + 1) / 
(len(p_decimal_list) + 1)\n ),\n color=color_dict[order](\n (band_num + 1) / (len(p_decimal_list) + 1)\n ),\n alpha=fill_transparency, interpolate=True, zorder=i)\n\n # Use block patches instead of lines\n # Use innermost \"dark\" color of bands for legend\n # legend_patches.append(\n # mp.patches.Patch(\n # color=color_dict[order](len(p_decimal_list) / (len(p_decimal_list) + 1)),\n # label=order,\n # ))\n legend_patches.append(\n mpatches.Rectangle(\n (1, 1), 0.25, 0.25,\n # color=color_dict[order](len(p_decimal_list) / (len(p_decimal_list) + 1)),\n edgecolor=color_dict[order](.9),\n facecolor=color_dict[order](len(p_decimal_list) / (len(p_decimal_list) + 1)),\n label=order,\n linewidth=1\n ))\n\n if npwa_plot is None:\n my_handles = legend_patches\n handler_dict = dict(zip(my_handles, [HandlerSquare() for i in legend_patches]))\n else:\n my_handles = [npwa_plot, *legend_patches]\n squares = [HandlerSquare() for i in legend_patches]\n line = HandlerLine2D(marker_pad=1, numpoints=None)\n handler_dict = dict(zip(my_handles, [line] + squares))\n\n ax.legend(loc=\"best\", handles=my_handles,\n handler_map=handler_dict,\n handletextpad=.8,\n handlelength=.6,\n fontsize=8)\n\n # Squeeze and save it\n plt.tight_layout()\n # plot_name = plot_obs_error_bands_filename(\n # observable, indep_var, ivar_start, ivar_stop,\n # ivar_step, param_var, param, orders[:i+1],\n # Lambda_b, p_decimal_list)\n plot_name = plot_obs_error_bands_filename(\n observable, indep_var, ivar_start, ivar_stop, ivar_step,\n param_var, param, orders[:i+1], ignore_orders, Lambda_b,\n lambda_mult, X_ref_hash, p_decimal_list,\n prior_set, h, convention, None, cbar_lower, cbar_upper,\n sigma, potential_info=None)\n fig.savefig(os.path.join(output_dir, plot_name), bbox_inches=\"tight\")\n\n call([\"epstopdf\", os.path.join(output_dir, plot_name)])\n call([\"rm\", os.path.join(output_dir, plot_name)])\n\n # Clear the axes for the next observable/parameter.\n plt.cla()",
"def visualize_reward_over_steps(self):\n save_dir = os.path.join(self.env.logdir, \"rewards\")\n os.makedirs(save_dir, exist_ok=True)\n if self.env.episode_steps > 0:\n results_plotter.EPISODES_WINDOW=50\n results_plotter.plot_curves([(np.arange(self.env.episode_steps),np.asarray(self.rewards_history[-self.env.episode_steps:]))],'step','Step rewards')\n plt.ylabel(\"reward\")\n plt.gcf().set_size_inches(8, 6)\n plt.savefig(save_dir + \"/reward_over_steps_episode{}.png\".format(self.env.episode_number))\n plt.close()",
"def show_reward(self, reward_):\n reward = reward_.reshape(self.cols, self.rows).T\n self.show_heatmap(reward, \"Pseudo Reward\")",
"def plot_convergence(\n optimizers: list = [\"COBYLA\", \"SLSQP\", \"L-BFGS-B\", \"NELDER-MEAD\"],\n g2N: float = 0.2,\n maxit: int = 10000,\n varform: list = [\"ry\"],\n depth: int = 3,\n nrep: int = 10,\n dataprefix: str = \"data/miniBMN\",\n datasuffix: str = \"h5\",\n figprefix: str = \"figures/miniBMN\",\n ht: float = 0.0,\n up: int = 1000,\n):\n # setup parameters\n params = dict()\n params[\"l\"] = str(g2N).replace(\".\", \"\")\n params[\"d\"] = depth\n params[\"v\"] = \"-\".join(varform)\n params[\"m\"] = maxit\n params[\"n\"] = nrep\n params[\"f\"] = dataprefix\n params[\"s\"] = datasuffix\n assert type(optimizers).__name__ == \"list\"\n # collect data\n result = collect_data(optimizers, params)\n # get best runs\n gs = dict()\n for r in optimizers:\n gs[r] = result.loc[r].groupby(\"rep\").apply(min).energy\n gsdf = pd.DataFrame.from_dict(gs, dtype=float)\n print(gsdf.describe().T[[\"min\", \"max\", \"mean\", \"std\"]])\n # Plot\n # select the best runs for each optimizer\n fig, ax = plt.subplots()\n for o in optimizers:\n result.loc[o, gsdf[o].idxmin()].plot(\n x=\"counts\", y=\"energy\", xlim=[0, up], label=o, ax=ax\n )\n ax.axhline(ht, c=\"k\", ls=\"--\", lw=\"2\", label=\"HT\")\n ax.set_xlabel(\"iterations\")\n ax.set_ylabel(\"VQE energy\")\n ax.legend(loc=\"upper right\")\n filename = f\"{figprefix}_l{params['l']}_convergence_{params['v']}_depth{params['d']}_nr{params['n']}_max{params['m']}_xlim{up}\"\n plt.savefig(f\"{filename}.pdf\")\n plt.savefig(f\"{filename}.png\")\n plt.savefig(f\"{filename}.svg\")\n plt.close()",
"def _plot(self, step, rewards, losses):\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('Total Episode Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('MSE Loss')\n plt.plot(losses)\n plt.show()",
"def test_GBL_tau_inst():\n dz = 0.05\n z = numpy.arange(0., 80. + 1.5*dz, dz)\n\n # Fully ionized H and He\n x_ionH = 1.0\n x_ionHe = 2.0\n\n cosmo = {}\n cosmo['omega_M_0'] = numpy.array([[0.3],[0.6],[1.0]])\n cosmo['omega_lambda_0'] = 1. - cosmo['omega_M_0']\n cosmo['h'] = 0.65\n cosmo['omega_b_0'] = 0.02 / cosmo['h']**2.\n cosmo['Y_He'] = 0.24\n cd.set_omega_k_0(cosmo)\n\n tau_inst = cr.optical_depth_instant(z, x_ionH=x_ionH, x_ionHe=x_ionHe, \n **cosmo)\n tau_int = cr.integrate_optical_depth(x_ionH, x_ionHe, z, **cosmo)\n\n linestyle = ['-', ':', '--']\n \n pylab.figure()\n pylab.subplot(2,1,1)\n pylab.title(\"Compare to GB&L fig. 1 (astro-ph/9812125v3.)\")\n for i in range(len(linestyle)):\n pylab.plot(z, tau_inst[i], ls=linestyle[i], color='b')\n pylab.plot(z, tau_int[i], ls=linestyle[i], color='r')\n\n pylab.xlim(0,80)\n pylab.ylim(0,1)\n pylab.xlabel(r\"$\\mathrm{z_{ion}}$\")\n pylab.ylabel(r\"$\\tau$\")\n \n pylab.subplot(2,1,2)\n for i in range(len(linestyle)):\n pylab.plot(z, \n 1.e4 * (tau_int[i] - tau_inst[i])/tau_inst[i], \n ls=linestyle[i], color='k')\n diff = (tau_int[i] - tau_inst[i]) / tau_inst[i]\n diff[numpy.isnan(diff)] = 0.0\n print(\"max fractional error in num. int. = %.3g\" % \n numpy.max(numpy.abs(diff))\n )\n ntest.assert_array_less(numpy.abs(diff), \n numpy.zeros(diff.shape) + 2.e-4)\n\n pylab.xlim(0,40)\n pylab.xlabel(r\"$\\mathrm{z_{ion}}$\")\n pylab.ylabel(r\"$\\mathrm{10^4 \\times (num.\\tau - ana.\\tau)/ana.\\tau}$\")",
"def show_dcr_results(dg):\n cycle = dg.fileDB['cycle'].values[0]\n df_dsp = pd.read_hdf(f'./temp_{cycle}.h5', 'opt_dcr')\n # print(df_dsp.describe()) \n\n # compare DCR and A/E distributions\n fig, (p0, p1) = plt.subplots(2, 1, figsize=(8, 8))\n \n elo, ehi, epb = 0, 25000, 100\n \n # aoe distribution\n # ylo, yhi, ypb = -1, 2, 0.1\n # ylo, yhi, ypb = -0.1, 0.3, 0.005\n ylo, yhi, ypb = 0.05, 0.08, 0.0005\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p0.hist2d(df_dsp['trapEmax'], df_dsp['aoe'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n # p0.set_xlabel('Energy (uncal)', ha='right', x=1)\n p0.set_ylabel('A/E', ha='right', y=1)\n\n # dcr distribution\n # ylo, yhi, ypb = -20, 20, 1 # dcr_raw\n # ylo, yhi, ypb = -5, 2.5, 0.1 # dcr = dcr_raw / trapEmax\n # ylo, yhi, ypb = -3, 2, 0.1\n ylo, yhi, ypb = 0.9, 1.08, 0.001\n ylo, yhi, ypb = 1.034, 1.0425, 0.00005 # best for 64.4 us pz\n # ylo, yhi, ypb = 1.05, 1.056, 0.00005 # best for 50 us pz\n # ylo, yhi, ypb = 1.016, 1.022, 0.00005 # best for 100 us pz\n nbx = int((ehi-elo)/epb)\n nby = int((yhi-ylo)/ypb)\n h = p1.hist2d(df_dsp['trapEmax'], df_dsp['dcr'], bins=[nbx,nby],\n range=[[elo, ehi], [ylo, yhi]], cmap='jet',\n norm=LogNorm())\n p1.set_xlabel('Energy (uncal)', ha='right', x=1)\n p1.set_ylabel('DCR', ha='right', y=1)\n \n # plt.show()\n plt.savefig(f'./plots/dcr_cyc{cycle}.png', dpi=300)\n plt.cla()",
"def plot_results(self):\n [m, s] = self.run_model()\n barM = m.x[2:8]\n barS = s.x[2:8]\n T1vec = self.T1_extraction(self.subj)\n for i in T1vec:\n T1vec[T1vec == i] = int(i)\n T1vec = T1vec[2:8]\n barWidth = 25\n r2 = [x + barWidth for x in T1vec]\n plt.grid(b=True, linewidth=0.2)\n plt.bar(\n T1vec, barM, color=\"b\", width=barWidth, edgecolor=\"white\", label=\"Motor\"\n )\n plt.bar(r2, barS, color=\"r\", width=barWidth, edgecolor=\"white\", label=\"Sensory\")\n plt.xlabel(\"T1\", fontweight=\"bold\")\n plt.ylabel(\"Partial contribution\", fontweight=\"bold\")\n plt.legend()\n plt.title(\n \"Partial contribution of cortical layers to motor and sensory operations\"\n )\n plt.show()\n return barM, barS, T1vec",
"def reproduce_wadley_results(carbon=4, show=False, sd_range=(1,4), res=2.0):\n\n os.makedirs(runDir + \"/results/figures/wadley_plots/\", exist_ok=True)\n\n if carbon == 4:\n angle = \"eta\"\n xlabel = \"$\\\\eta=C_4'^{i-1}-P^i-C_4'^i-P^{i+1}$\"\n ylabel = \"$\\\\theta=P^i-C_4'^i-P^{i+1}-C_4'^{i+1}$\"\n elif carbon == 1:\n angle = \"eta_prime\"\n xlabel = \"$\\\\eta'=C_1'^{i-1}-P^i-C_1'^i-P^{i+1}$\"\n ylabel = \"$\\\\theta'=P^i-C_1'^i-P^{i+1}-C_1'^{i+1}$\"\n else:\n exit(\"You overestimate my capabilities !\")\n\n \n if not path.isfile(runDir + f\"/data/wadley_kernel_{angle}_{res}A.npz\"):\n\n # Get a worker number to position the progress bar\n global idxQueue\n thr_idx = idxQueue.get()\n setproctitle(f\"RNANet statistics.py Worker {thr_idx+1} reproduce_wadley_results(carbon={carbon})\")\n\n pbar = tqdm(total=2, desc=f\"Worker {thr_idx+1}: eta/theta C{carbon} kernels\", unit=\"kernel\", position=thr_idx+1, leave=False)\n\n # Extract the angle values of c2'-endo and c3'-endo nucleotides\n with sqlite3.connect(runDir + \"/results/RNANet.db\") as conn:\n conn.execute('pragma journal_mode=wal')\n df = pd.read_sql(f\"\"\"SELECT {angle}, th{angle} \n FROM (\n SELECT chain_id FROM chain JOIN structure ON chain.structure_id = structure.pdb_id\n WHERE chain.rfam_acc = 'unmappd' AND structure.resolution <= {res} AND issue = 0\n ) AS c NATURAL JOIN nucleotide\n WHERE puckering=\"C2'-endo\" \n AND {angle} IS NOT NULL \n AND th{angle} IS NOT NULL;\"\"\", conn)\n c2_endo_etas = df[angle].values.tolist()\n c2_endo_thetas = df[\"th\"+angle].values.tolist()\n df = pd.read_sql(f\"\"\"SELECT {angle}, th{angle} \n FROM (\n SELECT chain_id FROM chain JOIN structure ON chain.structure_id = structure.pdb_id\n WHERE chain.rfam_acc = 'unmappd' AND structure.resolution <= {res} AND issue = 0\n ) AS c NATURAL JOIN nucleotide \n WHERE form = '.' 
\n AND puckering=\"C3'-endo\" \n AND {angle} IS NOT NULL \n AND th{angle} IS NOT NULL;\"\"\", conn)\n c3_endo_etas = df[angle].values.tolist()\n c3_endo_thetas = df[\"th\"+angle].values.tolist()\n \n # Create arrays with (x,y) coordinates of the points\n values_c3 = np.vstack([c3_endo_etas, c3_endo_thetas])\n values_c2 = np.vstack([c2_endo_etas, c2_endo_thetas])\n\n # Approximate the Densité by a gaussian kernel\n kernel_c3 = st.gaussian_kde(values_c3)\n kernel_c2 = st.gaussian_kde(values_c2)\n\n # Create 100x100 regular (x,y,z) values for the plot\n xx, yy = np.mgrid[0:2*np.pi:100j, 0:2*np.pi:100j]\n positions = np.vstack([xx.ravel(), yy.ravel()])\n f_c3 = np.reshape(kernel_c3(positions).T, xx.shape)\n pbar.update(1)\n f_c2 = np.reshape(kernel_c2(positions).T, xx.shape)\n pbar.update(1)\n\n # Save the data to an archive for later use without the need to recompute\n np.savez(runDir + f\"/data/wadley_kernel_{angle}_{res}A.npz\",\n c3_endo_e=c3_endo_etas, c3_endo_t=c3_endo_thetas,\n c2_endo_e=c2_endo_etas, c2_endo_t=c2_endo_thetas,\n kernel_c3=f_c3, kernel_c2=f_c2)\n pbar.close()\n idxQueue.put(thr_idx)\n else:\n setproctitle(f\"RNANet statistics.py reproduce_wadley_results(carbon={carbon})\")\n\n f = np.load(runDir + f\"/data/wadley_kernel_{angle}_{res}A.npz\")\n c2_endo_etas = f[\"c2_endo_e\"]\n c3_endo_etas = f[\"c3_endo_e\"]\n c2_endo_thetas = f[\"c2_endo_t\"]\n c3_endo_thetas = f[\"c3_endo_t\"]\n f_c3 = f[\"kernel_c3\"]\n f_c2 = f[\"kernel_c2\"]\n xx, yy = np.mgrid[0:2*np.pi:100j, 0:2*np.pi:100j]\n\n # notify(f\"Kernel computed for {angle}/th{angle} (or loaded from file).\")\n\n # exact counts:\n hist_c2, xedges, yedges = np.histogram2d(c2_endo_etas, c2_endo_thetas, bins=int(2*np.pi/0.1), \n range=[[0, 2*np.pi], [0, 2*np.pi]])\n hist_c3, xedges, yedges = np.histogram2d(c3_endo_etas, c3_endo_thetas, bins=int(2*np.pi/0.1), \n range=[[0, 2*np.pi], [0, 2*np.pi]])\n cmap = cm.get_cmap(\"jet\")\n color_values = cmap(hist_c3.ravel()/hist_c3.max())\n\n for x, y, hist, f, l in zip( (c3_endo_etas, c2_endo_etas), \n (c3_endo_thetas, c2_endo_thetas), \n (hist_c3, hist_c2), \n (f_c3, f_c2), (\"c3\",\"c2\")):\n # cut hist and kernel\n hist_sup_thr = hist.mean() + sd_range[1]*hist.std()\n hist_cut = np.where( hist > hist_sup_thr, hist_sup_thr, hist)\n f_sup_thr = f.mean() + sd_range[1]*f.std()\n f_low_thr = f.mean() + sd_range[0]*f.std()\n f_cut = np.where(f > f_sup_thr, f_sup_thr, f)\n f_cut = np.where(f_cut < f_low_thr, 0, f_cut)\n levels = [ f.mean()+f.std(), f.mean()+2*f.std(), f.mean()+4*f.std()]\n\n # histogram:\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n xpos, ypos = np.meshgrid(xedges[:-1], yedges[:-1], indexing=\"ij\")\n ax.bar3d(xpos.ravel(), ypos.ravel(), 0.0, 0.09, 0.09, hist_cut.ravel(), color=color_values, zorder=\"max\")\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n fig.savefig(runDir + f\"/results/figures/wadley_plots/wadley_hist_{angle}_{l}_{res}A.png\")\n if show:\n fig.show()\n plt.close()\n\n # Smoothed joint distribution\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(xx, yy, f_cut, cmap=cm.get_cmap(\"coolwarm\"), linewidth=0, antialiased=True)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n fig.savefig(runDir + f\"/results/figures/wadley_plots/wadley_distrib_{angle}_{l}_{res}A.png\")\n if show:\n fig.show()\n plt.close()\n\n # 2D Wadley plot\n fig = plt.figure(figsize=(5,5))\n ax = fig.gca()\n ax.scatter(x, y, s=1, alpha=0.1)\n ax.contourf(xx, yy, f, alpha=0.5, cmap=cm.get_cmap(\"coolwarm\"), levels=levels, 
extend=\"max\")\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n fig.savefig(runDir + f\"/results/figures/wadley_plots/wadley_{angle}_{l}_{res}A.png\")\n if show:\n fig.show()\n plt.close()\n\n setproctitle(f\"RNANet statistics.py reproduce_wadley_results(carbon={carbon}) finished\")\n\n # print(f\"[{worker_nbr}]\\tComputed joint distribution of angles (C{carbon}) and saved the figures.\")",
"def plot_batch_throughput(simulator, show_ci=True):\n # Check for convergence mode\n if not simulator.convergence:\n raise NotImplementedError(\"This plot function is implemented only \"\n \"for simulators with convergence mode enabled.\")\n init()\n stats = simulator.batch_stats\n time = list(item['timestamp'] for item in stats)\n mean = list(item['mean'] for item in stats)\n sem = list(item['sem'] for item in stats)\n upper_ci = []\n lower_ci = []\n xlabel = 'Time (ns)'\n ylabel = 'Throughput (Gbit/s)'\n title = 'Batch Mean Throughput'\n if show_ci:\n for i in range(len(time)):\n if sem[i] > 1:\n sem[i] = 1\n upper_ci.append(mean[i] * (1 + sem[i]))\n lower_ci.append(mean[i] * (1 - sem[i]))\n fig, ax = plt.subplots(1, 1, figsize=(6, 3), dpi=300)\n sns.lineplot(x=time, y=mean, ax=ax, lw=1, color=sns.color_palette('RdBu_r')[0])\n if show_ci:\n ax.fill_between(time, lower_ci, upper_ci, fc=sns.color_palette('RdBu_r')[2], ec=None, alpha=.75)\n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n ax.xaxis.grid(True)\n ax.yaxis.grid(True)\n ax.set_title(title, fontsize=13)\n fig.tight_layout()\n return fig",
"def show_ransac():\n\n # simulate normal data\n activeday = 10000\n lazyday = 3000\n stepcounts, truelabels = make_blobs(n_samples=31, centers=[[activeday]], cluster_std=750, random_state=8)\n for i in range(len((stepcounts))):\n if (i % 6 == 0) and (i != 0):\n stepcounts[i] = np.random.normal(loc=lazyday, scale=750)\n stepcounts[i-1] = np.random.normal(loc=lazyday, scale=750)\n\n # apply trend detection\n steptrender = DailyStepCountTrendDetection(stepcounts)\n slope = steptrender.normalize_and_fit()\n\n # output regression slope and whether or not alarming\n alarmflag = steptrender.is_most_recent_trend_alarming()\n if alarmflag:\n print 'The slope is %f which is alarming' % slope\n else:\n print 'The slope is %f which is not alarming' % slope\n\n # plot results\n linex = np.arange(0, 31)\n pointsx = range(0, 31)\n liney = steptrender.get_model().predict(linex[:, np.newaxis])\n plt.plot(linex, liney, 'r-', label='RANSAC regressor')\n pointsy = normalize(stepcounts, norm='l1', axis=0)\n plt.plot(pointsx, pointsy, 'ko', label='Step Counts')\n plt.legend(loc='lower left')\n\n plt.show()",
"def runUCB(self):\n \n #Init vars, N number of user sessions, d=number of ads\n N = self.myDS.shape[0] \n d = self.myDS.shape[1] \n total_reward=0\n self.opt_selected=[]\n \n #Declare vars to count to calculate upper bounds\n numbers_of_selections = [0] * d\n sums_of_rewards = [0] * d\n \n #Calcultate confidance bounds\n for n in range(0,N):\n ad=0\n max_upper_bound=0\n for i in range (0,d):\n if (numbers_of_selections[i]>0):\n average_reward=sums_of_rewards[i]/numbers_of_selections[i]\n delta_i=math.sqrt(3/2 * math.log(n+1) / numbers_of_selections[i])\n upper_bound=average_reward+delta_i\n else:\n upper_bound=1e400\n if upper_bound>max_upper_bound:\n max_upper_bound=upper_bound\n ad = i\n self.opt_selected.append(ad)\n numbers_of_selections[ad]=numbers_of_selections[ad]+1\n reward=self.myDS.values[n,ad]\n sums_of_rewards[ad]=sums_of_rewards[ad]+reward\n total_reward=total_reward+reward\n \n return total_reward",
"def make_summary_plot(run_lists, file_descriptor, attr='sipm1.threeSampleAmpl'):\n biases = []\n gains = []\n pes = []\n currs = []\n gainerrs = []\n quad_terms = []\n quad_errs = []\n for row in sorted(run_lists):\n biases.append(row[0])\n gain_out = fit_gain(row[1], attr=attr)\n out_tuple = gain_out[0]\n gains.append(out_tuple[0])\n gainerrs.append(out_tuple[3])\n smeans = sorted(gain_out[1])\n currs.append(0.5*(smeans[-1] + smeans[-2]))\n pes.append(currs[-1]/gains[-1])\n quad_terms.append(out_tuple[1])\n quad_errs.append(out_tuple[4])\n\n maxgain = max(gains)\n gains = np.array(gains)/maxgain\n gainerrs = np.array(gainerrs)/maxgain\n # gainerrs = 0.1*gains\n\n currs = np.array(currs)/max(currs)\n pes = np.array(pes)\n pe_errs = gainerrs/gains*pes\n maxpe = max(pes)\n fig, ax1 = plt.subplots()\n\n coeffs, V = np.polyfit(biases, gains, 1, w=1.0/gainerrs, cov=True)\n breakdown = -1*coeffs[1]/coeffs[0]\n\n breakdown_sigma = sigma_from_cov(coeffs, V)\n\n # calculate sigmas throughout range\n vals, vecs = np.linalg.eig(V)\n U = np.transpose(vecs)\n xs_for_error = np.arange(breakdown - 0.1, max(biases) + 0.1, 0.01)\n gain_sigmas = sig_from_diag(xs_for_error, U, vals)\n error_band_ys = np.array([i*coeffs[0] + coeffs[1] for i in xs_for_error])\n ax1.fill_between(xs_for_error, error_band_ys + gain_sigmas,\n error_band_ys - gain_sigmas, facecolor='red', alpha=0.5)\n\n fitline = [i*coeffs[0] + coeffs[1] for i in biases] + [0]\n fitbiases = biases + [breakdown]\n\n ax1.set_title('bias scan %s' % file_descriptor)\n fitplot = ax1.plot(fitbiases, fitline, 'r-')\n gainplot = ax1.errorbar(\n biases, gains, yerr=gainerrs, fmt='ro', markersize=10)\n currplot = ax1.plot(biases, currs, 'g*', markersize=15)\n ax1.set_ylim(0, 1.105)\n ax1.set_xlim([breakdown - 0.1, max(biases) + 0.1])\n ax1.set_xlabel('bias voltage [V]')\n ax1.set_ylabel('relative gain, charge [a.u.]')\n\n ticks = [breakdown]\n ticks.extend([bias for bias in biases[::2]])\n tick_labels = ['%.1f $\\pm$ %.1f' % (breakdown, breakdown_sigma)]\n tick_labels.extend([str(bias) for bias in biases[::2]])\n ax1.set_xticks(ticks)\n ax1.set_xticklabels(tick_labels)\n ax1.grid()\n ax1.get_xticklabels()[0].set_color('r')\n\n ax2 = ax1.twinx()\n peplot = ax2.errorbar(biases, pes, yerr=pe_errs, fmt='b^', markersize=10)\n ax2.set_ylabel('pe', color='b')\n ax2.set_ylim(0, maxpe*1.105)\n ax2.set_xlim([breakdown - 0.1, max(biases) + 0.1])\n for tick in ax2.get_yticklabels():\n tick.set_color('b')\n ax1.legend([gainplot[0]]+currplot+[peplot[0]]+fitplot,\n ['gain', 'charge', 'pes', 'gain fit'],\n loc='best', numpoints=1)\n\n plt.savefig('pdfs/breakdownPlot%s.pdf' % file_descriptor)\n plt.show()\n\n quadploterrs = 0.5/np.sqrt(quad_terms)*quad_errs\n plt.errorbar(biases, np.sqrt(quad_terms)*100, yerr=quadploterrs*100, fmt='ko')\n plt.xlim(min(biases) - 0.1, max(biases) + 0.1)\n plt.xlabel('bias [V]')\n plt.ylabel('sqrt(quadratic term) [%]')\n plt.title('quadratic terms %s' % file_descriptor)\n\n plt.savefig('pdfs/quadraticTerms%s.pdf' % file_descriptor)\n plt.show()",
"def main(showSamples=True, showConfusion=True):\n ndigit = 10\n elambda = [0.4, 0.6, 0.8]\n for i in elambda:\n test(ndigit, i, showSamples, showConfusion)\n if showSamples:\n pltmulti('graphs.pdf')",
"def plot(self) -> None:\n if self.__fig is None:\n self.__fig = plt.figure()\n\n xv = []\n yv = []\n for x in np.arange(self.state_min(), self.state_max(), self.state_step()):\n xv.append(x)\n yv.append(self.reward(x))\n ax = self.__fig.gca()\n ax.set_xlabel('X (State)')\n ax.set_ylabel('Y (Reward)')\n ax.set_title('Reward Function')\n ax.plot(xv, yv)\n plt.pause(self.__plot_pause)\n plt.show(block=False)\n return",
"def produce_cgchart(ytrue, ypred):\n\n yprobas = np.append((1-ypred).reshape(-1,1), ypred.reshape(-1,1), axis=1)\n # 0's and 1's\n print(yprobas.shape)\n areas = plot_cumulative_gain(ytrue, yprobas)",
"def plot_lr(self, show_text=True, show_moms=True):\n phase_limits = [0]\n for nb_batch, phase in zip(self.nb_batches, self.phases):\n phase_limits.append(phase_limits[-1] + nb_batch * phase.epochs)\n if not in_ipynb():\n plt.switch_backend('agg')\n np_plts = 2 if show_moms else 1\n fig, axs = plt.subplots(1,np_plts,figsize=(6*np_plts,4))\n if not show_moms: axs = [axs]\n for i in range(np_plts): axs[i].set_xlabel('iterations')\n axs[0].set_ylabel('learning rate')\n axs[0].plot(self.iterations,self.lrs)\n if show_moms:\n axs[1].set_ylabel('momentum')\n axs[1].plot(self.iterations,self.momentums)\n if show_text: \n for i, phase in enumerate(self.phases):\n text = phase.opt_fn.__name__\n if phase.wds is not None: text+='\\nwds='+str(phase.wds)\n if phase.beta is not None: text+='\\nbeta='+str(phase.beta)\n for k in range(np_plts):\n if i < len(self.phases)-1:\n draw_line(axs[k], phase_limits[i+1])\n draw_text(axs[k], (phase_limits[i]+phase_limits[i+1])/2, text) \n if not in_ipynb():\n plt.savefig(os.path.join(self.save_path, 'lr_plot.png'))",
"def plot_results(self):\n\n\n f1, ax1 = plt.subplots()\n h1, = ax1.plot(self.history[\"step\"], self.history[\"trainLoss\"],\\\n \"b-\", label=\"Loss - Train\")\n h2, = ax1.plot(self.history[\"step\"], self.history[\"validLoss\"],\\\n \"b.\", label=\"Loss - Validation\")\n\n ax1.set_ylabel(\"Loss\", color = \"blue\")\n ax1.tick_params(\"y\", color = \"blue\")\n ax1.yaxis.label.set_color(\"blue\")\n ax1.set_xlabel(\"Training Steps [{}]\".format(self.FLAGS.eval_every))\n\n ax2 = ax1.twinx()\n h3, = ax2.plot(self.history[\"step\"], self.history[\"trainAccr\"], \"r-\",\\\n label = \"Accuracy - Train\")\n h4, = ax2.plot(self.history[\"step\"], self.history[\"validAccr\"], \"r.\",\\\n label = \"Accuracy - Validation\")\n\n ax2.set_ylabel(\"Accuracy\", color = \"red\")\n ax2.tick_params(\"y\", color = \"red\")\n ax2.yaxis.label.set_color(\"red\")\n\n hds = [h1,h2,h3,h4]\n lbs = [l.get_label() for l in hds]\n ax1.legend(hds, lbs)\n f1.tight_layout()\n plt.savefig(\"trainingHistory.png\")\n\n plt.close(f1)\n #plt.show()"
] | [
"0.6006481",
"0.5967127",
"0.5966327",
"0.5892455",
"0.5861487",
"0.58340496",
"0.5791899",
"0.5786175",
"0.56671864",
"0.5652789",
"0.56424904",
"0.5639421",
"0.563851",
"0.55813694",
"0.55700576",
"0.55688906",
"0.55589575",
"0.55489814",
"0.55477333",
"0.5528384",
"0.5522314",
"0.5516444",
"0.5515409",
"0.55092645",
"0.5469398",
"0.5467704",
"0.5451695",
"0.54495066",
"0.5447527",
"0.5417748"
] | 0.70368356 | 0 |
Returns a date suffix for a given date | def filter_date_suffix(date_str: str):
day = int(date_str[-2:])
if 4 <= day <= 20 or 24 <= day <= 30:
suffix = "th"
else:
suffix = ["st", "nd", "rd"][day % 10 - 1]
return date_str + suffix | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __num_suffix(self, check_in_date):\n date_value = str(check_in_date).split(' ')\n day_value = date_value[0][:-2]\n date_value[0] = day_value\n return ' '.join(date_value)",
"def _build_tag_suffix() -> str:\n now = datetime.datetime.now(tz=datetime.timezone.utc).astimezone()\n return now.strftime(\".%Y%m%d.0\")",
"def custom_strftime(format, t): \n return t.strftime(format).replace(\"{S}\", str(t.day) + suffix(t.day))",
"def get_date_prefix(date, prefix_tmpl=STD_DATE_PREFIX):\n return prefix_tmpl.format(date.year, date.month, date.day)",
"def shortDate(self, date):\n return u'%s %02i' % (date.pMonth(), date.day())",
"def rebuildDate(date):\n parts = date.split(\" \")\n parts[1] = parts[1][:-1]\n eDate = parts[2] + '-' + parts[0] + '-' + parts[1]\n return eDate",
"def suffix(d): \n return \"th\" if 11<=d<=13 else {1:\"st\",2:\"nd\",3:\"rd\"}.get(d%10, \"th\")",
"def _calculate_date(day_of_year):\n date = datetime.datetime.strptime(str(day_of_year), '%j')\n return date.strftime('%d-%b')",
"def _get_neat_date(date: datetime) -> str:\n month_selector = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\",\n \"October\", \"November\", \"December\"]\n month_string = month_selector[date.month - 1]\n\n day = date.day\n\n if day == 1 or day == 21 or day == 31:\n suffix = \"st\"\n elif day == 2 or day == 22:\n suffix = \"nd\"\n elif day == 3 or day == 23:\n suffix = \"rd\"\n else:\n suffix = \"th\"\n\n neat_date = f\"{month_string} {day}{suffix}\"\n return neat_date",
"def file_suffix(self):\n return f'{self.image_count:05}' if self.sequential_naming else \\\n datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")",
"def humanize_day(day_num):\n if 11 <= day_num <= 13:\n suffix = 'th'\n else:\n r = day_num % 10\n if r == 1:\n suffix = 'st'\n elif r == 2:\n suffix = 'nd'\n elif r == 3:\n suffix = 'rd'\n else:\n suffix = 'th'\n return str(day_num) + suffix",
"def get_date(date):\n return date",
"def _get_date_in_words(number_date):\n month_list = ['Jan ', 'Feb ', 'Mar ', 'Apr ', 'May ', 'Jun ', 'Jul ', 'Aug ', 'Sep ', 'Oct ', 'Nov ', 'Dec ']\n month = int(number_date[5:7]) - 1\n day = str(int(number_date[-2:]))\n return month_list[month] + day",
"def get_date_day(date):\n cut_date = date.split('-')\n return cut_date[2]",
"def convert_name(path):\n local_timezone = get_localzone()\n aware_dt = datetime.fromtimestamp(os.path.getctime(path), local_timezone)\n date = aware_dt.strftime(\"%Y-%m-%d\")\n regex = re.compile(\"^\\d+-?(.+)\")\n match = regex.search(path.name)\n if match: stem = match.group(1)\n else: stem = \"\"\n\n return f\"{date}-{stem}\"",
"def buildDate(date):\n parts = date.split(\"-\")\n yDate = parts[1] + \" \" + parts[2] + ', ' + parts[0]\n return yDate",
"def reformat_subway_dates(date):\n date_formatted = datetime.datetime.strptime(date, '%m-%d-%y')\n date_formatted = date_formatted.strftime('%Y-%m-%d')\n return date_formatted",
"def date_to_term(date):\n term = \"H\"\n if date.month < 6:\n term = \"V\"\n elif date.month < 8 or date.month == 8 and date.day < 20:\n term = \"S\"\n return f\"{term}{date.year % 100}\"",
"def date_to_final_str(date_obj: datetime) -> str:\n return date_obj.strftime(\"%Y-%m-%d\")",
"def get_month_name(date, abbreviated_or_full):\n if abbreviated_or_full == constants.str_abbreviated:\n return date.strftime('%b')\n elif abbreviated_or_full == constants.str_full:\n return date.strftime('%B')\n else:\n err_msg = str_possible_values('abbreviated_or_full',\n [constants.str_abbreviated, constants.str_full])\n\n raise ValueError(err_msg)",
"def ingame_formatted(dt: datetime) -> str:\n return dt.strftime(\"%Y - %B\")",
"def reformat_date(mdy_date_string):\n date = mdy_date_string.split('/')\n return f\"{date[2]}-{date[0]}-{date[1]}\" # difficult to read",
"def date_pattern():\n\n from datetime import datetime\n\n # Current time\n now = datetime.now()\n # Getting date\n date_val = now.strftime('%d_%b_%Y')\n # Getting hour:min:sec\n hour_val = now.strftime('%H%M%S')\n # Getting microsecond\n micro_val = now.strftime('%f')[:2]\n\n # Returns a str in described format\n return f'{date_val}_{hour_val}{micro_val}'",
"def add_year_suffix(reference_list):\n pass",
"def get_filename(out_dir, file_date, extension):\n return path.join(out_dir, f'CrossrefCitations_{file_date}.{extension}')",
"def reformatdate(self, date):\n# print('DATE', self.__str__())\n if 'dummy' in date:\n return '1970_01_01'\n# datesplit = date.split('/')\n datesplit = date.split('-') # Really? This had to be changed?!\n# print('DATE', date, datesplit)\n\n # dates used to be as follows\n# month = datesplit[0]\n# day = datesplit[1]\n# year = datesplit[2]\n\n # dates as of 12 June 2018 now done this way\n year = datesplit[0]\n month = datesplit[1]\n day = datesplit[2]\n\n return year + '_' + month + '_' + day",
"def get_filename(self, path, prefix, suffix, date, period):\n return os.path.join(path,\n '%s%s%s' % (\n prefix,\n self.get_filename_date(date,\n params=dict(period=period)),\n suffix))",
"def reformat_date(mdy_date_string):\n month, day, year = mdy_date_string.split('/')\n return f\"{year}-{month}-{day}\"",
"def suffix(string, suffix, sep = '_'):\n if suffix == 'production':\n suffixed = string\n else:\n suffixed = string + sep + suffix\n return suffixed",
"def date_format(date_str):\r\n return date_str.replace(\"/\", \"_\")"
] | [
"0.7249425",
"0.67349887",
"0.6392717",
"0.6302706",
"0.62556237",
"0.6224853",
"0.6213826",
"0.61899906",
"0.6069033",
"0.59627014",
"0.59461755",
"0.5929941",
"0.59197515",
"0.5891935",
"0.58643836",
"0.585514",
"0.5840551",
"0.5837133",
"0.58069605",
"0.58027524",
"0.580255",
"0.579444",
"0.57865983",
"0.5783474",
"0.57752943",
"0.5758497",
"0.5727761",
"0.57084113",
"0.5683588",
"0.56741774"
] | 0.78005004 | 0 |
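Editor's note: a minimal, self-contained usage sketch of the filter_date_suffix document above; the function body is repeated verbatim from the record so the example runs on its own.

def filter_date_suffix(date_str: str):
    day = int(date_str[-2:])
    if 4 <= day <= 20 or 24 <= day <= 30:
        suffix = "th"
    else:
        suffix = ["st", "nd", "rd"][day % 10 - 1]
    return date_str + suffix

# Expected behaviour on a few sample inputs:
assert filter_date_suffix("May 21") == "May 21st"
assert filter_date_suffix("May 03") == "May 03rd"
assert filter_date_suffix("May 13") == "May 13th"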
Right-justifies a number or string to the specified width, padding on the left with fillchar (default '0'). | def filter_pad(val: Union[int, str], width: int, fillchar: str = '0') -> str:
return str(val).rjust(width, fillchar) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pad(number, width=0):\n return str(number).zfill(width)",
"def pad(text, width, pad_character=\" \"):\n\n length = len(text)\n if width < 0 and length < -width:\n return text + (-width - length) * pad_character\n elif width > 0 and length < width:\n return (width - length) * pad_character + text\n else:\n return text",
"def ljust(self, width, fillchar=' '):\n return asarray(ljust(self, width, fillchar))",
"def _fixed_width_str(self, x, fill=' '):\n x_str = str(x)\n l = len(x_str)\n pad = self.width - l\n if pad < 0:\n raise Exception(\"Your string is too long!\")\n return fill * pad + x_str",
"def str_padding(length, val):\n return '{0:<{fill}}'.format(val, fill=length)",
"def rjustText( text = \"\", fillchar= \" \", fieldwidth = 78 ):\n ansistring_text = stringExtends.ansiStringClass( \"\" )\n if isinstance( text, ( str, unicode ) ):\n ansistring_text.Text = text\n\n ansistring_fillchar = stringExtends.ansiStringClass( \" \" )\n if isinstance( fillchar, ( str, unicode ) ):\n ansistring_fillchar.Text = fillchar\n\n return_fieldwidth = 78\n if isinstance( fieldwidth, ( int, float ) ):\n return_fieldwidth = int( fieldwidth )\n\n r = stringExtends.ansiStringClass( \"\" )\n if ansistring_text.rawTextLen() < return_fieldwidth:\n # need to do a little math ro figure out padding length, and apply padding\n padding_length = int( math.floor( ( return_fieldwidth - ansistring_text.rawTextLen() ) / ansistring_fillchar.rawTextLen() ) )\n r.Text = ( ansistring_fillchar.ansiTextFormat() * padding_length )\n if ( ansistring_text.rawTextLen() + r.rawTextLen() ) < return_fieldwidth:\n r.Text += ansistring_fillchar.ansiSlice( 0, ( return_fieldwidth - ( r.rawTextLen( ) + ansistring_text.rawTextLen( ) ) ) )\n r.Text += ansistring_text.ansiTextFormat()\n else:\n # we have to slice into the original text since it's longer than the fieldwidth\n r.Text = ansistring_text.ansiSlice( 0, return_fieldwidth )\n\n return r.Text",
"def ljustText( text = \"\", fillchar= \" \", fieldwidth = 78 ):\n ansistring_text = stringExtends.ansiStringClass( \"\" )\n if isinstance( text, ( str, unicode ) ):\n ansistring_text.Text = text\n\n ansistring_fillchar = stringExtends.ansiStringClass( \" \" )\n if isinstance( fillchar, ( str, unicode ) ):\n ansistring_fillchar.Text = fillchar\n\n return_fieldwidth = 78\n if isinstance( fieldwidth, ( int, float ) ):\n return_fieldwidth = int( fieldwidth )\n\n r = stringExtends.ansiStringClass( \"\" )\n if ansistring_text.rawTextLen() < return_fieldwidth:\n # need to do a little math ro figure out padding length, and apply padding\n padding_length = int( math.floor( ( return_fieldwidth - ansistring_text.rawTextLen() ) / ansistring_fillchar.rawTextLen() ) )\n r.Text = ansistring_text.ansiTextFormat() + ( ansistring_fillchar.ansiTextFormat() * padding_length )\n if r.rawTextLen() < return_fieldwidth:\n r.Text += ansistring_fillchar.ansiSlice( 0, ( return_fieldwidth - r.rawTextLen() ) )\n else:\n # we have to slice into the original text since it's longer than the fieldwidth\n r.Text = ansistring_text.ansiSlice( 0, return_fieldwidth )\n\n return r.Text",
"def rjust(a, width, fillchar=' '):\n a_arr = numpy.asarray(a)\n width_arr = numpy.asarray(width)\n size = int(numpy.max(width_arr.flat))\n if numpy.issubdtype(a_arr.dtype, numpy.bytes_):\n fillchar = asbytes(fillchar)\n return _vec_string(\n a_arr, type(a_arr.dtype)(size), 'rjust', (width_arr, fillchar))",
"def rjust(self, width, fillchar=' '):\n return asarray(rjust(self, width, fillchar))",
"def ljust(a, width, fillchar=' '):\n a_arr = numpy.asarray(a)\n width_arr = numpy.asarray(width)\n size = int(numpy.max(width_arr.flat))\n if numpy.issubdtype(a_arr.dtype, numpy.bytes_):\n fillchar = asbytes(fillchar)\n return _vec_string(\n a_arr, type(a_arr.dtype)(size), 'ljust', (width_arr, fillchar))",
"def pad_digits(x, width):\n if pd.notnull(x):\n return '{0:0{1}d}'.format(int(x), width)\n else:\n return x",
"def ljust(self, width, fillchar, _difference):\n return self + self._filler(fillchar, _difference)",
"def _padboth(width, s):\n fmt = \"{0:^%ds}\" % width\n return fmt.format(s)",
"def _padleft(width, s):\n fmt = \"{0:>%ds}\" % width\n return fmt.format(s)",
"def _pad_shorter(sequence: str) -> str:\n return sequence.ljust(3, \"X\")",
"def rjust(self, width, fillchar, _difference):\n return self._filler(fillchar, _difference) + self",
"def r_pad(arg, length):\n if length <= len(arg):\n return arg\n else:\n return arg + \" \" * (length - len(arg))",
"def pad(plain, size):\n offset = size - (len(plain) % size)\n return plain + chr(offset) * offset",
"def fill(text, width=80):\n return os.linesep.join(text[i:i+width] for i in range(0, len(text), width))",
"def fill(text, width=80):\n return os.linesep.join(text[i:i+width] for i in range(0, len(text), width))",
"def fill(text, width=80):\n return os.linesep.join(text[i:i+width] for i in range(0, len(text), width))",
"def fill(text, width=80):\n return os.linesep.join(text[i:i+width] for i in range(0, len(text), width))",
"def fill(text, width=80):\n return os.linesep.join(text[i:i+width] for i in range(0, len(text), width))",
"def fill(text,width=60):\r\n pars = [textwrap.fill(text,width) for text in text.split('\\n')]\r\n return '\\n'.join(pars)",
"def pad(data, padding_char, length):\n if is_null_or_empty(padding_char):\n padding_char = DEFAULT_PAD_CHAR\n\n padded = create(padding_char, length)\n string_buf = padded + data + padded\n return string_buf",
"def pad_chars(text, encoding=None, null_terminate=True, pad_to_multiple_of=4):\n if pad_to_multiple_of < 0 or not isinstance(pad_to_multiple_of, int):\n raise ValueError(\"pad must be an integer greater than zero.\")\n if encoding is not None:\n encoded = text.encode(encoding) + (b\"\\0\" if null_terminate else b\"\")\n else:\n encoded = text + (\"\\0\" if null_terminate else \"\")\n pad = b\"\\0\" if encoding is not None else \"\\0\"\n while len(encoded) % pad_to_multiple_of != 0:\n encoded += pad\n return encoded",
"def _padright(width, s):\n fmt = \"{0:<%ds}\" % width\n return fmt.format(s)",
"def fill_with_spaces(line: string, width: int) -> string:\n size = len(line)\n spaces_left = width - size\n return line + (' ' * spaces_left)",
"def pad_ansi(text, width, char, left=False):\n current_width = len(ANSI_PATTERN.sub('', text))\n parts = [text, (width - current_width) * char]\n if left:\n parts = reversed(parts)\n return ''.join(parts)",
"def pad_left(x, block_size=3, fill=0):\n if len(x) > block_size:\n return x\n else:\n right = np.array(list(str(x)))\n left = np.repeat(str(fill), block_size - right.size )\n return \"\".join(np.concatenate([left, right]))"
] | [
"0.7816895",
"0.7491213",
"0.7455374",
"0.72899914",
"0.7258319",
"0.72047716",
"0.71930504",
"0.7182079",
"0.7174301",
"0.7163619",
"0.7056676",
"0.6992811",
"0.6971006",
"0.6785328",
"0.67768764",
"0.67735404",
"0.67346054",
"0.67320627",
"0.6665845",
"0.6665845",
"0.6665845",
"0.6665845",
"0.6665845",
"0.6646392",
"0.6639841",
"0.6612712",
"0.6602197",
"0.6589918",
"0.6535106",
"0.653317"
] | 0.82074344 | 0 |
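Editor's note: a minimal usage sketch of the filter_pad document above; the body is copied from the record so the example is runnable on its own.

from typing import Union

def filter_pad(val: Union[int, str], width: int, fillchar: str = '0') -> str:
    return str(val).rjust(width, fillchar)

# Padding happens on the left (right-justification):
assert filter_pad(7, 3) == "007"
assert filter_pad("ab", 5, "*") == "***ab"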
Override the built-in Jinja default filter so that the `boolean` parameter defaults to True | def filter_default(value, default_value: str = '', boolean: bool = True) -> str:
return jinja2.filters.do_default(value, default_value, boolean) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post_formatter(self, value):\n if isinstance(value, bool):\n return value and 'true' or None\n return value",
"def test_with_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}truthy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': True,\n })),\n '<span class=\"truthy\">')",
"def form_Boolean(request):\n schema = schemaish.Structure()\n schema.add('myBooleanField', schemaish.Boolean())\n form = formish.Form(schema, 'form')\n return form",
"def test_without_value(self):\n t = Template('{% load djblets_utils %}'\n '<span{% attr \"class\" %}\\n'\n '{% if some_bool %}falsy{% endif %}\\n'\n '{% endattr %}>')\n\n self.assertEqual(\n t.render(Context({\n 'some_bool': False,\n })),\n '<span>')",
"def boolean_value(cls, json_field: str, value: bool) -> \"JsonPattern\":\n return jsii.sinvoke(cls, \"booleanValue\", [json_field, value])",
"def boolean_process(field, is_true=False):\n bstring = \"FALSE\"\n if is_true:\n bstring = \"TRUE\"\n return {\n field: bstring\n }",
"def boolean_function(bool_variable):\n\tif bool_variable:\n\t\treturn \"The boolean variable is True\"\n\telse:\n\t\treturn \"The boolean variable is False\"",
"def _set_bool(name, value, context):\n if name in os.environ:\n envval = os.environ.get(name).lower()\n if envval in [\"1\", \"true\", \"y\", \"yes\"]:\n context[name] = True\n elif envval in [\"0\", \"false\", \"n\", \"no\"]:\n context[name] = False\n else:\n raise ValueError(f\"{name} is a boolean, cannot match '{os.environ[name]}'\")\n\n _set_default(name, value, context)",
"def form_BooleanWithDefaults(request):\n schema = schemaish.Structure()\n schema.add('myBooleanTrue', schemaish.Boolean())\n schema.add('myBooleanFalse', schemaish.Boolean())\n form = formish.Form(schema, 'form')\n form.defaults = {'myBooleanTrue':True,'myBooleanFalse':False}\n return form",
"def set_boolean(x):\n\n if x:\n return \"True\"\n else:\n return \"False\"",
"def parse_debug_value(value):\r\n if isinstance(value, bool):\r\n return value\r\n try:\r\n from webassets.env import parse_debug_value\r\n return parse_debug_value(value)\r\n except ValueError:\r\n raise template.TemplateSyntaxError(\r\n '\"debug\" argument must be one of the strings '\r\n '\"true\", \"false\" or \"merge\", not \"%s\"' % value)",
"def to_es_bool(boolean_value):\n return '1' if boolean_value else '0'",
"def get_string(self):\n boolean_value_string = str(bool(self._boolean_value)).lower()\n return BOOLEAN_TEMPLATE.substitute(\n boolean_name = str(self._boolean_name),\n boolean_value = boolean_value_string)",
"def __init__(self, boolean_name, boolean_value):\n self._boolean_name = process_for_latex(boolean_name)\n self._boolean_value = boolean_value",
"def set_jinja_before_request():\n resource_provider.set_jinja_globals()",
"def _format_bool_(value):\n\n from ocgis.util.helpers import format_bool\n\n return format_bool(value)",
"def template_check(value):\n if isinstance(value, str):\n return value.lower() == \"true\"\n return value",
"def bool_to_on_off(boolean: bool):\n if boolean:\n return \"on\"\n return \"off\"",
"def test_boolean_in_serializer() -> None:\n assert cv.custom_serializer(cv.boolean) == {\n \"type\": \"boolean\",\n }",
"def add_bool_as_scalar(self, node):\n if node.value == 'true' or node.value == 'false' :\n return self.construct_yaml_bool(node)\n return self.construct_scalar(node)",
"def give_me_a_boolean():\n return True\n pass",
"def variable_boolean(self, value):\n\n text_value = to_text(value)\n text_value = text_value.lower()\n\n if text_value == 'true' or text_value == 'false':\n return True\n\n return False",
"def configure_filters(app):\n\n for (name, filter) in _filters.iteritems():\n app.jinja_env.filters[name] = filter",
"def option_default_true(arg: Any) -> bool:\n\n if isinstance(arg, bool):\n return arg\n\n if arg is None:\n return True\n\n sanitized = arg.strip().lower()\n\n if sanitized == \"true\":\n return True\n elif sanitized == \"false\":\n return False\n else:\n raise ValueError(f\"Directive option argument '{arg}' is not valid. \"\n f\"Valid arguments are 'true' or 'false'.\")",
"def bool_validator_advice(validator_args):\n \n return \" {True, False}\"",
"def make_bool(value):\n def make_value():\n return verify.Term(verify.BOOLEAN, value)\n return make_value",
"def format_bool(b):\n return \"YES\" if b else \"NO\"",
"def convert_boolean(cls, param, value):\r\n return True",
"def boolean(self, boolean):\n\n self._boolean = boolean",
"def get_filter():\n return render_template(\"filter_js.html\")"
] | [
"0.59055126",
"0.57202244",
"0.5688953",
"0.5675066",
"0.5647763",
"0.5530957",
"0.5526335",
"0.54568887",
"0.54474694",
"0.544175",
"0.5375499",
"0.53713053",
"0.5349298",
"0.5309314",
"0.5227932",
"0.5188103",
"0.51851726",
"0.5111529",
"0.51056075",
"0.5075433",
"0.5073452",
"0.506985",
"0.50645113",
"0.50501776",
"0.5030611",
"0.50287426",
"0.5021171",
"0.5020128",
"0.5010474",
"0.5010125"
] | 0.64101535 | 0 |
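Editor's note: a usage sketch of the filter_default document above, assuming jinja2 is installed; with boolean=True, falsy values (not just undefined ones) fall back to the default.

import jinja2.filters

def filter_default(value, default_value: str = '', boolean: bool = True) -> str:
    return jinja2.filters.do_default(value, default_value, boolean)

assert filter_default('', 'fallback') == 'fallback'   # empty string is falsy, so replaced
assert filter_default('keep', 'fallback') == 'keep'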
Renders a Template with a task as its context. | def render_from_task(template: Union[FlexGetTemplate, str], task: 'Task') -> str:
variables = {'task': task, 'task_name': task.name}
variables.update(extra_vars())
return render(template, variables) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def render_template(self, context=None):\n if context is None:\n context = self.get_template_context()\n return self.get_template_object().render(context)",
"def render(self, _template, context=None):\n variables = {}\n if context:\n variables.update(context)\n rv = self.jinja2.render_template(_template, **variables)\n self.response.write(rv)",
"def render(self, tmpl_name, context_env):\n return self.tmpl._render(tmpl_name, context_env)",
"def render(self, task: \"TaskView\") -> Any:\n return None",
"def render(self, _template, **context):\n context['_request'] = self.request\n self.response.write(self.jinja2.render_template(_template, **context))",
"def template_file(task, template, path, jinja_filters=None, **kwargs):\n jinja_filters = jinja_filters or {} or task.nornir.config.jinja_filters\n merged = merge_two_dicts(task.host, kwargs)\n text = jinja_helper.render_from_file(\n template=template,\n path=path,\n host=task.host,\n jinja_filters=jinja_filters,\n **merged\n )\n return Result(host=task.host, result=text)",
"def render(self, template: str, **vars) -> str:\n vars.setdefault('ctx', self._ctx)\n return self._renderer.render(template, **vars)",
"def render(self, activity, context, typename=None):\n if not isinstance(context, dict):\n raise ContextTypeException('context must be dict. it should not Context or RequestContext')\n template_names = self.get_template_names(activity, typename)\n template = select_template(template_names)\n context = self.prepare_context(activity, context,\n typename=typename)\n return template.render(context)",
"def render(self, template: str, **vars) -> str:",
"def _render(self) -> str:\n html = self._template.render(self._transient_context)\n self._transient_context = None\n return html",
"def _render_template(self, tplfile, env):\n with open(tplfile) as fp:\n tpl = Template(fp.read())\n return tpl.render(Context(env))",
"def template(template_name, **props):\n return render_template(template_name, **template_context(**props))",
"def _render_content(template_name, workflow: Workflow):\n content = render_template(\n dag_name=workflow.dag_name,\n template_name=template_name,\n relations=workflow.task_group_relations,\n task_groups=list(workflow.task_groups.values()),\n )\n return content",
"def render(self, tmpl_file, context):\n template = Template(tmpl_file.read_text(), keep_trailing_newline=True)\n return template.render(context)",
"def _render_content(self, template_name, workflow: Workflow, props: PropertySet):\n converted_job_properties: Dict[str, Union[List[str], str]] = {\n key: comma_separated_string_to_list(value) for key, value in props.job_properties.items()\n }\n task_map = {\n task_group.name: [task.task_id for task in task_group.tasks]\n for task_group in workflow.task_groups.values()\n }\n\n content = render_template(\n template_name=template_name,\n dag_name=workflow.dag_name,\n schedule_interval=self.schedule_interval,\n start_days_ago=self.start_days_ago,\n job_properties=converted_job_properties,\n config=props.config,\n relations=workflow.task_group_relations,\n task_groups=list(workflow.task_groups.values()),\n dependencies=workflow.dependencies,\n task_map=task_map,\n )\n return content",
"def render_template(template: str, context: dict) -> str:\n if template is None:\n return \"\"\n return Template(template).render(Context(context))",
"def render(filename, context):\n\ttemplate = parser.Template(open(TEMPLATES_DIR + '/' + filename).read())\n\treturn template.eval(context)",
"def render(self):\n context = {'groups': self._groups}\n\n return loader.render_to_string(self._template_path, dictionary=context)",
"async def render(\n self, filename: str, *args: dict, **kwargs: typing.Any\n ) -> str:\n with self._enable_async():\n return await self._get_template(filename).render_async(\n *args, **kwargs\n )",
"def render_template(self, template_path, context={}):\n template_str = self.load_resource(template_path)\n return Template(template_str).render(Context(context))",
"def render(self, template, *args, **kwargs):\n self._render(template, sys.stdout, *args, **kwargs)",
"def render_template(self, template_path, context={}):\n template_str = self.resource_string(template_path)\n return Template(template_str).render(Context(context))",
"def render_template(self, template_path, context={}):\n template_str = self.resource_string(template_path)\n return Template(template_str).render(Context(context))",
"def render_response(self, *args, **kwargs):\n if self.template_name is not None:\n template = get_template(loader, self.template_name)\n self.response.write(template.render(**self.get_context(*args, **kwargs)))\n else:\n raise ValueError('No template provided.')",
"def render_template(template_name, **context):\n ctx = stack.top\n return _render(_lookup(ctx.app).get_template(template_name),\n context, ctx.app)",
"def render_template(self, template_path, context = {}):\n template_str = self.load_resource(template_path)\n return Template(template_str).render(Context(context))",
"def render(self, template_name, **kwargs):\n raise NotImplementedError()",
"def render_template(self, template_name, **kwargs):\n template = django_template_loader.get_template(template_name)\n return template.render(DjangoContext(kwargs))",
"def render(*args, **kwargs):\n if args:\n assert len(args) == 1, \\\n 'Expected exactly one argument, but got %r' % (args,)\n template = loader.load(args[0])\n else:\n template = cherrypy.thread_data.template\n ctxt = Context(url=cherrypy.url)\n ctxt.push(kwargs)\n return template.generate(ctxt)",
"def html(self, **kwargs):\n # get bas context_data\n context_data = self.get_context_data(**kwargs)\n # setup the context object for it\n context = loader.Context(context_data)\n # render the template_source_body with current set of context\n body = loader.render_to_string(self.template_source_body, context)\n # add the rendered body to the underlying wrapper template\n context_data = self.get_context_data(body=body)\n # rerender it\n context = loader.Context(context_data)\n return self.template.render(context)"
] | [
"0.7163159",
"0.69389516",
"0.69019514",
"0.684778",
"0.67317486",
"0.67039704",
"0.6675844",
"0.66062564",
"0.65639883",
"0.6550063",
"0.65461814",
"0.65187407",
"0.6497274",
"0.6492231",
"0.6465071",
"0.644912",
"0.6448845",
"0.64252526",
"0.63980466",
"0.63831687",
"0.63649416",
"0.63634825",
"0.63634825",
"0.6335922",
"0.6288833",
"0.62854403",
"0.6270934",
"0.6270107",
"0.6258607",
"0.6248719"
] | 0.7313202 | 0 |
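Editor's note: a hedged illustration of the render_from_task idea above using plain jinja2 rather than FlexGet's own render()/extra_vars() helpers; the SimpleNamespace task is a hypothetical stand-in for a real Task object.

from types import SimpleNamespace
import jinja2

task = SimpleNamespace(name="download-episodes")  # hypothetical stand-in for a Task
template = jinja2.Template("running {{ task_name }} ({{ task.name }})")
print(template.render(task=task, task_name=task.name))
# -> running download-episodes (download-episodes)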
Evaluate a Jinja `expression` using a given `context`, with support for `LazyDict`s (`Entry`s). | def evaluate_expression(expression: str, context: Mapping) -> Any:
if environment is not None:
compiled_expr = environment.compile_expression(expression)
# If we have a LazyDict, grab the underlying store. Our environment supports LazyFields directly
if isinstance(context, LazyDict):
context = context.store
return compiled_expr(**{**context, **extra_vars()})
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_jinja_context(include_dict=None):\n context = {\n \"app_name\": app_config[\"APP\"][\"app_name\"],\n \"app_version\": app_config[\"APP\"][\"app_version\"],\n \"app_description\": app_config[\"APP\"][\"app_description\"],\n \"app_author\": app_config[\"APP\"][\"app_author\"],\n \"app_author_website\": app_config[\"APP\"][\"app_author_website\"],\n \"is_user_logged_in\": True if get_user_id() else False\n }\n if include_dict:\n context = {**context, **include_dict} # merge dictionaries\n return context",
"def insert_evaluate_variables(text, var_dict):\n if isinstance(text, list):\n text.insert(0, '{% load quest_render_tags %}')\n rndr_string = '\\n'.join(text)\n else:\n rndr_string = r'{% load quest_render_tags %} ' + text\n\n var_dict_rendered = {}\n for key, values in var_dict.iteritems():\n var_dict_rendered[key] = values[1]\n\n tmplte = Template(rndr_string)\n cntxt = Context(var_dict_rendered)\n return tmplte.render(cntxt)",
"def render_template(\n env: jinja2.Environment,\n template: str,\n context: dict\n) -> str:\n return env.from_string(template).render(context)",
"def render_i18n_expression(econtext, name):\n name = name.strip() # pylint: disable=redefined-argument-from-local\n if '.' in name:\n names = name.split('.')\n context = econtext.get(names[0])\n for name in names[1:-1]: # pylint: disable=redefined-argument-from-local\n context = getattr(context, name)\n attr = names[-1]\n else:\n context = econtext.get('context')\n attr = name\n request = econtext.get('request')\n return II18n(context).query_attribute(attr, request=request)",
"def _render(template, context, app):\n context.update(app.jinja_env.globals)\n app.update_template_context(context)\n try:\n rv = template.render(**context)\n template_rendered.send(app, template=template, context=context)\n return rv\n except:\n translate = app.config.get(\"MAKO_TRANSLATE_EXCEPTIONS\")\n if translate:\n translated = TemplateError(template)\n raise translated\n else:\n raise",
"def context(template):\n\n return {\n v.key: v.read()\n for v in [Variable(name) for name in extract_variables(template)]\n }",
"def jinja():\n template_path = '/tmp/pycheat-jinja-template.html'\n output_path = '/tmp/pycheat-jinja-output.html'\n\n # create the testing template\n with open(template_path, 'w') as f:\n f.write(\"\"\"Testing template with {{athlet_type}}:\n{% for a in athlets %}\n{{a.name}} is from {{a['country']}}\n{% endfor %}\"\"\")\n\n # testing dict with variables\n context = {\n 'athlet_type': 'tennis players',\n 'athlets': [\n {'name': 'Roger Federer', 'country': 'SUI'},\n {'name': 'Rafael Nadal', 'country': 'ESP'},\n {'name': 'Novak Djokovic', 'country': 'SRB'}\n ]\n }\n\n import jinja2\n import os\n # render the template\n template_dir, template_filename = os.path.split(template_path)\n loader = jinja2.FileSystemLoader(template_dir)\n\n # whitespace control:\n # http://jinja.pocoo.org/docs/2.9/templates/#whitespace-control\n jinja_env = jinja2.Environment(loader=loader, trim_blocks=True,\n lstrip_blocks=True)\n template = jinja_env.get_template(template_filename)\n rendered_output = template.render(context)\n # print and write the result to the file\n print rendered_output\n with open(output_path, 'w') as f:\n f.write(rendered_output.encode('utf-8'))",
"def eval_jinja2_exprs(self, jinja2_vars):\n exprs = self.jinja2_exprs\n # Loop until we stop finding undefined variables\n while True:\n tmpl = _build_jinja2_expr_tmp(exprs)\n if len(tmpl.strip()) == 0:\n return {}\n\n # look for undefined things\n env = jinja2.Environment()\n ast = env.parse(tmpl)\n undefined = jinja2.meta.find_undeclared_variables(ast)\n undefined = {u for u in undefined if u not in jinja2_vars}\n\n # if we found them, remove the offending statements\n if len(undefined) > 0:\n new_exprs = {}\n for var, expr in exprs.items():\n if not any(u in expr for u in undefined):\n new_exprs[var] = expr\n exprs = new_exprs\n else:\n break\n\n # rebuild the template and render\n tmpl = _build_jinja2_expr_tmp(exprs)\n if len(tmpl.strip()) == 0:\n return {}\n\n # get a new parser since it carries state about the jinja2 munging\n # that we don't want to ruin\n _parser = _get_yaml_parser()\n return _parser.load(jinja2.Template(tmpl).render(**jinja2_vars))",
"def render_template(template, **template_values):\n # retrieve the html template\n t = jinja_environment.get_template(template)\n\n # render the html template with th given dictionary\n return t.render(template_values)",
"def render(template, context):\n if not template:\n return None\n\n text = \"\"\n filename = \"templates/\" + template\n with open(filename) as f:\n text = f.read()\n # First compile template into extended base template.\n is_child = re.search(extend_search, text.splitlines()[0])\n if is_child:\n base_filename = \"templates/\" + is_child.group(2)\n with open(base_filename) as base:\n text = extend_template(base.read(), text)\n # Run conditional checks\n has_conditions = re.search(if_search, text)\n if has_conditions:\n text = render_conditionals(text, context)\n # Replace any variables passed to the render function.\n for replace in context.replaces.keys():\n arg_search = re.compile(\"{{ \" + replace + \" }}\")\n text = re.sub(arg_search, context.replaces[replace], text)\n return text",
"def context_to_dict(context):\n \n if not isinstance(context, template.Context):\n return context\n \n dict_out = {}\n \n # This helps us handle the order of dictionaries in the context. By\n # default, the most recent (and therefore most significant/important)\n # sub-dictionaries are at the front of the list. This means that variables\n # defined later on need to be processed last, hence the use of the\n # `reversed()` built-in.\n for sub_dict in reversed(context.dicts):\n dict_out.update(sub_dict)\n return dict_out",
"def render(template_name, context, template_dir=DEFAULT_TEMPLATES_DIR,\n jinja_env_args=None):\n env_kwargs = jinja_env_args or {}\n templates = jinja2.Environment(\n loader=jinja2.FileSystemLoader(template_dir), **env_kwargs)\n template = templates.get_template(template_name)\n return template.render(context)",
"def render(self, source: str, context: dict):\n\n # Creating new class which will be used as a template context.\n context_class = type('RenderContext', (Context,), {})\n\n # All callable objects in context.\n helpers = {}\n\n for key, value in context.items():\n\n # Install each callable object as a context class property.\n if callable(value):\n setattr(context_class, 'helper_' + key, Helper(value))\n helpers[key] = value\n\n # Helper function is run only when context dict has it name as a key.\n # Use template context class to create dict.\n render_context = context_class(context)\n\n result = pystache.render(source, render_context)\n\n return result",
"def render(self, _template, context=None):\n variables = {}\n if context:\n variables.update(context)\n rv = self.jinja2.render_template(_template, **variables)\n self.response.write(rv)",
"def render_in_context(context, template, local_context=None):\n\n if context is None:\n context = Context()\n\n if not hasattr(template, \"render\"): # Quacks like a template?\n try:\n engine = context.template.engine\n except AttributeError:\n engine = Engine.get_default()\n\n if isinstance(template, (list, tuple)):\n template = engine.select_template(template)\n else:\n template = engine.get_template(template)\n\n with context.push(local_context):\n return template.render(context)",
"def expand_template(self, template, context):\n r = Template(template).render(Context(context))\n logging.debug(\"LDAP: Expanding template: '%s' -> '%s'\" % (template, r))\n return r",
"def set_jinja_before_request():\n resource_provider.set_jinja_globals()",
"def evaluate(value):\n return render_template('index.html', value=value)",
"def __init__(self, text, *contexts):\n self.context = {}\n for context in contexts:\n self.context.update(context)\n\n self.all_vars = set()\n self.loop_vars = set()\n\n # We construct a function in source form, then compile it and hold onto\n # it, and execute it to render the template.\n code = CodeBuilder()\n\n code.add_line(\"def render_function(context, do_dots):\")\n code.indent()\n vars_code = code.add_section()\n code.add_line(\"result = []\")\n code.add_line(\"append_result = result.append\")\n code.add_line(\"extend_result = result.extend\")\n code.add_line(\"to_str = str\")\n\n buffered = []\n\n def flush_output():\n \"\"\"Force `buffered` to the code builder.\"\"\"\n if len(buffered) == 1:\n code.add_line(\"append_result(%s)\" % buffered[0])\n elif len(buffered) > 1:\n code.add_line(\"extend_result([%s])\" % \", \".join(buffered))\n del buffered[:]\n\n ops_stack = []\n\n # Split the text to form a list of tokens.\n tokens = re.split(r\"(?s)({{.*?}}|{%.*?%}|{#.*?#})\", text)\n\n squash = in_joined = False\n\n for token in tokens:\n if token.startswith('{'):\n start, end = 2, -2\n squash = (token[-3] == '-')\n if squash:\n end = -3\n\n if token.startswith('{#'):\n # Comment: ignore it and move on.\n continue\n elif token.startswith('{{'):\n # An expression to evaluate.\n expr = self._expr_code(token[start:end].strip())\n buffered.append(\"to_str(%s)\" % expr)\n else:\n # token.startswith('{%')\n # Action tag: split into words and parse further.\n flush_output()\n\n words = token[start:end].strip().split()\n if words[0] == 'if':\n # An if statement: evaluate the expression to determine if.\n if len(words) != 2:\n self._syntax_error(\"Don't understand if\", token)\n ops_stack.append('if')\n code.add_line(\"if %s:\" % self._expr_code(words[1]))\n code.indent()\n elif words[0] == 'for':\n # A loop: iterate over expression result.\n if len(words) != 4 or words[2] != 'in':\n self._syntax_error(\"Don't understand for\", token)\n ops_stack.append('for')\n self._variable(words[1], self.loop_vars)\n code.add_line(\n \"for c_{} in {}:\".format(\n words[1],\n self._expr_code(words[3])\n )\n )\n code.indent()\n elif words[0] == 'joined':\n ops_stack.append('joined')\n in_joined = True\n elif words[0].startswith('end'):\n # Endsomething. Pop the ops stack.\n if len(words) != 1:\n self._syntax_error(\"Don't understand end\", token)\n end_what = words[0][3:]\n if not ops_stack:\n self._syntax_error(\"Too many ends\", token)\n start_what = ops_stack.pop()\n if start_what != end_what:\n self._syntax_error(\"Mismatched end tag\", end_what)\n if end_what == 'joined':\n in_joined = False\n else:\n code.dedent()\n else:\n self._syntax_error(\"Don't understand tag\", words[0])\n else:\n # Literal content. If it isn't empty, output it.\n if in_joined:\n token = re.sub(r\"\\s*\\n\\s*\", \"\", token.strip())\n elif squash:\n token = token.lstrip()\n if token:\n buffered.append(repr(token))\n\n if ops_stack:\n self._syntax_error(\"Unmatched action tag\", ops_stack[-1])\n\n flush_output()\n\n for var_name in self.all_vars - self.loop_vars:\n vars_code.add_line(f\"c_{var_name} = context[{var_name!r}]\")\n\n code.add_line('return \"\".join(result)')\n code.dedent()\n self._render_function = code.get_globals()['render_function']",
"def _render_context(self, template, block, **context):\n return u''.join(block(template.new_context(context)))",
"def render(self, template, context):\n try:\n template = self.environment.from_string(template)\n except TemplateSyntaxError as e:\n raise TemplateError(e)\n try:\n return template.render(**context)\n except (UndefinedError, TypeError) as e:\n raise TemplateError(e)",
"def render_template(template, **kwargs):\n\n template_loader = jinja2.FileSystemLoader(searchpath=\"templates/\")\n template_env = jinja2.Environment(loader=template_loader)\n template = template_env.get_template(template)\n return template.render(**kwargs)",
"def _exec(self, bound_names, args, kw):\n if not kw.has_key('args'):\n kw['args'] = args\n bound_names['options'] = kw\n\n try:\n response = self.REQUEST.RESPONSE\n if not response.headers.has_key('content-type'):\n response.setHeader('content-type', self.content_type)\n except AttributeError:\n pass\n \n security = getSecurityManager()\n bound_names['user'] = security.getUser()\n \n # Retrieve the value from the cache.\n keyset = None\n if self.ZCacheable_isCachingEnabled():\n # Prepare a cache key.\n keyset = {'here': self._getContext(),\n 'bound_names': bound_names}\n result = self.ZCacheable_get(keywords=keyset)\n if result is not None:\n # Got a cached value.\n return result\n\n # Execute the template in a new security context.\n security.addContext(self)\n try:\n result = self._render_as(extra_context=bound_names,\n RESPONSE=response)\n if keyset is not None:\n # Store the result in the cache.\n self.ZCacheable_set(result, keywords=keyset)\n return result\n finally:\n security.removeContext(self)",
"def render( context, *args, **kwargs ):",
"def uses_template(template):\n def wrapper(func):\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n template_path = template\n ctx = func(*args, **kwargs)\n if type(ctx) is dict:\n try:\n return render_template(template_path,\n inators=ctx['inators'])\n except KeyError:\n try:\n return render_template(template_path,\n inator=ctx['inator'])\n except KeyError:\n return render_template(template_path, inators=ctx)\n else:\n return ctx\n return wrapped\n return wrapper",
"def get_render_fn(self):\n def render(environment, ctxt_data, file_path):\n \"Renders a jinja2 template\"\n logging.debug(\"Rendering with context data %s\", ctxt_data)\n\n template = environment.get_template(file_path)\n return template.render(**ctxt_data)\n return render",
"def evaluate(\n self, user: Optional[User], request: Optional[HttpRequest], **kwargs\n ) -> Any:\n try:\n expression = NATIVE_ENVIRONMENT.from_string(self.expression)\n except TemplateSyntaxError as exc:\n raise PropertyMappingExpressionException from exc\n try:\n response = expression.render(user=user, request=request, **kwargs)\n if isinstance(response, Undefined):\n raise PropertyMappingExpressionException(\"Response was 'Undefined'\")\n return response\n except UndefinedError as exc:\n raise PropertyMappingExpressionException from exc",
"def _get_datastore_value_for_expression(self, key, value, config_schema_item=None):\n from st2common.services.config import deserialize_key_value\n\n config_schema_item = config_schema_item or {}\n secret = config_schema_item.get('secret', False)\n\n try:\n value = render_template_with_system_and_user_context(value=value,\n user=self.user)\n except Exception as e:\n # Throw a more user-friendly exception on failed render\n exc_class = type(e)\n original_msg = str(e)\n msg = ('Failed to render dynamic configuration value for key \"%s\" with value '\n '\"%s\" for pack \"%s\" config: %s ' % (key, value, self.pack_name, original_msg))\n raise exc_class(msg)\n\n if value:\n # Deserialize the value\n value = deserialize_key_value(value=value, secret=secret)\n else:\n value = None\n\n return value",
"def get_let_map_eval(self, context, eval, check_missing=False):\n if self._let:\n eval = eval or context.eval\n let_map = {k: eval(v) for k, v in self._let.items()}\n if check_missing:\n for k, v in iteritems(let_map):\n if getattr(v, \"moya_missing\", False):\n raise errors.ElementError(\n \"let:{} must not be missing (it is {!r})\".format(k, v)\n )\n return let_map\n else:\n return {}",
"def render(self, activity, context, typename=None):\n if not isinstance(context, dict):\n raise ContextTypeException('context must be dict. it should not Context or RequestContext')\n template_names = self.get_template_names(activity, typename)\n template = select_template(template_names)\n context = self.prepare_context(activity, context,\n typename=typename)\n return template.render(context)"
] | [
"0.5919327",
"0.5836634",
"0.5666903",
"0.5475967",
"0.5411495",
"0.5406394",
"0.53799814",
"0.5325048",
"0.53121567",
"0.526088",
"0.52421707",
"0.51469576",
"0.5130527",
"0.51130056",
"0.5075381",
"0.50708956",
"0.5069272",
"0.49246582",
"0.4900586",
"0.48931527",
"0.48894367",
"0.48759538",
"0.4875663",
"0.4864211",
"0.48633695",
"0.4852893",
"0.4841217",
"0.48410353",
"0.48119375",
"0.47967005"
] | 0.705329 | 0 |
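Editor's note: an illustration of the compile_expression mechanism used by the evaluate_expression document above, with a plain jinja2 Environment and an ordinary dict instead of FlexGet's LazyDict-aware environment.

import jinja2

env = jinja2.Environment()
compiled_expr = env.compile_expression("quality == 'hd' and size > 700")
context = {"quality": "hd", "size": 1400}
print(compiled_expr(**context))  # -> True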
Print the given settings to the console. | def print_settings(config):
print("----------------------------------------")
print("SETTINGS")
print("----------------------------------------")
for key, value in config:
print("%s=%s" % (key, value))
print("----------------------------------------") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def printSettings():\n print \">>>\\n>>> SettingsTool: global variables:\"\n for variable, value in globals().items():\n if variable.count('__')>1: continue\n print \">>> %-16s = %s\"%(variable,value)\n print \">>>\"",
"def printConfig():\n # Why not log instead? Are we asking user to confirm settings?\n pass # until implemented",
"def print_settings(self, title=None):\n if title:\n print(title)\n print('Contents of imagenode.yaml:')\n pprint.pprint(self.config)\n print()",
"def print_options(self):\n for option in self._options.items():\n print \"{0} = {1}\".format(option[0], option[1])",
"def print_config(self):\n for key in self._config.keys():\n print('[{0}] = {1}'.format(key, self._config[key]))",
"def print_config(self):\n for key in CONFIG_KEYS:\n print('--- ' + key + ' ---')\n print(CONFIG_KEYS[key])",
"def showpreferences():\n print()\n for property,value in middleware.preference.__dict__.items():\n if property.startswith('_') and not callable(property): continue\n print('\\t{0}: {1}'.format(property, value))\n print()",
"def printConf(self):\n print \"\"\n for pname, pvalue in self.neededParams.items():\n print pname, pvalue\n for pname, pvalue in self.optionalParams.items():\n print pname, pvalue",
"def printSettings(self, value=None):\n\t\tout = []\n\t\tif value:\n\t\t\tfor item in self.listMatchingSettings(value):\n\t\t\t\tout.append(str(item[0]) + ' : ' + str(item[1]) + '\\nDesc: ' + str(item[2]))\n\t\telse:\n\t\t\tfor key in sorted(self.settings.iterkeys()):\n\t\t\t\tout.append(str(key) + ' : ' + str(self.settings[key][0]) + '\\nDesc: ' + str(self.settings[key][1]))\n\t\treturn out",
"def show_config(config, args):\n pprint.pprint(config)",
"def printMe(self):\n tempDict = self.whoAreYou()\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))\n tempDict = self.getInitParams()\n self.raiseADebug(' Initialization Parameters:')\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))\n tempDict = self.myCurrentSetting()\n self.raiseADebug(' Current Setting:')\n for key in tempDict.keys():\n self.raiseADebug(' {0:15}: {1}'.format(key,str(tempDict[key])))",
"def print_config(self, options=()):\n if len(options) == 0:\n options_to_print = sorted(self._config.keys())\n else:\n options_to_print = options\n\n for key in options_to_print:\n if key in self._config:\n config_value = self._config[key].get_highest_priority()\n actual_value = self._raw_get(key) # for multiple this is a combined value\n print(\n '{key}: {value} - prio: {priority}, source: {source}'.format(\n key=key,\n value=actual_value,\n priority=config_value.priority,\n source=config_value.source))",
"def print_config(_run):\n final_config = _run.config\n config_mods = _run.config_modifications\n print(_format_config(final_config, config_mods))",
"def showconfig():\n print(yaml.dump(CONFIG))",
"def showSettings():\n cq = dz()\n cq.abag()",
"def pprint(self, parameter_s=''):\n ptformatter = self.shell.display_formatter.formatters['text/plain']\n ptformatter.pprint = bool(1 - ptformatter.pprint)\n print('Pretty printing has been turned',\n ['OFF','ON'][ptformatter.pprint])",
"def print_cfg(self, out=stdout):\n print(self.cmaboss_sim.str_cfg(), file=out)",
"def print_config_option(args, run):\n print_config(run)\n print(\"-\" * 79)",
"def print(self):\n print(self.pretty_str())",
"def console(self):\n fricas_console()",
"def printSetup(self):\n if not self.rank:\n logging.info('Setting up printing options')\n\n freq, args = self.pargs['print'][0], self.pargs['print'][1:]\n\n self.lmp.command('thermo_style custom' + (' {}' * len(args)).format(*args))\n self.lmp.command('thermo {}'.format(freq))\n self.lmp.command('thermo_modify norm no lost ignore')",
"def test_print_config(self) -> None:\n out = io.StringIO()\n with contextlib.redirect_stdout(out):\n self.config.print()\n self.assertEqual(\n out.getvalue().rstrip(),\n \"{}: {}\\n{}\".format(\"q2\", \"abcdefghij\", \"^\".rjust(7)),\n )",
"def print_out():\n pass",
"def print_settings(args, clear=True):\n s = \" # GPSeq analysis of FISH signals v%s\\n\" % version\n\n s += \"\"\"\n --- INPUT =======\n\n FISH table : %s\n Image folder : %s\n Output folder : %s\n\n Mask folder : %s\n Mask prefix : '%s'\n\n --- ANALYSIS ====\n\n Dilation : %d\n Aspect (Z Y X) : %s\n Distance type : %s\nSkipped channels : %s\n Pole fraction : %.3f\n #bins : %d\n\n --- FLAGS =======\n\n 2D masks : '%s'\n Labeled : %r\n Compressed : %r\n Dilate over Z : %r\nUse dilation only\nfor dot assignment : %r\n\n Plot all : %r\nPlot compartments : %r\n\n --- ADVANCED ====\n\n Input regexp : %s\n Delim : '%s'\n Threads : %d\n Debug mode : %r\n \"\"\" % (\n args.dotCoords,\n args.imdir,\n args.outdir,\n args.mask_folder,\n args.mask_prefix,\n args.dilate,\n str(args.aspect),\n args.dist_type,\n str(args.skip_channels),\n args.pole,\n args.nbins,\n args.manual_2d_masks,\n args.labeled,\n args.compressed,\n args.doZdilation,\n args.dilate_for_assignment_only,\n not args.noplot,\n not args.no_compartment_plot,\n args.inreg,\n args.delim,\n args.threads,\n args.DEBUG_MODE,\n )\n\n if clear:\n print(\"\\033[H\\033[J%s\" % s)\n else:\n print(s)\n return s",
"def print_configuration():\n configlog.info(\"-\" * 50)\n configlog.info(\"Initializing with the following configuration\")\n configlog.info(\"Check constants.py to change any of the following\")\n configlog.info(\"-\" * 50)\n configlog.info(\"COMPANY_NAME: {}\".format(COMPANY_NAME))\n configlog.info(\"ACTIVITY_TYPE_FILTER: {}\".format(ACTIVITY_TYPE_FILTER))\n configlog.info(\"APPLY_ACTIVITY_FILTER: {}\".format(APPLY_ACTIVITY_FILTER))\n configlog.info(\"-\" * 50)\n configlog.info(\"Assuming an input dataset with the following features\")\n configlog.info(\"-\" * 50)\n configlog.info(\"BUDGET_COLUMN_NAME: {}\".format(BUDGET_COLUMN_NAME))\n configlog.info(\"COMPANY_COLUMN_NAME: {}\".format(COMPANY_COLUMN_NAME))\n configlog.info(\"ACTIVITY_COLUMN_NAME: {}\".format(ACTIVITY_COLUMN_NAME))\n configlog.info(\"COUNTRY_COLUMN_NAME: {}\".format(COUNTRY_COLUMN_NAME))\n configlog.info(\"-\" * 50)\n configlog.info(\"Fallback data sources\")\n configlog.info(\"-\" * 50)\n configlog.info(\"DEFAULT_URL: {}\".format(DEFAULT_URL))\n configlog.info(\"DEFAULT_LOCAL_DATA_PATH: {}\".format(DEFAULT_LOCAL_DATA_PATH))\n configlog.info(\"-\" * 50)",
"def pprint(self, level: int, *values):\n if abs(self.max_verbosity - level + 1) < self.verbosity:\n print(*values)",
"def p(self):\n self.printstdout = True",
"def view(self,\n print_global_settings=True,\n print_general_settings=True,\n print_tmp_vals=False,\n print_results=True,\n **kws\n ):\n\n print(self.name)\n\n if print_global_settings:\n print(\"Global settings:\")\n pprint.pprint(self.global_settings)\n print()\n\n if print_general_settings:\n print(\"General settings:\")\n pprint.pprint(self.settings[self.name]['General'])\n print()\n\n for i, x in enumerate(self.routine_template):\n print(f\"Step {i}, {x[0].__name__} ({x[1]})\")\n print(\"Settings:\")\n pprint.pprint(x[2], indent=4)\n\n if print_tmp_vals:\n try:\n print(\"Temporary values:\")\n pprint.pprint(x[3], indent=4)\n except IndexError:\n pass\n print()\n\n if print_results:\n print_step_results(self)",
"def showInfo(self):\n print(\n f\"Preferences: {stripnl(MessageToJson(self.radioConfig.preferences))}\\n\")\n self.showChannels()",
"def settings_show(ctx):\n path = ctx.obj['load_path']\n if not path:\n _raise_settings_not_found()\n with open(path) as handle:\n click.echo(json.dumps(json.load(handle), indent=2, sort_keys=True))"
] | [
"0.7606205",
"0.7408411",
"0.72713363",
"0.7174278",
"0.7161238",
"0.6935702",
"0.68697244",
"0.68273026",
"0.66850144",
"0.6652348",
"0.6641773",
"0.66384244",
"0.6636062",
"0.6621884",
"0.66079605",
"0.6571476",
"0.6557688",
"0.6518967",
"0.64918625",
"0.64774805",
"0.6420875",
"0.64037436",
"0.6401229",
"0.63718665",
"0.63681364",
"0.6364425",
"0.63224477",
"0.63195634",
"0.6316511",
"0.63080895"
] | 0.786409 | 0 |
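Editor's note: a minimal usage sketch of the print_settings document above; config must yield (key, value) pairs, e.g. a list of tuples or dict.items().

def print_settings(config):
    print("----------------------------------------")
    print("SETTINGS")
    print("----------------------------------------")
    for key, value in config:
        print("%s=%s" % (key, value))
    print("----------------------------------------")

print_settings([("batch_size", 32), ("learning_rate", 0.001)])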
Sets pi(a|s) = 1 and pi(a'|s) = 0 for a' != a. | def update_pi(self, s, a):
for a_p in self.env.moves:
self.pi[(a_p, s)] = (a == a_p) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def perm_af_parity(pi):\n n = len(pi)\n a = [0] * n\n c = 0\n for j in xrange(n):\n if a[j] == 0:\n c += 1\n a[j] = 1\n i = j\n while pi[i] != j:\n i = pi[i]\n a[i] = 1\n return (n - c) % 2",
"def _make_zero(p):\n\n return [pi == 0 for pi in p]",
"def _confined_angle_pi(a):\n while a<-_math.pi:\n a+=2*_math.pi\n while a>=_math.pi:\n a-=2*_math.pi\n return a",
"def compute_ppmi(a):\n np.fill_diagonal(a, 0)\n a = my_scale_sim_mat(a)\n (p, q) = np.shape(a)\n col = np.sum(a, axis=0)\n col[col == 0] = 1\n ppmi = np.log((float(p)*a)/col[None, :])\n idx_nan = np.isnan(ppmi)\n ppmi[idx_nan] = 0\n ppmi[ppmi < 0] = 0\n return ppmi",
"def set_pi(self, pi: np.ndarray) -> None:\n assert pi.ndim == 3, \"expected three-dimensional policy\"\n assert np.allclose(pi.sum(axis=2), 1), \"policy not normalized\"\n assert np.all(pi >= 0), \"policy has negative probabilities\"\n self.pi = pi",
"def pi():\r\n getcontext().prec += 2\r\n agm0 = agm1 = pow2 = dec.Decimal(1)\r\n denm = dec.Decimal(0.25)\r\n agm2 = dec.Decimal(0.5).sqrt()\r\n # Uses the elliptic integral relation to the agm to calculate pi.\r\n while True:\r\n agm1 = (agm1 + agm2) / 2\r\n agm2 = (agm0 * agm2).sqrt()\r\n diff = agm1 - agm0\r\n agm0 = agm1\r\n denm -= pow2 * diff * diff\r\n if diff == 0:\r\n ave = (agm1 + agm2) / 2\r\n arc = ave * ave / denm\r\n getcontext().prec -= 2\r\n return +arc\r\n pow2 *= 2",
"def parity(it):\n \n return sum(it)%2",
"def parity(n):\n if n%2==0:\n p=1\n else:\n p=-1\n return p",
"def wallacepi(precision=1e-03):\n my_pi = 1\n product = 1\n i = 1\n\n while abs(math_pi - my_pi) > precision:\n product *= (4*i**2) / ((4*i**2)-1)\n my_pi = product * 2\n i += 1\n \n return [my_pi,precision,i]",
"def set_pi(self, val):\n self.__pi = val",
"def map_zero_one(x, a, b):\n assert b > a\n s = 1./(b - a)\n t = a/(a-b)\n y = s*x + t\n y[y>1] = 1\n y[y<0] = 0\n return y",
"def inverse_modulo_p(a, p):\n prime = p\n \n while a < 0:\n a += prime\n \n y1 = 1\n y2 = 0\n \n while a != 1:\n q = (p // a) % prime\n # use of integer division // speeded algorithm up by huge factor\n \n # save temporary values\n tmp_a = a\n tmp_y2 = y2\n # compute all these simultaneously\n a = (p - (q*a)) % prime\n p = tmp_a\n y2 = y1\n y1 = (tmp_y2 - (q*y1)) % prime\n \n return y1 % prime",
"def psi(a):",
"def solve_ok(number: int) -> int:\n return no_ones(number) % 2",
"def pi(self,sigma,y0=0.5):\n y = y0\n for k in range(len(sigma)-1,-1,-1):\n if sigma[k]==0:\n y = self.if0(y)\n else:\n y = self.if1(y)\n return y",
"def setStructureTrueOrFalse(a):\n inputIma = np.copy(a)\n inputImb = np.copy(a)\n ndims = np.ndim(a)\n if ndims == 2:\n inputImb[1][1] = 1\n inputIma[1][1] = 0\n deletableTemp = countObjects(inputImb, inputIma)\n else:\n inputImb[1][1][1] = 1\n inputIma[1][1][1] = 0\n deletableTemp = countObjects(inputImb, inputIma)\n return deletableTemp",
"def psi_inplace(a):",
"def reset(self,):\n \n self.i = 0\n self.pi = 1.0\n self.si = 0.0\n self.pi_min = float(\"inf\")\n self.si_min = float(\"inf\")",
"def _confined_angle_0(a):\n while a < 0:\n a += 2*_math.pi\n while a >= 2*_math.pi:\n a -= 2*_math.pi\n return a",
"def pi(self):\n return self(self._real_field().pi())",
"def parity(p):\n\n f = dict(zip(sorted(p), p))\n seen, neven = set(), 0\n\n for x in p:\n if x in seen:\n continue\n\n c, l = x, 0\n while c not in seen:\n seen.add(c)\n l += 1\n c = f[c]\n\n neven += (l - 1) % 2\n\n return 1 if neven % 2 == 0 else -1",
"def __xor__(p1, p2):\n return not isparallel(p1, p2) and (abs(p1 * p2) < 10*_eps )",
"def is_zero(self, a):\n return not a",
"def reprime(self):\n self.__primed = 1",
"def estimate_pi():\n total = 0\n k = 0\n factor = 2 * sqrt(2) / 9801\n while True:\n num = factorial(4 * k) * (1103 + 26390 * k)\n den = factorial(k) ** 4 * 396 ** (4 * k)\n term = factor * num / den\n total += term\n\n if abs(term) < 1e-15:\n break\n k += 1\n\n return 1 / total",
"def pi_chudnovsky(one):\n k = 1\n a_k = one\n a_sum = one\n b_sum = 0\n C = 640320\n C3_OVER_24 = C**3 // 24\n while 1:\n a_k *= -(6*k-5)*(2*k-1)*(6*k-1)\n a_k //= k*k*k*C3_OVER_24\n a_sum += a_k\n b_sum += k * a_k\n k += 1\n if a_k == 0:\n break\n total = 13591409*a_sum + 545140134*b_sum\n pi = (426880*sqrt(10005*one, one)*one) // total\n return pi",
"def unwrap_phases(a):\r\n pi = np.pi\r\n\r\n diffs = np.diff(a)\r\n mod_diffs = np.mod(diffs + pi, 2 * pi) - pi\r\n neg_pi_idx = np.where(mod_diffs == -1 * np.pi)\r\n pos_idx = np.where(diffs > 0)\r\n this_idx = np.intersect1d(neg_pi_idx[0], pos_idx[0])\r\n mod_diffs[this_idx] = pi\r\n correction = mod_diffs - diffs\r\n correction[np.where(np.abs(diffs) < pi)] = 0\r\n a[1:] += np.cumsum(correction)\r\n\r\n return a",
"def sturm(P):\n inf = float('inf')\n assert P.isreal()\n A = P\n B = A.prime()\n l1 = [A(-inf)]\n l2 = [A(inf)]\n while B:\n l1.append(B(-inf))\n l2.append(B(inf))\n B, A = -A % B, B\n return Polynomial.sign_changes(l1) - Polynomial.sign_changes(l2)",
"def get_pi_acc(Pi_est, Pi_true, method='random_walk', **kwargs):\n A_est = get_adjmat_bp(Pi_est > 0)\n A_true = get_adjmat_bp(Pi_true > 0)\n\n G_est = nx.from_numpy_array(A_est)\n G_true = nx.from_numpy_array(A_true)\n\n sim = graph_similarity(G_est, G_true, method=method, **kwargs)\n return sim",
"def fn(i):\n if i == 2*n-1 or ans[i] and fn(i+1): return True \n for x in reversed(range(1, n+1)): \n if x not in ans: \n ii = x if x > 1 else 0 \n if i+ii < 2*n-1 and ans[i] == ans[i+ii] == 0: \n ans[i] = ans[i+ii] = x\n if fn(i+1): return True \n ans[i] = ans[i+ii] = 0"
] | [
"0.64008385",
"0.60723084",
"0.5754091",
"0.54634887",
"0.54298794",
"0.5418518",
"0.5401177",
"0.5367925",
"0.53521436",
"0.5345882",
"0.52901685",
"0.52851814",
"0.5267636",
"0.52551055",
"0.52317315",
"0.5228241",
"0.5217047",
"0.5203342",
"0.5200641",
"0.51997817",
"0.51698124",
"0.510693",
"0.5068873",
"0.5062923",
"0.50594485",
"0.5036163",
"0.503449",
"0.5018657",
"0.500764",
"0.50037426"
] | 0.6752598 | 0 |
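Editor's note: a self-contained sketch of the update_pi document above; GreedyPolicy is a hypothetical minimal host class providing the env.moves and pi attributes the method expects.

from types import SimpleNamespace

class GreedyPolicy:
    def __init__(self, moves):
        self.env = SimpleNamespace(moves=moves)  # hypothetical stand-in environment
        self.pi = {}

    def update_pi(self, s, a):
        # pi(a|s) = 1 for the chosen action, 0 for every other action
        for a_p in self.env.moves:
            self.pi[(a_p, s)] = (a == a_p)

policy = GreedyPolicy(moves=["up", "down", "left", "right"])
policy.update_pi(s=(0, 0), a="up")
print(policy.pi[("up", (0, 0))], policy.pi[("down", (0, 0))])  # True False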
Returns an array of state estimates at the steps in `step_list`, for MSE computation. | def estimate_state(self, step_list, start_state=None, seed=0):
self.seed(seed)
self.importance_sampling(max(step_list), start_state=start_state,
step_list=step_list)
estimates_arr = np.array(self.estimates)
self.estimates = []
return estimates_arr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def modelpartition_list_expectation_step(vislist, evis_all_list, modelpartition_list, **kwargs):\n\n def make_e(vis, modelpartition, evis_all):\n # Return the estep for a given skymodel\n evis = copy_visibility(vis)\n tvis = copy_visibility(vis, zero=True)\n tvis = predict_skymodel_visibility(tvis, modelpartition[0])\n tvis = apply_gaintable(tvis, modelpartition[1])\n # E step is the data model for a window plus the difference between the observed data_models\n # and the summed data models or, put another way, its the observed data minus the\n # summed visibility for all other windows\n evis.data['vis'][...] = tvis.data['vis'][...] + vis.data['vis'][...] - evis_all.data['vis'][...]\n return evis\n\n return [make_e(vislist, csm, evis_all_list) for csm in modelpartition_list]",
"def _calc_msve(self):\n v = []\n for state in self._env.state_iterator():\n feature_vector = self._features.vector(state)\n v.append(utils.state_value(feature_vector, self.theta))\n\n self.msve.append(utils.rmse(v, self._true_values))",
"def fit_list(tHMMobj_list, tolerance=1e-9, max_iter=1000):\n\n # Step 0: initialize with random assignments and do an M step\n # when there are no fixed emissions, we need to randomize the start\n init_all_gammas = [[sp.multinomial.rvs(n=1, p=[1. / tHMMobj.num_states] * tHMMobj.num_states, size=len(lineage))\n for lineage in tHMMobj.X] for tHMMobj in tHMMobj_list]\n\n if len(tHMMobj_list) > 1: # it means we are fitting several concentrations at once.\n do_M_E_step_atonce(tHMMobj_list, init_all_gammas)\n else: # means we are fitting one condition at a time.\n do_M_E_step(tHMMobj_list[0], init_all_gammas[0])\n\n # Step 1: first E step\n MSD_list, NF_list, betas_list, gammas_list = map(list, zip(*[do_E_step(tHMM) for tHMM in tHMMobj_list]))\n old_LL = np.sum([np.sum(calculate_log_likelihood(NF)) for NF in NF_list])\n\n # first stopping condition check\n for _ in range(max_iter):\n do_M_step(tHMMobj_list, MSD_list, betas_list, gammas_list)\n MSD_list, NF_list, betas_list, gammas_list = map(list, zip(*[do_E_step(tHMM) for tHMM in tHMMobj_list]))\n new_LL = np.sum([np.sum(calculate_log_likelihood(NF)) for NF in NF_list])\n if new_LL - old_LL < tolerance:\n break\n\n old_LL = new_LL\n\n return MSD_list, NF_list, betas_list, gammas_list, new_LL",
"def errorList(self, ylist, predictedlist):\n\n e = [0 for i in range(self.size)]\n for i in range(self.size):\n e[i] = round(ylist[i] - predictedlist[i], 3)\n \n errors = Vector(self.size)\n errors.changeVals(e)\n return errors",
"def process_exp_values(exp_data_list):\n exp_data_values = []\n for exp_data in exp_data_list:\n exp_data_values.append(process_exp_value(exp_data))\n return exp_data_values",
"def getDensityOfStates(self, Elist):\n\t\treturn _modes.freerotor_densityofstates(Elist, self.frequencies, 1 if self.linear else 0)",
"def _get_estimate_matrices(self):\n params_mapping = {\n \"state_transition\": \"transition_matrices\",\n \"process_noise\": \"transition_covariance\",\n \"measurement_offsets\": \"observation_offsets\",\n \"transition_offsets\": \"transition_offsets\",\n \"measurement_noise\": \"observation_covariance\",\n \"measurement_function\": \"observation_matrices\",\n \"initial_state\": \"initial_state_mean\",\n \"initial_state_covariance\": \"initial_state_covariance\",\n }\n valid_ems = _validate_estimate_matrices(\n input_ems=self.estimate_matrices, all_ems=list(params_mapping.keys())\n )\n\n em_vars = [params_mapping[em_var] for em_var in valid_ems]\n return em_vars",
"def getDensityOfStates(self, Elist):\n\t\tpass",
"def train(self, epochs):\n mse_history = []\n for iteration in range(epochs):\n error = []\n for user_id in range(self.latent_user_preferences.shape[0]):\n for item_id in range(self.latent_item_features.shape[0]):\n rating = self.ratings[user_id, item_id]\n if not np.isnan(rating):\n predicted_rating = self.predict_rating(user_id, item_id)\n err = predicted_rating - rating\n error.append(err)\n self.sgd_optimizer(user_id, item_id, err)\n mse = (np.array(error) ** 2).mean() \n if (iteration % 2) == 0:\n print('Iteration %d/%d:\\tMSE=%.6f' % (iteration, epochs, mse))\n mse_history.append(mse)\n return mse_history",
"def value_iteration(env):\n Qvalues = torch.zeros(NUM_STATES)\n\n max_iterations = 100\n mse = []\n for _ in range(max_iterations):\n Vtemp = torch.zeros(NUM_STATES)\n # for each state we search for best move\n for st in range(NUM_STATES):\n max_value, _ = next_step_evaluation(env, st, Qvalues)\n #Qvalues[st] = max_value.item()\n Vtemp[st] = max_value.item()\n mse.append(((Qvalues-Vtemp)**2).mean())\n Qvalues = Vtemp\n\n return Qvalues, mse",
"def evaluate(model, num_steps=1000):\n\n episode_rewards = [0.0]\n obs = env.reset()\n for i in range(num_steps):\n # _states are only useful when using LSTM policies\n action, _states = model.predict(obs)\n # here, action, rewards and dones are arrays\n # because we are using vectorized env\n obs, rewards, dones, info = env.step(action)\n\n # Stats\n episode_rewards[-1] += rewards[0]\n if dones[0]:\n obs = env.reset()\n episode_rewards.append(0.0)\n\n # Compute mean reward for the last 100 episodes\n mean_100ep_reward = round(np.mean(episode_rewards[-100:]), 1)\n print(\"Mean reward:\", mean_100ep_reward, \"Num episodes:\", len(episode_rewards))\n\n return mean_100ep_reward",
"def train_prediction(self, episodes=100, batch_size=32, steps=100):\n hidden_state = None\n for episode in range(episodes):\n state_data = self.env.dataset(batch_size=batch_size)\n state = self.state_to_observation(state_data)\n\n prediction = self.model(state, hidden_state)\n loss = F.mse_loss(prediction['value'], state_data['reward'])\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()",
"def ordinary_is(self, n_episodes, start_state=None, step_list=None):\n step_list = [] if step_list is None else step_list\n q_steps = []\n for episode in range(n_episodes + 1):\n trajs = self.generate_trajectory(start_state=start_state, det=False)\n G = 0\n W = 1\n for (i, (s, a, r)) in enumerate(trajs[::-1]):\n G = self.gamma * G + r\n self.is_returns[(s, a)].append(W * G)\n W *= self.target[(a, s)] / self.b[(a, s)]\n if W == 0:\n break\n if episode in step_list:\n for a in self.env.moves:\n self.Q[(start_state, a)] = np.sum(self.is_returns[(s, a)]) / episode\n self.estimates.append(self.target_estimate(start_state))",
"def modelpartition_list_maximisation_step_arlexecute_workflow(evislist, skymodel_list, **kwargs):\n\n def make_skymodel(ev, skymodel):\n return (modelpartition_list_fit_skymodel(ev, skymodel, **kwargs),\n modelpartition_list_fit_gaintable(ev, skymodel, **kwargs))\n\n return [arlexecute.execute(make_skymodel)(evislist[i], skymodel_list[i]) for i, _ in enumerate(evislist)]",
"def getDensityOfStates(self, Elist, V=1.0):\n\t\treturn _modes.translation_densityofstates(Elist, self.mass, self.dimension, V)",
"def save_expval_post_meas_values():\n targets = []\n for statevec in save_expval_final_statevecs():\n values = {}\n for label, (mat, qubits) in save_expval_params().items():\n inner_dict = {}\n for j in [\"00\", \"01\", \"10\", \"11\"]:\n # Check if non-zero measurement probability for given\n # measurement outcome for final statevector\n vec = Statevector.from_label(j)\n if not np.isclose(vec.data.dot(statevec.data), 0):\n # If outcome is non-zero compute expectation value\n # with post-selected outcome state\n inner_dict[hex(int(j, 2))] = vec.data.conj().dot(vec.evolve(mat, qubits).data)\n values[label] = inner_dict\n targets.append(values)\n return targets",
"def modelpartition_list_expectation_all(vislist, modelpartition_list):\n \n def predict_and_apply(ovis, modelpartition):\n tvis = copy_visibility(ovis, zero=True)\n tvis = predict_skymodel_visibility(tvis, modelpartition[0])\n tvis = apply_gaintable(tvis, modelpartition[1])\n return tvis\n\n evislist = [predict_and_apply(vislist, csm) for csm in modelpartition_list]\n\n return sum_predict_results(evislist)",
"def eval_list(self, exps: ExperimentList):\n for e in exps:\n self.eval(e)",
"def save_expval_pre_meas_values():\n targets = []\n for statevec in save_expval_final_statevecs():\n values = {}\n for label, (mat, qubits) in save_expval_params().items():\n values[label] = statevec.data.conj().dot(statevec.evolve(mat, qubits).data)\n targets.append(values)\n return targets",
"def set_view_steps(self, steps_list):\n self._data_dict[self.KEY_VIEW_STEPS] = steps_list",
"def set_workflow_steps(self, steps_list):\n self._data_dict[self.KEY_WF_STEPS] = steps_list",
"def evaluate_sce_list(sce_list, strand_state_list, breaks):\n best_mismatch_distance = None\n best_ground_state = None\n best_is_valid = None\n best_sce_list = None\n for w_ground_state, c_ground_state in [(2, 0), (1, 1), (0, 2)]:\n w_state, c_state = w_ground_state, c_ground_state\n mismatch_distance = 0\n valid = True\n for i in range(len(breaks) - 1):\n start = breaks[i]\n end = breaks[i + 1]\n w_actual_state, c_actual_state = strand_state_list[i]\n for sce_pos, w_state_diff, c_state_diff in sce_list:\n if sce_pos == start:\n w_state += w_state_diff\n c_state += c_state_diff\n # Test whether this sequence of SCEs has led to an impossible ground state\n # (at least under the assumption that the cell is diploid).\n if (w_state < 0) or (c_state < 0):\n valid = False\n if (w_actual_state, c_actual_state) != (w_state, c_state):\n mismatch_distance += end - start\n if (best_mismatch_distance is None) or ((valid, -mismatch_distance) > (best_is_valid, -best_mismatch_distance)):\n best_is_valid = valid\n best_mismatch_distance = mismatch_distance\n best_ground_state = (w_ground_state, c_ground_state)\n best_sce_list = copy.copy(sce_list)\n return best_is_valid, best_ground_state, best_mismatch_distance",
"def weighted_is(self, n_episodes, start_state=None, step_list=None):\n step_list = [] if step_list is None else step_list\n q_steps = []\n for episode in range(n_episodes + 1):\n trajs = self.generate_trajectory(start_state=start_state, det=False)\n G = 0\n W = 1\n for (i, (s, a, r)) in enumerate(trajs[::-1]):\n G = self.gamma * G + r\n self.C[(s, a)] += W\n self.Q[(s, a)] += (W / self.C[(s, a)]) * (G - self.Q[(s, a)])\n W *= self.target[(a, s)] / self.b[(a, s)]\n if W == 0:\n break\n if episode in step_list:\n self.estimates.append(self.target_estimate(start_state))",
"def evaluate(env, model, num_env, iter_step):\n episode_rewards = []\n episode_reward = np.zeros((num_env))\n obs = env.reset()\n for _ in tqdm(range(iter_step)):\n action, _states = model.predict(obs)\n obs, reward, done, info = env.step(action)\n episode_reward += reward\n for i in range(num_env):\n if done[i]:\n episode_rewards.append(episode_reward[i])\n episode_reward[i] = 0\n return episode_rewards",
"def kstep(model, data, Ydata, k):\n Yprime_next = []\n Ydata_next = []\n \n kMSE_list = np.zeros(k)\n SE_list = np.zeros(k)\n \n for i in range(k):\n\n Xconv_nxt = np.dot(data, model.transition_matrices, )\n Yprime_nxt = np.dot(model.observation_matrices, Xconv_nxt.T)\n \n Yprime_next.append(Yprime_nxt) \n Ydata_next.append(Ydata[i+1:,:])\n \n Xconv = Xconv_nxt\n DY_nxt = Ydata[i+1:,:] - Yprime_nxt[:,i+1:].T\n kMSE_list[i] = np.mean(DY_nxt**2)\n SE_list[i] = np.sum(DY_nxt**2)\n \n print('MSE %i: ' %i, np.mean(DY_nxt**2))\n \n return kMSE_list, SE_list, Yprime_next, Ydata_next",
"def _mse(self, trace, **inputs):\n phen_mse = []\n for idx in np.random.randint(0, len(trace), 500):\n step = self.trace[idx]\n exp_pred = np.dot(inputs['gwas_gen'],\n step['beta_med'].T).ravel()\n phen_pred = step['alpha'] * exp_pred\n phen_mse = np.mean((inputs['gwas_phen'] - phen_pred) ** 2)\n mean_mse = np.mean(phen_mse)\n return mean_mse",
"def _list_estimators():\n estimators = ['Natural', 'Davis-Peebles', 'Hewett', 'Hamilton', 'Landy-Szalay']\n return estimators",
"def step(self,action_list,verbose=False):\n rewards,dones = np.zeros((self.n_snakes),dtype=np.float), np.zeros((self.n_snakes)).astype('bool')\n for i in range(self.n_snakes):\n eaten_food = self.snakes[i].move(action_list[i],self.food_pos)\n if eaten_food == True : \n self.update_food()\n rewards[i]=1; # Eaten food \n if self.hasCollided() == True:\n self.snakes[i].reset(self.sample_empty_pos())\n rewards[i]=-1;\n dones[i]=True;\n\n self.display(verbose=verbose)\n observation = self.get_observation();\n observation = np.repeat(observation,repeats=self.n_snakes,axis=0)\n states = self.get_state()\n return observation,rewards,dones,states;",
"def save_expval_final_statevecs():\n # Get pre-measurement statevectors\n statevecs = []\n # State |+1>\n statevec = Statevector.from_label(\"+1\")\n statevecs.append(statevec)\n # State |00> + |11>\n statevec = (Statevector.from_label(\"00\") + Statevector.from_label(\"11\")) / np.sqrt(2)\n statevecs.append(statevec)\n # State |10> -i|01>\n statevec = (Statevector.from_label(\"10\") - 1j * Statevector.from_label(\"01\")) / np.sqrt(2)\n statevecs.append(statevec)\n return statevecs",
"def mse(targets: List[float], preds: List[float]) -> float:\n return mean_squared_error(targets, preds)"
] | [
"0.58687896",
"0.53922796",
"0.5387855",
"0.5174597",
"0.5147441",
"0.5109813",
"0.51070565",
"0.5071416",
"0.5022036",
"0.49926993",
"0.49717492",
"0.4959728",
"0.49549443",
"0.4910653",
"0.49028057",
"0.48820263",
"0.4873446",
"0.4863553",
"0.48388338",
"0.4818197",
"0.47978723",
"0.47756663",
"0.4773455",
"0.4761659",
"0.47498256",
"0.47483435",
"0.4747093",
"0.4736802",
"0.47235805",
"0.47233942"
] | 0.6557831 | 0 |
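The step-wise estimates returned by estimate_state above are typically compared against a known true value to produce an MSE learning curve. A minimal, self-contained sketch with synthetic numbers (the true value, step list, and estimate array below are illustrative stand-ins, not values from the record):

import numpy as np

true_value = 0.5                                    # assumed ground-truth state value
step_list = [10, 100, 1000]
# estimates_arr would normally come from estimate_state(); synthetic stand-in here,
# one row per independent run, one column per entry of step_list:
estimates_arr = np.array([[0.42, 0.48, 0.51],
                          [0.61, 0.53, 0.49]])
mse_per_step = ((estimates_arr - true_value) ** 2).mean(axis=0)
print(dict(zip(step_list, mse_per_step)))           # MSE keyed by evaluation step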
input Pixels should be an np.array of 64 integers (valued between 0 to 15) there's no return value, but this should show an image of that digit in an 8x8 pixel square Be sure to run %matplotlib at your ipython prompt before using this! | def show_digit( Pixels ):
from matplotlib import pyplot as plt
print(Pixels.shape)
Patch = Pixels.reshape((8,8))
plt.figure(1, figsize=(4,4))
plt.imshow(Patch, cmap=plt.cm.gray_r, interpolation='nearest') # plt.cm.gray_r # plt.cm.hot
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_digit( Pixels ):\r\n print(Pixels.shape)\r\n Patch = Pixels.reshape((8,8))\r\n plt.figure(1, figsize=(4,4))\r\n plt.imshow(Patch, cmap=plt.cm.gray_r, interpolation='nearest') # cm.gray_r # cm.hot\r\n plt.show()",
"def disImg(data=None,colorbar=False):\n size = np.sqrt(len(data[4:]))\n xmm = data[0]\n ymm = data[1]\n pl.matshow(data[4:].reshape(size,size),fignum=False)\n if colorbar == True:\n pl.colorbar()\n pl.xlim(0,size-1)\n pl.ylim(0,size-1)\n pl.xlabel('Pixels')\n pl.ylabel('Pixels')\n pl.grid(color='yellow')",
"def plot_pixel_array(arr, figsize=(10, 10)):\n arr = arr.squeeze()\n plt.figure(figsize=figsize)\n plt.imshow(arr, cmap=plt.cm.bone)\n plt.show()",
"def display(array):\n plt.figure()\n plt.imshow(array)\n plt.show()",
"def plot_digits():\n digits = load_digits()\n for i in range(25):\n plt.subplot(5, 5, i + 1)\n plt.imshow(digits.images[i], cmap='binary')\n plt.axis('off')\n\n plt.show()",
"def plot_numbers(X, labels, examples, imwidth):\n\n plotting_image = numpy.zeros((imwidth*10,imwidth*examples), dtype='uint8')\n for y in range(10):\n digits = X[:,labels==y].T\n for x, image in enumerate(digits[numpy.random.randint(0,len(digits),(examples,))]):\n plotting_image[y*imwidth:(y+1)*imwidth, x*imwidth:(x+1)*imwidth] = image.reshape(imwidth, imwidth)\n\n mpl.imshow(plotting_image, cmap=mpl.cm.Greys)\n mpl.axis('off')\n mpl.title('Pen-digits Examples')",
"def display_image ( X ):\r\n\t# on teste que le tableau contient bien 256 valeurs\r\n\tif X.size != 256:\r\n\t\traise ValueError ( \"Les images doivent etre de 16x16 pixels\" )\r\n\r\n\t# on cree une image pour imshow: chaque pixel est un tableau a 3 valeurs\r\n\t# (1 pour chaque canal R,G,B). Ces valeurs sont entre 0 et 1\r\n\tY = X / X.max ()\r\n\timg = np.zeros ( ( Y.size, 3 ) )\r\n\tfor i in range ( 3 ):\r\n\t\timg[:,i] = X\r\n\r\n\t# on indique que toutes les images sont de 16x16 pixels\r\n\timg.shape = (16,16,3)\r\n\r\n\t# affichage de l'image\r\n\tplt.imshow( img )\r\n\tplt.show ()",
"def display_image(X):\n\n\tim = X.reshape(28, 28)\n\ttemp = plt.imshow(im)\n\tplt.show()",
"def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)",
"def testImage():\n width = 200\n height = 200\n image = BitMap( width, height )\n \n # create a loop in order to draw some pixels\n \n for col in range(width):\n if col % 10 == 0: print 'col is', col\n for row in range(height):\n if col % 10 == 0 or row % 10 == 0:\n image.plotPoint( col, row ) \n \n # we have now looped through every image pixel\n # next, we write it out to a file\n \n image.saveFile( \"test.bmp\" )\n #changing the col and row number determines how big the grid is for the picture or how zoomed in it is. Changing the and to or just makes the grid go from dotted grid to lines.",
"def muestraPokemon(bytes):\n image = Image.open(io.BytesIO(bytes))\n data = np.array(image)\n plt.imshow(data)\n plt.axis('off')\n plt.show()",
"def imshow(image):\n iio.imshow(dtype.im2uint(image))",
"def plot_generated_images(decoder):\n\n # display a nxn 2D manifold of digits\n n = 15\n digit_size = 28\n\n figure = np.zeros((digit_size * n, digit_size * n))\n # linearly spaced coordinates corresponding to the 2D plot\n # of digit classes in the latent space\n grid_x = np.linspace(-4, 4, n)\n grid_y = np.linspace(-4, 4, n)[::-1]\n\n # start sampling z1 and z2 in the ranges grid_x and grid_y\n for i, yi in enumerate(grid_y):\n for j, xi in enumerate(grid_x):\n z_sample = np.array([[xi, yi]])\n x_decoded = decoder.predict(z_sample)\n digit = x_decoded[0].reshape(digit_size, digit_size)\n slice_i = slice(i * digit_size, (i + 1) * digit_size)\n slice_j = slice(j * digit_size, (j + 1) * digit_size)\n figure[slice_i, slice_j] = digit\n\n # plot the results\n plt.figure(figsize=(6, 5))\n start_range = digit_size // 2\n end_range = n * digit_size + start_range + 1\n pixel_range = np.arange(start_range, end_range, digit_size)\n sample_range_x = np.round(grid_x, 1)\n sample_range_y = np.round(grid_y, 1)\n plt.xticks(pixel_range, sample_range_x)\n plt.yticks(pixel_range, sample_range_y)\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n plt.imshow(figure, cmap='Greys_r')\n plt.show()",
"def show_digit(self):\n x_train, _, _, _ = self._load_data()\n plt.imshow(x_train[0], cmap=plt.cm.binary)\n plt.show()",
"def display_image(mat):\n\timg = Image.fromarray(mat)\n\timg.show()",
"def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")",
"def draw_image(self):\n \n pixel_array = self.imageprepare(self.image_path)\n newArr = self.reshape_pixel_array(pixel_array)\n plt.imshow(newArr, interpolation='nearest')\n plt.savefig('MNIST_IMAGE.png')#save MNIST image\n plt.show()#Show / plot that image",
"def _visualize_input(self, input):\n self.writer.add_image('input', make_grid(input[0, 0, :, :], nrow=8, normalize=True))",
"def display_images(digits_im):\n i = 0\n\n for img in digits_im:\n if i < N_NEIGHBOURS:\n # Visualize your data\n im_max = np.max(img)\n img = PIXELS * (np.abs(im_max - img) / im_max)\n res = cv2.resize(img, (DIM, DIM), interpolation=cv2.INTER_CUBIC)\n cv2.imwrite('digit ' + str(i) + '.png', res)\n i += 1\n else:\n break",
"def show_image(self, pic, prediction=None):\n digitmap = {\n 0: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 1: [(0,2), (1,2), (2,2), (3,2), (4,2)],\n 2: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,0), (4,0), (4,1), (4,2)],\n 3: [(0,0), (0,1), (0,2), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 4: [(0,0), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,2)],\n 5: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)],\n 6: [(0,0), (0,1), (0,2), (1,0), (2,0), (2,1), (2,2), (3,0), (3,2), (4,0), (4,1), (4,2)],\n 7: [(0,0), (0,1), (0,2), (1,2), (2,2), (3,2), (4,2)],\n 8: [(0,0), (1,0), (2,0), (3,0), (4,0), (0,1), (4,1), (0,2), (1,2), (2,2), (3,2), (4,2), (2,1)],\n 9: [(0,0), (0,1), (0,2), (1,0), (1,2), (2,0), (2,1), (2,2), (3,2), (4,0), (4,1), (4,2)]\n }\n\n pic = pic.reshape((28,28)).copy()\n if prediction is not None:\n for pos in digitmap[prediction]:\n pic[pos]=255\n plt.imshow(pic, cmap='gray_r')",
"def display(array):\n if isinstance(array, np.ndarray):\n plt.imshow(array)\n plt.show()\n else:\n raise TypeError(\"display() needs a numpy ndarray as parameter, \"\n f\"got {type(array)}\")",
"def testBinaryImage():\n ALIEN = \"0\"*8 + \"11011011\"*2 + \"0\"*8 + \"00001000\" + \\\n \"01000010\" + \"01111110\" + \"0\"*8\n # this function is imported from cs5png.py\n NUM_ROWS = 8\n NUM_COLS = 8\n binaryIm( ALIEN, NUM_COLS, NUM_ROWS )\n # that should create a file, binary.png, in this\n # directory with the 8x8 image...",
"def visualize_digits(tensor_to_visualize):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n with queues.QueueRunners(sess):\n images_np = sess.run(tensor_to_visualize)\n plt.axis('off')\n plt.imshow(np.squeeze(images_np), cmap='gray')",
"def dat_imshow(x):\n plt.imshow(x,interpolation='nearest',aspect='auto')",
"def find_pixels(self):\n ref_image=Image.open('sample0000.png')\n imarray=np.array(ref_image)\n ref_image.close()\n self.number_of_pix=imarray.shape\n print self.number_of_pix\n ref_image=None\n imarray=None",
"def print_image(input):\n\timage=get_image(input)\n\tnz = image.get_zsize()\n\tfor iz in xrange(nz): print_slice(input, iz)",
"def ensquared_one_pix(array, pix_scale, new_scale=40, plot=True):\n\n n = int(new_scale // pix_scale)\n minPix, maxPix = (pix + 1 - n) // 2, (pix + 1 + n) // 2\n ens = array[minPix:maxPix, minPix:maxPix]\n # print(ens.shape)\n energy = np.sum(ens)\n\n if plot:\n mapp = 'viridis'\n f, (ax1, ax2) = plt.subplots(1, 2)\n ax1 = plt.subplot(1, 2, 1)\n square = Rectangle((minPix-0.5, minPix-0.5), n, n, linestyle='--', fill=None, color='white')\n ax1.add_patch(square)\n img1 = ax1.imshow(array, cmap=mapp)\n ax1.set_title('%.1f mas pixels' % (pix_scale))\n img1.set_clim(0, 1)\n plt.colorbar(img1, ax=ax1, orientation='horizontal')\n\n ax2 = plt.subplot(1, 2, 2)\n img2 = ax2.imshow(ens, cmap=mapp)\n ax2.set_title('%d mas window' %new_scale)\n img1.set_clim(0, 1)\n plt.colorbar(img2, ax=ax2, orientation='horizontal')\n\n return energy",
"def visualize_digits(tensor_to_visualize):\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n with tf.contrib.slim.queues.QueueRunners(sess):\n images_np = sess.run(tensor_to_visualize)\n plt.axis('off')\n plt.imshow(np.squeeze(images_np), cmap='gray')",
"def find_input():\n\n mat = [[5,0],[0,0],[0,0]]\n num_range = [0,2]\n sub = 3 #Number of pixels to be substituted.\n\n input = [mat, num_range, sub]\n\n return input",
"def plot_10_by_10_images(images):\n\n\tn = images.shape[0]\n\n\tq = n // 10\n\tr = n%10\n\tprint n,q,r\n\n\tfig = plt.figure()\n\tplt.ion()\n\n\tfor x in range(q):\n\t\tprint x\n\t\tif not x%10:\n\t\t\tplt.clf()\n\t\tfor y in range(10):\n\t\t\tax = fig.add_subplot(10, 10, 10*y+x%10+1)\n\t\t\tax.matshow(images[10*y+x%10], cmap = mpl.cm.binary)\n\t\t\tplt.xticks(np.array([]))\n\t\t\tplt.yticks(np.array([]))\n\t\tplt.show()\n\t\t_=raw_input(\"Press enter to show next 10\")"
] | [
"0.7884607",
"0.6769907",
"0.674633",
"0.6648369",
"0.6536538",
"0.64752847",
"0.64704466",
"0.6445182",
"0.64258",
"0.63998693",
"0.6364624",
"0.6281803",
"0.62751114",
"0.62262917",
"0.62021554",
"0.61980116",
"0.616065",
"0.61412615",
"0.6044806",
"0.5989991",
"0.59517574",
"0.58862716",
"0.58810186",
"0.5880656",
"0.58715904",
"0.585022",
"0.5833847",
"0.5794253",
"0.5741239",
"0.5737957"
] | 0.7971584 | 0 |
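A quick way to exercise show_digit above is scikit-learn's bundled 8x8 digits dataset, whose flattened 64-value rows match the expected input (the dataset choice is an assumption for illustration, not part of the original record):

import numpy as np
from sklearn.datasets import load_digits

digits = load_digits()                      # 1797 samples, each a flattened 8x8 image
sample = np.asarray(digits.data[0])         # 64 pixel intensities, roughly 0-16
show_digit(sample)                          # renders the digit as an 8x8 grayscale patch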
Parse date based on different formats | def parseDate(date):
formats = [
"D MMM YY, hh:mm a",
"YYYY-MM-DDTHH:mm:ss+00:00",
"ddd, D MMM YYYY HH:mm:ss +0530", # NDTV
"ddd, D MMM YYYY HH:mm:ss +0100", # skynews
"ddd, D MMM YYYY HH:mm:ss -0400", # reuters
"D MMM, YYYY", # espn cricket
"ddd, D MMM YYYY HH:mm:ss GMT", # times of india
"ddd, D MMM YYYY HH:mm:ss +0200", # lifrea
"ddd, D MMM YYYY HH:mm:ss +0000", # linux, ubuntu
"ddd, D MMM YYYY HH:mm:ss -0700", # iTunes
]
for f in formats:
try:
parsed_date = tryDateFormat(date, f)
return parsed_date.format("D MMM YY, hh:mm a")
except Exception as e:
pass
else:
return "Invalid date" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_date(date_string: str) -> Union[datetime.datetime, str]:\n for date_format in KNOWN_DATE_FORMATS:\n try:\n date = datetime.datetime.strptime(date_string, date_format)\n return date\n except ValueError:\n continue\n return date_string",
"def parse_date_str(self, date_str, date_format=None):\n if not date_format:\n date_format = self.parsing_date_format\n result = datetime.datetime.strptime(date_str, date_format).date()\n if result.year < 1900:\n # we have a typo in the house. Just use 2000 + last-two-digits\n year = (result.year % 100) + 2000\n result = result.replace(year=year)\n return result",
"def parse_date(text):\n\n for fmt in ('%Y-%m-%d', '%d-%m-%Y'):\n try:\n return datetime.datetime.strptime(text, fmt)\n except ValueError:\n pass\n\n raise ValueError('Dates should be in YYYY-MM-DD or DD-MM-YYYY format')",
"def _parse(self, date_str, format='%Y-%m-%d'):\n from pandas import to_datetime\n rv = to_datetime(date_str, format=format)\n if hasattr(rv, 'to_datetime'):\n rv = rv.to_datetime()\n return rv",
"def parse_date(date):\n\n return dateutil.parser.parse(date)",
"def filter_parsedate(val):\n return dateutil_parse.parse(val)",
"def parse_date(text):\n parser = CustomParser(dayfirst=True, yearfirst=True)\n try:\n date = parse(text, parserinfo=parser, fuzzy=True)\n except:\n date = None\n return date",
"def unify_date_format(date):\n if type(date) == str:\n try:\n date = dateutil.parser.parse(date) \n except:\n pass\n return date",
"def parse_known_date_formats(dt_string):\n for fmt in ('%Y%m%d', '%Y%m%d %H:%M', '%m/%d/%Y', '%m/%d/%Y %H:%M'):\n try:\n return datetime.strptime(dt_string, fmt)\n except ValueError:\n pass\n raise ValueError(\"No valid date format found.\"\n \"See https://tidesandcurrents.noaa.gov/api/ \"\n \"for list of accepted date formats.\")",
"def full_parse_date(text: str) -> Date:\n pred = dateparser.parse(text)\n return pred.year * 10000 + pred.month * 100 + pred.day",
"def parse_date(date_str, date_format=DATE_FORMAT):\n if date_str is None:\n return None\n return pd.datetime.strptime(date_str, '%Y%m%d')",
"def parse_date(date_str, date_re):\n date_str_tidy = date_str.replace('-', '')\n date_str_tidy = date_str_tidy.replace(' ', '')\n match = re.search(date_re, date_str_tidy)\n if match:\n year = match.group(3)\n if len(year) == 2:\n year = '19' + year\n try:\n date_utc = datetime.datetime(\n int(year), int(match.group(1)), int(match.group(2)),\n 0, 0, tzinfo=datetime.timezone.utc).isoformat()\n except:\n return (date_str, True)\n else:\n return (date_utc, False)\n else:\n return (date_str, True)",
"def _str_to_date(self, date):\n return datetools.date_parser(date)",
"def parse_date(date_string, format):\n try:\n return datetime.strptime(date_string, format)\n except ValueError:\n return None",
"def date_parse(date_string) -> datetime:\n return datetime.strptime(date_string, DATE_FMT)",
"def _dateutil_parser_parse(date_string):\n parsed_date = dateutil.parser.parse(date_string)\n return parsed_date",
"def parse_date(putative_date, return_format=False):\n # FIXME: use Ledger functions to parse dates, not mine.\n formats = [\"%Y-%m-%d\", \"%Y/%m/%d\"]\n for f in formats:\n try:\n d = datetime.datetime.strptime(putative_date, f).date()\n break\n except ValueError as e:\n continue\n try:\n if return_format:\n return d, f\n else:\n return d\n except UnboundLocalError:\n raise ValueError(\"cannot parse date from format %s: %s\" % (f, e))",
"def parse_date(input):\n input = input.strip()\n if input == '':\n return None, None\n\n # Parse the start\n mo = yyyymmdd_re.match(input)\n if not mo:\n mo = yyyymmdd_hyphen_re.match(input)\n if not mo:\n mo = ddmmyyyy_re.match(input)\n if not mo:\n mo = ddmmyyyy_hyphen_re.match(input)\n if mo:\n start = Date(*map(lambda x: x and int(x), (mo.group('year'), mo.group('month'), mo.group('day'))))\n else:\n return None, 'N'\n\n\n # Check if we're at the end of the input\n pos = mo.end()\n if pos == len(input):\n return DateRange(start, start), None\n\n # Check for a range specifier\n mo = range_re.match(input, pos)\n if mo:\n pos = mo.end()\n else:\n return DateRange(start, start), 'T'\n\n # Parse the end date\n mo = yyyymmdd_re.match(input, pos)\n if not mo:\n mo = yyyymmdd_hyphen_re.match(input, pos)\n if not mo:\n mo = ddmmyyyy_re.match(input, pos)\n if not mo:\n mo = ddmmyyyy_hyphen_re.match(input, pos)\n if mo:\n end = Date(*map(lambda x: x and int(x), (mo.group('year'), mo.group('month'), mo.group('day'))))\n else:\n return DateRange(start, start), 'T'\n\n pos = mo.end()\n if pos == len(input):\n return DateRange(start, end), None\n return DateRange(start, end), 'T'",
"def parse_date(date) -> datetime:\n\n if type(date) == datetime:\n return date\n try:\n date_object = datetime.strptime(date.replace(\" \", \"\"), \"%m/%d/%Y\")\n except (TypeError, ValueError) as exc:\n print(\"Cannot format time \" + str(exc), file=sys.stderr)\n return None\n return date_object",
"def try_parsing_date(text):\n for fmt in ('%I %p', '%I %M %p', '%I:%M %p'):\n try:\n return datetime.datetime.strptime(text, fmt)\n except ValueError:pass\n if \":\" in text:\n return datetime.datetime.strptime(text+\" \"+\n (\"AM\" if int(text.split(\":\")[0])>=8 else \"PM\"), '%I:%M %p')\n return datetime.datetime.strptime(text+\" \"+\n (\"AM\" if int(text)>=8 else \"PM\"), '%I %p')",
"def parse_date(str_date):\n return ciso8601.parse_datetime(str_date)",
"def parse_date_str(self, date_str, date_format=DATE_FORMAT):\n try:\n return self.adjust_year(datetime.strptime(date_str, date_format).date())\n except ValueError:\n return None",
"def parse_date_arg(date_arg):\n return datetime.datetime.strptime(date_arg, DATE_FORMAT)",
"def parse_date(date_str):\n date_str = re.sub(r\"[ .-]\", '/', date_str.strip())\n date_time = datetime.strptime(date_str, \"%d/%m/%Y\")\n return date_time.date()",
"def parse(string, format):\n # Count the number of spaces in the format string (N), and\n # truncate everything after the (N+1)th space\n spaces = format.count(' ') + 1\n string = ' '.join(string.split()[:spaces])\n\n try:\n result = dt.datetime.strptime(string, format)\n except ValueError, err:\n raise CannotParse(str(err))\n else:\n return result",
"def __parsedate(matchobj,ordering):\n dayformat = \"\"\n monthformat = \"\"\n yearformat = \"\"\n sep = \"\"\n year = matchobj.group(\"year\")\n month = matchobj.group(\"month\")\n day = matchobj.group(\"day\")\n sep = \"\" if matchobj.group(\"sep\") is None else matchobj.group(\"sep\")\n\n dayformat = \"%d\" if day else \"\"\n\n if year:\n yearformat = \"%y\" if len(year)==4 else \"%Y\"\n else:\n yearformat = \"\"\n\n try:\n month = int(month)\n monthformat = \"%m\"\n except:\n monthformat = \"%B\" if len(month)>3 else \"%b\"\n\n if ordering == 'dmy':\n formatstring = dayformat+sep+monthformat+sep+yearformat\n elif ordering == 'myd':\n formatstring = monthformat+sep+yearformat+sep+dayformat\n elif ordering == 'mdy':\n formatstring = monthformat+sep+dayformat+sep+yearformat\n elif ordering == 'ymd':\n formatstring = yearformat+sep+monthformat+sep+dayformat\n elif ordering == 'ydm':\n formatstring = yearformat+sep+dayformat+sep+monthformat\n else:\n raise Exception(\"A date formating error occurred\")\n\n return formatstring",
"def preprocess_date(date_):\n if 'JAN' in date_:\n date_ = date_.replace('JAN', '01')\n elif 'FEB' in date_:\n date_ = date_.replace('FEB', '02')\n elif 'MAR' in date_:\n date_ = date_.replace('MAR', '03')\n elif 'APR' in date_:\n date_ = date_.replace('APR', '04')\n elif 'MAY' in date_:\n date_ = date_.replace('MAY', '05')\n elif 'JUN' in date_:\n date_ = date_.replace('JUN', '06')\n elif 'JUL' in date_:\n date_ = date_.replace('JUL', '07')\n elif 'AUG' in date_:\n date_ = date_.replace('AUG', '08')\n elif 'SEP' in date_:\n date_ = date_.replace('SEP', '09')\n elif 'OCT' in date_:\n date_ = date_.replace('OCT', '10')\n elif 'NON' in date_:\n date_ = date_.replace('NON', '11')\n elif 'DEC' in date_:\n date_ = date_.replace('DEC', '12')\n if date_[-2:] > '17':\n date_ = date_[:6] + '19' + date_[-2:]\n else:\n date_ = date_[:6] + '20' + date_[-2:]\n return datetime.strptime(date_, '%d-%m-%Y')",
"def str2date(date_str, date_format):\n return datetime.strptime(date_str, date_format)",
"def _parse_date(date_string, date_type):\n # If date_string is None return None\n if date_string is None:\n return None\n\n # Parse rfc3339 dates from string\n elif date_type == \"rfc3339\":\n if date_string[-3] == \":\":\n date_string = date_string[:-3] + date_string[-2:]\n return datetime.datetime.strptime(date_string, \"%Y-%m-%dT%H:%M:%S%z\")\n\n # Parse date only strings\n elif date_type == \"date-only\":\n if re.match(r\"^(\\d){4}-00-00$\", date_string):\n return datetime.datetime.strptime(date_string, \"%Y-00-00\").date()\n else:\n return datetime.datetime.strptime(date_string, \"%Y-%m-%d\").date()\n \n elif date_type == \"date-time\":\n return datetime.datetime.strptime(date_string, \"%Y-%m-%d %H:%M:%S\")",
"def parse_date_input(input, formats=DT_INPUT_FORMATS):\n\n for fmt in formats:\n try:\n return datetime.datetime.strptime(input, fmt)\n except (ValueError):\n pass\n raise ValueError(\"'%(input)s' is not a valid datetime.\" % vars())"
] | [
"0.7429762",
"0.72900164",
"0.7274739",
"0.72164947",
"0.71738756",
"0.7167267",
"0.71480685",
"0.71087646",
"0.69975185",
"0.6994544",
"0.6974348",
"0.69636685",
"0.696295",
"0.6944045",
"0.6920026",
"0.6899698",
"0.68938076",
"0.68740517",
"0.68342584",
"0.6803827",
"0.6792624",
"0.67904925",
"0.673859",
"0.6736696",
"0.67066264",
"0.6690309",
"0.66896015",
"0.663759",
"0.66312677",
"0.6593226"
] | 0.74297434 | 1 |
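The parseDate record above relies on Arrow-style tokens via its tryDateFormat helper; the same try-each-format fallback can be sketched with only the standard library (the format strings below are illustrative, not the record's exact list):

from datetime import datetime

FORMATS = [
    "%d %b %y, %I:%M %p",            # e.g. "5 Mar 21, 07:45 pm"
    "%Y-%m-%dT%H:%M:%S+00:00",       # ISO-like feed timestamps
    "%a, %d %b %Y %H:%M:%S %z",      # RSS pubDate with a numeric UTC offset
]

def parse_date(text):
    for fmt in FORMATS:
        try:
            return datetime.strptime(text, fmt).strftime("%d %b %y, %I:%M %p")
        except ValueError:
            continue
    return "Invalid date"

print(parse_date("Wed, 03 Mar 2021 10:15:00 +0530"))   # -> "03 Mar 21, 10:15 AM"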
Get aragon voting contract as object. | def get_aragon_voting(
net: str, address: str,
etherscan_api_key: str, retries: int,
):
if not brownie.network.is_connected():
brownie.network.connect(net)
abi = get_abi(etherscan_api_key, address, net, retries)
impl_address = get_implementation_address(
address, abi, net
)
impl_abi = get_abi(etherscan_api_key, impl_address, net, retries)
return brownie.Contract.from_abi(
'AragonVoting', address, impl_abi
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_vote(self, id: int) -> dict:",
"def to_object(cls, query_dict: Dict):\n vote = Vote()\n vote.question = query_dict.get(\"question\")\n vote.user = query_dict.get(\"user_id\")\n vote.value = query_dict.get(\"value\")\n vote.id = query_dict.get(\"id\")\n return vote",
"def get_obv(data):\n if data is None:\n raise EmptyDataError('[!] Invalid data value')\n\n result = TA.OBV(data)\n if result is None:\n raise IndicatorException\n return result",
"def test_create_object(self):\n\n candidate1 = pyrankvote.Candidate(\"Per\")\n candidate2 = pyrankvote.Candidate(\"Maria\")\n candidate3 = pyrankvote.Candidate(\"Aase\")\n\n ranked_candidates=(candidate1, candidate2, candidate3)\n\n ballot = pyrankvote.Ballot(ranked_candidates)\n self.assertTupleEqual(ranked_candidates, ballot.ranked_candidates)",
"def get_contract(self, name):\n return self.contracts[name]",
"def get_for_user(self, obj, user):\n if not user.is_authenticated:\n return None\n content_object = ContentType.objects.get_for_model(obj)\n try:\n vote = self.get(voter=user, content_type=content_object, object_id=obj._get_pk_val())\n\n except ObjectDoesNotExist:\n #print('No vote by {user} on {object}'.format(user=user, object=obj))\n return None\n\n return vote",
"def voter_votes(request, election, voter_uuid):\n voter = Voter.get_by_election_and_uuid(election, voter_uuid)\n votes = CastVote.get_by_voter(voter)\n return [v.toJSONDict() for v in votes]",
"def vote_proposal(self, id: bytes, voter_address: 'Address', vote_type: int, current_block_height: int,\n tx_hash: bytes, timestamp: int, main_preps: list) -> (bool, int, dict):\n if not self._validate_vote_type(vote_type):\n revert(f\"Invalid vote parameter: {vote_type}\")\n\n if not self._check_registered_proposal(id):\n revert(\"No registered proposal\")\n\n proposal_info = ProposalInfo.from_bytes(self._proposal_list[id])\n\n if proposal_info.end_block_height < current_block_height:\n revert(\"This proposal has already expired\")\n\n if proposal_info.status == NetworkProposalStatus.CANCELED:\n revert(\"This proposal has already canceled\")\n\n _VOTE_TYPE_IN_STR = \"agree\" if vote_type == NetworkProposalVote.AGREE else \"disagree\"\n _NO_VOTE_IN_STR = \"noVote\"\n\n addresses_of_voters_agreeing_or_disagreeing = \\\n [voter[\"address\"] for voter in proposal_info.vote[\"agree\"][\"list\"]] \\\n + [voter[\"address\"] for voter in proposal_info.vote[\"disagree\"][\"list\"]]\n\n if str(voter_address) in addresses_of_voters_agreeing_or_disagreeing:\n revert(\"Already voted\")\n\n if str(voter_address) not in proposal_info.vote[\"noVote\"][\"list\"]:\n revert(\"No permission - only for main prep when network proposal registered\")\n\n for main_prep in main_preps:\n if main_prep.address == voter_address:\n voter_in_dict = self._generate_voter_in_dict(tx_hash, timestamp, main_prep)\n\n proposal_info.vote[_VOTE_TYPE_IN_STR][\"list\"].append(voter_in_dict)\n proposal_info.vote[_VOTE_TYPE_IN_STR][\"amount\"] += voter_in_dict[\"amount\"]\n\n proposal_info.vote[_NO_VOTE_IN_STR][\"list\"].remove(voter_in_dict[\"address\"])\n proposal_info.vote[_NO_VOTE_IN_STR][\"amount\"] -= voter_in_dict[\"amount\"]\n\n # set status\n approved = False\n if proposal_info.status == NetworkProposalStatus.VOTING:\n if self._check_vote_result(vote_type, proposal_info):\n if vote_type == NetworkProposalVote.AGREE:\n proposal_info.status = NetworkProposalStatus.APPROVED\n approved = True\n else:\n proposal_info.status = NetworkProposalStatus.DISAPPROVED\n elif len(proposal_info.vote[\"noVote\"][\"list\"]) == 0:\n # All voters voted but the status is still VOTING. Set status to DISAPPROVED\n proposal_info.status = NetworkProposalStatus.DISAPPROVED\n\n self._proposal_list[id] = proposal_info.to_bytes()\n\n return approved, proposal_info.type, proposal_info.value",
"def get_election(self, id: int) -> dict:",
"def get_for_user(self, obj, user):\r\n if not user.is_authenticated():\r\n return None\r\n ctype = ContentType.objects.get_for_model(obj)\r\n try:\r\n vote = self.get(content_type=ctype, object_id=obj._get_pk_val(),\r\n user=user)\r\n except models.ObjectDoesNotExist:\r\n vote = None\r\n return vote",
"def get_for_user(self, obj, user):\n if not user.is_authenticated():\n return None\n ctype = ContentType.objects.get_for_model(obj)\n try:\n vote = self.get(content_type=ctype, object_id=obj._get_pk_val(),\n user=user)\n except models.ObjectDoesNotExist:\n vote = None\n return vote",
"def getActaVotacion(self):\n return None",
"def parse_voting(\n aragon_voting, abi_storage: CachedStorage,\n vote_number: int\n) -> List[Union[Call, str]]:\n script_code = str(aragon_voting.getVote(vote_number)[-1])\n return decode_evm_script(script_code, abi_storage)",
"def get_contracts(self):\n return self.contracts",
"def get_raw(self):\n if not self.ops:\n return\n ops = [self.operations.Op_wrapper(op=o) for o in list(self.ops)]\n data = {\n 'author': self.proposer,\n 'title': self.title,\n 'memo': self.memo,\n 'proposed_operations': [o.json() for o in ops],\n 'expiration_time': formatTimeFromNow(self.proposal_expiration),\n 'extensions': [],\n }\n if self.proposal_review:\n data.update({\"review_period_time\": formatTimeFromNow(self.proposal_review)})\n\n ops = self.operations.Proposal_create(**data)\n return self.operation_class(ops)",
"def get_instance(self, contract_name: str) -> None:\n self._validate_name_and_references(contract_name)\n factory = self.contract_factories[contract_name]\n address = to_canonical_address(self.deployment_data[contract_name][\"address\"])\n contract_kwargs = {\n \"abi\": factory.abi,\n \"bytecode\": factory.bytecode,\n \"bytecode_runtime\": factory.bytecode_runtime,\n }\n return self.w3.eth.contract(address=address, **contract_kwargs)",
"def getProposal(rc_id):\n\n db = getDB()\n proposal = db.proposals.find_one({\"rc_id\": rc_id})\n if not proposal:\n return None\n votes = db.proposal_votes.find({\"proposal_id\": proposal[\"_id\"]})\n yes, no = 0, 0\n for vote in votes:\n if vote[\"yes_vote\"]:\n yes += 1\n else:\n no += 1\n proposal[\"yes\"] = yes\n proposal[\"no\"] = no\n return proposal",
"def test_get_concise_contract():\n contract_concise = ContractHandler.get_concise_contract(\"DataTokenTemplate\")\n assert isinstance(contract_concise, ConciseContract)",
"def encrypt_ballot(request, election):\n answers = utils.from_json(request.POST['answers_json'])\n ev = homomorphic.EncryptedVote.fromElectionAndAnswers(election, answers)\n return ev.ld_object.includeRandomness().toJSONDict()",
"def getObject(self):\n parent = aq_parent(self)\n obj = None\n try:\n obj = parent.unrestrictedTraverse(self.getPath()) \n except:\n log.error(\"Unable to get object from brain. Path: {0}. Catalog may be out of sync.\".format(self._result.uid))\n return obj",
"def get_o(self):\n return self.o",
"def get_vzo(data):\n if data is None:\n raise EmptyDataError('[!] Invalid data value')\n\n result = TA.VZO(data)\n if result is None:\n raise IndicatorException\n return result",
"def getZopeObj(self, path):\n return self.getObjByPath(path)",
"def vote(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'votes')\r\n request = http.Request('POST', url, {'to': '1'})\r\n\r\n return request, parsers.parse_json",
"def raw_data(self):\n\n return self._injest_json_data_for_vote()",
"def obj(self):\n return self._obj",
"def one_voter(request, election, voter_uuid):\n voter = Voter.get_by_election_and_uuid(election, voter_uuid)\n if not voter:\n raise Http404\n return voter.toJSONDict()",
"def retrieve_google_civic_election_id_for_voter(voter_id):\n google_civic_election_id = 0\n success = False\n\n if positive_value_exists(voter_id):\n try:\n ballot_item_query = BallotItem.objects.filter(\n voter_id__exact=voter_id,\n )\n ballot_item_list = list(ballot_item_query[:1])\n if ballot_item_list:\n one_ballot_item = ballot_item_list[0]\n google_civic_election_id = one_ballot_item.google_civic_election_id\n success = True\n except BallotItem.DoesNotExist:\n pass\n\n results = {\n 'success': success,\n 'google_civic_election_id': google_civic_election_id,\n }\n return results",
"def obj(self) -> object:\n pass",
"def extract_vote_stance(vote):\n print \"Extracting stances from vote: %s\" % vote\n\n bill_pointer = vote[0]\n for_or_agn = vote[1]\n\n # Check if the name used in vote is the name of the bill, bill number, or\n # a synonym\n query = {\"$or\": [{\"name\": bill_pointer}, \n {\"synonyms\": { \"$in\" : [ bill_pointer ] }},\n {\"bnumber\" : bill_pointer} ] }\n bill = get(BILL, query) \n if not bill:\n print \"WARNING Bill not found: %s\" % bill_pointer\n return []\n\n if for_or_agn == FOR:\n return bill.stance_for\n elif for_or_agn == AGN:\n return bill.stance_agn\n else:\n print \"ERROR extracting stance. Expected FOR or AGN. Received %s\" % for_or_agn\n return []"
] | [
"0.5435771",
"0.53329545",
"0.5219299",
"0.5188242",
"0.5178088",
"0.5061383",
"0.5034543",
"0.49983376",
"0.49737984",
"0.49379542",
"0.4928847",
"0.49039114",
"0.4894972",
"0.48840657",
"0.48681977",
"0.4842652",
"0.48310107",
"0.4791282",
"0.478338",
"0.47696742",
"0.47668836",
"0.47536603",
"0.47090998",
"0.47070408",
"0.46713266",
"0.466714",
"0.46639436",
"0.46550027",
"0.46497273",
"0.46457"
] | 0.6489815 | 0 |
Decode aragon voting with specific number. | def parse_voting(
aragon_voting, abi_storage: CachedStorage,
vote_number: int
) -> List[Union[Call, str]]:
script_code = str(aragon_voting.getVote(vote_number)[-1])
return decode_evm_script(script_code, abi_storage) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decodeVigenere(self, key):\n\n key = key.upper().replace(\" \", \"\")\n decode = Vig(key)\n planeText = decode.decode(self.cipherText)\n \n if (self.verbose == 1):\n print(planeText)\n \n return(planeText)",
"def decode(self, number: int) -> typing.Union[int, str]:\n return number",
"def decode (bv, validate = False):\n \n # Would be nice to check the bit count here..\n # if validate:\n # assert (len(bv)==FIX: SOME NUMBER)\n r = {}\n r['MessageID'] = 1\n r['RepeatIndicator'] = int(bv[6:8])\n r['UserID'] = int(bv[8:38])\n r['NavigationStatus'] = int(bv[38:42])\n r['ROT'] = aisbinary.signedIntFromBV(bv[42:50])\n r['SOG'] = Decimal(int(bv[50:60])) / Decimal('10')\n r['PositionAccuracy'] = int(bv[60:61])\n r['longitude'] = Decimal(aisbinary.signedIntFromBV(bv[61:89])) / Decimal('600000')\n r['latitude'] = Decimal(aisbinary.signedIntFromBV(bv[89:116])) / Decimal('600000')\n r['COG'] = Decimal(int(bv[116:128])) / Decimal('10')\n r['TrueHeading'] = int(bv[128:137])\n r['TimeStamp'] = int(bv[137:143])\n r['RegionalReserved'] = 0\n r['Spare'] = 0\n r['RAIM'] = bool(int(bv[148:149]))\n r['state_syncstate'] = int(bv[149:151])\n r['state_slottimeout'] = int(bv[151:154])\n r['state_slotoffset'] = int(bv[154:168])\n return r",
"def decode(self, value):\r\n pass",
"def parse_creative_serving_decision(data):\n return json.loads(base64.b64decode(data))",
"def dec2int(r: str) -> int:",
"def _decode_vint(buf):\n ctr = 0\n result = 0\n tmp = bytearray(1)\n partial = False\n while 1:\n count = buf.readinto(tmp)\n if count == 0:\n raise EndOfMessage(partial)\n else:\n partial = True\n result |= (tmp[0] & 0x7f) << (7 * ctr)\n if not (tmp[0] >> 7): break\n ctr += 1\n return result",
"def test_decoder(self):\n from sosbeacon.utils import number_decode\n\n encoded = 'b6'\n number = number_decode(encoded)\n self.assertEqual(number, 123)",
"def decode(a):\n return decode(a)",
"def decode(data): #@NoSelf",
"def _decode_index_value(self, index, value):\n if index.endswith(\"_int\"):\n return int(value)\n else:\n return value",
"def var_id(v):\n return int(sort_vid_split(v)[1])",
"def decode_int(n):\n return stuct.unpack(\">I\", n)[0]",
"def clean_number_plate(self, vrn):\n cleaned = re.sub(r'[^\\dA-Z]', '', vrn)\n\n if re.match(r'^[A-Z]{2}', cleaned) and len(cleaned) == 7:\n if cleaned[2] == 'O':\n cleaned = cleaned[:2] + '0' + cleaned[3:]\n if cleaned[2] == 'I':\n cleaned = cleaned[:2] + '1' + cleaned[3:]\n if cleaned[3] == 'O':\n cleaned = cleaned[:3] + '0' + cleaned[4:]\n if cleaned[3] == 'I':\n cleaned = cleaned[:3] + '1' + cleaned[4:]\n\n if re.match(r'^B', cleaned) and len(cleaned) == 7:\n if cleaned[1] == 'O':\n cleaned = cleaned[:1] + '0' + cleaned[2:]\n if cleaned[1] == 'I':\n cleaned = cleaned[:1] + '1' + cleaned[2:]\n if cleaned[2] == 'O':\n cleaned = cleaned[:2] + '0' + cleaned[3:]\n if cleaned[2] == 'I':\n cleaned = cleaned[:2] + '1' + cleaned[3:]\n if cleaned[3] == 'O':\n cleaned = cleaned[:3] + '0' + cleaned[4:]\n if cleaned[3] == 'I':\n cleaned = cleaned[:3] + '1' + cleaned[4:]\n\n if re.match(r'^[A-Z]{2}', cleaned) and len(cleaned) == 8:\n if cleaned[0] == 'Y':\n cleaned = 'V' + cleaned[1:]\n if cleaned[1] == 'Y':\n cleaned = cleaned[0] + 'V' + cleaned[2:]\n\n return cleaned",
"def decode(self, x):\n return x",
"def decode(b):\n\n if b.startswith(\"0z\"):\n b = b[2:]\n\n l, i, v = len(b), 0, 0\n for x in b:\n v += _value(x) * (BASE ** (l - (i + 1)))\n i += 1\n\n return v",
"def _decode_35701(data):\n start_byte = 0\n n_bytes = 2\n var_id = struct.unpack('<H', data[start_byte:start_byte + n_bytes])[0]\n if var_id == 29995:\n start_byte += n_bytes\n n_bytes = 4\n n_points = struct.unpack(\n '<I', data[start_byte:start_byte + n_bytes])[0]\n return {'n_points': n_points}",
"def decode(self, encoded):",
"def decode(self, value):\r\n return value",
"def decode(self, s):",
"def decode(self, s):",
"def test_onion_parse():\n vec = get_vector('onion-test-v0.json')\n o = vec['onion']\n o = onion.RoutingOnion.from_hex(o)\n\n assert(o.version == 0)\n assert(bytes.hex(o.hmac) == 'b8640887e027e946df96488b47fbc4a4fadaa8beda4abe446fafea5403fae2ef')\n\n assert(o.to_bin() == bytes.fromhex(vec['onion']))",
"def decode_signed_value( name, value ):",
"def parse_input(string):\n return [int(vote) for vote in string.split()]",
"def _decode_string(box_string):\r\n if box_string == \"no_box\":\r\n return np.zeros((0,4))\r\n else:\r\n try:\r\n boxes = np.array([np.array([int(eval(i)) for i in box.split(\" \")])\r\n for box in box_string.split(\";\")])\r\n return boxes\r\n except:\r\n print(box_string)\r\n print(\"Submission is not well formatted. empty boxes will be returned\")\r\n return np.zeros((0,4))",
"def test_decode_numbers(self, number, base, expected):\n self.assertEqual(positional.decode(number, base), expected)",
"def decode(data):\n raise NotImplementedError",
"def decode_ia(ia: int) -> str:\n if not isinstance(ia, int):\n ia = struct.unpack('>H', ia)[0]\n return '{}.{}.{}'.format((ia >> 12) & 0x1f, (ia >> 8) & 0x07, (ia) & 0xff)",
"def opinion_vote(mode, verbose, revision):\n judge = VotingJudge(mode, revision)\n flags = judge.vote()\n if verbose is True:\n click.echo(\"Vote resulted in %i flags:\" % len(flags))\n for f in flags:\n format_flag(f)",
"def convertToFloat(vote):\n if vote == 'y':\n return 1\n if vote == 'n':\n return -1\n if vote == '?':\n return 0"
] | [
"0.552153",
"0.5235955",
"0.5231673",
"0.5132123",
"0.50863487",
"0.50287044",
"0.5017056",
"0.501696",
"0.49951592",
"0.49941075",
"0.49753332",
"0.4963806",
"0.49543387",
"0.48979822",
"0.48958716",
"0.48876846",
"0.48868448",
"0.488496",
"0.48771974",
"0.48609972",
"0.48609972",
"0.48461953",
"0.48375046",
"0.48366266",
"0.48096502",
"0.47888115",
"0.478755",
"0.4752693",
"0.4741279",
"0.47411513"
] | 0.5924441 | 0 |
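Taken together, the two Aragon records above suggest a usage flow along these lines (a hedged sketch only: the contract address, API key, vote number, and the CachedStorage construction are placeholders, and a live network connection plus an Etherscan key are required for it to actually run):

# Hypothetical wiring of get_aragon_voting() and parse_voting() from the records above.
voting = get_aragon_voting(
    net='mainnet',
    address='0x0000000000000000000000000000000000000000',   # placeholder voting contract
    etherscan_api_key='YOUR_ETHERSCAN_KEY',                  # placeholder credential
    retries=3,
)
abi_storage = CachedStorage(...)   # construction details depend on the surrounding project
calls = parse_voting(voting, abi_storage, vote_number=1)
for call in calls:                 # decoded calls (or raw strings the decoder could not parse)
    print(call)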
Run multiprocessing of sampledata files. We use multiple threads by splitting the VCF file into chunks and using the vcf_subset function of cyvcf2. | def run_multiprocessing(args, function):
vcf_fn = args.data_file
num_processes = args.num_threads
if num_processes > 1:
# Split the VCF into chunks
callset = allel.read_vcf(vcf_fn, fields=["variants/CHROM", "variants/POS"])
pos_list = callset["variants/POS"]
chroms = callset["variants/CHROM"]
assert np.all(chroms == chroms[0])
chrom = str(chroms[0])
def get_chromosome_chunks(lst, num_processes):
length = len(lst)
n = math.ceil(length / num_processes)
chunks = list()
for index, i in enumerate(range(0, length, n)):
if index != num_processes - 1:
chunks.append(
(
args,
args.output_file + str(index),
(chrom + ":" + str(lst[i]) + "-" + str(lst[i + n])),
)
)
else:
chunks.append(
(
args,
args.output_file + str(index),
(chrom + ":" + str(lst[i]) + "-" + str(lst[-1])),
)
)
return chunks
chunks = get_chromosome_chunks(pos_list, num_processes)
chunks_iter = iter(chunks)
reports = list()
completed_files = list()
with multiprocessing.Pool(processes=num_processes, maxtasksperchild=10) as pool:
for index, row in enumerate(pool.map(function, chunks_iter)):
reports.append(row)
print(
"Processed Chunk {}: {} with {} sites added.".format(
index, chunks[index][2], row["num_sites"]
)
)
if row["num_sites"] > 0:
completed_files.append(index)
else:
os.remove(args.output_file + str(index) + "-lock")
# Combine reports and print
master_report = reports[0]
for report in reports[1:]:
for var_type, val in report.items():
master_report[var_type] += val
print(master_report)
# Combine sampledata files
filenames = completed_files
all_samples = []
for name in filenames:
all_samples.append(tsinfer.load(args.output_file + str(name)))
os.remove(args.output_file + str(name))
samples = all_samples[0].copy(args.output_file)
samples.append_sites(*all_samples[1:])
samples.finalise()
assert np.all(np.diff(samples.sites_position[:]) > 0)
else:
raise ValueError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main(input_folder, output_folder, bounding_boxes_file, cores, resampling,\n order):\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [\n f for f in glob.glob(input_folder + '/**/*.nii.gz', recursive=True)\n ]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)",
"def addVCFSubsetJobs(self, workflow=None, inputData=None, db_vervet=None, sampleIDFile=None, transferOutput=True,\\\n\t\t\t\t\t\trefFastaFList=None, GenomeAnalysisTKJar=None,\\\n\t\t\t\t\t\tmaxContigID=None, outputDirPrefix=\"\"):\n\t\tif workflow is None:\n\t\t\tworkflow = self\n\t\tif GenomeAnalysisTKJar is None:\n\t\t\tGenomeAnalysisTKJar = workflow.GenomeAnalysisTKJar\n\t\tif refFastaFList is None:\n\t\t\trefFastaFList = self.refFastaFList\n\t\t\n\t\tsys.stderr.write(\"Adding vcf-subset jobs for %s vcf files ... \"%(len(inputData.jobDataLs)))\n\t\tno_of_jobs= 0\n\t\t\n\t\t\n\t\ttopOutputDir = \"%sVCFSubset\"%(outputDirPrefix)\n\t\ttopOutputDirJob = self.addMkDirJob(outputDir=topOutputDir)\n\t\tno_of_jobs += 1\n\t\t\n\t\treturnData = PassingData()\n\t\treturnData.jobDataLs = []\n\t\tfor jobData in inputData.jobDataLs:\n\t\t\tinputF = jobData.vcfFile\n\t\t\tchr = self.getChrFromFname(inputF.name)\n\t\t\tif maxContigID:\n\t\t\t\tcontig_id = self.getContigIDFromFname(inputF.name)\n\t\t\t\ttry:\n\t\t\t\t\tcontig_id = int(contig_id)\n\t\t\t\t\tif contig_id>maxContigID:\t#skip the small contigs\n\t\t\t\t\t\tcontinue\n\t\t\t\texcept:\n\t\t\t\t\tsys.stderr.write('Except type: %s\\n'%repr(sys.exc_info()))\n\t\t\t\t\timport traceback\n\t\t\t\t\ttraceback.print_exc()\n\t\t\tinputFBaseName = os.path.basename(inputF.name)\n\t\t\tcommonPrefix = inputFBaseName.split('.')[0]\n\t\t\toutputVCF = File(os.path.join(topOutputDir, '%s.subset.vcf'%(commonPrefix)))\n\t\t\tvcfSubsetJob = self.addVCFSubsetJob(workflow, executable=workflow.vcfSubset, vcfSubsetPath=workflow.vcfSubsetPath, \\\n\t\t\t\t\t\tsampleIDFile=sampleIDFile,\\\n\t\t\t\t\t\tinputVCF=inputF, outputF=outputVCF, \\\n\t\t\t\t\t\tparentJobLs=[topOutputDirJob]+jobData.jobLs, transferOutput=False, job_max_memory=200,\\\n\t\t\t\t\t\textraArguments=None, extraDependentInputLs=None)\n\t\t\t\n\t\t\t#2012.10.5\n\t\t\t#selectVariants would generate AC, AF so that TrioCaller could read it.\n\t\t\t#samtools uses 'AC1' instead of AC, 'AF1' instead of AF.\n\t\t\tVCF4OutputF = File(os.path.join(topOutputDir, '%s.niceformat.vcf'%commonPrefix))\n\t\t\tvcfConvertJob = self.addSelectVariantsJob(workflow, SelectVariantsJava=workflow.SelectVariantsJava, \\\n\t\t\t\t\tinputF=vcfSubsetJob.output, outputF=VCF4OutputF, \\\n\t\t\t\t\trefFastaFList=refFastaFList, parentJobLs=[vcfSubsetJob], \\\n\t\t\t\t\textraDependentInputLs=[], transferOutput=False, \\\n\t\t\t\t\textraArguments=None, job_max_memory=2000, interval=chr)\n\t\t\t\n\t\t\tVCFGzipOutputF = File(\"%s.gz\"%VCF4OutputF.name)\n\t\t\tVCFGzipOutput_tbi_F = File(\"%s.gz.tbi\"%VCF4OutputF.name)\n\t\t\tbgzip_tabix_VCF_job = self.addBGZIP_tabix_Job(workflow, bgzip_tabix=workflow.bgzip_tabix, \\\n\t\t\t\t\tparentJobLs=[vcfConvertJob], inputF=vcfConvertJob.output, outputF=VCFGzipOutputF, \\\n\t\t\t\t\ttransferOutput=transferOutput)\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\treturnData.jobDataLs.append(PassingData(jobLs=[bgzip_tabix_VCF_job], vcfFile=VCFGzipOutputF, \\\n\t\t\t\t\t\t\t\t\ttbi_F=VCFGzipOutput_tbi_F, \\\n\t\t\t\t\t\t\t\t\tfileLs=[VCFGzipOutputF, VCFGzipOutput_tbi_F]))\n\t\t\t\n\t\tsys.stderr.write(\"%s jobs.\\n\"%(self.no_of_jobs))\n\t\treturn returnData",
"def process_data(fileprefix=DEFAULT_FILE_PREFIX):\n\n # TODO wow this is uggo code\n FILE_PREFIX = fileprefix\n\n MAX_SAMP=1500\n\n # Get data from file\n tf_record_file_names = [join(FILE_PREFIX, f) for f in listdir(FILE_PREFIX) if isfile(join(FILE_PREFIX, f)) and 'tfrecord' in f]\n assert len(tf_record_file_names) > 0\n\n dataset_it = iter(tf.data.TFRecordDataset(tf_record_file_names, compression_type='').take(MAX_SAMP))\n\n # Run the computation !\n with tqdm_joblib(tqdm(desc=\"My calculation\", total=MAX_SAMP)) as progress_bar:\n results = Parallel(n_jobs=-1)(\n delayed(_process_single)(data) for data in dataset_it\n )",
"def data_process(self):\n logging.info('Processing the data and split files')\n lines = Utility.file_len(self.fname)\n self.lines_to_be, self.split_files = Utility.split_files(self.fname, lines,\n cpu_count().real)",
"def prepare_subset_vcf_files_by_population():\n if not os.path.exists(VCF_BY_POPULATION_PATH):\n print(\"preparing subset vcf by population\")\n os.makedirs(VCF_BY_POPULATION_PATH)\n sleep(10)\n vcf_tools_runner = VCFToolsDockerRunner()\n samples = glob(f\"{SAMPLES_FOLDER}/*.csv\")\n\n with tqdm(total=len(samples)) as pbar:\n for sample in samples:\n sample = sample.replace('\\\\', '/')\n sample_name = get_filename_from_path(sample)\n sample_path = \"/\".join([IMAGE_SHARE_FOLDER_PATH] + sample.split('/')[1:])\n pbar.set_description(f\"Processing {sample_name}\")\n vcf_tools_runner(\n f\"vcf-subset -c {sample_path} \"\n f\"{IMAGE_SHARE_FOLDER_PATH}/{GENOTYPE_DATA_FOLDER}/{MERGED_VCF_FILE_NAME} | fill-an-ac > \"\n f\"{IMAGE_SHARE_FOLDER_PATH}/{VCF_BY_POPULATION_FOLDER}/{sample_name}.vcf\")\n pbar.update(1)\n else:\n print(f\"Subset VCF files by population already exist in: {VCF_BY_POPULATION_PATH}\")",
"def process(self, args):\n for benchmark_file in args.benchmark_files:\n self.process_individual_file(benchmark_file)\n self.total_files += 1",
"def _sample_subblocks(threadidx):\n while True:\n try:\n job = jobs.get_nowait()\n except Queue.Empty:\n print \"QUEUE IS EMPTY\"\n return\n print '{0}: processing job {1}'.format(threadidx, job[0])\n\n # sample random superblocks\n features[threadidx] = sample_clips_random(\n video=job[1],\n framesize=cfg.data.superblock_framesize,\n horizon=cfg.data.superblock_horizon,\n temporal_subsampling=cfg.data.temp_subsample,\n nsamples=nsamples_per_file)\n if np.any(np.isnan(features[threadidx])):\n raise ValueError('nan detected in random samples')\n\n # get blocks from superblocks\n features[threadidx] = sample_clips_dense_from_multiple_videos(\n features[threadidx].reshape((features[threadidx].shape[0],\n cfg.data.superblock_horizon,\n cfg.data.superblock_framesize,\n cfg.data.superblock_framesize)),\n framesize=cfg.data.framesize,\n horizon=cfg.data.horizon,\n temporal_subsampling=cfg.data.temp_subsample,\n stride=cfg.data.stride, verbose=False)\n if np.any(np.isnan(features[threadidx])):\n raise ValueError('nan detected in sub-samples')\n\n # whiten the samples\n if cfg.pca.method == 'blockwise':\n features[threadidx] = pca.whiten(\n data=features[threadidx], V=V, m0=m0, s0=s0,\n var_fracs=var_fracs, nprincomps=nprinc,use_gpu=False,\n retain_var=cfg.pca.retain_var)\n else:\n features[threadidx] = pca.whiten(\n data=features[threadidx].reshape(\n (features[threadidx].shape[0] * cfg.data.horizon,\n -1)),\n V=V, m0=m0, s0=s0,\n var_fracs=var_fracs, nprincomps=nprinc, use_gpu=False,\n retain_var=cfg.pca.retain_var).reshape(\n (features[threadidx].shape[0], -1))\n\n if np.any(np.isnan(features[threadidx])):\n raise ValueError('nan detected in whitened samples')\n\n # get the mappings\n model_mutex.acquire()\n features[threadidx] = model.mappingsNonoise_batchwise(\n features[threadidx], batchsize=1000)\n model_mutex.release()\n if np.any(np.isnan(features[threadidx])):\n raise ValueError('nan detected in mappings')\n # concatenate the mappings\n features[threadidx] = features[threadidx].reshape((\n nsamples_per_file, -1))\n\n input_mutex.acquire()\n kmeanstraindata[\n job[0] * nsamples_per_file:(job[0] + 1) * nsamples_per_file, :] =\\\n features[threadidx]\n # thread 0 should flush\n if threadidx == 0:\n kmeanstrainfile.flush()\n input_mutex.release()",
"def execute(self, sample_files: pd.DataFrame, reference_file: Path, ncores: int = 1) -> ExecutorResults:\n pass",
"def pull_sample_data(self, sample_id: str, sample_size: int or None = None, include_controls: bool = True,\n output_format: str = 'dataframe', columns_default: str = 'marker') -> None or list:\n db = connection.get_db(alias='core')\n db_name = db.name\n file_grp = self.pull_sample(sample_id)\n if not file_grp:\n return None\n files = file_grp.files\n # Fetch data\n if not include_controls: # Fetch data for primary file only\n file_id = [f for f in files if f.file_type == 'complete'][0].file_id\n connection.disconnect('core')\n connection._connections = {}\n connection._connection_settings = {}\n connection._dbs = {}\n FileGroup._collection = None\n\n complete = data_from_file(file_id=file_id,\n filegrp_id=file_grp.id,\n db_name=db_name,\n sample_size=sample_size,\n output_format=output_format,\n columns_default=columns_default)\n connection.connect(db=db_name, alias='core')\n return [complete]\n # Fetch data for primary file & controls\n files = [f.file_id for f in file_grp.files]\n connection.disconnect('core')\n connection._connections = {}\n connection._connection_settings = {}\n connection._dbs = {}\n FileGroup._collection = None\n\n pool = Pool(cpu_count())\n f = partial(data_from_file,\n filegrp_id=file_grp.id,\n db_name=db_name,\n sample_size=sample_size,\n output_format=output_format,\n columns_default=columns_default)\n data = pool.map(f, files)\n pool.close()\n pool.join()\n connection.connect(db=db_name, alias='core')\n return data",
"def run_skim(self):\n # Split input into chunks for processin\n skim_files = glob.glob(self.args.input + \"*.root\")\n # Make processing pool\n pool = Pool(processes=self.args.ncore)\n # Map processing to _run function\n pool.imap(_run_skim, skim_files)\n # Close and join pool\n pool.close()\n pool.join()",
"def send_data_to_mutiple_threads(parsed_file, collection_name):\n for i in range(thread_count):\n len_file_segment = len(parsed_file) // thread_count\n if (i * len_file_segment) + len_file_segment + 1 > len(parsed_file):\n divided_product_file = parsed_file[i * len_file_segment:]\n else:\n divided_product_file = parsed_file[i *\n len_file_segment: (i * len_file_segment) + len_file_segment]\n\n thread = threading.Thread(target=thread_insert_data(\n divided_product_file, collection_name))\n thread.start()",
"def run_map(self):\n # Split input into chunks for processing\n files = self.split_list()\n # Make processing pool\n pool = Pool(processes=self.args.ncore)\n # Map processing to _run function\n self.output = pool.map(_run, files)\n # Close and join pool\n pool.close()\n pool.join()",
"def addSplitNamVCFJobs(self, workflow, inputData=None, db_vervet=None, transferOutput=True,\\\n\t\t\t\t\t\tmaxContigID=None, outputDirPrefix=\"\"):\n\t\tsys.stderr.write(\"Adding split Nam VCF-file jobs for %s vcf files ... \"%(len(inputData.jobDataLs)))\n\t\tno_of_jobs= 0\n\t\t\n\t\t\n\t\ttopOutputDir = \"%sSampleInUCLAID\"%(outputDirPrefix)\n\t\ttopOutputDirJob = self.addMkDirJob(outputDir=topOutputDir)\n\t\tno_of_jobs += 1\n\t\t\n\t\treturnData = PassingData()\n\t\treturnData.jobDataLs = []\n\t\tfor jobData in inputData.jobDataLs:\n\t\t\tinputF = jobData.vcfFile\n\t\t\tif maxContigID:\n\t\t\t\tcontig_id = self.getContigIDFromFname(inputF.name)\n\t\t\t\ttry:\n\t\t\t\t\tcontig_id = int(contig_id)\n\t\t\t\t\tif contig_id>maxContigID:\t#skip the small contigs\n\t\t\t\t\t\tcontinue\n\t\t\t\texcept:\n\t\t\t\t\tsys.stderr.write('Except type: %s\\n'%repr(sys.exc_info()))\n\t\t\t\t\timport traceback\n\t\t\t\t\ttraceback.print_exc()\n\t\t\tinputFBaseName = os.path.basename(inputF.name)\n\t\t\tcommonPrefix = inputFBaseName.split('.')[0]\n\t\t\toutputVCF = File(os.path.join(topOutputDir, '%s.vcf'%(commonPrefix)))\n\t\t\tabstractMapperJob = self.addAbstractMapperLikeJob(workflow, executable=workflow.SplitNamVCFIntoMultipleSingleChrVCF, \\\n\t\t\t\t\tinputVCF=inputF, outputF=outputVCF, \\\n\t\t\t\t\tparentJobLs=[topOutputDirJob]+jobData.jobLs, transferOutput=False, job_max_memory=200,\\\n\t\t\t\t\textraArguments=None, extraDependentInputLs=[])\n\t\t\t\n\t\t\tVCFGzipOutputF = File(\"%s.gz\"%outputVCF.name)\n\t\t\tVCFGzipOutput_tbi_F = File(\"%s.gz.tbi\"%outputVCF.name)\n\t\t\tbgzip_tabix_VCF_job = self.addBGZIP_tabix_Job(workflow, bgzip_tabix=workflow.bgzip_tabix, \\\n\t\t\t\t\tparentJobLs=[abstractMapperJob], inputF=abstractMapperJob.output, outputF=VCFGzipOutputF, \\\n\t\t\t\t\ttransferOutput=transferOutput)\n\t\t\t\n\t\t\treturnData.jobDataLs.append(PassingData(jobLs=[bgzip_tabix_VCF_job], vcfFile=VCFGzipOutputF, \\\n\t\t\t\t\t\t\t\t\ttbi_F=VCFGzipOutput_tbi_F, \\\n\t\t\t\t\t\t\t\t\tfileLs=[VCFGzipOutputF, VCFGzipOutput_tbi_F]))\n\t\t\t\n\t\t\tno_of_jobs += 2\n\t\tsys.stderr.write(\"%s jobs.\\n\"%(self.no_of_jobs))\n\t\treturn returnData",
"def multiproc_vca(subcube_locs,channels,output_loc,fig_loc,dimensions):\n\t\n\twith schwimmbad.MultiPool() as pool:\n\t\tprint('started multi processing')\n\t\tprint(datetime.datetime.now())\n\n\t\t#create the lists for multiprocessing\n\t\t#vcacube=[f'{subcube_locs}_{dimensions}x{dimensions}_x{i}_y{j}.fits' for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\t\tvcacube=[f'{k}_{dimensions}x{dimensions}_x{i}_y{j}.fits' for k in subcube_locs for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\t\tchansamps=[channels for j in np.arange(0,dimensions) for k in subcube_locs for i in np.arange(0,dimensions)]\n\t\t#arrayloc=[f'{output_loc}_{dimensions}x{dimensions}_x{i}_y{j}' for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\t\tarrayloc=[f'{k}_{dimensions}x{dimensions}_x{i}_y{j}' for k in output_loc for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\t\t#figloc=[f'{fig_loc}_{dimensions}x{dimensions}_x{i}_y{j}' for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\t\tfigloc=[f'{k}_{dimensions}x{dimensions}_x{i}_y{j}' for k in fig_loc for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\n\n\t\tinputs=list(zip(vcacube,chansamps,arrayloc,figloc))\n\t\tprint(f'THESE ARE THE INPUTS FOR MULTIPROCESSING:{inputs}')\n\n\t\tout = list(pool.map(do_vca, inputs))\n\t\tprint('finished multiprocessing')\n\t\tprint(datetime.datetime.now())\n\tprint(out)",
"def read_data(self, path, **kwargs):\n\n from glob import glob\n import os\n sc = self.sc\n pdt_lc = np.dtype([('pos', 'f4', 3),('vel', 'f4', 3)])\n\n blockids = kwargs['blockids']\n\n def set_particle_IDs_partition(index, iterator): \n \"\"\"\n Use the aggregate partition counts to set monotonically increasing \n particle indices\n \"\"\"\n p_counts = partition_counts.value\n local_index = 0\n start_index = sum([p_counts[i] for i in range(index)])\n for arr in iterator:\n arr['iOrder'] = range(start_index + local_index, start_index + local_index + len(arr))\n arr['iGroup'] = loc_to_glob_map_b.value[index]\n local_index += len(arr)\n yield arr\n \n def read_file(index, i, chunksize=102400): \n for part,filename in i:\n timein = time.time()\n with open(filename,'rb') as f: \n header = f.read(62500)\n while True:\n chunk = f.read(chunksize*24)\n if len(chunk): \n p_arr = np.frombuffer(chunk, pdt_lc)\n new_arr = np.zeros(len(p_arr), dtype=pdt)\n new_arr['pos'] = p_arr['pos']\n yield new_arr\n else: \n t_elapsed = time.time()-timein\n rate = os.path.getsize(filename)/1e6/t_elapsed\n print 'spark_fof: reading %s took %d seconds in partition %d, %f MB/sec'%(filename, t_elapsed, index, rate)\n break\n \n # determine which files to read\n get_block_ids = re.compile('blk\\.(\\d+)\\.(\\d+)\\.(\\d+)?')\n\n if blockids is None: \n files = glob(os.path.join(self.path,'*/*'))\n else: \n files = []\n for dirname, subdirlist, filelist in os.walk(path):\n try: \n dirnum = int(os.path.basename(dirname))\n if dirnum in blockids: \n for f in filelist:\n ids = get_block_ids.findall(f)\n if len(ids) > 0:\n if all(int(x) in blockids for x in ids[0]):\n files.append(os.path.join(dirname,f))\n except ValueError: \n pass\n\n files.sort()\n nfiles = len(files) \n self.nPartitions = nfiles\n\n print 'spark_fof: Number of input files: ', nfiles\n\n # get particle counts per partition\n nparts = {i:_get_nparts(filename,62500,pdt_lc.itemsize) for i,filename in enumerate(files)}\n\n print 'spark_fof: Total number of particles: ', np.array(nparts.values()).sum()\n \n # set up the map from x,y,z to partition id \n ids = map(lambda x: tuple(map(int, get_block_ids.findall(x)[0])), files)\n ids_map = {x:i for i,x in enumerate(ids)}\n self.ids_map = ids_map\n loc_to_glob_map_b = self.local_to_global_map\n \n ids_map_b = sc.broadcast(ids_map)\n loc_to_glob_map_b = sc.broadcast(loc_to_glob_map_b)\n\n partition_counts = sc.broadcast(nparts)\n\n rec_rdd = (sc.parallelize(zip(ids,files), numSlices=self.nPartitions)\n .map(lambda (id,filename): (ids_map_b.value[id],filename))\n .partitionBy(self.nPartitions).cache()\n .mapPartitionsWithIndex(read_file, preservesPartitioning=True)\n .mapPartitionsWithIndex(set_particle_IDs_partition, \n preservesPartitioning=True))\n \n return rec_rdd",
"def main(): \n \n # parse command line arguments\n parser = argparse.ArgumentParser(description='Runs variant calling on pileup file and stores in vfc file')\n parser.add_argument('--use-read-quality', default=False, action='store_true',\n help='tells the algorithm to estimate p from read qualities')\n parser.add_argument('--call-less-positions', default=False, action='store_true',\n help='tells the program to call less positions (not whole pileup file)')\n parser.add_argument('--input-file', default='merged-normal.pileup', type=str,\n help='path to input file in pileup format')\n parser.add_argument('--output-file', default='Make name from input name', type=str,\n help='name for the output vcf file. If not given, will be created from input file name')\n parser.add_argument('--p', default='0.99', type=float,\n help='probability estimate of one nucleotide read being correct, used by vc algorithm')\n parser.add_argument('--positions-to-call', default='10000', type=int,\n help='how many positions to call if call-less-positions set to true')\n args = parser.parse_args()\n if args.output_file == 'Make name from input name':\n args.output_file = args.input_file + '.vcf'\n \n variant_caller = VariantCaller()\n sample = 'SAMPLE1'\n \n # creates vcf file\n create_vcf_start = time.time()\n vcf = create_vcf_file(args.output_file, sample)\n create_vcf_end = time.time()\n print('Vcf header created. Elapsed time: {}'.format(create_vcf_end - create_vcf_start))\n\n main_loop_start = time.time()\n position_count = 0\n variant_caller_time = 0\n positions_with_variants = 0\n write_vcf_time = 0\n\n for pileup_line in pileup_reader(args.input_file):\n # calls variant for each pileup line\n variant_caller_start = time.time()\n variant_caller.call_variant(pileup_line, args.p, args.use_read_quality)\n if pileup_line['alts'] != '.':\n positions_with_variants += 1\n variant_caller_time += time.time() - variant_caller_start\n\n # writes line in VCF file\n write_vcf_start = time.time()\n write_vcf_line(pileup_line, vcf, sample)\n write_vcf_time = time.time() - write_vcf_start\n\n position_count += 1\n if args.call_less_positions and (position_count >= args.positions_to_call):\n break\n \n main_loop_end = time.time()\n total_running_time = main_loop_end - main_loop_start\n\n print('Processed {} positions. Found variants at {} positions.'.format(position_count, positions_with_variants))\n\n print('Total running time is {}'.format(total_running_time))\n print('Pileup reader: {}'.format(total_running_time - variant_caller_time - write_vcf_time))\n print('Variant calling: {}'.format(variant_caller_time))\n print('Vcf writing: {}'.format(write_vcf_time))",
"def vcf_samples(vcffile):\n try:\n vcf_reader = vcf.Reader(open(vcffile, 'r'))\n return vcf_reader.samples\n except Exception as error:\n print(f\"Could not read vcffile {vcffile}: continuing without vcf data: {str(error)}\")\n\n return []",
"def multicore_find_and_download_songs(kwargs):\n reference_file = kwargs[\"reference_file\"]\n lines = []\n with open(reference_file, \"r\", encoding=\"utf-8\") as file:\n for line in file:\n lines.append(line)\n cpu_count = kwargs[\"multi_core\"]\n number_of_songs = len(lines)\n songs_per_cpu = number_of_songs // cpu_count\n extra_songs = number_of_songs % cpu_count\n\n cpu_count_list = []\n for cpu in range(cpu_count):\n songs = songs_per_cpu\n if cpu < extra_songs:\n songs = songs + 1\n cpu_count_list.append(songs)\n\n index = 0\n file_segments = []\n for cpu in cpu_count_list:\n right = cpu + index\n segment = lines[index:right]\n index = index + cpu\n file_segments.append(segment)\n\n processes = []\n segment_index = 0\n for segment in file_segments:\n p = multiprocessing.Process(\n target=multicore_handler, args=(segment_index, segment, kwargs.copy())\n )\n processes.append(p)\n segment_index += 1\n\n for p in processes:\n p.start()\n for p in processes:\n p.join()",
"def main(file_list):\n data_store = {}\n \n for file in file_list:\n sample_id = get_sample_id(file)\n data_store[sample_id] = {}\n data_store[sample_id][\"sample_type\"], data_store[sample_id][\"out_filename\"], data_store[sample_id][\"out_location\"] = check_name(file, sample_id)\n data_store[sample_id][\"df_parameters\"], data_store[sample_id][\"df_values\"], data_store[sample_id][\"df_parameters_for_values\"] = data_in(file, sample_id)\n if data_store[sample_id][\"sample_type\"] == \"EFC\":\n binder_mass = efc_calcs(data_store[sample_id][\"df_parameters\"])\n elif data_store[sample_id][\"sample_type\"] == \"OPC\":\n binder_mass = opc_calcs(data_store[sample_id][\"df_parameters\"])\n data_store[sample_id][\"df_values\"] = tidy_val_df(data_store[sample_id][\"df_values\"], binder_mass)\n data_store[sample_id][\"df_parameters\"] = tidy_param_df(sample_id, data_store[sample_id][\"df_parameters\"], data_store[sample_id][\"out_filename\"])\n for key, value in data_store.items():\n write_to_excel(key, value[\"df_parameters\"], value[\"df_values\"], value[\"df_parameters_for_values\"], value[\"out_location\"])",
"def step_parallel(in_csv_filename, terrestrial_data, marine_data, ancillary_path,\n out_csv_filename, from_gbif=True):\n csv_filename_pairs, header = get_chunk_files(\n in_csv_filename, out_csv_filename=out_csv_filename)\n\n# in_csv_fn, out_csv_fn = csv_filename_pairs[0]\n# intersect_csv_and_shapefiles(in_csv_fn, terrestrial_data,\n# marine_data, ancillary_path, out_csv_fn, False)\n\n with ProcessPoolExecutor() as executor:\n for in_csv_fn, out_csv_fn in csv_filename_pairs:\n executor.submit(\n intersect_csv_and_shapefiles, in_csv_fn, terrestrial_data,\n marine_data, ancillary_path, out_csv_fn, from_gbif)\n\n try:\n outf = open(out_csv_filename, 'w', encoding='utf-8')\n outf.write('{}'.format(header))\n smfile_linecount = 0\n for _, small_csv_fn in csv_filename_pairs:\n curr_linecount = get_line_count(small_csv_fn) - 1\n print('Appending {} records from {}'.format(\n curr_linecount, small_csv_fn))\n # Do not count header\n smfile_linecount += (curr_linecount)\n lineno = 0\n try:\n for line in open(small_csv_fn, 'r', encoding='utf-8'):\n # Skip header in each file\n if lineno == 0:\n pass\n else:\n outf.write('{}'.format(line))\n lineno += 1\n except Exception as inner_err:\n print('Failed to write {} to merged file; {}'.format(small_csv_fn, inner_err))\n except Exception as outer_err:\n print('Failed to write to {}; {}'.format(out_csv_filename, outer_err))\n finally:\n outf.close()\n\n lgfile_linecount = get_line_count(out_csv_filename) - 1\n print('Total {} of {} records written to {}'.format(\n lgfile_linecount, smfile_linecount, out_csv_filename))",
"def compute_chunk_features(mp3_file):\n # Extract MP3 file to a mono, 10kHz WAV file\n sox_command = \"/usr/local/bin/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))",
"def GatherVcfs(\n b: hb.Batch,\n input_vcfs: List,\n disk_size: int,\n output_vcf_path: str = None,\n) -> Job:\n j = b.new_job('VQSR: FinalGatherVcf')\n j.image(utils.GATK_IMAGE)\n j.memory(f'16G')\n j.storage(f'{disk_size}G')\n j.declare_resource_group(\n output_vcf={'vcf.gz': f'{NAME}_gathered.vcf.gz', 'vcf.gz.tbi': f'{NAME}_gathered.vcf.gz.tbi'}\n )\n\n input_cmdl = ' '.join([f'--input {v}' for v in input_vcfs])\n j.command(\n f\"\"\"set -euo pipefail\n # --ignore-safety-checks makes a big performance difference so we include it in \n # our invocation. This argument disables expensive checks that the file headers \n # contain the same set of genotyped samples and that files are in order \n # by position of first record.\n gatk --java-options -Xms6g \\\\\n GatherVcfsCloud \\\\\n --gather-type BLOCK \\\\\n {input_cmdl} \\\\\n --output {j.output_vcf['vcf.gz']}\n tabix {j.output_vcf['vcf.gz']}\"\"\"\n )\n if output_vcf_path:\n b.write_output(j.output_vcf, f'{output_vcf_path}{NAME}_gathered{LABEL}')\n return j",
"def multiple_files_analysis(lower_wavelength, upper_wavelength, CCD_height, CCD_width, CCD_height_corr, CCD_width_corr, file_paths, file_path_corr_data, progress_update):\r\n \r\n all_files_data = []\r\n FWHM_data = []\r\n central_energy_data = []\r\n counter = 1\r\n\r\n for file_path in file_paths:\r\n analysis = QDot_Spectroscopy(file_path=r\"{}\".format(file_path), meas_corr_curve_file_path=r\"{}\".format(file_path_corr_data), lower_wavelength=lower_wavelength, upper_wavelength=upper_wavelength, CCD_height=CCD_height, CCD_width=CCD_width, CCD_height_corr=CCD_height_corr , CCD_width_corr=CCD_width_corr)\r\n\r\n twod_map_raw = analysis.matrix_map(bkg_reduction=False, data_correction=False)\r\n twod_map_no_bkg = analysis.matrix_map(bkg_reduction=True, data_correction=False)\r\n twod_map_raw_corr = analysis.matrix_map(bkg_reduction=False, data_correction=True)\r\n twod_map_no_bkg_corr = analysis.matrix_map(bkg_reduction=True, data_correction=True)\r\n Q_Dot_plot_data, fit_statistics = analysis.gaussian_fit()\r\n\r\n file_analysis = [twod_map_raw, twod_map_no_bkg, twod_map_raw_corr, twod_map_no_bkg_corr, Q_Dot_plot_data, fit_statistics]\r\n all_files_data.append(file_analysis)\r\n\r\n # Creates a histogram from the collected FWHM and central energy data from all the analyzed datafales containing quantumdots\r\n for FWHM_value in fit_statistics['FWHM'].to_numpy():\r\n FWHM_data.append(FWHM_value)\r\n for CE_value in fit_statistics['Centre'].to_numpy():\r\n central_energy_data.append(CE_value)\r\n\r\n progress_update.emit(counter * 100/len(file_paths))\r\n counter += 1\r\n \r\n return all_files_data, FWHM_data, central_energy_data",
"def load_batch(self, fpath, match, in_num):\n if in_num == None:\n in_num = input('Please specify IN number: ')\n\n if match == None:\n match = input('Please specify filename string to match for batch loading (ex. \\'_s2_\\'): ')\n\n # get a list of all matching files\n glob_match = f'{fpath}/*{match}*'\n files = glob.glob(glob_match)\n\n # load & concatenate files into a single dataframe\n data = pd.concat((pd.read_csv(file, header = [0, 1], index_col = 0, parse_dates=True, low_memory=False) for file in files)).sort_index()\n\n # extract sampling frequency\n s_freq = 1/(data.index[1] - data.index[0]).total_seconds()\n\n # reset the index to continuous time\n ind_freq = str(int(1/s_freq*1000000))+'us'\n ind_start = '1900-01-01 00:00:00.000'\n ind = pd.date_range(start = ind_start, periods=len(data), freq=ind_freq)\n data.index = ind\n\n # set metadata & attributes\n self.metadata = {'file_info':{'in_num': in_num, 'files': files, 'dir': fpath,\n 'match_phrase': match},\n 'analysis_info':{'s_freq': s_freq} }\n self.data = data\n self.s_freq = s_freq",
"def compute_chunk_features(mp3_file):\n # On convertit le fichier mp3 en un fichier wav mono, 1avec un sample rate de 10000Hertz: on utilise\n # On utilise l'application sox \"c:/Program Files (x86)/sox-14.4.0/sox\"\n\n sox_command = \"./sox-14.4.0/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))",
"def process_csvs(conn: Connection, basedir: Path) -> None:\n process_files(conn, basedir/\"files.csv\")\n process_notes(conn, basedir/\"notes.csv\")\n process_links(conn, basedir/\"links.csv\")\n process_clusters(conn, basedir/\"clusters.csv\")\n process_bibliography(conn, basedir/\"bibliography.csv\")\n process_citations(conn, basedir/\"citations.csv\")",
"def main(save_dir, img_dir, df, fname_col):\n\tpool = mp.Pool(mp.cpu_count())\n\tresult = pool.map(multi_run_wrapper,[(save_dir, img_dir, \n\t\t\t\t\t\tfname) for fname in df[fname_col].values[0:4]])",
"def _copyDataSetForFCSFileSample(self):\n\n # Get the datasets for the FCSFile sample\n dataSets = self._getDataSetForFCSFileSample()\n\n # Get all fcs files for the datasets\n dataSetFiles = self._getFilesForDataSets(dataSets)\n if len(dataSetFiles) == 0:\n self._message = \"Could not retrieve files for datasets from FCSFile sample.\"\n self._logger.error(self._message)\n return False\n\n # Store at the experiment level\n self._currentPath = self._experimentPath\n\n # Copy the files\n for fcsFile in dataSetFiles:\n self._copyFile(fcsFile, self._currentPath)\n\n # Return success\n return True",
"def calculate_mixture_features(args):\n workspace = args.workspace\n speech_dir = args.speech_dir\n noise_dir = args.noise_dir\n data_type = args.data_type\n fs = cfg.sample_rate\n dir_name = args.dir_name\n\n fid_clean = open(speech_dir, 'r')\n lines_clean = fid_clean.readlines()\n fid_clean.close()\n\n fid_reverb = open(noise_dir, 'r')\n lines_reverb = fid_reverb.readlines()\n fid_reverb.close()\n\n for files_clean, files_reverb in zip(lines_clean, lines_reverb):\n\n files_clean = files_clean.strip('\\n')\n files_reverb = files_reverb.strip('\\n')\n\n fid = open(files_clean,'r')\n wavLines_clean = fid.readlines()\n fid.close()\n fid = open(files_reverb,'r')\n wavLines_reverb = fid.readlines()\n fid.close()\n\n cnt = 0 \n\n for wavs_clean, wavs_reverb in zip(wavLines_clean, wavLines_reverb):\n \n t1 = time.time()\n # cnt = 0\n\n wav_name_clean, wav_path_clean = wavs_clean.split()\n wav_name_reverb, wav_path_reverb = wavs_reverb.split()\n \n # Read clean speech audio. \n (speech_audio, _) = read_audio(wav_path_clean, target_fs=fs)\n \n # Read reverb speech audio. \n (noise_audio, _) = read_audio(wav_path_reverb, target_fs=fs)\n \n # Cut reverb speech to the same length as clean speech. \n if len(noise_audio) > len(speech_audio):\n noise_audio = noise_audio[0: len(speech_audio)]\n \n # Extract spectrogram. \n mixed_complx_x = calc_sp(noise_audio, mode='complex')\n speech_x = calc_sp(speech_audio, mode='magnitude')\n\n # Write out features. \n out_feat_path = os.path.join(workspace, \"features\", \"spectrogram\", \n data_type, dir_name, \"%s.p\" % wav_name_reverb)\n create_folder(os.path.dirname(out_feat_path))\n data = [mixed_complx_x, speech_x, wav_name_reverb]\n pickle.dump(data, open(out_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n \n # Print. \n if cnt % 100 == 0:\n print(cnt)\n # print(mixed_complx_x)\n # print(speech_x)\n \n cnt += 1\n\n print(\"Extracting feature time: %s\" % (time.time() - t1))",
"def main(output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n baseurl = 'http://codeandbeer.org/virtual/BigData/Labs/'\n files = ['Booking-20151012-1322.csv', 'Booking-20181025-1232.csv']\n for filename in files:\n r = requests.get(baseurl+filename, stream=True)\n if r.status == 200:\n with open(output_filepath+\"/\"+filename, \"wb\") as f:\n f.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)"
] | [
"0.60877764",
"0.6062956",
"0.6029365",
"0.5959184",
"0.5855619",
"0.5839044",
"0.5821658",
"0.5806925",
"0.5791175",
"0.5772094",
"0.5660737",
"0.564774",
"0.54801124",
"0.5463999",
"0.5415949",
"0.5407162",
"0.5381161",
"0.53543305",
"0.5353785",
"0.5335812",
"0.53349584",
"0.53005266",
"0.52933437",
"0.527095",
"0.5259969",
"0.52597106",
"0.5248237",
"0.5242822",
"0.521794",
"0.52164066"
] | 0.62807316 | 0 |
Returns the variants from this VCF with duplicate sites filtered out. If any site position appears more than once, all variants at that position are discarded. If target_sites_pos is not None, only variants from this VCF whose positions are present in the target sample data file are returned. | def filter_duplicates_target(vcf, target_sites_pos=None):
if target_sites_pos is not None:
def site_in_target(site):
return site in target_sites_pos
else:
def site_in_target(site):
return True
row = next(vcf, None)
bad_pos = -1
for next_row in vcf:
if bad_pos == -1 and next_row.POS != row.POS:
if site_in_target(row.POS):
yield row
else:
if bad_pos == -1:
bad_pos = row.POS
elif bad_pos != next_row.POS:
bad_pos = -1
row = next_row
if row is not None and bad_pos != -1 and site_in_target(row.POS):
yield row | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_equivalent_sites(self, site):\n for sites in self.equivalent_sites:\n if site in sites:\n return sites\n\n raise ValueError(\"Site not in structure\")",
"def sites(self):\n return self.data.sites.values",
"def exclude_duplicates(self, variants):\n \n unique_vars = {}\n for variant in variants:\n key = variant[0].child.get_key()\n if key not in unique_vars:\n unique_vars[key] = list(variant)\n else:\n result = variant[1]\n inh = variant[2]\n hgnc = variant[3]\n \n # append the check type and inheritance type to the first\n # instance of the variant\n unique_vars[key][1] += [x for x in result if x not in unique_vars[key][1]]\n unique_vars[key][2] += [x for x in inh if x not in unique_vars[key][2]]\n \n unique_vars[key][1] = sorted(unique_vars[key][1])\n unique_vars[key][2] = sorted(unique_vars[key][2])\n \n # add the gene IDs that are unique to the current variant\n # to the merged variant\n genes = [x for x in hgnc if x not in unique_vars[key][3]]\n unique_vars[key][3] += genes\n \n unique_vars = [tuple(unique_vars[x]) for x in unique_vars]\n \n return unique_vars",
"def get_filtered_pedigree_with_samples(self, _user=None):\n result = []\n seen = set()\n # Only select cases that have an active variant set.\n # TODO Perspectively, we need to distinguish between Small and Structural VariantSets.\n for case in self.get_active_smallvariant_cases():\n for line in case.get_filtered_pedigree_with_samples():\n if line[\"patient\"] not in seen:\n result.append(line)\n seen.add((case.name, line[\"patient\"]))\n return result",
"def process_VCF(input_vcf, targets_file, out_vcf = None) :\n\n\tfVCF_OUT = None\n\tif out_vcf is not None :\n\t\tfVCF_OUT = open(out_vcf, 'w')\n\tfDUP_OUT = open(targets_file, 'w')\n\n\tvariants_dict = {}\n\tvariants_list = []\n\tnum_redundant, num_kept = 0, 0\n\tfINVCF = open(input_vcf, 'r')\n\tfor line in fINVCF :\n\t\tif line.startswith('#') :\n\t\t\tif line.startswith(\"#CHROM\") :\n\t\t\t\tindividuals = re.split('\\t', line.strip())[9:]\n\t\t\t\tstdout.write(\"%d individuals included in the VCF file: %s\\n\" %(len(individuals), input_vcf))\n\t\t\tif fVCF_OUT :\n\t\t\t\tfVCF_OUT.write(line)\n\t\telse :\n\t\t\ttmp_line = re.split('\\t', line.strip())\n\t\t\tref_base = tmp_line[3]\n\t\t\talt_base = tmp_line[4]\n\t\t\tchrom_id = tmp_line[0]\n\t\t\tchrom_pos = tmp_line[1]\n\t\t\tqual = tmp_line[5]\n\t\t\tfilter = tmp_line[6]\t\t\t\t\t# PASS or FILTERED by VQSR #\n\t\t\t# fix sites having different types of calls: redundant calls #\n\t\t\tif not variants_dict.has_key(chrom_id+':'+chrom_pos) :\n\t\t\t\tvariants_dict[chrom_id+':'+chrom_pos] = line.strip()\n\t\t\t\tvariants_list.append(chrom_id+':'+chrom_pos)\n\t\t\telse :\n\t\t\t\tnum_redundant += 1\n\t\t\t\tsame_site_diff_call = re.split('\\t', variants_dict[chrom_id+':'+chrom_pos])\n\t\t\t\ttmp_qual = same_site_diff_call[5]\n\t\t\t\ttmp_filter = same_site_diff_call[6]\n\t\t\t\ttmp_alt_base = same_site_diff_call[4]\n\t\t\t\tfDUP_OUT.write(\"%s\\n%s\\n\" %(variants_dict[chrom_id+':'+chrom_pos], line.strip()))\n\t\t\t\tif (tmp_filter != \"PASS\" and filter != \"PASS\") or (filter == \"PASS\" and tmp_filter == \"PASS\") :\t\t# if two different call both passed the VQSR or both not, we remove it from the final call set #\t\n\t\t\t\t\tvariants_dict.pop(chrom_id+':'+chrom_pos)\n\t\t\t\t\tvariants_list.remove(chrom_id+':'+chrom_pos)\n\t\t\t\t\tif filter == \"PASS\" :\n\t\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos+\" both pass\\n\")\n\t\t\t\t\telse :\n\t\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos+\" both filtered\\n\")\n\t\t\t\telif filter == \"PASS\" and tmp_filter != filter :\n\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos + \" second kept\\n\")\n\t\t\t\t\tvariants_dict[chrom_id+':'+chrom_pos] = line.strip()\n\t\t\t\t\tnum_kept += 1\n\t\t\t\telif tmp_filter == \"PASS\" and tmp_filter != filter :\n\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos + \" first kept\\n\")\n\t\t\t\t\tnum_kept += 1\n\tstdout.write(\"%d\\t%d\\n\" %(num_redundant, num_kept))\n\n\tif fVCF_OUT :\n\t\tfor i in range(len(variants_list)) :\n\t\t\tfVCF_OUT.write(\"%s\\n\" %(variants_dict[variants_list[i]]))\n\t\tfVCF_OUT.close()\n\tfINVCF.close()",
"def duplicate_subassemblies(self):\n dupes = set()\n top_dupes = defaultdict(set)\n\n # first, make a list of all duplicated non-leaf fragments\n for flow in self.fg.flows():\n for dirn in ('Input', 'Output'):\n asm = set(k for k in self.fg.fragments_with_flow(flow, direction=dirn) if k.term.is_fg)\n if len(asm) > 1:\n dupes |= asm\n\n # next, screen them down to top-level frags (frags whose parents are nonduplicated)\n for frag in dupes:\n if frag.reference_entity not in dupes:\n top_dupes[frag.flow, frag.direction].add(frag)\n\n # finally, generate the duplicate sets\n for v in top_dupes.values():\n yield v",
"def variants(\n self,\n *,\n samples=None,\n isolated_as_missing=None,\n alleles=None,\n impute_missing_data=None,\n copy=None,\n left=None,\n right=None,\n ):\n interval = self._check_genomic_range(left, right)\n if impute_missing_data is not None:\n warnings.warn(\n \"The impute_missing_data parameter was deprecated in 0.3.0 and will\"\n \" be removed. Use ``isolated_as_missing=False`` instead of\"\n \"``impute_missing_data=True``.\",\n FutureWarning,\n )\n # Only use impute_missing_data if isolated_as_missing has the default value\n if isolated_as_missing is None:\n isolated_as_missing = not impute_missing_data\n if copy is None:\n copy = True\n # See comments for the Variant type for discussion on why the\n # present form was chosen.\n variant = tskit.Variant(\n self,\n samples=samples,\n isolated_as_missing=isolated_as_missing,\n alleles=alleles,\n )\n if left == 0 and right == self.sequence_length:\n start = 0\n stop = self.num_sites\n else:\n start, stop = np.searchsorted(self.sites_position, interval)\n\n if copy:\n for site_id in range(start, stop):\n variant.decode(site_id)\n yield variant.copy()\n else:\n for site_id in range(start, stop):\n variant.decode(site_id)\n yield variant",
"def sites(self):\n return self._sites",
"def sites(self):\n return self._sites",
"def find_pose_sample_outliers(self):\n # Find outliers in target poses\n outliers = []\n for sample in self._pose_samples:\n # Find nearest poses for the target one.\n pose_landmarks = sample.landmarks.copy()\n pose_classification = self.__call__(pose_landmarks)\n class_names = [class_name for class_name, count in pose_classification.items() if count == max(pose_classification.values())]\n\n # Sample is an outlier if nearest poses have different class or more than\n # one pose class is detected as nearest.\n if sample.class_name not in class_names or len(class_names) != 1:\n outliers.append(PoseSampleOutlier(sample, class_names, pose_classification))\n\n return outliers",
"def _extract_all_variant_locations(args):\n contig, variants_path = args\n locations = set()\n with pysam.VariantFile(variants_path) as vcf:\n for record in vcf.fetch(contig):\n locations.add((record.chrom, record.pos))\n\n return locations",
"def get_variants_by_protein_position(self, _transcript_id, _protein_pos):\n try:\n p = self.proteins[_transcript_id]\n if _protein_pos not in self.proteinPos[_transcript_id]:\n raise ValueError(\"Peptide does not start a \"\n \"{pos} in protein with transcript ID {transcript}\".format(pos=_protein_pos,\n transcript=_protein_pos))\n var = dict()\n fs = dict()\n shift = 0\n for i in xrange(_protein_pos):\n for v in p.vars.get(i, []):\n if v.type in [VariationType.FSDEL, VariationType.FSINS]:\n shift = (v.get_shift()+shift) % 3\n if shift:\n fs.setdefault(i-_protein_pos, []).append(v)\n else:\n fs.clear()\n for j in xrange(_protein_pos, _protein_pos+len(self)):\n for v in p.vars.get(j, []):\n var.setdefault(j, []).append(v)\n fs.update(var)\n return fs\n except KeyError:\n raise ValueError(\"Peptide does not origin from protein with \"\n \"transcript ID {transcript}\".format(transcript=_transcript_id))",
"def sites(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DirectLineSiteArgs']]]]:\n return pulumi.get(self, \"sites\")",
"def rm_duplicates(self):\n # get uniq representation of existing detection documents\n existing = set(ed.uniq_data for ed in self.existing_detections)\n # remove duplicates\n for idx in xrange(len(self.new_detections)-1, -1, -1):\n nd = self.new_detections[idx]\n if nd.uniq_data in existing:\n self.new_detections.pop(idx)",
"def get_family_with_filtered_pedigree_with_samples(self, _user=None):\n result = defaultdict(list)\n seen = set()\n # Only select cases that have an active variant set.\n # TODO Perspectively, we need to distinguish between Small and Structural VariantSets.\n for case in self.get_active_smallvariant_cases():\n for line in case.get_filtered_pedigree_with_samples():\n if line[\"patient\"] not in seen:\n result[case.name].append(line)\n seen.add((case.name, line[\"patient\"]))\n return dict(result)",
"def get_unique_snps(self):\n\n for chromosome in self.snpsites.keys():\n\n for position in self.snpsites[chromosome].keys():\n for filenumber in range(len(self.vcffilenames)):\n\n if (\n self.snpsites[chromosome][position][filenumber] == True\n and sum(self.snpsites[chromosome][position]) == 1\n ): # First any(array) finds\n self.snp_positions[self.vcffilenames[filenumber]][chromosome][\n position\n ].update({\"unique\": True})\n elif (\n sum(self.snpsites[chromosome][position]) >= 2\n ): # there might be snp at same position but with different alt base\n\n snp_index = [\n i\n for i, j in enumerate(self.snpsites[chromosome][position])\n if j == True\n ]\n\n totalindex = len(snp_index)\n # Lets check the alt base in these vcf files using index\n # lets get array of alt bases from each file\n alt_snps = []\n for index in snp_index:\n alt_snps.append(\n self.snp_positions[self.vcffilenames[index]][\n chromosome\n ][position][\"alt\"]\n )\n\n # get the counts of the elements\n\n counts = self.count_list_elements_occurrences(alt_snps)\n\n for index in range(len(counts)):\n if counts[index] == 1:\n # this is unique, so occurred once\n self.snp_positions[self.vcffilenames[snp_index[index]]][\n chromosome\n ][position].update(\n {\"unique\": True}\n ) # vcffilenames[snp_index[index]] = this will be the filename\n # print(\"this is unique\", vcffilenames[snp_index[index]], chromosome, position, self.snp_positions[vcffilenames[snp_index[index]]][chromosome][position])\n\n # else:\n # \tvcf_database[\"self.snp_positions\"][chromosome + \"_\" + position].update({\"unique\":False})\n\n return",
"def vcf_to_position_set(path: str):\n with pysam.VariantFile(path) as v:\n contigs = v.header.contigs\n vs = set()\n with Pool() as workers:\n for r in workers.imap_unordered(_extract_all_variant_locations, (\n (contig, path)\n for contig in contigs)):\n vs.update(r)\n return vs",
"def getSites(self):\n return self.SelectedSites",
"def Sites(self):\n if self._sites is None or len(self._sites) == 0:\n return None\n return self._sites",
"def deduplicate_motif_sites(sites, length):\n for sites_by_region in sites:\n idx = 0\n if len(sites_by_region) > 1:\n while idx + 1 < len(sites_by_region):\n site_curr = sites_by_region[idx]\n site_next = sites_by_region[idx + 1]\n if site_next.start - site_curr.start < length:\n if site_curr.score >= site_next.score:\n sites_by_region.pop(idx + 1)\n else:\n sites_by_region.pop(idx)\n else:\n idx += 1\n return sites",
"def selectOfSample(self, indexes):\n index_set = set()\n for idx in indexes:\n i = list(self.sample[self.sample['masked'] == False].index)[idx]\n index_set.add(i)\n for ind in list(self.sample[self.sample['masked'] == False].index):\n if ind not in index_set:\n self.sample.at[ind, 'masked'] = True\n return index_set",
"def remove_duplicates(self, hits):\n\t\tseen = set()\n\t\tkeep = []\n\n\t\tfor i in range(len(hits)):\n\t\t\tif hits[i][\"Text\"] not in seen:\n\t\t\t\tseen.add(hits[i][\"Text\"])\n\t\t\t\tkeep.append(hits[i])\n\n\t\treturn keep",
"def filter_dups(saved_home, dups_info_home):\n orig_context_file = open(os.path.join(saved_home, 'data_for_corenlp', 'kp20k_training_context_for_corenlp.txt'),\n encoding='utf-8')\n context_lines = orig_context_file.readlines()\n orig_allkeys_file = open(os.path.join(saved_home, 'data_for_corenlp', 'kp20k_training_keyword_for_corenlp.txt'),\n encoding='utf-8')\n allkeys_lines = orig_allkeys_file.readlines()\n assert len(context_lines) == len(allkeys_lines)\n\n # filter out the duplicates in the validation and the testing datasets and the kp20k training dataset itself\n dups_info_datasets = ['kp20k_training', 'kp20k_validation', 'kp20k_testing',\n 'inspec_testing', 'krapivin_testing',\n 'nus_testing', 'semeval_testing']\n total_filtered_idx_set = set()\n for dataset in dups_info_datasets:\n filtered_idx_set = set()\n dups_info_file = open(\n os.path.join(dups_info_home, '{}_context_nstpws_dups_w_kp20k_training.txt'.format(dataset)), encoding='utf-8')\n for line in dups_info_file:\n line = line.strip()\n # inspec_testing_48 kp20k_training_433051 jc_sc:0.7368; affine invariants of convex polygons | affine invariants of convex polygons\n dups, titles = line.split(';')\n src_dup, filtered_dup, _ = dups.split()\n src_idx = int(src_dup.strip().split('_')[-1])\n filtered_idx = int(filtered_dup.strip().split('_')[-1])\n if dataset != 'kp20k_training':\n filtered_idx_set.add(filtered_idx)\n else:\n if src_idx not in filtered_idx_set:\n filtered_idx_set.add(filtered_idx)\n total_filtered_idx_set = total_filtered_idx_set.union(filtered_idx_set)\n print('Num of filtered kp20k training data: {}'.format(len(total_filtered_idx_set)))\n\n # also filter out the invalid data samples\n print('Finding the invalid data samples in the original kp20k training ...')\n for corpus_idx in tqdm(range(len(context_lines))):\n if context_lines[corpus_idx].strip().split() == [''] or allkeys_lines[corpus_idx].strip().split(' ; ') == ['']:\n total_filtered_idx_set.add(corpus_idx)\n print('Num of filtered kp20k training data: {}'.format(len(total_filtered_idx_set)))\n\n total_filtered_idxes = sorted(list(total_filtered_idx_set))\n for filter_idx in total_filtered_idxes:\n context_lines[filter_idx] = '\\n'\n allkeys_lines[filter_idx] = '\\n'\n\n filtered_context_file = open(os.path.join(saved_home, 'data_for_corenlp',\n 'kp20k_training_context_for_corenlp_filtered.txt'),\n 'w', encoding='utf-8')\n filtered_context_file.writelines(context_lines)\n\n filtered_allkeys_file = open(os.path.join(saved_home, 'data_for_corenlp',\n 'kp20k_training_keyword_for_corenlp_filtered.txt'),\n 'w', encoding='utf-8')\n filtered_allkeys_file.writelines(allkeys_lines)\n\n orig_context_file = open(os.path.join(saved_home, 'data_for_corenlp',\n 'kp20k_training_filtered_for_corenlp_idxes.txt'),\n 'w', encoding='utf-8')\n orig_context_file.write(' '.join([str(idx) for idx in total_filtered_idxes]) + '\\n')\n orig_context_file.write(str(len(total_filtered_idxes)) + '\\n')",
"def remove_duplicates(possible_vulns):\n return list(set(possible_vulns))",
"def filter_dups(self):\n def dups_filter():\n dups = set()\n for g1, g2, w in self.gen:\n if (min(g1, g2), max(g1, g2)) in dups:\n continue\n dups.add((min(g1, g2), max(g1, g2)))\n yield g1, g2, w\n return self.filter(dups_filter())",
"def without_duplicates(self) -> \"SampleDataSet\":\n return SampleDataSet(self._data.drop_duplicates())",
"def _remove_duplicates(self):\n self.search_query = remove_duplicates(self.search_query)",
"def sites(self) -> Sequence[LatticeSite]:\n return self._sites",
"def count_sites_under_condition_vcf_to_set(vcf_file,chrom,start,end,mincov=0,maxcov=10000,inds=\"all\",bgzip=True,nb_ind_with_min_cov=\"all\",nalleles=[1,2],snps=False):\n\tset_ok_sites = set()\n\tinput_vcf=vcf.Reader(fsock=None, filename=vcf_file, compressed=bgzip, prepend_chr=\"False\", strict_whitespace=False)#open the vcf parser\n\tnsites_OK=0\n\tnsites_total=0\n\t#print \"in count_sites_under_condition_vcf nb_ind_with_min_cov :\",nb_ind_with_min_cov, \" inds\", ind\n\tif chrom!=\"all\":\n\t\t\t#print chrom,start,end\n\t\t\tcheck=len(sh.tabix(vcf_file,str(chrom)+\":\"+str(start)+\"-\"+str(end)))\n\t\t\t#print check\n\t\t\t#print \"check;' \",check,\"'\"\n\t\t\tif check==0: \n\t\t\t\treturn [0,0]\n\t\t\tfor record in input_vcf.fetch(chrom,start,end):# for every site\n\t\t\t\tcond=checkSnp_Cov(input_vcf,record,mincov,maxcov,inds=inds,nalleles=nalleles,nb_ind_with_min_cov=nb_ind_with_min_cov,snps=snps)# check if the site respect our condition\n\t\t\t\tnsites_total+=1\n\t\t\t\tif cond:# if it does\n\t\t\t\t\t#if any([int(sample['DP'])<5 for sample in record.samples]): print [int(sample['DP']) for sample in record.samples] # to check this argument nb_ind_with_min_cov\n\t\t\t\t\tset_ok_sites.add(str(record.CHROM)+\"_\"+str(record.POS))\n\treturn set_ok_sites",
"def get_list_of_sites(self):\n\n return self.site_db_obj.get_list_of_sites()"
] | [
"0.5047111",
"0.48624134",
"0.4791929",
"0.47910732",
"0.47692823",
"0.4745043",
"0.47057143",
"0.469012",
"0.469012",
"0.46753037",
"0.45791847",
"0.45689055",
"0.4561133",
"0.45400792",
"0.45036858",
"0.4499734",
"0.4495113",
"0.44825026",
"0.44688737",
"0.44688693",
"0.44481",
"0.44377595",
"0.44276807",
"0.43879327",
"0.43750745",
"0.43728653",
"0.43715346",
"0.43622532",
"0.43562543",
"0.43539077"
] | 0.72600794 | 0 |
This function creates the paths used to save the training results and weights. | def make_path(data_dir, base, exp_name):
train_path = os.path.join(data_dir, 'Train')
valid_path = os.path.join(data_dir, 'Validation')
if (not os.path.isdir(train_path)) or (not os.path.isdir(valid_path)):
print('Please split images into train directory and validation directory.')
exit()
freeze_weight_save_path = os.path.join(base, 'save_weights_initial', exp_name + '.h5')
unfreeze_weight_save_path = os.path.join(base, 'save_weights_final', exp_name + '.h5')
freeze_img_save_path = os.path.join(base, 'save_plots_initial', exp_name)
unfreeze_img_save_path = os.path.join(base, 'save_plots_final', exp_name)
if not os.path.exists(os.path.join(base, 'save_weights_initial')):
os.makedirs(os.path.join(base, 'save_weights_initial'))
if not os.path.exists(os.path.join(base, 'save_weights_final')):
os.makedirs(os.path.join(base, 'save_weights_final'))
if not os.path.exists(os.path.join(base, 'save_plots_initial')):
os.makedirs(os.path.join(base, 'save_plots_initial'))
if not os.path.exists(os.path.join(base, 'save_plots_final')):
os.makedirs(os.path.join(base, 'save_plots_final'))
if not os.path.exists(freeze_img_save_path):
os.makedirs(freeze_img_save_path)
if not os.path.exists(unfreeze_img_save_path):
os.makedirs(unfreeze_img_save_path)
return train_path, valid_path, freeze_weight_save_path, unfreeze_weight_save_path, freeze_img_save_path, unfreeze_img_save_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_save_directories(self):\n # Set the name for the saved model and training summary directory\n self.model_dir = op.join('../logs', self.name, 'models')\n self.train_summary_dir = op.join('../logs', self.name, 'training_summary')\n\n if not op.exists(self.model_dir):\n if not op.exists(op.join('../logs', self.name)):\n if not op.exists('../logs'):\n os.mkdir('../logs')\n os.mkdir(op.join('../logs', self.name))\n os.mkdir(self.model_dir)\n\n if not op.exists(self.train_summary_dir):\n if not op.exists(op.join('../logs', self.name)):\n if not op.exists('../logs'):\n os.mkdir('../logs')\n os.mkdir(op.join('../logs', self.name))\n os.mkdir(self.train_summary_dir)\n return self",
"def save_weights(self, path):\n Path(path).mkdir(parents=True, exist_ok=True)\n self.model.save_weights(path)",
"def set_model_for_train(self):\n if self.train_time is None:\n self.train_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n\n self.curr_folder = data_functions.create_path(\n self.save_path, self.train_time)\n logger.info(f\"training results will be stored in: {self.curr_folder}\")\n\n self.save_model_params()\n self.train_generator, self.val_generator = \\\n self.clarifruit_train_val_generators()\n keras_logs_path = self.set_model_checkpint()\n\n return keras_logs_path",
"def _create_dir(self):\n images_train_dir = os.path.join('images', self.name, 'train')\n images_test_dir = os.path.join('images', self.name, 'test')\n log_dir = os.path.join('log', self.name)\n model_dir = os.path.join('checkpoint', self.name)\n if not os.path.exists(images_train_dir):\n os.makedirs(images_train_dir)\n\n if not os.path.exists(images_test_dir):\n os.makedirs(images_test_dir)\n\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n return images_train_dir, images_test_dir, log_dir, model_dir",
"def save(self, folder):\n self.generator.save_weights('%s/generator.h5'%folder)\n self.critic.save_weights('%s/critic.h5'%folder)",
"def build_result_folder(timestamp=str(int(time.time()))):\n out_path = os.path.abspath(os.path.join(os.path.curdir, \"runs\", timestamp))\n print(\"Writing to {}\\n\".format(out_path))\n\n data_path = os.path.abspath(os.path.join(out_path, \"data\"))\n evaluation_path = os.path.abspath(os.path.join(out_path, \"evaluation\"))\n\n if not os.path.exists(out_path):\n os.makedirs(data_path)\n os.makedirs(evaluation_path)\n return out_path",
"def form_results():\n folder_name = \"/{0}_{1}_{2}_{3}_{4}_{5}_Adversarial_Autoencoder\". \\\n format(time.strftime(\"%Y-%m-%d %Hh%Mm%Ss\", time.localtime()), z_dim,\n learning_rate, batch_size, n_epochs, beta1)\n tensorboard_path = results_path + folder_name + '/Tensorboard'\n saved_model_path = results_path + folder_name + '/Saved_models/'\n log_path = results_path + folder_name + '/log'\n if not os.path.exists(results_path + folder_name):\n os.mkdir(results_path + folder_name)\n os.mkdir(tensorboard_path)\n os.mkdir(saved_model_path)\n os.mkdir(log_path)\n return tensorboard_path, saved_model_path, log_path",
"def form_results():\n folder_name = \"/{0}_{1}_{2}_{3}_{4}_{5}_autoencoder\". \\\n format(datetime.datetime.now(), z_dim, learning_rate, batch_size, n_epochs, beta1)\n tensorboard_path = results_path + folder_name + '/Tensorboard'\n saved_model_path = results_path + folder_name + '/Saved_models/'\n log_path = results_path + folder_name + '/log'\n if not os.path.exists(results_path + folder_name):\n os.mkdir(results_path + folder_name)\n os.mkdir(tensorboard_path)\n os.mkdir(saved_model_path)\n os.mkdir(log_path)\n return tensorboard_path, saved_model_path, log_path",
"def form_results():\n folder_name = \"/{0}_{1}_{2}_{3}_{4}_{5}_Basic_NN_Classifier\". \\\n format(datetime.datetime.now(), z_dim, learning_rate, batch_size, n_epochs, beta1)\n tensorboard_path = results_path + folder_name + '/Tensorboard'\n saved_model_path = results_path + folder_name + '/Saved_models/'\n log_path = results_path + folder_name + '/log'\n if not os.path.exists(results_path + folder_name):\n os.mkdir(results_path + folder_name)\n os.mkdir(tensorboard_path)\n os.mkdir(saved_model_path)\n os.mkdir(log_path)\n return tensorboard_path, saved_model_path, log_path",
"def save(self, directory='saves/'):\n # Create dirpath for temporary dir\n if directory[-1] != '/':\n directory += '/'\n dirpath = directory + self.name + '/'\n\n if not os.path.exists(dirpath): \n os.makedirs(dirpath)\n else:\n raise Exception(f'Path {dirpath} already exists.')\n\n # DQNs & Optimizer\n torch.save(self.policy_net.state_dict(), f'{dirpath}dqn.pth')\n torch.save(self.optimizer.state_dict(), f'{dirpath}optimizer.pth')\n\n # Trainer pamameters\n params = {}\n for p in self.DEFAULT_VALUES.keys():\n params[p] = getattr(self, p)\n\n with open(f'{dirpath}trainer_parameters.pick', 'wb') as file:\n pickle.dump(params, file)\n\n # Zip the saves in one .zip archive\n zippath = f'{directory}{self.name}'\n shutil.make_archive(zippath, 'zip', dirpath)\n\n # Remove the directory dirpath and files inside\n shutil.rmtree(dirpath)\n\n # Display\n print(f'Model saved at {zippath}.zip')",
"def make_dir(self):\n folder_name = list()\n model_tags = {'lr': self.lr,\n 'dim': self.rnn_dim,\n 'drop': self.dropout}\n\n for key, value in model_tags.items():\n folder_name.append('{}-{}'.format(key, value))\n folder_name = '_'.join(folder_name)\n current_time = dt.now().strftime('%Y%m%d-%H%M%S')\n folder_path = os.path.join(self.model_checkpoint_path,\n self.model_wrapper.__class__.__name__,\n folder_name,\n current_time)\n os.makedirs(folder_path)\n model_path = os.path.join(folder_path, 'saved_model')\n return folder_path, model_path",
"def create_save_folder(self):\n absolute_output = os.path.abspath(self.output).replace(\"\\\\\", \"/\")\n if self.paddle_length_factor is not None:\n self.save_folder = f\"{absolute_output}/{self.env_name}/PaddleLength_\" \\\n f\"{self.paddle_length_factor}/session{self.session}\"\n else:\n self.save_folder = f\"{absolute_output}/{self.env_name}/StandardEnv/session{self.session}\"\n tmp_folder = self.save_folder\n\n folder_tree = []\n while True:\n if not os.path.exists(self.save_folder):\n folder_tree.insert(0, self.save_folder)\n self.save_folder = self.save_folder[:self.save_folder.rindex(\"/\")]\n else:\n self.save_folder = tmp_folder\n break\n for folder in folder_tree:\n os.mkdir(folder)",
"def make_path(self):\n folders = [\n f\"{self.save_path}{self.name}/json/\",\n f\"{self.save_path}{self.name}/images/\",\n ]\n if hasattr(self, \"masks\"):\n folders.append(f\"{self.save_path}{self.name}/masks/\")\n for folder in folders:\n if not os.path.exists(folder):\n os.makedirs(folder)",
"def save_training(self):\n\n filename = str(hashlib.sha1(str(self.training_data).encode(\"utf-8\"))\n .hexdigest())\n path = \"./training/\" + filename + \".json\"\n\n data = {\n \"states\": self.states,\n \"transitions\": self.transitions,\n \"matrix\": self.matrix.tolist()\n }\n\n with open(path, \"w\") as outfile:\n json.dump(data, outfile)",
"def make_path(params):\n if not tf.gfile.IsDirectory(params.ckpt_path):\n tf.gfile.MakeDirs(params.ckpt_path)\n if not tf.gfile.IsDirectory(params.best_ckpt_path):\n tf.gfile.MakeDirs(params.best_ckpt_path)\n if not tf.gfile.IsDirectory(params.summary_path):\n tf.gfile.MakeDirs(params.summary_path)\n if not os.path.isdir(params.log_path):\n os.makedirs(params.log_path)\n if not tf.gfile.IsDirectory(params.map_path):\n tf.gfile.MakeDirs(params.map_path)\n if not tf.gfile.IsDirectory(params.vocab_path):\n tf.gfile.MakeDirs(params.vocab_path)\n if not tf.gfile.IsDirectory(params.result_path):\n tf.gfile.MakeDirs(params.result_path)\n if not tf.gfile.IsDirectory(params.config_path):\n tf.gfile.MakeDirs(params.config_path)",
"def create_folders():\n if not os.path.exists(\"data/train-npy/\"):\n os.makedirs(\"data/train-npy/\")\n if not os.path.exists(\"data/test-npy/\"):\n os.makedirs(\"data/test-npy/\")\n if not os.path.exists(\"data/valid-npy/\"):\n os.makedirs(\"data/valid-npy/\")",
"def save(path=\"neu\", working_dir=\"..\", in_results=True):\n try:\n if in_results:\n os.mkdir(working_dir + \"/results/\" + path)\n else:\n os.mkdir(working_dir + \"/\" + path)\n print(\"Created Folder\")\n except OSError as e:\n pass\n if in_results:\n toDirectory = working_dir + \"/results/\" + path\n else:\n toDirectory = working_dir + \"/\" + path\n fromDirectory1 = working_dir + \"/\" + \"build/data\"\n fromDirectory2 = working_dir + \"/\" + \"build/log\"\n copy_tree(fromDirectory1, toDirectory)\n copy_tree(fromDirectory2, toDirectory)",
"def create_train(train_img_path):\n\n f = open(\"train.txt\", \"w+\")\n for subdirs, dirs, files in os.walk(train_img_path):\n for filename in files:\n if filename.endswith(\".jpg\"):\n train_image_path = os.path.join(train_img_path, filename)\n print(train_image_path)\n f.write(train_image_path + \"\\n\")\n f.close()",
"def _directory_path(self):\n if not os.path.isdir(self.new_img_dir) : os.mkdir(self.new_img_dir)\n if not os.path.isdir(os.path.join(self.new_img_dir, \"train\")) : os.mkdir(os.path.join(self.new_img_dir, \"train\"))\n if not os.path.isdir(os.path.join(self.new_img_dir, \"test\")) : os.mkdir(os.path.join(self.new_img_dir, \"test\"))",
"def _save(self, tmp_checkpoint_dir):\n checkpoint_path = os.path.join(tmp_checkpoint_dir, \"model_weights\")\n self.model.save_weights(checkpoint_path, save_format=\"tf\")\n return tmp_checkpoint_dir",
"def save_model(self):\n saved_path = self.config.path_tmp / self.model.model_name\n saved_path.mkdir(parents=True, exist_ok=True)\n self.model.save_weights(str(saved_path / 'model.vec'))",
"def save(self, experiment_dir):\n date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())\n\n if self.eval_results is not None:\n # print(self.eval_results)\n assert isinstance(self.eval_results, dict)\n # present the dict in str form\n # res_str = ''.join(''.join(str(x) for x in tup) for tup in self.eval_results.items())\n\n self._path = os.path.join(\n experiment_dir, self.CHECKPOINT_DIR_NAME, date_time,\n )\n path = self._path\n\n if os.path.exists(path):\n shutil.rmtree(path)\n os.makedirs(path)\n\n torch.save(\n {'epoch': self.epoch, 'optimizer': self.optimizer},\n os.path.join(path, self.TRAINER_STATE_NAME)\n )\n torch.save(self.model, os.path.join(path, self.MODEL_NAME))\n\n # save parameters to txt\n txt_file = open(os.path.join(path, self.PARAMETERS), \"w\")\n\n txt_file.write(f\"ckpt name: '{date_time}'\\n\")\n txt_file.write(f\"epoch: {self.epoch}\\n\")\n\n if self.eval_results is not None: \n for key, value in self.eval_results.items():\n txt_file.write(str(key)+': '+str(value)+'\\n')\n # if 'acc' in self.eval_results:\n # txt_file.write(f\"acc: {self.eval_results['acc']}\\n\")\n # if 'p' in self.eval_results:\n # txt_file.write(f\"p: {self.eval_results['p']}\\n\")\n # if 'r' in self.eval_results:\n # txt_file.write(f\"r: {self.eval_results['r']}\\n\")\n # if 'f1' in self.eval_results:\n # txt_file.write(f\"f1: {self.eval_results['f1']}\\n\")\n \n txt_file.close()\n\n return path",
"def set_save_path(save_path):\n # Reset model directory\n model_idx = 0\n save_path_new = save_path\n while True:\n if os.path.isfile(os.path.join(save_path_new, 'conf.yml')):\n # Training of the first model have not been finished yet\n model_idx += 1\n save_path_new = save_path + '_' + str(model_idx)\n else:\n break\n if not os.path.isdir(save_path_new):\n os.mkdir(save_path_new)\n return save_path_new",
"def build_out_path(d):\n assert \"output_dir\" in d, \"Dictionary must have output dir\"\n\n path = d.pop(\"output_dir\")\n\n weights_path = path + \"weights_\"\n log_path = path + \"log_\"\n train_data_path = path + \"train_data_\"\n\n # Sort dictionary for consistency\n odict = OrderedDict(sorted(d.items(), key=lambda t: t[0]))\n\n for key, val in zip(odict.keys(), odict.values()):\n if key not in (\"input_dir\", \"loss_type\", \"typical_epochs\", \"num_epochs\"):\n text = str(key) + \"_\" + str(val) + \"_\"\n weights_path += text\n log_path += text\n train_data_path += text\n\n weights_path += \".pth\"\n log_path += \".log\"\n train_data_path += \".pkl\"\n\n return weights_path, log_path, train_data_path",
"def get_train_output_paths(self, random_effect_name):\n output_dir = path_join(self.root_output_dir, random_effect_name)\n output_model_dir = path_join(output_dir, MODELS)\n training_score_dir = path_join(output_dir, TRAINING_SCORES)\n validation_score_dir = path_join(output_dir, VALIDATION_SCORES)\n return output_model_dir, training_score_dir, validation_score_dir",
"def save(self, folder):\n if not path.exists(folder):\n os.makedirs(folder)\n param = {\n \"model\": self.__class__.__name__,\n \"nr_labels\": self.nr_labels,\n \"nr_features\": self.nr_features,\n \"nr_codes\": self.nr_codes,\n \"bias\": self.bias,\n \"pred_kwargs\": self.pred_params.to_dict(),\n }\n param = self.append_meta(param)\n with open(\"{}/param.json\".format(folder), \"w\") as f:\n f.write(json.dumps(param, indent=True))\n smat_util.save_matrix(\"{}/W.npz\".format(folder), self.W)\n smat_util.save_matrix(\"{}/C.npz\".format(folder), self.C)",
"def save_model_weights(model, output_path):\n output_dir = os.path.dirname(output_path)\n if output_dir and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n \n model.save_weights(output_path)",
"def save(self):\n filename = os.path.join(self.directory, 'experiment.json')\n with open(filename, 'w') as f:\n json.dump(self.report, f, indent=2, sort_keys=True)\n filename = os.path.join(self.directory, 'training_progress.csv')\n with open(filename, 'w') as csvfile:\n csv.writer(csvfile).writerows(self.history)\n filename = os.path.join(self.directory, 'learned_parameters.npy')\n parameters = lasagne.layers.get_all_param_values(self.__network)\n parameters = parameters\n numpy.save(filename, parameters)",
"def save_folder(date_time, sfid, logs_folder, checkpoints_folder):\n date_now = str(date_time.date())\n time_now = str(date_time.time())\n sf = \"saved_models/\" + date_now + \"_\" + time_now + \"_\" \\\n + os.path.basename(__file__).split('.')[0] + '_' + sfid\n if not os.path.isdir(sf):\n os.makedirs(sf)\n\n lf = sf +'/' + logs_folder\n if not os.path.isdir(lf):\n os.makedirs(lf)\n chkf = sf +'/' +checkpoints_folder\n if not os.path.isdir(chkf):\n os.makedirs(chkf)\n\n\n return sf, lf, chkf",
"def make_save_path(self):\n if self.paddle_length_factor is not None:\n for gym_env in self.env.envs:\n gym_env.scale_paddle_height(self.paddle_length_factor)\n self.save_path = f\"{self.save_folder}/{self.save_name}_paddle_length_{self.paddle_length_factor}\"\n else:\n self.save_path = f\"{self.save_folder}/{self.save_name}\""
] | [
"0.7026639",
"0.68025696",
"0.67583746",
"0.67400897",
"0.67305654",
"0.6729066",
"0.6711609",
"0.671136",
"0.6676605",
"0.6649801",
"0.66478485",
"0.6634715",
"0.6595856",
"0.6550144",
"0.6548502",
"0.6500597",
"0.64827853",
"0.6480804",
"0.64794177",
"0.6461388",
"0.6460311",
"0.6433146",
"0.6428362",
"0.6396122",
"0.638564",
"0.636663",
"0.6360706",
"0.63455397",
"0.6338841",
"0.6334792"
] | 0.6858212 | 1 |
Generate output images for parcel footprints. | def create_output_image(building_footprint, parcel_footprint, file_path):
fig, ax = plt.subplots(figsize=(10, 10))
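    # shade the part of the parcel not covered by the building (symmetric difference) in light gray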
gpd.overlay(building_footprint, parcel_footprint, how="symmetric_difference").plot(
ax=ax, color="lightgray"
)
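    # outline the parcel boundary as a thin black band, then fill the building footprint in black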
parcel_footprint.geometry.exterior.buffer(0.25).plot(ax=ax, color="black")
building_footprint.plot(ax=ax, color="black")
ax.patch.set_facecolor("white")
ax.patch.set_edgecolor("white")
fig.patch.set_visible(False)
ax.axis("off")
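    # save a tightly cropped image on a white background; IMG_QUALITY is assumed to be a module-level JPEG-quality constant defined elsewhere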
fig.savefig(
file_path,
bbox_inches="tight",
pad_inches=0,
facecolor="white",
edgecolor="white",
quality=IMG_QUALITY,
)
plt.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_input_image(parcel_footprint, file_path):\n fig, ax = plt.subplots(figsize=(10, 10))\n parcel_footprint.plot(ax=ax, color=\"lightgray\")\n parcel_footprint.geometry.exterior.buffer(0.25).plot(ax=ax, color=\"black\")\n\n ax.patch.set_facecolor(\"white\")\n ax.patch.set_edgecolor(\"white\")\n fig.patch.set_visible(False)\n ax.axis(\"off\")\n fig.savefig(\n file_path,\n bbox_inches=\"tight\",\n pad_inches=0,\n facecolor=\"white\",\n edgecolor=\"white\",\n quality=IMG_QUALITY,\n )\n plt.close()",
"def generate_image(self):\n pass",
"def __make_footprint(self, abspath_dir_img, shp_out='teste-foot'):\n tif = Utils.get_file(abspath_dir_img, is_tif=True)\n print('\\nGerando footprint.\\n')\n MakeFootprint.footprint(tif, shp_out)\n print('\\nRemovendo shapefile do footprint.\\n')\n poly = MakeFootprint.shp_to_wkt(shp_out)\n poly = wkt.loads(poly)\n multipoly = MultiPolygon([poly])\n shutil.rmtree(shp_out)\n return multipoly.wkt",
"def make_pbeam_images(metadata, in_dir, write_tag):\n filenames = metadata['FITSImageFilename']\n for i, in_file in enumerate(filenames):\n kat_target = katpoint.Target(metadata['KatpointTargets'][i])\n\n out_filebase = os.path.splitext(in_file)[0]\n out_filebase_pb = out_filebase + '_PB'\n log.info('Write primary beam corrected FITS output: %s',\n out_filebase_pb + FITS_EXT)\n\n in_path = os.path.join(in_dir + write_tag, in_file)\n pb_dir = _productdir(metadata, in_dir, i, '_PB', write_tag)\n\n os.mkdir(pb_dir)\n pbc_path = os.path.join(pb_dir, out_filebase_pb + FITS_EXT)\n raw_image = pbc.read_fits(in_path)\n beam_model = pbc.get_beam_model(raw_image.header)\n pbc_image = pbc.primary_beam_correction(beam_model, raw_image, px_cut=0.1)\n pbc.write_new_fits(pbc_image, in_path, outputFilename=pbc_path)\n\n log.info('Write primary beam corrected PNG output: %s',\n out_filebase_pb + PNG_EXT)\n _caption_pngs(pb_dir, out_filebase_pb,\n kat_target, 'PB Corrected', contrast=PB_CONTRAST)",
"def postprocess(out_heatmaps, org_im, org_im_shape, org_im_path, output_dir,\n visualization):\n preds, num_joints = save_predict_results(out_heatmaps)\n scale_horizon = org_im_shape[1] * 1.0 / 384\n scale_vertical = org_im_shape[0] * 1.0 / 384\n preds = np.multiply(preds, (scale_horizon, scale_vertical)).astype(int)\n if visualization:\n icolor = (255, 137, 0)\n ocolor = (138, 255, 0)\n rendered_im = org_im.copy()\n for j in range(num_joints):\n x, y = preds[j]\n cv2.circle(rendered_im, (x, y), 3, icolor, -1, 16)\n cv2.circle(rendered_im, (x, y), 6, ocolor, 1, 16)\n # check whether output_dir is existent or not\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n elif os.path.isfile(output_dir):\n os.remove(output_dir)\n os.makedirs(output_dir)\n # save image\n save_im_name = os.path.join(\n output_dir, 'rendered_{}.jpg'.format(\n os.path.splitext(os.path.basename(org_im_path))[0]))\n cv2.imwrite(save_im_name, rendered_im)\n print('image saved in {}'.format(save_im_name))\n\n # articulation\n articulation_points = OrderedDict()\n articulation_points['left_ankle'] = list(preds[0])\n articulation_points['left_knee'] = list(preds[1])\n articulation_points['left_hip'] = list(preds[2])\n articulation_points['right_hip'] = list(preds[3])\n articulation_points['right_knee'] = list(preds[4])\n articulation_points['right_ankle'] = list(preds[5])\n articulation_points['pelvis'] = list(preds[6])\n articulation_points['thorax'] = list(preds[7])\n articulation_points['upper neck'] = list(preds[8])\n articulation_points['head top'] = list(preds[9])\n articulation_points['right_wrist'] = list(preds[10])\n articulation_points['right_elbow'] = list(preds[11])\n articulation_points['right_shoulder'] = list(preds[12])\n articulation_points['left_shoulder'] = list(preds[13])\n articulation_points['left_elbow'] = list(preds[14])\n articulation_points['left_wrist'] = list(preds[15])\n return articulation_points",
"def create_png_images(self):\n if self.subject is None:\n print Console.WARNING + 'You need to specify a subject first' + Console.ENDC\n return\n\n check_dir_of = self.locations.check_dir_of\n check_dir_of(self.locations.HISTO_PNG_U)\n check_dir_of(self.locations.HISTO_PNG)\n check_dir_of(self.locations.SOURCE_PNG)\n\n\n\n fmap_img = ImageUtils.load_nifti_image(self.locations.HIST_FMAP) #loading subject nifti files\n volumes = []\n try:\n for s in self.locations.SOURCES:\n volumes.append(ImageUtils.load_nifti_image(s))\n except IOError as e:\n print Console.FAIL + 'There are errors loading nifi files for subject %s'%self.subject + Console.ENDC\n return False\n \n\n num_slices = volumes[0].shape[2] #use first volume to check expected number of slices\n\n self.locations.create_empty_dir(self.locations.IMAGES_DIR)\n\n print 'Creating input PNGs for %s'%self.subject\n for k, vol in enumerate(volumes):\n for i in range(num_slices):\n imslice = ImageUtils.data_to_bytescale_rgb(vol[:, :, i])\n im = Image.fromarray(imslice)\n im.save(self.locations.SOURCE_PNG % (self.locations.LABELS[k],i))\n\n \n print 'Creating histology PNGs for %s'%self.subject\n for i in range(num_slices):\n\n im_unscaled = ImageUtils.data_to_unscaled_rgb(fmap_img[:, :, i]); #keeps the original values\n im_unscaled = Image.fromarray(im_unscaled)\n im_unscaled = im_unscaled.filter(ImageFilter.GaussianBlur(radius=2)) #Filter requested by Ali Khan\n im_unscaled.save(self.locations.HISTO_PNG_U % i)\n\n im_scaled = ImageUtils.data_to_bytescale_rgb(fmap_img[:,:,i]); # bytescaled histology\n im_scaled = Image.fromarray(im_scaled)\n im_scaled = im_scaled.filter(ImageFilter.GaussianBlur(radius=2)) #Filter requested by Ali Khan\n im_scaled.save(self.locations.HISTO_PNG % i)\n\n print\n return True",
"def print_multipage(path: Path, tick: int, rgb_image: np.ndarray, heatmaps: np.ndarray):\n with PdfPages(path) as pdf:\n height, width, _channels = rgb_image.shape\n figsize = width / float(DPI), height / float(DPI)\n fig = plt.figure(figsize=figsize)\n ax = fig.add_axes([0, 0, 1, 1])\n ax.axis(\"off\")\n ax.imshow(rgb_image)\n pdf.attach_note(\"RGB camera image\")\n pdf.savefig(fig, bbox_inches=\"tight\")\n plt.close()\n\n rescaled_heatmaps = np.power(1.16, np.log(heatmaps))\n heatmaps_shape = rescaled_heatmaps.shape\n\n for command in range(heatmaps_shape[0]):\n for waypoint in range(heatmaps_shape[1]):\n fig = plt.figure()\n ax = fig.add_axes([0, 0, 1, 1])\n ax.axis(\"off\")\n ax.imshow(\n rescaled_heatmaps[command, waypoint, ...],\n cmap=\"inferno\",\n interpolation=\"nearest\",\n )\n pdf.attach_note(f\"command {command} heatmap {waypoint}\")\n pdf.savefig(fig, bbox_inches=\"tight\")\n plt.close()\n\n fig = plt.figure()\n ax = fig.add_axes([0, 0, 1, 1])\n ax.axis(\"off\")\n ax.imshow(\n np.sum(rescaled_heatmaps[command, ...], axis=0),\n cmap=\"inferno\",\n interpolation=\"nearest\",\n )\n pdf.attach_note(f\"command {command} combined heatmap\")\n pdf.savefig(fig, bbox_inches=\"tight\")\n plt.close()\n\n d = pdf.infodict()\n d[\"Title\"] = f\"intervention-learning tick {tick}\"\n d[\"Author\"] = \"intervention-learning\"",
"def create_artificial_image(self):\n background = self.BGI.create_background_image()\n star_PSF = self.PSF.create_star_PSF()\n header = self.HDR.create_header()\n\n fits.writeto(self.image_dir + self.image_name + '.fits',\n background + star_PSF, overwrite=True, header=header)",
"def build_filler_images(self):",
"def make_image(self):\n\n if self.type == 'passthrough':\n return\n render_template(\n os.path.dirname(self.main_module),\n os.path.basename(self.main_module_path),\n self.language,\n self.requirements,\n self.whitelist,\n self.type,\n into=self.code_dir)\n self.build()",
"def print_images_out_statistics(self):\n self._print_images_statistics(self._images_out_folder, self._pose_class_names)",
"def generate_images(self, model, test_input, step, dst_dir):\n prediction = model(test_input)\n\n plt.figure(figsize=(12, 12))\n display_list = [test_input[0], prediction[0]]\n title = ['Input Image', 'Predicted Image']\n\n for i in range(2):\n plt.subplot(1, 2, i+1)\n plt.title(title[i])\n # getting the pixel values between [0, 1] to plot it.\n plt.imshow(display_list[i] * 0.5 + 0.5)\n plt.axis('off')\n filename = os.path.join(dst_dir, 'generated_imgs_at_step_{:06d}.png'.format(step))\n plt.savefig(filename)",
"def create_image_pyramids(self):\r\n curr_cam0_img = self.cam0_curr_img_msg.image\r\n # self.curr_cam0_pyramid = cv2.buildOpticalFlowPyramid(\r\n # curr_cam0_img, self.config.win_size, self.config.pyramid_levels, \r\n # None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]\r\n self.curr_cam0_pyramid = curr_cam0_img\r\n\r\n curr_cam1_img = self.cam1_curr_img_msg.image\r\n # self.curr_cam1_pyramid = cv2.buildOpticalFlowPyramid(\r\n # curr_cam1_img, self.config.win_size, self.config.pyramid_levels, \r\n # None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]\r\n self.curr_cam1_pyramid = curr_cam1_img",
"def combinePILObjects(self, imgArray, doPrint=True, multiCol=False, ignoreRotate=False):\n if multiCol:\n # Multiple columns object (e.g. printing wearther forecast). imgArray is then an array of arrays.\n imArray = [ self.combinePILObjects(i, doPrint=False, ignoreRotate=True) for i in imgArray]\n # Determine height pre multicol\n orgMaxHeight=0\n for im in imArray:\n h = im[0].size[1]\n if h > orgMaxHeight:\n orgMaxHeight = h\n numCols = len(imArray)\n imgMaster = self.imBox(self.printerConf['printerWidth'], orgMaxHeight/numCols)\n # Paste the columns together\n offset = 0\n numCols = len(imArray)\n colWidth = self.printerConf['printerWidth'] / numCols\n for i in imArray:\n imgMaster.paste(i[0].resize([colWidth, int(i[0].size[1]*1./numCols)]),(offset,0))\n offset += colWidth \n else:\n # Calculate height\n height = 0\n imgTooWide=False\n for i in range(len(imgArray)):\n img = imgArray[i]\n # If an image is too large\n if img.size[0] > self.printerConf['printerWidth']:\n # resize image\n imgArray[i] = img.resize([self.printerConf['printerWidth'],\n int(img.size[1]*float(self.printerConf['printerWidth'])/img.size[0])])\n height += imgArray[i].size[1]\n # Create \n imgMaster = self.imBox(self.printerConf['printerWidth'], height)\n offset = 0\n for img in imgArray:\n imgMaster.paste(img,(0,offset))\n offset += img.size[1]\n if self.printerConf['rotate'] and not ignoreRotate:\n imgMaster = imgMaster.rotate(180)\n\n height = imgMaster.size[1]\n bytes_io = BytesIO()\n imgMaster.save(bytes_io, format=\"PNG\")\n bytes_io.seek(0)\n imgData = bytes_io.read()\n if doPrint:\n bytes_io.seek(0)\n self.p.image(bytes_io, impl=self.printerConf['printType'])\n # return: PIL-object, height (int), PNG-file\n return(imgMaster, height, imgData)",
"def _repr_png_(self):\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoimg(\n mol, size, self.aix, \"\", returnPNG=True, drawOptions=opts,\n kekulize=keku, highlightBonds=self.bix\n )",
"def _repr_png_(self):\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoimg(\n mol, size, self.aix, \"\", returnPNG=True, drawOptions=opts,\n kekulize=keku, highlightBonds=self.bix\n )",
"def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)",
"def footprint(self, layout):\n footprint = layout.write_mat([self._problem.footprint], *self._dim)\n return self._batch(footprint)",
"def save(self):\n\n self.image.save(\"./output/\" + self.name + \" pg\" + str(self._page) + \".png\")",
"def generate_and_save_images(model, seed, output_path, title):\n\n predictions = model(tf.Variable(seed, trainable=False))\n\n fig = plt.figure(figsize=(4,4))\n\n for i in range(predictions.shape[0]):\n plt.subplot(4, 4, i+1)\n plt.imshow(denormalize_generate_image(predictions[i, :, :, 0]), cmap='gray')\n plt.axis('off')\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n\n plt.savefig(os.path.join(output_path, '{}.png'.format(title)))\n plt.close()",
"def pdftoimages(input_dir,output_dir): \n dirListing = os.listdir(input_dir)\n files = []\n imagespath = output_dir\n for item in dirListing:\n files.append(item)\n n = len(files)\n for num in range(n):\n doc = fitz.open(input_dir+\"/\"+files[num])\n for img in doc.getPageImageList(0):\n xref = img[0]\n pix = fitz.Pixmap(doc, xref)\n if pix.n < 5: # this is GRAY or RGB\n pix.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n else: # CMYK: convert to RGB first\n pix1 = fitz.Pixmap(fitz.csRGB, pix)\n pix1.writePNG(os.path.join(imagespath,\"p%s-%s.png\" % (num, xref)))\n pix1 = None \n pix=None\n break",
"def generate_data():\n for subdir, dirs, files in os.walk(legend_images_dir):\n for _file in files:\n getTables(_file)\n\n file_list = []\n for subdir, dirs, files in os.walk(pdf_output_dir):\n for _file in files:\n if _file.endswith('.pdf'):\n file_list.append(_file)\n\n print (\"Writing merged output in Output.pdf...\")\n current_dir = os.getcwd()\n mergeOutput(file_list, current_dir + \"/Output.pdf\")\n\n clean()",
"def main(args):\n input_image = make_input_image(args.input_format, args.input_images)\n output_pattern = f\"output/{args.prefix}{{}}.png\"\n \n # Exit early if input == output\n if args.input_format == args.output_format:\n print(\"Input format = output format. Copying files.\")\n input_image.write(output_pattern)\n return\n\n output_image = make_output_image(args.output_format, args.output_shape)\n\n input_is_sphere = args.input_format == \"sphere\"\n output_is_sphere = args.output_format == \"sphere\"\n\n try:\n if input_is_sphere:\n output_image.project_from(input_image)\n output_image.write(output_pattern)\n elif output_is_sphere:\n input_image.unproject_to(output_image)\n output_image.write(output_pattern)\n else:\n raise NotImplementedError\n except NotImplementedError:\n print(f\"Sorry, {args.input_format} -> {args.output_format} not supported.\")\n return",
"def init_output(self):\n if \"full_generated_data_file\" not in self.annotation_config:\n raise RuntimeError(\n 'Field \"full_generated_data_file\" not defined in project config file'\n )\n filename = self.annotation_config[\"full_generated_data_file\"]\n\n if self.annotation_config[\"full_generate_format\"] == \"real_valued\":\n assert self.annotation_config[\"segmentation_method\"] == \"semantic\"\n\n if GENERATE_SUBDIR in os.environ:\n generate_subdirs = [int(os.environ[GENERATE_SUBDIR])]\n else:\n num_subdirs = len(self.annotation_config[\"subdir_paths\"])\n generate_subdirs = range(num_subdirs)\n\n # produce output for each subdir\n for subdir_num in generate_subdirs:\n # get dimensions of region\n annot_map, _, _ = get_annot_map(\n self.annotation_config, subdir_num\n ) # shape (tiles_x, tiles_y, tiles_z)\n\n if self.annotation_config[\"trim_generated_extent\"]:\n # define output extent based on populated tiles\n populated_tiles = np.stack(\n np.where(annot_map)\n ) # shape (3, num_tiles)\n extent_origin = np.min(populated_tiles, axis=1)\n extent_size = (\n np.max(populated_tiles, axis=1) - extent_origin + 1\n )\n self.extent_origins[subdir_num] = extent_origin\n annotation_extent = extent_size * self.tile_size\n cover_percentage = np.prod(extent_size) / np.prod(\n annot_map.shape\n )\n print(\n \"ext orig\",\n extent_origin,\n \"max\",\n np.max(populated_tiles, axis=1),\n \"size\",\n extent_size,\n )\n print(\n \"Producing output over extent %s (%.2f%% of full)\"\n % (annotation_extent, cover_percentage)\n )\n else:\n annotation_extent = np.array(annot_map.shape) * self.tile_size\n print(\n \"Producing output over full extent %s\"\n % (annotation_extent,)\n )\n\n output_full_path = os.path.join(\n self.annotation_config[\"project_folder\"],\n self.annotation_config[\"subdir_paths\"][subdir_num],\n filename,\n )\n\n if self.annotation_config[\"full_generate_format\"] == \"real_valued\":\n output_dtype = \"f\"\n # todo: allow multiple classes to be output. currently semantic segmentations are squashed into one class.\n num_classes = 1 # self.annotation_config[\"semantic_segmentation_classes\"]\n output_shape = [num_classes] + annotation_extent.tolist()\n else:\n output_dtype = \"i\"\n output_shape = annotation_extent.tolist()\n\n # initialise HDF5 file for output\n if os.path.exists(output_full_path):\n raise RuntimeError(\n \"Output file %s already exists\" % output_full_path\n )\n h5file = h5py.File(output_full_path, \"w\")\n print(\n \"creating array generated_data with shape\",\n output_shape,\n \"dtype\",\n output_dtype,\n )\n h5_dataset_name = self.annotation_config[\n \"source_hdf5_dataset_name\"\n ]\n h5_dataset = h5file.create_dataset(\n h5_dataset_name, shape=output_shape, dtype=output_dtype\n )\n self.output_datasets[subdir_num] = h5_dataset",
"def write_file(self):\n if self.it_num % 5 == 0:\n #plt.imshow(self.grid)\n #plt.savefig(\"output%.4d.png\" % self.it_num, bbox_inches='tight')\n io.savemat(\"MLOutput%.4d\" % self.it_num, { \"Grid\":self.grid})",
"def save_images(self):\n for q in range(self.N_itr):\n plt.clf()\n self.plot_EM_estimate(q)\n plt.savefig('img%d.png' % (100 + q))",
"def __make_processing(self, img_name, abspath_dir_img, id_foot):\n data = {}\n data['data'] = ImageInfo.get_date(abspath_dir_img)\n data['total_part'] = TOTAL_PART\n data['nuvens'] = ImageInfo.get_cloud(abspath_dir_img)\n self.__make_tms(abspath_dir_img)\n data['geom'] = self.__make_footprint(abspath_dir_img, shp_out=id_foot)\n abspath_rgb, img_name_rgb = ImageInfo.get_image_rgb(\n abspath_dir_img, img_name\n )\n data['tms'] = ImageInfo.get_xml_tms(img_name_rgb)\n data['image'] = img_name_rgb\n data['quicklook'] = self.__make_png(abspath_rgb)\n data['path'] = ImageInfo.get_path(img_name)\n return data",
"def create_png(image, label):\n sv = \"/home/avojtekova/Desktop/final_results/star_det/generated_images/\" \n \n for i in range(len(image)):\n data = fits.getdata(image[i][0], ext = 0)\n norm = ImageNormalize(data,interval = ZScaleInterval(), stretch = LinearStretch())\n \n print(image[i][0])\n plt.imshow(data, cmap='Greys_r', origin='lower', norm=norm)#[1250:1750, 2000:2500] add this when you want just part of image \n plt.title(label[i])\n plt.axis('off')\n plt.tight_layout()\n plt.legend\n if i<2:\n if not os.path.isdir(sv + image[i][0][-33:-25] + \"/\") :\n os.makedirs(sv + image[i][0][-33:-25] + \"/\")\n plt.savefig(sv + image[i][0][-33:-25] + \"/\" + label[i]+ \"_\" + image[i][0][-33:-25] + \"_big.png\", dpi = 1000)#,bbox_inches='tight', pad_inches = 0) \n else:\n if not os.path.isdir(sv + image[i][0][-40:-32] + \"/\") :\n os.makedirs(sv + image[i][0][-40:-32] + \"/\")\n plt.savefig(sv + image[i][0][-40:-32] + \"/\" + label[i]+image[i][0][-40:-32] + \"_big.png\", dpi = 1000)#,bbox_inches='tight', pad_inches = 0)\n plt.close()",
"def workflow(save_dir):\n year = 2016\n month_series = range(1, 13)\n total_potential_biomass_multiplier = 48.8\n total_standing_biomass_multiplier = 45.25\n biomass_jitter = 3.\n diet_sufficiency_multiplier = 0.28\n diet_sufficiency_jitter = 0.01\n avg_animal_density = 0.0175\n animal_density_jitter = 0.005\n\n # twelve months of precipitation rasters covering the study area\n precip_basename_list = [\n 'chirps-v2.0.{}.{:02d}.tif'.format(year, month) for month in\n month_series]\n\n # reclassify 0 to NoData in CHIRPS rasters\n output_precip_dir = os.path.join(save_dir, 'precip')\n if not os.path.exists(output_precip_dir):\n os.makedirs(output_precip_dir)\n for bn in precip_basename_list:\n base_raster = os.path.join(PRECIP_DIR, bn)\n target_raster = os.path.join(output_precip_dir, bn)\n pygeoprocessing.raster_calculator(\n [(base_raster, 1)], zero_to_nodata, target_raster,\n gdal.GDT_Float32, _IC_NODATA)\n\n # generate outputs\n for month in month_series:\n precip_raster = os.path.join(\n output_precip_dir, 'chirps-v2.0.{}.{:02d}.tif'.format(year, month))\n\n total_potential_biomass_path = os.path.join(\n save_dir, 'potential_biomass_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n total_potential_biomass_multiplier,\n biomass_jitter]],\n precip_to_correlated_output, total_potential_biomass_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n total_standing_biomass_path = os.path.join(\n save_dir, 'standing_biomass_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n total_standing_biomass_multiplier,\n biomass_jitter]],\n precip_to_correlated_output, total_standing_biomass_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n diet_sufficiency_path = os.path.join(\n save_dir, 'diet_sufficiency_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n diet_sufficiency_multiplier,\n diet_sufficiency_jitter]],\n precip_to_correlated_output, diet_sufficiency_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n animal_density_path = os.path.join(\n save_dir, 'animal_density_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n avg_animal_density,\n animal_density_jitter]],\n precip_to_animal_density, animal_density_path,\n gdal.GDT_Float32, _IC_NODATA)",
"def createPrettyPlots(self, inputDir, outputDir, fnExtension = 'png'):\n number_of_entries_per_row = 4\n number_of_files_per_column = 2\n imageWidth = 200 # 1600 org is four times as large\n imageHeight = 150 # 1200\n nTmessage(\"Updating index files for input directory: %s\" % inputDir)\n if os.path.exists(outputDir):\n# nTmessage(\"Removing output directory: %s\" % outputDir)\n shutil.rmtree(outputDir)\n # end if\n# nTmessage(\"Creating output directory: %s\" % outputDir)\n os.mkdir(outputDir)\n# nTdebug(\"Doing copyCingHtmlJsAndCssToDirectory\")\n copyCingHtmlJsAndCssToDirectory(outputDir) \n# htmlDir = os.path.join(cingRoot, \"HTML\")\n fnMatchPattern = '*.' + fnExtension\n image_fn_list = glob(os.path.join(inputDir,fnMatchPattern)) \n inputDirBase = os.path.basename(inputDir)\n# nTdebug(\"Got relative part of inputDir: %s\" % inputDirBase) # e.g. plotTrend\n image_code_list = []\n for image_fn in image_fn_list:\n _root, image_code, _ext = nTpath(image_fn)\n image_code_list.append(image_code)\n # end for \n ## Get the number of files required for building an index\n number_of_images_all_present = len(image_code_list)\n number_of_images_per_file = number_of_entries_per_row * number_of_files_per_column\n ## Number of files with indexes in google style\n number_of_files = int(number_of_images_all_present / number_of_images_per_file)\n if number_of_images_all_present % number_of_images_per_file:\n number_of_files += 1\n # end if\n nTmessage(\"Creating %s pages for %s image codes\" % (number_of_files, number_of_images_all_present))\n# nTmessage(\"Generating %s index html files\" % (number_of_files))\n\n file_name = os.path.join (self.base_dir, \"data\", self.results_base, \"indexPplot.html\")\n file_content = open(file_name, 'r').read()\n old_string = r\"<!-- INSERT NEW TITLE HERE -->\"\n new_string = capitalizeFirst( inputDirBase )\n file_content = string.replace(file_content, old_string, new_string)\n old_string = r\"<!-- INSERT NEW FOOTER HERE -->\"\n file_content = string.replace(file_content, old_string, self.htmlFooter)\n old_string = r\"<!-- INSERT GOOGLE ANALYTICS TEMPLATE HERE -->\"\n file_content = string.replace(file_content, old_string, GOOGLE_ANALYTICS_TEMPLATE)\n old_string = r\"<!-- INSERT GOOGLE PLUS ONE TEMPLATE HERE -->\"\n file_content = string.replace(file_content, old_string, GOOGLE_PLUS_ONE_TEMPLATE)\n ## Count will track the number of entries done per index file\n images_done_per_file = 0\n ## Following variable will track all done sofar\n images_done_all = 0\n ## Tracking the number in the current row. Set for the rare case that there\n ## are no entries at all. 
Otherwise it will be initialize on first pass.\n num_in_row = 0\n ## Tracking the index file number\n file_id = 1\n ## Text per row in an index file to insert\n insert_text = ''\n ## Repeat for all entries plus a dummy pass for writing the last index file\n for image_code in image_code_list + [ None ]:\n ## Finish this index file\n ## The last index file will only be written once...\n if images_done_per_file == number_of_images_per_file or images_done_all == number_of_images_all_present:\n begin_image_count = number_of_images_per_file * (file_id - 1) + 1\n end_image_count = min(number_of_images_per_file * file_id,\n number_of_images_all_present)\n# nTdebug(\"begin_image_count, end_image_count, number_of_images_all_present: %5d %5d %5d\" % (\n# begin_image_count, end_image_count, number_of_images_all_present))\n # image_code is just the base name of the file name.\n new_string = \"Images: %s-%s of %s.\" % (\n begin_image_count,\n end_image_count,\n number_of_images_all_present\n )\n old_string = r\"<!-- INSERT NEW RESULT STRING HERE -->\" \n new_file_content = string.replace(file_content, old_string, new_string)\n # Always end the row by adding dummy columns\n if num_in_row != number_of_entries_per_row:\n insert_text += (number_of_entries_per_row - num_in_row) * 2 * r\"<td> </td>\" + r\"</tr>\"\n # end if\n ## Create the new index file from the example one by replacing a string\n ## with the new content.\n old_string = r\"<!-- INSERT NEW ROWS HERE -->\"\n new_file_content = string.replace(new_file_content, old_string, insert_text)\n\n first_string = '<a href=\"index_%s.html\">First < <</a>' % 1\n final_string = '<a href=\"index_%s.html\">Last > ></a>' % number_of_files\n prev_string = ''\n if file_id > 1:\n prev_string = '<a href=\"index_%s.html\">Previous <</a>' % ( file_id - 1)\n # end if\n next_string = ''\n if file_id < number_of_files:\n next_string = '<a href=\"index_%s.html\">> Next</a>' % ( file_id + 1)\n # end if\n first_link = max(1, file_id - number_of_files_per_column)\n last_link = min(number_of_files, file_id + number_of_files_per_column - 1)\n links_string = ''\n for link in range(first_link, last_link + 1):\n ## List link but don't include a link out for the current file_id\n if link == file_id:\n links_string += ' <B>%s</B>' % link\n else:\n links_string += ' <a href=\"index_%s.html\">%s</a>' % (\n link, link)\n # end if\n # end for\n old_string = r\"<!-- INSERT NEW LINKS HERE -->\"\n new_string = 'Result pages: ' + ' '.join([first_string, prev_string, links_string, next_string, final_string])\n new_file_content = string.replace(new_file_content, old_string, new_string)\n ## Make the first index file name still index.html\n new_file_name = os.path.join( outputDir, 'index_%s.html' % file_id)\n if not file_id:\n new_file_name = os.path.join( outputDir, '/index.html' )\n # end if \n writeTextToFile(new_file_name, new_file_content) \n images_done_per_file = 0\n num_in_row = 0\n insert_text = \"\"\n file_id += 1\n # end for\n ## Build on current index file\n ## The last iteration will not execute this block because of this clause\n if images_done_all < number_of_images_all_present:\n images_done_all += 1\n images_done_per_file += 1\n ## Get the html code right by abusing the formatting chars.\n ## as in sprintf etc.\n imageRelUrl = os.path.join( '..', inputDirBase, image_code + '.' 
+ fnExtension)\n tmp_string = \"\"\"\n<td> <a href=\"%(imageRelUrl)s\"> <img SRC=\"%(imageRelUrl)s\" border=\"0\" width=\"%(imageWidth)s\" height=\"%(imageHeight)s\"> </a> </td>\"\"\" % dict(\n imageRelUrl=imageRelUrl, imageWidth=imageWidth, imageHeight=imageHeight)\n num_in_row = images_done_per_file % number_of_entries_per_row\n if num_in_row == 0:\n num_in_row = number_of_entries_per_row\n # end if\n if num_in_row == 1:\n # Start new row\n tmp_string = \"\\n<tr>\" + tmp_string\n elif (num_in_row == number_of_entries_per_row):\n # End this row\n tmp_string = tmp_string + \"\\n</tr>\"\n # end if\n insert_text += tmp_string\n # end if\n # end if\n index_file_first = 'index_1.html'\n index_file = os.path.join(outputDir, 'index.html')\n ## Assume that a link that is already present is valid and will do the job\n# nTdebug('Symlinking: %s %s' % (index_file_first, index_file))\n symlink(index_file_first, index_file)"
] | [
"0.66542184",
"0.6168913",
"0.61026806",
"0.6090104",
"0.60365087",
"0.5999179",
"0.5749918",
"0.56669164",
"0.56426716",
"0.56304854",
"0.56185645",
"0.5585054",
"0.55716985",
"0.55307716",
"0.55136627",
"0.55136627",
"0.5498659",
"0.54859626",
"0.54828817",
"0.5458757",
"0.54574287",
"0.54352695",
"0.543194",
"0.541147",
"0.54114294",
"0.54106814",
"0.5410668",
"0.54018104",
"0.5398934",
"0.53957695"
] | 0.69139814 | 0 |
Returns gdf with bag_gdf that are within a kadaster. Based on perceel_id. | def join_kadaster_bag_info(kadaster_gdf, bag_gdf):
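    # spatial join: keep only the BAG footprints whose geometry lies within a kadaster parcel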
return gpd.sjoin(bag_gdf, kadaster_gdf, op="within") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_dgbs(self):\n cursor = job(self.logger, partial(find_job, 'find', {'spread': 'dgb'}))\n equities = sorted([item['eq'] for item in cursor])\n dgbs = self._find_dgbs(equities)\n _show_dgbs(dgbs)\n return True",
"def find_voting_precincts_in_district(state=48, district=7, leg_body='US-REP'):\r\n vps_in_district_GeoJSON = get_voting_precincts_geojson_filename(\r\n state=state, district=district, leg_body=leg_body)\r\n \r\n if not os.path.isfile(vps_in_district_GeoJSON):\r\n voting_precincts_file = get_statewide_voting_precincts_geojson_filename(state)\r\n \r\n district_file = get_district_geojson_filename(\r\n state=state, district=district, leg_body=leg_body)\r\n \r\n get_district_file(state=state, district=district, leg_body=leg_body)\r\n\r\n get_statewide_voting_precincts(state=state)\r\n \r\n print( \"Finding voting precincts in district\" )\r\n district_boundary = gpd.read_file(district_file)\r\n voting_precincts = gpd.read_file(voting_precincts_file)\r\n \r\n print( \"Finding voting precincts that touch the district boundary\" )\r\n vps_touching_district_bool = voting_precincts.touches(district_boundary.geometry[0])\r\n \r\n print( \"Finding voting precincts that intersect the district boundary\" )\r\n vps_intersecting_district_bool = voting_precincts.intersects(district_boundary.geometry[0])\r\n \r\n print( \"Filtering the voting precincts\" )\r\n for index in vps_touching_district_bool[vps_touching_district_bool==True].index:\r\n vps_intersecting_district_bool.loc[index] = False\r\n\r\n vps_in_district = voting_precincts[vps_intersecting_district_bool]\r\n \r\n print( \"Finding blockgroups to filter based on threshold\" )\r\n intersections = vps_in_district.intersection(district_boundary.geometry[0])\r\n\r\n areas_of_intersections = intersections.area\r\n indx_out = []\r\n for vp_index, vp in vps_in_district.iterrows():\r\n area_of_intersection = areas_of_intersections[vp_index]\r\n vp_area = GeoSeries(vp.geometry).area[0]\r\n\r\n share_of_intersection = area_of_intersection / vp_area\r\n \r\n if share_of_intersection < 0.10:\r\n indx_out.append(vp_index)\r\n\r\n #print( \"\\nBlock Group: \", bg.GEOID )\r\n #print( \"Area: \", str(bg_area) )\r\n #print( \"Share of Intersection: \", str(share_of_intersection) )\r\n \r\n vps_to_remove_bool = pd.Series([False]*len(voting_precincts))\r\n\r\n for index in indx_out:\r\n vps_to_remove_bool.loc[index] = True\r\n\r\n vps_to_remove = voting_precincts[vps_to_remove_bool]\r\n\r\n for index in vps_to_remove_bool[vps_to_remove_bool==True].index:\r\n vps_intersecting_district_bool.loc[index] = False\r\n\r\n vps_in_district = voting_precincts[vps_intersecting_district_bool]\r\n if 'PREC' in list(vps_in_district.columns.values):\r\n vps_in_district = vps_in_district.rename(columns={'PREC':'PRECINCT'})\r\n\r\n # See issue #367 https://github.com/geopandas/geopandas/issues/367\r\n try: \r\n os.remove(vps_in_district_GeoJSON)\r\n except OSError:\r\n pass\r\n vps_in_district.to_file(vps_in_district_GeoJSON, driver='GeoJSON')\r\n \r\n vps_in_district.sort_values(by=['PRECINCT'])[['PRECINCT']].to_csv(\"vps.csv\", index=False)",
"def get_cal_gains(self,this_obsid):\n\n db = FileTools.safe_hdf5_open(self.cal_database,'r')\n\n gains = {'Gains':np.zeros((20,8))} #N_feeds, N_bands\n\n obsids = {k:[] for k in range(19)}\n for obsid, grp in db.items():\n for ifeed in range(19):\n if grp['CalFeedMask'][ifeed]:\n obsids[ifeed] += [int(obsid)]\n\n obsids = obsids\n for ifeed in range(19):\n if len(np.array(obsids[ifeed])) == 0:\n continue\n idx = np.argmin(np.abs(np.array(obsids[ifeed])-this_obsid))\n self.nearest_calibrator = str(obsids[ifeed][idx])\n gains['Gains'][ifeed] = db[self.nearest_calibrator]['CalGains'][ifeed,:]\n\n db.close()\n\n return gains",
"def calculate_percentage_overlap(bag_sample, kadaster_sample):\n # calculate percentage intersection\n\n ov_output = gpd.overlay(bag_sample, kadaster_sample, how=\"intersection\")\n\n percentage_overlap = (\n ov_output.geometry.area / kadaster_sample.geometry.area.values[0]\n )\n percentage_overlap.index = bag_sample.index\n percentage_overlap = percentage_overlap.to_frame().rename(\n columns={0: \"percentage_overlap\"}\n )\n percentage_overlap.index.name = \"bag_index\"\n return percentage_overlap",
"def cal_hit_gbratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n #print({d['user'].iloc[0]:d['ratings'].to_list() for i,d in top_k.groupby('user')})\n score = 0.0\n # golden items hit in the top_K items\n score_1 = {d['user'].iloc[0]:len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)]) for i,d in top_k.groupby('user')}\n score_2 = {d['user'].iloc[0]:len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)]) for i,d in top_k.groupby('user')} \n score_ratio = [(score_1[d]-score_2[d]/self._test_ratings[d]) if self._test_ratings[d]!=0 else 0 for d in self._test_ratings.keys()]\n\n #print(np.mean(score_ratio))\n #print(score_1)\n #score = score_1 + score_2\n return np.mean(score_ratio)",
"def check_gabor_approx(): \n x = np.arange(-40, 41, 1)\n DOG2 = make_DOG(3, x)\n DOGs = get_DOGs(3, x, (x.size, x.size))\n gaborx, gabory = make_gabors(x)\n n_gabors = gaborx.shape[1]\n\n w_kernel = pickle.load(open(\"/Users/bptripp/code/nengo-FPGA/v1/dog-gabor-weights-new.p\", \"rb\" ))\n\n plt.figure()\n for i in range(n_gabors):\n gabor = np.outer(gabory, gaborx[:,i])\n approx = np.dot(DOGs, w_kernel[:,:,i].flatten())\n plt.subplot(3,n_gabors,i+1)\n plt.imshow(gabor)\n plt.subplot(3,n_gabors,i+n_gabors+1)\n plt.imshow(np.reshape(approx, (x.size, x.size)))\n plt.subplot(3,n_gabors,i+2*n_gabors+1) \n plt.imshow(nd.filters.convolve(w_kernel[:,:,i].T, DOG2))\n plt.show()",
"def make_represented_genders(metric_df, label_lang):\n return dict(metric_df[['bias_value', 'bias_label']].drop_duplicates().to_dict('split')['data'])",
"def bags_in_shiny_gold():\n return Bag(\"shiny gold\").bags_inside() - 1 # -1 since we don't count the shiny bag itself",
"def find_blockgroups_in_district(state=48, district=7, leg_body='US-REP', year='2015', debug_is_on=False):\r\n shapfile_path = None\r\n bgs_in_district_GeoJSON = get_bgs_in_district_geojson_filename(state=state, district=district, leg_body=leg_body)\r\n bgs_in_district_JSON = get_bgs_in_district_json_filename(state=state, district=district, leg_body=leg_body)\r\n district_file = get_district_geojson_filename(state=state, district=district, leg_body=leg_body)\r\n blockgroups_file = get_state_blockgroups_geojson_filename(state=state)\r\n \r\n if (not os.path.isfile(bgs_in_district_JSON)) or (not os.path.isfile(bgs_in_district_GeoJSON) ):\r\n get_district_file(state=state, district=district, leg_body=leg_body)\r\n \r\n get_state_blockgroups_file(\r\n state=state, district=district, leg_body=leg_body, year=year)\r\n \r\n print( \"Finding blockgroups in district\" )\r\n district = gpd.read_file(district_file)\r\n block_groups = gpd.read_file(blockgroups_file)\r\n \r\n print( \"Finding blockgroups that touch the district boundary\" )\r\n bgs_touching_district_bool = block_groups.touches(district.geometry[0])\r\n \r\n print( \"Finding blockgroups that intersect the district boundary\")\r\n bgs_intersecting_district_bool = block_groups.intersects(district.geometry[0])\r\n \r\n print( \"Filtering the blockgroups\" )\r\n for index in bgs_touching_district_bool[bgs_touching_district_bool==True].index:\r\n bgs_intersecting_district_bool.loc[index] = False\r\n\r\n bgs_in_district = block_groups[bgs_intersecting_district_bool]\r\n \r\n print( \"Finding blockgroups to filter based on threshold\" )\r\n intersections = bgs_in_district.intersection(district.geometry[0])\r\n\r\n areas_of_intersections = intersections.area\r\n indx_out = []\r\n for bg_index, bg in bgs_in_district.iterrows():\r\n area_of_intersection = areas_of_intersections[bg_index]\r\n bg_area = GeoSeries(bg.geometry).area[0]\r\n\r\n share_of_intersection = area_of_intersection / bg_area\r\n \r\n if share_of_intersection < 0.10:\r\n indx_out.append(bg_index)\r\n\r\n #print( \"\\nBlock Group: \", bg.GEOID )\r\n #print( \"Area: \", str(bg_area) )\r\n #print( \"Share of Intersection: \", str(share_of_intersection) )\r\n \r\n bgs_to_remove_bool = pd.Series([False]*len(block_groups))\r\n\r\n for index in indx_out:\r\n bgs_to_remove_bool.loc[index] = True\r\n\r\n bgs_to_remove = block_groups[bgs_to_remove_bool]\r\n\r\n for index in bgs_to_remove_bool[bgs_to_remove_bool==True].index:\r\n bgs_intersecting_district_bool.loc[index] = False\r\n\r\n bgs_in_district = block_groups[bgs_intersecting_district_bool]\r\n\r\n # See issue #367 https://github.com/geopandas/geopandas/issues/367\r\n try: \r\n os.remove(bgs_in_district_GeoJSON)\r\n except OSError:\r\n pass\r\n bgs_in_district.to_file(bgs_in_district_GeoJSON, driver='GeoJSON')\r\n \r\n # Create json file of geo units\r\n bgs_in_district[['BLKGRPCE','COUNTYFP', 'STATEFP', 'TRACTCE', 'GEOID']].to_json(bgs_in_district_JSON)\r\n \r\n if debug_is_on:\r\n plt.figure(figsize=(400, 400))\r\n district_plot=district.plot(color='blue', alpha=0.5)\r\n bgs_in_district.plot(ax=district_plot, color='green',alpha=0.5)\r\n plt.savefig(bgs_in_district_fn,dpi=600)\r\n plt.close()\r\n\r\n plt.figure(figsize=(400, 400))\r\n district_plot=district.plot(color='blue', alpha=0.5)\r\n block_groups[bgs_touching_district_bool].plot(ax=district_plot, color='green',alpha=0.5)\r\n plt.savefig(bgs_in_district_fn + '-touching',dpi=600)\r\n plt.close()\r\n\r\n plt.figure(figsize=(400, 400))\r\n 
district_plot=district.plot(color='blue', alpha=0.5)\r\n bgs_to_remove.plot(ax=district_plot, color='green',alpha=0.5)\r\n plt.savefig(bgs_in_district_fn + '-threshold-filter',dpi=600)\r\n plt.close()",
"def add_clump_forming_kmers(counts, clumpFormingKmers):\n for kmer in counts:\n if counts[kmer] >= t:\n clumpFormingKmers.add(kmer)\n\n return clumpFormingKmers",
"def location_bid(ca_agent_df, agent_df):\r\n # print(f'ca_agent_df: {ca_agent_df}')\r\n # closest_coils_df = pd.DataFrame(ca_agent_df.loc[0, 'closest_coils_df'][0])\r\n # print(f'closest_coils_df: {closest_coils_df}')\r\n # ca_agent_df.drop(columns=['closest_coils_df'])\r\n locations_min_distances_df = locations_min_distances()\r\n ca_location_1 = ca_agent_df.loc[0, 'location_1']\r\n to = str()\r\n to = \"location_\" + ca_location_1\r\n locations_min_distances_df = locations_min_distances_df[[to, 'location_bid']]\r\n locations_min_distances_df = locations_min_distances_df.rename(columns={to: 'segment'})\r\n closest_coils_df = locations_min_distances_df[['segment']]\r\n df = closest_coils_df.merge(locations_min_distances_df, on='segment')\r\n coil_location = agent_df.loc[0, 'location']\r\n segment = ca_location_1 + '-' + coil_location\r\n df1 = df.loc[df['segment'] == segment]\r\n df1 = df1.reset_index(drop=True)\r\n location_bid_ = df1.loc[0, 'location_bid']\r\n return int(location_bid_)",
"def baggage(self):\n return SpanContext.EMPTY_BAGGAGE",
"def query_by_book(cls,bid,loade=True): \n gds = from_cache('VG_%s'%bid)\n if not gds:\n gds = [str(g.id()) for g in SuiGoods.all(keys_only=True).filter('book =',bid).fetch(1000)]\n to_cache('VG_%s'%bid, gds)\n if loade:\n return SuiGoods.load_by_ids(gds)\n return gds",
"def meters_state_of_buliding(self, target_building, target_appliances, threshold=70):\n if target_building not in self.buliding_df.keys():\n return\n \n meters_state = self.buliding_df[target_building] \n meters_state = meters_state[[ meter for meter in target_appliances if meter in meters_state.keys()]] > threshold\n return meters_state",
"def subsetPed(basename=\"\",lcdmap = [],faff='1', ofaff='2'):\r\n mf = file('%s.map' % basename,'r').readlines()\r\n lmap = [x.strip().split() for x in mf]\r\n rscols = {} # lookup marker table\r\n colrs = [] # lookup rs from column\r\n for i,m in enumerate(lmap): # get columns to keep in the order we want them\r\n rscols[m[1]] = i # keep track of where each rs is in this map\r\n colrs.append(m[1]) # and keep the list of rs for tracking alleles\r\n wewant = [rscols[x[2]] for x in lcdmap] # columns we want to keep\r\n print '#Subsetped faff=%s ofaff=%s keeping %d (%s) of potential lcd %d for %s' % \\\r\n (faff,ofaff,len(wewant),wewant[:20],len(lcdmap),basename)\r\n pf = file('%s.ped' % basename,'r')\r\n ogeno = [] # offspring new lines\r\n fgeno = [] # founders\r\n oped = [] # for pedigrees\r\n fped = []\r\n rsadict = {} # keep a count of alleles - seems to be a problem\r\n for i,l in enumerate(pf):\r\n if (i+1) % 500 == 0:\r\n print '%s at line %d' % (basename,i+1)\r\n ll = l.strip().split()\r\n ped = ll[:6]\r\n founder = (ll[2] == '0' and ll[3] == '0') \r\n aff = faff\r\n if not founder:\r\n aff = ofaff\r\n ped[5] = aff # adjust as needed\r\n if founder:\r\n fped.append(ped)\r\n else:\r\n oped.append(ped)\r\n gt = ll[6:]\r\n geno = []\r\n for snp in wewant: # columns in order\r\n thisrs = colrs[snp]\r\n base = snp*2\r\n g1 = gt[base]\r\n g2 = gt[base+1]\r\n geno.append(g1)\r\n geno.append(g2)\r\n if not rsadict.get(thisrs,None):\r\n rsadict[thisrs] = {}\r\n if g1 <> '0':\r\n if not rsadict[thisrs].get(g1,None):\r\n rsadict[thisrs][g1] = 1\r\n else:\r\n rsadict[thisrs][g1] += 1 \r\n if g2 <> '0':\r\n if not rsadict[thisrs].get(g2,None):\r\n rsadict[thisrs][g2] = 1\r\n else:\r\n rsadict[thisrs][g2] += 1\r\n keepgt = array.array('c',geno)\r\n if founder:\r\n fgeno.append(keepgt)\r\n else:\r\n ogeno.append(keepgt)\r\n print '#Subsetped %s %d fgeno %d ogeno' % (basename,len(fgeno),len(ogeno))\r\n return fped,oped,fgeno,ogeno,rsadict",
"def _collect_foll(all_detailed_cells, targeted_gids: pd.Series):\n all_foll_gids = {}\n\n for sim_gid, cells in pbar(all_detailed_cells.items(), total=len(all_detailed_cells), desc='sim'):\n targeted_gid = targeted_gids.loc[sim_gid]\n\n sb.CAT.add_cats_cells(cells)\n cells = cells.drop(targeted_gid)\n\n foll_ei_types = cells.loc[cells['frm_cat'] == 'foll', 'ei_type']\n\n all_foll_gids[sim_gid] = {\n f'{ei_type}_foll_gids': tuple(gids)\n for ei_type, gids in foll_ei_types.groupby(foll_ei_types).groups.items()}\n\n all_foll_gids = pd.DataFrame.from_dict(all_foll_gids, orient='index')\n\n # fillna doesn't like taking empty tuples\n for col, values in all_foll_gids.items():\n all_foll_gids.loc[all_foll_gids[col].isna(), col] = tuple()\n\n all_foll_gids = all_foll_gids.rename_axis(index='sim_gid')\n\n foll_counts = all_foll_gids.applymap(len)\n foll_counts.columns = [f'{col[0]}_foll_count'for col in foll_counts]\n\n all_foll_gids = pd.concat([all_foll_gids, foll_counts], axis=1)\n\n return all_foll_gids",
"def gagged(self):\r\n return self._gag",
"def get_dill_keq_df():\n df = pandas.read_csv(fixpath('Dill_dG_matrix.csv'))\n\n dill = df.rename(columns=get_replace_function('Dill_Keq_'))\n return dill.set_index('genome_region')",
"def assignToBurst(abfroot, burst, show=True, rmoutliers=True):\n # Find out type of input\n if type(abfroot) is str:\n if '.' in abfroot:\n abfroot = abfroot.split('.')[0]\n if '/' not in abfroot:\n try:\n dfroot = getFullPath(abfroot+'_props_clusters.csv')[0]\n df = pd.read_csv(dfroot)\n except:\n dfroot = getFullPath(abfroot+'_props.csv')[0]\n df = pd.read_csv(dfroot)\n else:\n dfroot = abfroot\n # %print('Trying to load %s ....' %dfroot)\n df = pd.read_csv(dfroot)\n if type(burst) is str:\n if '/' not in burst:\n try:\n burst = getFullPath(burst, '/home/alex/data/misc')[0]\n except:\n burst = getFullPath(burst)[0]\n burst = pd.read_csv(burst)\n \n # Now have both as data frames\n bs_cells = [i.split('s')[0].split('_')[1] for i in burst.columns] # Make sure it's in burst df\n if abfroot not in bs_cells:\n df['in_burst'] = [False for f in range(df.shape[0])]\n return df # No bursts, just return the df\n cell_id = 'id_'+ abfroot\n start = burst[cell_id+'start'].dropna().values\n stop = burst[cell_id+'stop'].dropna().values\n \n in_burst = [] # Check if each spike belongs to a burst\n for i in range(df.shape[0]):\n t_ = df.ix[i]['times']/1000. # For each spike time\n ibs = False\n for bur in range(len(start)): # Check if it fits inside a burst!\n if start[bur] < t_ < stop[bur]:\n ibs = True\n in_burst.append(int(ibs))\n df['in_burst'] = in_burst\n \n # Now do all the plotting!\n if show:\n # First is the simple color-by-burst plot\n collist = ['blue', 'red', 'forestgreen', 'goldenrod', 'purple', 'yellowgreen',\n 'skyblue', 'tomato', 'darkgray']\n for i in range(df.shape[0]):\n plt.plot([df.ix[i].times, df.ix[i].times], \n [df.ix[i].in_burst-1, df.ix[i].in_burst], \n color=collist[int(df.ix[i].in_burst)], linewidth=1.)\n patches = []\n labs = ['Tonic', 'Burst']\n for u in range(int(max(df.in_burst)+1)):\n patches.append(mpatches.Patch(color=collist[u],\n label=labs[u]))\n plt.legend(handles=patches)\n plt.ylim([-1.5, max(df.in_burst)+.5])\n \n # Next is the burst activity patterns\n plt.figure()\n checks = ['maxDerivV', 'maxDerivdV',\n 'minDerivdV', 'preMaxCurveK', 'postMaxCurveK',\n 'height', 'repolarizationV', 'intervals', 'frequencies']\n for ch in range(len(checks)):\n plt.subplot(2,int(len(checks)/2 +1), ch+1)\n labels = ['Tonic', 'Burst']\n \n for clust in range(max(df.in_burst)+1): # For each cluster\n plotthis = df[df.in_burst==clust][checks[ch]].values\n if rmoutliers:\n plotthis = outlier(plotthis, as_nan=False)\n plotthis = outlier(plotthis, as_nan=False)\n plt.plot([i*0.2+clust for i in np.random.random(len(plotthis))],\n plotthis, 'o', color=collist[clust], markeredgecolor='none',\n alpha=0.3)\n plt.plot([clust, clust+.2], [np.mean(plotthis), np.mean(plotthis)],\n color='black', lw=2)\n plt.plot([clust+.1, clust+.1], \n [np.percentile(plotthis, 25), np.percentile(plotthis, 75)],\n color='black', lw=2)\n labels = [labels[i] for i in range(max(df.in_burst)+1)]\n poses = [i+.1 for i in range(len(labels))]\n plt.xticks(poses, labels, rotation=45)\n plt.xlim([-.1, max(df.in_burst)+.3])\n plt.title(checks[ch])\n plt.show()\n \n return df",
"def gb(self):\n return self.data.gb",
"def muther_dau_gdau(df,var_m,var_d,var_gd):\n if 'g_parent_cell' not in df.columns:\n df = rl.genalogy(df,'parent_cell') #Create genealogy\n tmp={'gr_mu_{}'.format(var_m):[],'mu_{}'.format(var_d):[],'daugther_{}'.format(var_gd):[]}\n for k in df.cell.unique():\n dau = df.loc[df['cell']==k]\n dau_var = dau['{}'.format(var_gd)].iloc[0]\n nid = dau.parent_cell.iloc[0]\n mu = df.loc[df['cell']==nid]\n try:#if mother exists\n mu_var = mu['{}'.format(var_d)].iloc[0]\n nid = mu.g_parent_cell.iloc[0]\n mu_var = mu['{}'.format(var_d)].iloc[0]\n gmu = df.loc[df['cell']==nid]\n except IndexError:\n continue\n try:# if grand mother exists\n tmp['gr_mu_{}'.format(var_m)].append(gmu['{}'.format(var_m)].iloc[0])\n tmp['mu_{}'.format(var_d)].append(mu_var)\n tmp['daugther_{}'.format(var_gd)].append(dau_var)\n except IndexError:\n tmp['gr_mu_{}'.format(var_m)].append(np.nan)\n tmp['mu_{}'.format(var_d)].append(mu_var)\n tmp['daugther_{}'.format(var_gd)].append(dau_var)\n return pd.DataFrame(tmp)",
"def create_output_gdf(self, relevant_floorplan_gdf):\n # wall buffer size\n AREA_SIZE_DIVIDER = 25000\n MIN_BUFFER_SIZE = 3\n MAX_BUFFER_SIZE = 10\n wall_gdf = relevant_floorplan_gdf.loc[\n relevant_floorplan_gdf[\"category\"] == \"wall\"\n ].copy()\n\n area = wall_gdf.unary_union.buffer(0.01).convex_hull.area\n\n buffer_size = area / AREA_SIZE_DIVIDER\n buffer_size = max(buffer_size, MIN_BUFFER_SIZE)\n buffer_size = min(buffer_size, MAX_BUFFER_SIZE)\n\n extended_doors = relevant_floorplan_gdf.loc[\n relevant_floorplan_gdf[\"category\"] == \"door\"\n ].copy()\n extended_doors.geometry = extended_doors.buffer(buffer_size * 2, cap_style=2)\n extended_doors[\"colors\"] = \"blue\"\n\n # Create outer walls\n outer_walls = preprocessing.create_outer_bounderies(\n relevant_floorplan_gdf.loc[\n relevant_floorplan_gdf[\"category\"] != \"balcony\"\n ].copy(),\n buffer=0.1,\n )\n\n entrance = relevant_floorplan_gdf.loc[\n relevant_floorplan_gdf[\"category\"] == \"entrance\"\n ].copy()\n\n walls = self.create_input_gdf(relevant_floorplan_gdf)\n walls[\"colors\"] = \"black\"\n\n windows = self.find_overlap(extended_doors, outer_walls)\n windows[\"colors\"] = \"red\"\n\n if not entrance.empty:\n entrance_gdf = self.find_overlap(windows, entrance)\n entrance_gdf[\"colors\"] = \"green\"\n return pd.concat([walls, extended_doors, windows, entrance_gdf])\n return pd.concat([walls, extended_doors, windows])",
"def bike(analyzer, date, id):\n return model.bike(analyzer,date,id)",
"def get_bag_by_id(bagId): # noqa: E501\n\n\n\n test = baglist.get(bagId)\n\n response = ApiResponse()\n response = response.from_dict(test)\n return response",
"def gdf(self) -> gpd.GeoDataFrame:\n return self.just_geometry_gdf.join(self.df)",
"def burstDFhelper(tdf, temp, bs, cell_id):\n def ibi_cv(bstart, bstop):\n \"\"\"\n Calculate inter-burst interval coefficient of variation.\n \"\"\"\n ibis = []\n for b in range(len(bstart)-1):\n if bstart[b+1] > bstop[b]: # ortho, correct\n ibis.append(bstart[b+1] - bstop[b])\n else:\n print(' In %s, %.2f starts before burst ends at %.2f' \n %(cell_id, bstart[b+1], bstop[b]))\n return np.mean(ibis), np.std(ibis)/np.mean(ibis)\n \n def spikesperburst(tdf, bstart, bstop):\n \"\"\"\n Count spikes per burst and spikes/burst CV.\n \"\"\"\n tms = list(tdf.times.dropna().values)\n bursts = [[tms[u] for u in range(len(tms)) if bstart[k]<(tms[u]/1000.)<bstop[k] ]\n for k in range(len(bstart))]\n bursts = [len(i) for i in bursts]\n return np.mean(bursts), np.std(bursts)/np.mean(bursts)\n \n def burst_time(temp, bstart, bstop):\n \"\"\"\n Make sure bstop[i] is always after bstart[i]; also burst length\n \"\"\"\n to_sum = []\n for b in range(len(bstart)):\n if bstop[b]-bstart[b] >= 0:\n to_sum.append(bstop[b]-bstart[b])\n elif bstop[b]-bstart[b] < 0 and b == len(bstop)+1: # Make it go to end\n to_sum.append(temp['length']/1000.-bstart[b])\n else:\n pass\n return np.mean(to_sum), np.std(to_sum)/np.mean(to_sum), sum(to_sum)/(temp['length']/1000.)\n \n bs_cells = [i.split('s')[0].split('_')[1] for i in bs.columns]\n #print(cell_id, bs_cells)\n if cell_id in bs_cells:\n \n bstart = bs['id_'+cell_id+'start'].dropna().values\n bstop = bs['id_'+cell_id+'stop'].dropna().values\n temp['numbursts'] = len(bstart) # Number of bursts\n print(' --> Found %i bursts ' %temp['numbursts'])\n temp['burst_length'], temp['burst_length_cv'], \\\n temp['burst'] = burst_time(temp, bstart, bstop)\n temp['spikespburst'], temp['spikespburst_cv'] = \\\n spikesperburst(tdf, bstart, bstop)\n if temp['burst'] < 0:\n print(' Warning! Found %.4f burst time for %s!' \n %(temp['burst'], temp['file']))\n temp['burst'] = 0.\n else:\n temp['burst'] = temp['burst']/(temp['length']/1000.) # Burst time in s!!!\n temp['ibi_length'], temp['ibi_cv'] = ibi_cv(bstart, bstop)\n else: # Else, it doesn't burst\n temp['burst'], temp['burst_length_cv'], temp['ibi_cv'] = 0., np.nan, np.nan\n temp['tonic'] = sum(tdf[tdf.in_burst==0]['intervals'].dropna().values)/temp['length']\n temp['silent'] = 1. - (temp['burst']+temp['tonic'])\n \n return temp",
"def get_gene_values(hotel_ids):\n hotel_genes = {}\n subcats = get_subcat_axes()\n cursor = conn.cursor()\n cursor.execute(\n \"\"\"\n SELECT hotel_id, genome\n FROM hotel_genome\n WHERE hotel_id in (%s)\n \"\"\" % \",\".join([str(h) for h in hotel_ids])\n )\n for hotel_id, genome_str in cursor.fetchall():\n genome = [float(g.strip()) for g in genome_str.split(\",\")]\n hotel_genes[hotel_id] = get_hotel_genes_by_subcat(\n subcats, genome)\n return subcats, hotel_genes",
"def get_dashboard_bags():\n conn = fetch_replica_db()\n now = datetime.datetime.now()\n bags = conn.bags.aggregate([\n {'$match': {'$or':[{'pid':{'$exists': False}}, {'pid': None}],\n 'cs.sd': {'$gte': now - datetime.timedelta(days=7)},\n 'cs.ss': {'$in': BAG_DASHBOARD_STATUSES}}},\n {'$group': {'_id': {'cn':'$cn','cs.sl':'$cs.sl'},\n 'count': {'$sum': 1}, 'bss': {'$addToSet': '$bs'}}},\n {'$sort': {'count': -1}}\n ])\n res = dict()\n for bag in bags.get('result', {}):\n if isinstance(bag, dict):\n origin = bag.get('_id').get('cs.sl')\n dest = bag.get('_id').get('cn')\n dstate = get_center_from_cache(dest).get('state')\n count = bag.get('count')\n bss = bag.get('bss')\n if not res.get(origin):\n res[origin] = list()\n res[origin].append({'dest': dest, 'dstate': dstate, 'count': count, 'bss': bss,})\n cache.set(BAG_DASHBOARD_CACHE_KEY, res, 60*20)",
"def galaxy_positions():\n hdulist1 = pf.open(source+'/kids_data/KiDS_DR3.1_G9_ugri_shear.fits')\n '''\n hdulist2 = pf.open('../kids_data/KiDS_DR3.1_G12_ugri_shear.fits')\n hdulist3 = pf.open('../kids_data/KiDS_DR3.1_G15_ugri_shear.fits')\n hdulist4 = pf.open('../kids_data/KiDS_DR3.1_G23_ugri_shear.fits')\n hdulist5 = pf.open('../kids_data/KiDS_DR3.1_GS_ugri_shear.fits')\n '''\n ra = hdulist1[1].data['RAJ2000'][:sample]\n dec = hdulist1[1].data['DECJ2000'][:sample]\n global maxra\n maxra = max(ra)\n global minra\n minra = min(ra)\n global maxdec\n maxdec = max(dec)\n global mindec\n mindec = min(dec)\n global bsize\n bsize = abs(max(maxra, maxdec) - min(mindec, minra))\n coords = np.column_stack([ra, dec])\n global SIZE\n SIZE = len(coords)\n print(maxra, maxdec, minra, mindec, SIZE)\n ctree = cKDTree(coords)\n return ctree",
"def maak_attribuut_groep_koppeling(attribute_groep_id,\n attribuut,\n attribuut_toets_wijze,\n onder_toets_waarde,\n boven_toets_waarde):\n if attribuut_toets_wijze == 1:\n # Attribuuttoetswijze 1 wordt thans niet gebruikt in de grouper.\n raise NotImplementedError(\"Attribuuttoetswijze %d is niet geïmplementeerd.\" % attribuut_toets_wijze)\n\n if attribuut_toets_wijze == 2:\n return AttribuutGroepKoppeling2(attribute_groep_id, attribuut, onder_toets_waarde, boven_toets_waarde)\n\n raise RuntimeError(\"Onbekende attribuuttoetswijze %d.\" % attribuut_toets_wijze)"
] | [
"0.55878776",
"0.49948218",
"0.4724546",
"0.47188705",
"0.46614647",
"0.46237472",
"0.4620131",
"0.46127397",
"0.46111575",
"0.45959288",
"0.45944956",
"0.45454195",
"0.45318693",
"0.45301446",
"0.4526814",
"0.45172554",
"0.44915894",
"0.4489763",
"0.44570842",
"0.44483387",
"0.44312415",
"0.4430792",
"0.44300884",
"0.44279858",
"0.44215178",
"0.4416144",
"0.43952984",
"0.43358898",
"0.4334411",
"0.43343237"
] | 0.6432043 | 0 |
Calculate overlap between bag and kadaster geodataframes in percentages. | def calculate_percentage_overlap(bag_sample, kadaster_sample):
# calculate percentage intersection
ov_output = gpd.overlay(bag_sample, kadaster_sample, how="intersection")
percentage_overlap = (
ov_output.geometry.area / kadaster_sample.geometry.area.values[0]
)
percentage_overlap.index = bag_sample.index
percentage_overlap = percentage_overlap.to_frame().rename(
columns={0: "percentage_overlap"}
)
percentage_overlap.index.name = "bag_index"
return percentage_overlap | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def overlapPercent(box1, box2):\n xx2 = min(box1[2], box2[2])\n xx1 = max(box1[0], box2[0])\n yy2 = min(box1[3], box2[3])\n yy1 = max(box1[1], box2[1])\n w = max(0, xx2 - xx1 + 1)\n h = max(0, yy2 - yy1 + 1)\n areaBox1 = boundingBoxArea(box1)\n areaBox2 = boundingBoxArea(box2)\n overlap = max(w * h / areaBox1, w * h / areaBox2)\n return overlap",
"def percentOverlap(x1, x2):\n nonZeroX1 = np.count_nonzero(x1)\n nonZeroX2 = np.count_nonzero(x2)\n minX1X2 = min(nonZeroX1, nonZeroX2)\n percentOverlap = 0\n if minX1X2 > 0:\n percentOverlap = float(np.dot(x1.T, x2)) / float(minX1X2)\n return percentOverlap",
"def compute_overlap(self, *skymaps):\n masked_skymaps = [self.mask_skymap(m, self.percent) for m in skymaps]\n joint_masked_skymaps = np.multiply(*masked_skymaps)\n return self.count_masked_pixel(joint_masked_skymaps)/np.amin([self.count_masked_pixel(m) for m in masked_skymaps])",
"def percentages_overlapping(self, other: 'BBox') -> Optional['BBox']:\n return BBox.build(\n self.ix.percentages_overlapping(other.ix),\n self.iy.percentages_overlapping(other.iy))",
"def overlap_cost(track_a, track_b):\n return 1 - overlap(track_a.bbox, track_b.bbox)",
"def test_compute_overlap(self):\n # box1 contained in box2\n box1 = ((1, 2), (1, 2), (1, 2))\n box2 = ((1, 3), (1, 3), (1, 3))\n mapping = {box1: [1, 2, 3, 4], box2: [1, 2, 3, 4, 5]}\n # box1 in box2, so complete overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box1, box2), 1)\n # 4/5 atoms in box2 in box1, so 80 % overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box2, box1), .8)",
"def percent_overlap(items1, items2, k = None):\n if k is None:\n k = max([len(items1), len(items2)])\n assert k > 0 and k <= max([len(items1), len(items2)]), 'k is out of bounds!'\n items1_set, items2_set = set(items1[:k]), set(items2[:k])\n return len(items1_set & items2_set) / len(items1_set | items2_set)",
"def compute_overlap(self, skymap1, skymap2, single_skymap1, single_skymap2):\n from ligo.skymap.postprocess.crossmatch import crossmatch\n from astropy.coordinates import SkyCoord\n ra, dec = self.get_ra_dec_from_skymap(single_skymap1)\n coord = SkyCoord(ra, dec, unit=\"rad\")\n result = crossmatch(skymap2, coord)\n searched_prob_1 = np.min([result.searched_prob, 1.0])\n ra, dec = self.get_ra_dec_from_skymap(single_skymap2)\n coord = SkyCoord(ra, dec, unit=\"rad\")\n result = crossmatch(skymap1, coord)\n searched_prob_2 = np.min([result.searched_prob, 1.0])\n return np.max([1-searched_prob_1, 1-searched_prob_2])",
"def _overlap(c1, c2, index='dice'):\n set1 = set(c1)\n set2 = set(c2)\n intersection_num = float(len(set1 & set2))\n try:\n if index == 'dice':\n total_num = len(set1 | set2) + intersection_num\n overlap = 2.0 * intersection_num / total_num\n elif index == 'percent':\n overlap = 1.0 * intersection_num / len(set1)\n else:\n raise Exception(\"Only support 'dice' and 'percent' as overlap indices at present.\")\n except ZeroDivisionError as e:\n print(e)\n overlap = np.nan\n return overlap",
"def percentages_overlapping(self, other: 'Interval') -> Optional['Interval']:\n intersection = Interval.intersection([self, other])\n if intersection is None:\n return None\n if self.length == 0:\n return Interval(0, 1)\n return Interval(\n (intersection.a - self.a) / self.length,\n (intersection.b - self.a) / self.length)",
"def BD_overlap(df_OTU):\n # min BD for each library\n func = lambda x: np.min(x['BD_mid'])\n BD_mins = df_OTU.apply_by_group(func,groups=['library'],inplace=False)\n # max BD for each library\n func = lambda x: np.max(x['BD_mid'])\n BD_maxs = df_OTU.apply_by_group(func,groups=['library'],inplace=False)\n \n # overlap: max of BD_mins, min of BD_maxs\n BD_overlap_min = np.max(BD_mins['values'].values)\n BD_overlap_max = np.min(BD_maxs['values'].values)\n \n return BD_overlap_min, BD_overlap_max",
"def getOverlap(self):\n return 0.5",
"def _calculate_area_overlap(self, wake_velocities, freestream_velocities, turbine):\n count = np.sum(freestream_velocities - wake_velocities <= 0.05)\n return (turbine.grid_point_count - count) / turbine.grid_point_count",
"def compute_overlap(*skymaps):\n # Simply sum over all pixels\n # NOTE To avoid under/over-flow, add the log of pdfs then exponentiate\n _out = np.zeros_like(skymaps[0])\n for skymap in skymaps:\n _out += np.log(skymap)\n return np.nansum(np.exp(_out))",
"def compute_overlap_rate(box, boxes):\n # Calculate intersection areas\n\n x1 = np.maximum(box[0], boxes[:, 0])\n x2 = np.minimum(box[1], boxes[:, 1])\n intersection = np.maximum(x2 - x1, 0)\n boxes_area = boxes[:, 1] - boxes[:, 0]\n\n overlap = intersection/boxes_area\n\n return overlap",
"def overlap_score(labels, labels_pred):\n raw_overlap = 1-fraction_mislabeled_nodes(labels, labels_pred)\n partition_true = np.array(labels).astype(int)\n partition_pred = np.array(labels_pred).astype(int)\n num_nodes = partition_pred.size\n num_groups = partition_true.max() + 1\n\n chance_level = 0.\n for i in range(num_groups):\n temp = np.sum(i == partition_true) / num_nodes\n if temp > chance_level:\n chance_level = temp\n\n score = (raw_overlap - chance_level) / (1 - chance_level)\n if score <= 0:\n score = 0\n\n return score",
"def compute_overlap(*skymaps):\n unnormalized_overlap = PosteriorOverlap.compute_overlap(*skymaps)\n normalization = np.prod([np.sqrt(PosteriorOverlap.compute_overlap(m, m)) for m in skymaps])\n return unnormalized_overlap/normalization",
"def pred_overlap(t, h):\n a_set = set(get_pred(t))\n b_set = set(get_pred(h))\n return len(a_set&b_set)/float(len(a_set|b_set))",
"def bbox_overlap_ratio(bbox_a, bbox_b):\n\n overlap_area = bbox_overlap(bbox_a, bbox_b)\n ymin_a, xmin_a, ymax_a, xmax_a = bbox_a\n ymin_b, xmin_b, ymax_b, xmax_b = bbox_b\n\n area_a = (xmax_a - xmin_a + 1) * (ymax_a - ymin_a + 1)\n area_b = (xmax_b - xmin_b + 1) * (ymax_b - ymin_b + 1)\n\n union_area = area_a + area_b - overlap_area\n if union_area == 0:\n return 0\n else:\n return overlap_area / union_area",
"def overlap_rate(self, other):\n\n ### Original\n from pyresample.spherical_geometry import get_polygon_area\n other_area = other.get_area()\n inter_area = get_polygon_area(self.intersection(other))\n return inter_area / other_area\n ### End Original\n #from .spherical import SphPolygon\n #log.info('RUNNING SPHERICAL in overlap_rate')\n #selfpoly = SphPolygon(self.corners)\n #otherpoly = SphPolygon(other.corners)\n #other_area = other.get_area()\n #inter_area = selfpoly.intersection(otherpoly)\n #return inter_area / other_area",
"def _overlap_energy(self, this, that):\n if not this.overlaps(that):\n return 0.0\n\n return min(10.0 / this.rank, 10.0 / that.rank)",
"def getArea(self):\n seg = self._group_index\n groups = np.unique(seg)\n ng = len(groups)\n area = 0\n for i in range(ng):\n group_segments = np.where(groups[i] == seg)[0]\n nseg = len(group_segments) - 1\n for j in range(nseg):\n ind = group_segments[j]\n p0 = latlon2ecef(self._toplats[ind],\n self._toplons[ind],\n self._topdeps[ind])\n p1 = latlon2ecef(self._toplats[ind + 1],\n self._toplons[ind + 1],\n self._topdeps[ind + 1])\n p2 = latlon2ecef(self._botlats[ind + 1],\n self._botlons[ind + 1],\n self._botdeps[ind + 1])\n p3 = latlon2ecef(self._botlats[ind],\n self._botlons[ind],\n self._botdeps[ind])\n a = np.sqrt((p1[0] - p0[0])**2 +\n (p1[1] - p0[1])**2 +\n (p1[2] - p0[2])**2)\n b = np.sqrt((p2[0] - p0[0])**2 +\n (p2[1] - p0[1])**2 +\n (p2[2] - p0[2])**2)\n c = np.sqrt((p2[0] - p1[0])**2 +\n (p2[1] - p1[1])**2 +\n (p2[2] - p1[2])**2)\n s = (a + b + c) / 2\n A1 = np.sqrt(s * (s - a) * (s - b) * (s - c))\n a = np.sqrt((p0[0] - p3[0])**2 +\n (p0[1] - p3[1])**2 +\n (p0[2] - p3[2])**2)\n b = np.sqrt((p2[0] - p3[0])**2 +\n (p2[1] - p3[1])**2 +\n (p2[2] - p3[2])**2)\n c = np.sqrt((p0[0] - p2[0])**2 +\n (p0[1] - p2[1])**2 +\n (p0[2] - p2[2])**2)\n s = (a + b + c) / 2\n A2 = np.sqrt(s * (s - a) * (s - b) * (s - c))\n area = area + (A1 + A2) / 1000 / 1000\n return area",
"def overlap_ratio(self, other):\n VERIFICATION.verify_type(other, Rect, \"overlap_ratio target must be Rect\")\n\n intersection_rect = self.intersection(other)\n if intersection_rect is None:\n return 0.0\n\n union_rect = self.union(other)\n\n overlap_score = float(intersection_rect.area())/float(union_rect.area())\n return overlap_score",
"def iou(bbox1, bbox2):\n\n bbox1 = [float(x) for x in bbox1]\n bbox2 = [float(x) for x in bbox2]\n\n (x0_1, y0_1, x1_1, y1_1) = bbox1\n (x0_2, y0_2, x1_2, y1_2) = bbox2\n\n # get the overlap rectangle\n overlap_x0 = max(x0_1, x0_2)\n overlap_y0 = max(y0_1, y0_2)\n overlap_x1 = min(x1_1, x1_2)\n overlap_y1 = min(y1_1, y1_2)\n\n # check if there is an overlap\n if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0:\n return 0\n\n # if yes, calculate the ratio of the overlap to each ROI size and the unified size\n size_1 = (x1_1 - x0_1) * (y1_1 - y0_1)\n size_2 = (x1_2 - x0_2) * (y1_2 - y0_2)\n size_intersection = (overlap_x1 - overlap_x0) * (overlap_y1 - overlap_y0)\n size_union = size_1 + size_2 - size_intersection\n\n return size_intersection / size_union",
"def iou(bbox1, bbox2):\n\n bbox1 = [float(x) for x in bbox1]\n bbox2 = [float(x) for x in bbox2]\n\n (x0_1, y0_1, x1_1, y1_1) = bbox1\n (x0_2, y0_2, x1_2, y1_2) = bbox2\n\n # get the overlap rectangle\n overlap_x0 = max(x0_1, x0_2)\n overlap_y0 = max(y0_1, y0_2)\n overlap_x1 = min(x1_1, x1_2)\n overlap_y1 = min(y1_1, y1_2)\n\n # check if there is an overlap\n if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0:\n return 0\n\n # if yes, calculate the ratio of the overlap to each ROI size and the unified size\n size_1 = (x1_1 - x0_1) * (y1_1 - y0_1)\n size_2 = (x1_2 - x0_2) * (y1_2 - y0_2)\n size_intersection = (overlap_x1 - overlap_x0) * (overlap_y1 - overlap_y0)\n size_union = size_1 + size_2 - size_intersection\n\n return size_intersection / size_union",
"def overlap(path1, path2):\n DataL1 = BedTool(path1).sort()\n DataL2 = BedTool(path2).sort()\n overlap = DataL1.intersect(DataL2, wao=True)\n Overlap_df = overlap.to_dataframe()\n Strand1 = list(Overlap_df.iloc[:, 5])\n Strand2 = list(Overlap_df.iloc[:, 11])\n p_p, m_m, p_m, m_p, same_strand, opposite_strand, convergent, divergent = orientation(Strand1, Strand2)\n return p_p, m_m, p_m, m_p, same_strand, opposite_strand, convergent, divergent",
"def compute_overlap(edge_images):\n if edge_images[0].size == edge_images[1].size:\n # Overlap computation\n # ref: http://answers.opencv.org/question/37392/how-to-compute-intersections-of-two-contours/\n height, width = edge_images[0].shape\n sum_images = np.zeros((height, width), np.uint8)\n overlap_image = np.zeros((height, width), np.uint8)\n\n cv2.bitwise_and(edge_images[0], edge_images[1], overlap_image)\n cv2.bitwise_or(edge_images[0], edge_images[1], sum_images)\n non_zero_overlap_image = cv2.countNonZero(overlap_image)\n non_zero_sum_image = cv2.countNonZero(sum_images)\n\n try:\n overlap_percentage = non_zero_overlap_image / non_zero_sum_image\n return overlap_percentage\n\n except ZeroDivisionError:\n return 0.0\n else:\n return None",
"def _absolutecoverage(bd_shape,BD_directory, run, archive_file_path):\n\n path = get_archive_filepath(BD_directory, run, archive_file_path)\n all_non_empty_performances = get_bin_performances_uniquearchive(path,len(bd_shape))\n num_filled=len(all_non_empty_performances)\n return float(num_filled)",
"def cal_overlaps(boxes1, boxes2):\n area1 = (boxes1[:, 0] - boxes1[:, 2]) * (boxes1[:, 1] - boxes1[:, 3]) # (Nsample, 1)\n area2 = (boxes2[:, 0] - boxes2[:, 2]) * (boxes2[:, 1] - boxes2[:, 3]) # (Msample, 1)\n\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) # (Nsample, Msample)\n\n # calculate the intersection of boxes1(anchor) and boxes2(GT box)\n for i in range(boxes1.shape[0]):\n overlaps[i][:] = cal_iou(boxes1[i], area1[i], boxes2, area2)\n\n return overlaps",
"def normalized_overlap(g, node_1, node_2):\n inter = len(set(nx.neighbors(g, node_1)).intersection(set(nx.neighbors(g, node_2))))\n unio = len(set(nx.neighbors(g, node_1)).union(set(nx.neighbors(g, node_2))))\n return float(inter)/float(unio)"
] | [
"0.69667053",
"0.6415947",
"0.62875366",
"0.62101567",
"0.6209373",
"0.6169764",
"0.6060111",
"0.6049341",
"0.604258",
"0.5956855",
"0.59191436",
"0.5853579",
"0.5817987",
"0.5817337",
"0.5816945",
"0.57941353",
"0.5751341",
"0.57163626",
"0.56830454",
"0.5674475",
"0.5645978",
"0.5570281",
"0.55404973",
"0.55387145",
"0.55387145",
"0.55197287",
"0.5499339",
"0.5498487",
"0.54948986",
"0.5491168"
] | 0.7542141 | 0 |
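A minimal usage sketch of the `calculate_percentage_overlap` document above, assuming `geopandas` and `shapely` are installed and the function is in scope; the toy polygons and the `id` column are invented purely for illustration:

import geopandas as gpd
from shapely.geometry import box

# A 2x2 "bag" footprint and a 2x2 "kadaster" parcel that overlap in a 1x1 square.
bag_sample = gpd.GeoDataFrame({"id": [1], "geometry": [box(0, 0, 2, 2)]}, index=[7])
kadaster_sample = gpd.GeoDataFrame({"id": [1], "geometry": [box(1, 1, 3, 3)]})

overlap = calculate_percentage_overlap(bag_sample, kadaster_sample)
print(overlap)  # expected: one row (bag_index 7) with percentage_overlap == 0.25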
Validates a file against a sha256 or md5 hash. Arguments | def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
    if ((algorithm == 'sha256') or
        (algorithm == 'auto' and len(file_hash) == 64)):
hasher = 'sha256'
else:
hasher = 'md5'
if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):\n if (algorithm == 'sha256') or (algorithm == 'auto' and len(file_hash) == 64):\n hasher = 'sha256'\n else:\n hasher = 'md5'\n\n if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):\n return True\n else:\n return False",
"def validate_file(path: str, expected_num_bytes: int, expected_hexdigest: str):\n with open(path, 'rb') as f:\n data = f.read()\n num_bytes = len(data)\n if num_bytes != expected_num_bytes:\n raise ValueError(\n f'Expected file content number of bytes to be {expected_num_bytes} but found {num_bytes}.'\n )\n hexdigest = hashlib.sha256(data).hexdigest()\n if hexdigest != expected_hexdigest:\n raise ValueError(\n f'Expected file content hash to be {expected_hexdigest!r} but found {hexdigest!r}.'\n )",
"def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):\n hasher = _resolve_hasher(algorithm, file_hash)\n\n if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):\n return True\n else:\n return False",
"def check_hash(self, fname, args):\n fobj = self._open_file(fname)\n\n rc = 0\n format_errors = 0\n hash_errors = 0\n read_errors = 0\n for idx, line in enumerate(fobj):\n # remove any newline characters\n m = self.CHECK_RE.match(line.strip())\n if not m:\n if args.warn:\n self.app.stderr.write(\n 'hasher {0}: {1}: {2}: improperly formatted {3}'\n ' checksum line\\n'.format(self.name, fname, idx + 1,\n self.name.upper()))\n format_errors += 1\n rc = 1\n continue\n hash_value, binary, check_file = m.groups()\n\n try:\n check_f = open(check_file, 'rb' if binary == '*' else 'r')\n except IOError:\n self.app.stderr.write(\n 'hasher {0}: {1}: No such file or directory\\n'.format(\n self.name, check_file))\n if not args.status:\n self.app.stdout.write(\n STATUS_MSG.format(check_file, READ_ERROR))\n read_errors += 1\n rc = 1\n continue\n\n if self._calculate_hash(check_f) == hash_value:\n if not (args.quiet or args.status):\n self.app.stdout.write(\n STATUS_MSG.format(check_file, SUCCESS))\n else:\n if not args.status:\n self.app.stdout.write(\n STATUS_MSG.format(check_file, HASH_ERROR))\n hash_errors += 1\n rc = 1\n\n if format_errors and not args.status:\n self.app.stderr.write(\n 'hasher {0}: WARNING: {1} line{2} {3} improperly'\n ' formatted\\n'.format(\n self.name,\n format_errors,\n 's' if format_errors > 1 else '',\n 'are' if format_errors > 1 else 'is',\n ))\n if read_errors and not args.status:\n self.app.stderr.write(\n 'hasher {0}: WARNING: {1} listed file{2}'\n ' could not be read\\n'.format(\n self.name,\n read_errors,\n 's' if read_errors > 1 else '',\n ))\n if hash_errors and not args.status:\n self.app.stderr.write(\n 'hasher {0}: WARNING: {1} computed checksum{2}'\n ' did NOT match\\n'.format(\n self.name,\n hash_errors,\n 's' if hash_errors > 1 else '',\n ))\n return rc",
"def __check_hashes(self, file_object, trusted_hashes):\n\n # Verify each trusted hash of 'trusted_hashes'. Raise exception if\n # any of the hashes are incorrect and return if all are correct.\n for algorithm, trusted_hash in trusted_hashes.items():\n digest_object = tuf.hash.digest(algorithm)\n digest_object.update(file_object.read())\n computed_hash = digest_object.hexdigest()\n if trusted_hash != computed_hash:\n raise tuf.BadHashError(trusted_hash, computed_hash)\n else:\n logger.info('The file\\'s '+algorithm+' hash is correct: '+trusted_hash)",
"def verify(path, sha_path, verbose):\n if verbose:\n print(\"verifying\", path)\n with open(path, \"rb\") as source:\n found = hashlib.sha256(source.read()).hexdigest()\n with open(sha_path, \"r\") as sha256sum:\n expected = sha256sum.readline().split()[0]\n verified = found == expected\n if not verified:\n print(\"invalid checksum:\\n\"\n \" found: {}\\n\"\n \" expected: {}\".format(found, expected))\n return verified",
"def validate(self):\n print(\"Validating \")\n sha256_test = _get_file_sha256_hash(self.file_path)\n sha256_truth = self.metadata_pkg[\"hash\"]\n if sha256_test != sha256_truth:\n raise ValueError(\n f\"Hash of modelpkg file {os.path.basename(self.file_path)} ({sha256_test}) does not match truth hash ({sha256_truth}).\")",
"def test_file_integrity_return_error_in_case_of_bad_md5():\n test_file = open('./testfile.tmp', 'a')\n test_file.close()\n\n test_file_path = os.path.realpath('./testfile.tmp')\n test_file_md5 = hashlib.md5(open(test_file_path, 'rb').read()).hexdigest()\n\n bad_md5 = 'some_noise_%s' % test_file_md5\n\n result = PackageDownloadHelper.check_file_integrity(test_file_path, bad_md5)\n\n assert isinstance(result, ApiResponse)",
"def check_file(filename):\n\tfile = open(filename, 'r')\n\tfile_content = file.read()\n\tif len(file_content) < 3 or file_content.isspace():\n\t\tfile.close()\n\t\treturn (0, 'File content must begin with a keyword (HEX, BIN or ASC)!')\n\t# First 3 characters should represent the base of the content.\n\tbase = file_content[0:3]\n\tfile_content = file_content[3:]\n\tforbidden_chars = {'BIN': [None], 'HEX': [None]}\n\n\t# Content is claimed to be hexadecimal:\n\tif base == 'HEX':\n\t\tfile_content = ''.join(file_content.split())\n\t\tfile_content = file_content.upper()\n\t\tif len(file_content) < 2:\n\t\t\tfile.close()\n\t\t\treturn (0, 'File must contain at least 1 byte of data after the keyword!')\n\t\tmod = len(file_content) % 2\n\t\tif mod != 0:\n\t\t\treturn (0, 'File must contain full bytes of data (2 hex digits = 1 byte)!')\n\t\t# Use regular expression for verifying the content.\n\t\tif re.match('[0-9A-F]+$', file_content):\n\t\t\tcontent = ''\n\t\t\tfor start in range(0, len(file_content), 2):\n\t\t\t\tif start + 2 <= len(file_content):\n\t\t\t\t\tcontent += file_content[start:start+2] + ' '\n\t\t\t\telse:\n\t\t\t\t\tcontent += file_content[start:]\t\t# add the remainings\n\t\t\t\n\t\t\tcontent = content.rstrip()\t\t# remove possible whitespace at the end\n\t\t\t# Check that the file doesn't contain any forbidden control characters\n\t\t\tfor val in content.split():\n\t\t\t\tif val in forbidden_chars['HEX']:\n\t\t\t\t\tfile.close()\n\t\t\t\t\treturn (0, 'File must not contain other control characters than TAB, LF or CR!')\n\t\t\t# Return type indicator and the chopped content.\n\t\t\tfile.close()\n\t\t\treturn (1, content)\n\t\telse:\n\t\t\tfile.close()\n\t\t\treturn (0, 'File content was invalid hexadecimal data!')\n\t\t\t\n\t# Content is claimed to be binary:\n\telif base == 'BIN':\n\t\tfile_content = ''.join(file_content.split())\n\t\tif len(file_content) < 8:\n\t\t\tfile.close()\n\t\t\treturn (0, 'File must contain at least 1 byte of data after the keyword!')\n\t\tmod = len(file_content) % 8\n\t\tif mod != 0:\n\t\t\treturn (0, 'File must contain full bytes of data (8 bits = 1 byte)!')\n\t\t\t\n\t\t# Use regular expression for verifying the content.\n\t\tre.purge()\t\t# clear regex cache\n\t\tif re.match('[0-1]+$', file_content):\n\t\t\tcontent = ''\n\t\t\tfor start in range(0, len(file_content), 8):\n\t\t\t\tif start + 8 <= len(file_content):\n\t\t\t\t\tcontent += file_content[start:start+8] + ' '\n\t\t\t\telse:\n\t\t\t\t\tcontent += file_content[start:]\t\t# add the remainings\n\t\t\t\t\t\n\t\t\tcontent = content.rstrip()\t\t# remove possible whitespace at the end\n\t\t\t# Check that the file doesn't contain any forbidden control characters\n\t\t\tfor val in content.split():\n\t\t\t\tif val in forbidden_chars['BIN']:\n\t\t\t\t\tfile.close()\n\t\t\t\t\treturn (0, 'File must not contain other control characters than TAB, LF or CR!')\n\t\t\t# Return type indicator and the chopped content.\n\t\t\tfile.close()\n\t\t\treturn (2, content)\n\t\telse:\n\t\t\tfile.close()\n\t\t\treturn (0, 'File content was invalid binary data!')\n\t\t\t\n\t# Content is claimed to be ASCII:\n\telif base == 'ASC':\n\t\tescape_chars = ['\\a', '\\b', '\\f', '\\n', '\\r', '\\t', '\\v']\n\t\tescape_letters = ['a', 'b', 'f', 'n', 'r', 't', 'v']\n\t\t# Use regular expression for verifying the content.\n\t\tre.purge()\t\t# clear regex cache\n\t\tif re.match('[\\x00-\\x7F]+$', file_content):\t\t# [\\x20-\\x7E]\n\t\t\t# Check that the file doesn't contain any forbidden control characters\n\t\t\tfor c in 
file_content:\n\t\t\t\tif binascii.hexlify(c).upper() in forbidden_chars['HEX']:\n\t\t\t\t\tfile.close()\n\t\t\t\t\treturn (0, 'File contains illegal control characters!')\n\t\t\tfor c in escape_chars:\n\t\t\t\tif file_content.count(c) != 0:\n\t\t\t\t\tfile_content = file_content.replace(c, '')\t\t\t\t\t\n\t\t\t# Replace all \"\\\\n\", \"\\\\r\" etc. with \"\\n\", \"\\r\" etc. (i.e. remove\n\t\t\t# the extra backslash) so that the control characters are interpreted\n\t\t\t# correctly into hex values.\n\t\t\tfor c in range(0, len(file_content)):\n\t\t\t\tif file_content[c:c+1] == '\\\\':\n\t\t\t\t\tif file_content[c+1:c+2] in escape_letters:\n\t\t\t\t\t\tfor e in escape_letters:\n\t\t\t\t\t\t\tif file_content[c+1:c+2] == e:\n\t\t\t\t\t\t\t\tfile_content = file_content[:c] + escape_chars[escape_letters.index(e)] + file_content[c+2:]\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn (0, 'File contains illegal control characters!\\n\\n' + \n\t\t\t\t\t\t\t\t'Legal characters after a backslash are: a, b, f, n, r, t, and v.')\n\n\t\t\t# Return type indicator and the file content.\n\t\t\tfile.close()\n\t\t\treturn (3, file_content)\n\t\telse:\n\t\t\tfile.close()\n\t\t\treturn (0, 'File content was invalid ASCII data!')\n\t\t\n\t# Content is invalid:\n\telse:\n\t\tfile.close()\n\t\treturn (0, 'File content must begin with a keyword (HEX, BIN or ASC)!')",
"def verifyFile(source, destination):\n\tsourceHash = hashlib.sha256(open(source, 'rb').read()).digest()\n\tdestinationHash = hashlib.sha256(open(destination, 'rb').read()).digest()\n\n\tif sourceHash == destinationHash:\n\t\treturn (True, str(sourceHash))\n\n\treturn False",
"def validate_directory(self, checksum_file_hash=None):\n\n # If the directory doesn't have a checksum file, raise an error.\n if not self.has_checksum_file:\n return False\n\n # Load in all the text in the checksum file.\n with open(self.checksum_file_path, \"r\") as f:\n checksum_file_content = f.read()\n\n # If a hash was given for the checksum file, check the validity\n # of the checksum file.\n if checksum_file_hash is not None:\n hash_generator = hashlib.sha256()\n hash_generator.update(checksum_file_content.encode(\"utf-8\"))\n if checksum_file_hash != hash_generator.digest():\n return False\n\n # If we have reached this point and the checksum_file_hash was given, then we know that\n # the checksum file has not been corrupted. If the checksum_file_hash was not given then we\n # are not sure, so we need to check if there are any errors in the file.\n # This is done by adding and extra matching group to the regex \"|(.)\". This group will\n # match anything that has not been matched by the base regex. Meaning that\n # if something is matched by this group, there are syntax errors in the file.\n checksum_file_regex_pattern = \\\n r\"(?:(.+)(?:\\t)([a-fA-F0-9]{64}))|(.)\" if checksum_file_hash is None else r\"(?:(.+)(?:\\t)([a-fA-F0-9]{64}))\"\n\n # Loop through all matches found by the regex.\n for match in re.finditer(checksum_file_regex_pattern, checksum_file_content):\n # Check if syntax error was found in case no checksum_file_hash was given.\n if checksum_file_hash is None:\n if match.group(3) is not None:\n return False\n\n # Get the file path and file hash and convert these to the correct type.\n file_path = self.dir_path / match.group(1)\n file_hash = bytes.fromhex(match.group(2))\n\n # Generate the sha256 hash of the contents of the file.\n hash_generator = hashlib.sha256()\n with open(file_path, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_generator.update(chunk)\n\n # Return false if the hash doesn't match.\n if file_hash != hash_generator.digest():\n return False\n\n # If we have reached here, then all of the files have been checked and they are valid.\n # so return True.\n return True",
"def _verify_hash(self, read_bytes):\n if self.hash is None:\n raise QuiltException(\"Hash missing - need to build the package\")\n _check_hash_type_support(self.hash.get('type'))\n digest = hashlib.sha256(read_bytes).hexdigest()\n if digest != self.hash.get('value'):\n raise QuiltException(\"Hash validation failed\")",
"def md5check(fname, md5fname):\n\tmd5fh = open(md5fname, \"r\")\n\treturn (md5sum(fname) == md5fh.readline())",
"def checkfile(filename, source=None):\n if source:\n # Let's check some sums\n if os.path.exists(filename) and os.path.exists(source):\n src_sha = calchash(source)\n dest_sha = calchash(filename)\n if DRYRUN:\n print(\"{src} hash {src_sha}. {dest} hash {dest_sha}\".format(src=source, dest=filename, src_sha=src_sha.hexdigest(), dest_sha=dest_sha.hexdigest()))\n return src_sha.digest() == dest_sha.digest()\n else:\n return os.path.exists(filename)",
"def check_md5sum(file1: str, file2: str) -> bool:\n return get_md5_hash(file1) == get_md5_hash(file2)",
"def check_sha1(filename, sha1_hash):\n sha1 = hashlib.sha1()\n with open(filename, 'rb') as f:\n while True:\n data = f.read(1048576)\n if not data:\n break\n sha1.update(data)\n\n sha1_file = sha1.hexdigest()\n l = min(len(sha1_file), len(sha1_hash))\n return sha1.hexdigest()[0:l] == sha1_hash[0:l]",
"def find_and_validate_md5_checksums(in_folder, md5key_filename):\n validated_files = []\n for filename in locate(md5key_filename, root=in_folder):\n file_fd = open(filename, 'r')\n contents = file_fd.readlines()\n for line in contents:\n split_line = line.split(' ')\n if len(split_line) == 2:\n hashkey, hashkey_target = split_line\n hashkey_target = os.path.join(os.path.dirname(filename),\n hashkey_target.strip())\n hashkey = hashkey.strip()\n found_hashkey = calculate_md5_external(hashkey_target).strip()\n if found_hashkey != hashkey:\n raise InvenioFileChecksumError(\"Error matching checksum of %s:\"\n \" %s is not equal to %s\" %\n (hashkey_target,\n found_hashkey,\n hashkey))\n validated_files.append(hashkey_target)\n return validated_files",
"def Validate(self, relative_file, contents):\n pass",
"def validate(self, filepath):\n header_bytes = b''\n cypher_bytes = b''\n with open(filepath, 'rb') as f:\n header_bytes = f.read(16)\n cypher_bytes = f.read(16)\n data = self._decrypt(cypher_bytes, strip_padding=False)\n try:\n self._validate_header(header_bytes, data)\n except DecryptionError:\n return False\n return True",
"def check_hash(file_path: str, md5_value_from_ia: str) -> typing.Tuple[str, str]:\n md5_value_local = md5_hash_file(file_path)\n if md5_value_local.lower().strip() == md5_value_from_ia.lower().strip():\n return (\n \"debug\",\n \"'{}' file hash ('{}') matches between local file and IA metadata\".format(\n os.path.basename(file_path), md5_value_local\n ),\n )\n return (\n \"warning\",\n \"'{}' file hash does not match between local file ({}) and IA metadata ({})\".format(\n os.path.basename(file_path), md5_value_local, md5_value_from_ia\n ),\n )",
"def verify_sum(file_path, md5_sum):\n file_md5_sum = generate_sum(file_path)\n return (file_md5_sum == md5_sum)",
"def CheckMd5(filename, md5filename):\n try:\n hasher = hashlib.md5()\n with open(filename) as check_file:\n with open(md5filename) as golden_file:\n for chunk in iter(lambda: check_file.read(128*hasher.block_size), ''):\n hasher.update(chunk)\n md5_contents = golden_file.read()\n if md5_contents:\n golden_digest_and_more = md5_contents.split(' ')\n if golden_digest_and_more:\n return golden_digest_and_more[0] == hasher.hexdigest()\n logging.warning('MD5 checksum match failed for %s', filename)\n return False\n except IOError:\n logging.warning('MD5 hasher read failed for %s', filename)\n return False",
"def ensure_file(filename, old_contents=None, old_hash=None):\n hash_function = lambda text: hashlib.sha1(text.encode('utf-8')).digest()\n\n if old_hash is None and old_contents is not None:\n old_hash = hash_function(old_contents)\n\n if not os.path.exists(filename):\n # write the file if it doesn't exist\n if old_contents is not None:\n with open(filename, 'w') as f:\n f.write(old_contents)\n else:\n raise RuntimeError(\"No contents to write missing file \" +\n str(filename))\n\n with open(filename, mode='r') as f:\n contents = f.read()\n\n hashed = hash_function(contents)\n\n if old_hash and hashed != old_hash:\n raise RuntimeError(\"Existing file \" + str(filename) + \" does not\"\n + \" match stored file.\")\n\n return contents, hashed",
"def _check_hash(self, text):\n old = self.header.get(\"sha1sum\", None)\n if old is None:\n raise crexc.ChecksumError(\"sha1sum is missing in \" + repr(self.basename))\n if self._get_checksum(text) != self.header[\"sha1sum\"]:\n raise crexc.ChecksumError(\"sha1sum mismatch in \" + repr(self.basename))",
"def CheckHashes(self, hashes):\n hash_map = {}\n for hsh in hashes:\n if hsh.HasField(\"sha256\"):\n # The canonical name of the file is where we store the file hash.\n digest = hsh.sha256\n hash_map[aff4.ROOT_URN.Add(\"files/hash/generic/sha256\").Add(\n str(digest))] = digest\n\n for metadata in aff4.FACTORY.Stat(list(hash_map), token=self.token):\n yield metadata[\"urn\"], hash_map[metadata[\"urn\"]]",
"def check_md5(file1, file2):\r\n with open(file1, \"rb\") as f1:\r\n h1 = hashlib.md5(f1.read()).digest()\r\n with open(file2, \"rb\") as f2:\r\n h2 = hashlib.md5(f2.read()).digest()\r\n return h1 == h2",
"def _hash_file(file: Union[str, Path], md5: Hash) -> Hash:\n if isinstance(file, str) and file.lower().startswith(\"file://\"):\n file = unquote(urlparse(file).path)\n if not Path(file).is_file():\n raise ValueError(str(file) + \" is not a valid file\")\n with open(file, \"rb\") as f:\n while True:\n data = f.read(BUF_SIZE)\n if not data:\n break\n md5.update(data)\n return md5",
"def hash_file(filename):\n\n # make a hash object\n h = hashlib.sha256()\n\n # open file for reading in binary mode\n with open(filename,'rb') as file:\n\n # loop till the end of the file\n chunk = 0\n while chunk != b'':\n # read only 1024 bytes at a time\n chunk = file.read(1024)\n h.update(chunk)\n\n # return the hex representation of digest\n return h.hexdigest()",
"def checksumFile(filename):\n return md5File(filename)",
"def verification(file_name: str) -> None:\n print(\"Verification process...\")\n file_name = os.path.join('data', file_name)\n\n file1 = open(\"data/key.txt\", \"r\")\n file2 = open(\"data/signature.txt\", \"r\")\n p = int(file1.readline().rstrip())\n q = int(file1.readline().rstrip())\n g = int(file1.readline().rstrip())\n h = int(file1.readline().rstrip())\n\n c1 = int(file2.readline().rstrip())\n c2 = int(file2.readline().rstrip())\n print('c1 = ', c1)\n print('c2 = ', c2)\n\n t1 = sha_hash(file_name)\n print('hash = ', t1)\n inverseC2 = compute_inverse(c2, q)\n t1 = (t1 * inverseC2) % q\n\n t2 = compute_inverse(c2, q)\n t2 = (t2 * c1) % q\n\n valid1 = square_multiply(g, t1, p)\n valid2 = square_multiply(h, t2, p)\n valid = ((valid1 * valid2) % p) % q\n if valid == c1:\n print(\"Valid signature\")\n else:\n print(\"Invalid signature\")"
] | [
"0.7924767",
"0.74817306",
"0.72795886",
"0.71345603",
"0.6770209",
"0.6651307",
"0.65227723",
"0.65150875",
"0.64636904",
"0.63894236",
"0.6387766",
"0.6380768",
"0.6302816",
"0.62753654",
"0.6221864",
"0.6216318",
"0.6211538",
"0.6210252",
"0.6190917",
"0.6177316",
"0.61705214",
"0.6169289",
"0.6149175",
"0.61171216",
"0.6109018",
"0.60961616",
"0.60840786",
"0.6064134",
"0.60419995",
"0.60219365"
] | 0.7905711 | 1 |
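A minimal usage sketch of the `validate_file` document above, assuming the function and its `_hash_file` helper are importable from the surrounding module; the file contents and names below are invented for illustration:

import hashlib
import tempfile

# Write a small file and compute its reference SHA-256 with hashlib.
with tempfile.NamedTemporaryFile(delete=False, suffix=".bin") as f:
    f.write(b"hello world")
    fpath = f.name

expected = hashlib.sha256(b"hello world").hexdigest()  # 64 hex chars, so 'auto' resolves to sha256

print(validate_file(fpath, expected))                   # True: digest matches
print(validate_file(fpath, "0" * 32, algorithm="md5"))  # False: wrong md5 digest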
Downloads a file from a URL if it is not already in the cache. By default the file at the url `origin` is downloaded to the cache_dir `/tmp/datasets`. Files in tar, tar.gz, tar.bz, and zip formats can also be extracted. Passing a hash will verify the file after download. The command line programs `shasum` and `sha256sum` can compute the hash. Arguments | def get_file(origin,
fname=None,
# untar=False,
md5_hash=None,
file_hash=None,
cache_dir=None,
cache_subdir=None,
hash_algorithm='auto',
extract=False,
archive_format='auto'):
if fname is None:
fname = os.path.basename(origin)
if fname == '':
raise Exception("Please specify fname")
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
if cache_dir is None:
cache_dir = '/tmp/datasets'
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
if not os.access(cache_dir, os.W_OK):
raise Exception("Can't write to {}".format(cache_dir))
if cache_subdir is not None:
datadir = os.path.join(cache_dir, cache_subdir)
else:
datadir = cache_dir
if not os.path.exists(datadir):
os.makedirs(datadir)
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print('A local file was found, but it seems to be '
'incomplete or outdated because the ' + hash_algorithm +
' file hash does not match the original value of ' +
file_hash + ' so we will re-download the data.')
download = True
else:
download = True
if download:
print('Downloading data from', origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
                if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
            except HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
            except URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_file(fname,\n origin,\n untar=False,\n md5_hash=None,\n file_hash=None,\n cache_subdir='datasets',\n hash_algorithm='auto',\n extract=False,\n archive_format='auto',\n cache_dir=None):\n if cache_dir is None:\n cache_dir = os.path.join(os.path.expanduser('~'), '.keras')\n if md5_hash is not None and file_hash is None:\n file_hash = md5_hash\n hash_algorithm = 'md5'\n datadir_base = os.path.expanduser(cache_dir)\n if not os.access(datadir_base, os.W_OK):\n datadir_base = os.path.join('/tmp', '.keras')\n datadir = os.path.join(datadir_base, cache_subdir)\n _makedirs_exist_ok(datadir)\n\n if untar:\n untar_fpath = os.path.join(datadir, fname)\n fpath = untar_fpath + '.tar.gz'\n else:\n fpath = os.path.join(datadir, fname)\n\n download = False\n if os.path.exists(fpath):\n # File found; verify integrity if a hash was provided.\n if file_hash is not None:\n if not validate_file(fpath, file_hash, algorithm=hash_algorithm):\n print('A local file was found, but {} file hash not match original value of {}.'.format(\n hash_algorithm, file_hash))\n download = True\n else:\n download = True\n\n if download:\n print('Downloading data from', origin)\n\n class ProgressTracker(object):\n # Maintain progbar for the lifetime of download.\n # This design was chosen for Python 2.7 compatibility.\n progbar = None\n\n def dl_progress(count, block_size, total_size):\n if ProgressTracker.progbar is None:\n if total_size == -1:\n total_size = None\n ProgressTracker.progbar = Progbar(total_size)\n else:\n ProgressTracker.progbar.update(count * block_size)\n\n error_msg = 'URL fetch failure on {}: {} -- {}'\n try:\n try:\n urlretrieve(origin, fpath, dl_progress)\n except HTTPError as e:\n raise Exception(error_msg.format(origin, e.code, e.msg))\n except URLError as e:\n raise Exception(error_msg.format(origin, e.errno, e.reason))\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(fpath):\n os.remove(fpath)\n raise e\n ProgressTracker.progbar = None\n\n if untar:\n if not os.path.exists(untar_fpath):\n _extract_archive(fpath, datadir, archive_format='tar')\n return untar_fpath\n\n if extract:\n _extract_archive(fpath, datadir, archive_format)\n\n return fpath",
"def get_file(fname,\n origin,\n untar=False,\n md5_hash=None,\n file_hash=None,\n cache_subdir='datasets',\n hash_algorithm='auto',\n extract=False,\n archive_format='auto',\n cache_dir=None):\n if cache_dir is None:\n cache_dir = os.path.join(os.path.expanduser('~'), '.keras')\n if md5_hash is not None and file_hash is None:\n file_hash = md5_hash\n hash_algorithm = 'md5'\n datadir_base = os.path.expanduser(cache_dir)\n if not os.access(datadir_base, os.W_OK):\n datadir_base = os.path.join('/tmp', '.keras')\n datadir = os.path.join(datadir_base, cache_subdir)\n _makedirs_exist_ok(datadir)\n\n fname = path_to_string(fname)\n\n if untar:\n untar_fpath = os.path.join(datadir, fname)\n fpath = untar_fpath + '.tar.gz'\n else:\n fpath = os.path.join(datadir, fname)\n\n download = False\n if os.path.exists(fpath):\n # File found; verify integrity if a hash was provided.\n if file_hash is not None:\n if not validate_file(fpath, file_hash, algorithm=hash_algorithm):\n print('A local file was found, but it seems to be '\n 'incomplete or outdated because the ' + hash_algorithm +\n ' file hash does not match the original value of ' + file_hash +\n ' so we will re-download the data.')\n download = True\n else:\n download = True\n\n if download:\n print('Downloading data from', origin)\n\n class ProgressTracker(object):\n # Maintain progbar for the lifetime of download.\n # This design was chosen for Python 2.7 compatibility.\n progbar = None\n\n def dl_progress(count, block_size, total_size):\n if ProgressTracker.progbar is None:\n if total_size == -1:\n total_size = None\n ProgressTracker.progbar = Progbar(total_size)\n else:\n ProgressTracker.progbar.update(count * block_size)\n\n error_msg = 'URL fetch failure on {}: {} -- {}'\n try:\n try:\n urlretrieve(origin, fpath, dl_progress)\n except urllib.error.HTTPError as e:\n raise Exception(error_msg.format(origin, e.code, e.msg))\n except urllib.error.URLError as e:\n raise Exception(error_msg.format(origin, e.errno, e.reason))\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(fpath):\n os.remove(fpath)\n raise\n ProgressTracker.progbar = None\n\n if untar:\n if not os.path.exists(untar_fpath):\n _extract_archive(fpath, datadir, archive_format='tar')\n return untar_fpath\n\n if extract:\n _extract_archive(fpath, datadir, archive_format)\n\n return fpath",
"def download(name, cache_dir=os.path.join('..', 'data')): #@save\n assert name in DATA_HUB, f\"{name} does not exist in {DATA_HUB}.\"\n url, sha1_hash = DATA_HUB[name]\n d2l.mkdir_if_not_exist(cache_dir)\n fname = os.path.join(cache_dir, url.split('/')[-1])\n if os.path.exists(fname):\n sha1 = hashlib.sha1()\n with open(fname, 'rb') as f:\n while True:\n data = f.read(1048576)\n if not data:\n break\n sha1.update(data)\n if sha1.hexdigest() == sha1_hash:\n return fname # Hit cache\n print(f'Downloading {fname} from {url}...')\n r = requests.get(url, stream=True, verify=True)\n with open(fname, 'wb') as f:\n f.write(r.content)\n return fname",
"def download(name, cache_dir=os.path.join('..', 'data')):\n assert name in DATA_HUB, f\"{name} does not exist in {DATA_HUB}.\"\n url, sha1_hash = DATA_HUB[name]\n os.makedirs(cache_dir, exist_ok=True)\n fname = os.path.join(cache_dir, url.split('/')[-1])\n if os.path.exists(fname):\n sha1 = hashlib.sha1()\n with open(fname, 'rb') as f:\n while True:\n data = f.read(1048576)\n if not data:\n break\n sha1.update(data)\n if sha1.hexdigest() == sha1_hash:\n return fname # Hit cache\n print(f'Downloading {fname} from {url}...')\n r = requests.get(url, stream=True, verify=True)\n with open(fname, 'wb') as f:\n f.write(r.content)\n return fname",
"def fetch_and_cache(data_url, file, data_dir=\"data\", force=False):\n data_dir = Path(data_dir)\n data_dir.mkdir(exist_ok = True)\n file_path = data_dir / Path(file)\n if force and file_path.exists():\n file_path.unlink()\n if force or not file_path.exists():\n print('Downloading...', end=' ')\n resp = requests.get(data_url)\n with file_path.open('wb') as f:\n f.write(resp.content)\n print('Done!')\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n else:\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n print(\"Using cached version that was downloaded (UTC):\", last_modified_time)\n return file_path",
"def fetch_and_cache(data_url, file, data_dir=\"data\", force=False):\n\n import requests\n from hashlib import md5\n from pathlib import Path\n\n data_dir = Path(data_dir)\n data_dir.mkdir(exist_ok=True)\n file_path = data_dir/Path(file)\n # If the file already exists and we want to force a download then\n # delete the file first so that the creation date is correct.\n if force and file_path.exists():\n file_path.unlink()\n if force or not file_path.exists():\n resp = requests.get(data_url, stream=True)\n file_size = int(resp.headers.get('content-length', 0))\n step = 40\n chunk_size = file_size//step\n with file_path.open('wb') as f:\n for chunk in resp.iter_content(chunk_size): # write file in chunks\n f.write(chunk)\n step -= 1\n print('[' + '#'*(41 - step) + (step)*' ' + ']\\r', end='')\n print(f\"\\nDownloaded {data_url.split('/')[-1]}!\")\n else:\n import time\n time_downloaded = time.ctime(file_path.stat().st_ctime)\n print(\"Using version already downloaded:\", time_downloaded)\n # Compute and print md5 hash of file, whether newly downloaded or not\n m5 = md5()\n m5.update(file_path.read_bytes())\n print(f\"MD5 hash of file: {m5.hexdigest()}\")\n return file_path",
"def get_file(\n fname,\n origin,\n untar=False,\n cache_subdir=\"datasets\",\n extract=False,\n archive_format=\"auto\",\n cache_dir=None,\n):\n if cache_dir is None:\n cache_dir = os.path.join(os.path.expanduser(\"~\"), \".keras\")\n datadir_base = os.path.expanduser(cache_dir)\n if not os.access(datadir_base, os.W_OK):\n datadir_base = os.path.join(\"/tmp\", \".keras\")\n datadir = os.path.join(datadir_base, cache_subdir)\n if not os.path.exists(datadir):\n os.makedirs(datadir)\n\n if untar:\n untar_fpath = os.path.join(datadir, fname)\n fpath = untar_fpath + \".tar.gz\"\n else:\n fpath = os.path.join(datadir, fname)\n\n download = False\n if os.path.exists(fpath):\n download = False\n else:\n download = True\n\n if download:\n print(\"Downloading data from\", origin)\n\n class ProgressTracker(object):\n # Maintain progbar for the lifetime of download.\n # This design was chosen for Python 2.7 compatibility.\n progbar = None\n\n def dl_progress(count, block_size, total_size):\n if ProgressTracker.progbar is None:\n if total_size == -1:\n total_size = None\n ProgressTracker.progbar = Progbar(total_size)\n else:\n ProgressTracker.progbar.update(count * block_size)\n\n error_msg = \"URL fetch failure on {}: {} -- {}\"\n try:\n try:\n urlretrieve(origin, fpath, dl_progress)\n except HTTPError as e:\n raise Exception(error_msg.format(origin, e.code, e.msg))\n except URLError as e:\n raise Exception(error_msg.format(origin, e.errno, e.reason))\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(fpath):\n os.remove(fpath)\n raise\n ProgressTracker.progbar = None\n\n if untar:\n if not os.path.exists(untar_fpath):\n _extract_archive(fpath, datadir, archive_format=\"tar\")\n return untar_fpath\n\n if extract:\n _extract_archive(fpath, datadir, archive_format)\n\n return fpath",
"def download_from_url(file_name: str, url: str, download_dir: str, cache_dir: Optional[str] = None):\n if not isinstance(url, str):\n raise TypeError(f\"{url} must be str type.\")\n if not isinstance(file_name, str):\n raise TypeError(f\"{file_name} must be str type.\")\n if not isinstance(download_dir, str):\n raise TypeError(f\"{download_dir} must be str type.\")\n\n if cache_dir is None:\n cache_dir = URDUHACK_DIRECTORY\n\n Path(cache_dir).mkdir(parents=True, exist_ok=True)\n tf.keras.utils.get_file(fname=file_name, origin=url, cache_subdir=download_dir, cache_dir=cache_dir, extract=True)",
"def download(url: str, checksum: str) -> bytes:\n cachepath = cachedir() / f\"{checksum}.data\"\n if cachepath.is_file():\n with open(cachepath, \"rb\") as f:\n content = f.read()\n else:\n print(\"downloading\", url, \"...\")\n content = requests.get(url).content\n cachepath.parent.mkdir(parents=True, exist_ok=True)\n with open(cachepath, \"wb\") as f:\n f.write(content)\n\n sha256 = hashlib.sha256()\n sha256.update(content)\n actual_checksum = sha256.hexdigest()\n if actual_checksum != checksum:\n raise ValueError(\n f\"Checksum mismatch of downloaded file {url}. \"\n f\"Expected: {checksum}. Actual: {actual_checksum}\"\n )\n return content",
"def get_from_cache(url, cache_dir=None, force_download=False, proxies=None):\n\tif cache_dir is None:\n\t\tcache_dir = PYTORCH_TRANSFORMERS_CACHE\n\tif sys.version_info[0] == 3 and isinstance (cache_dir, Path):\n\t\tcache_dir = str (cache_dir)\n\tif sys.version_info[0] == 2 and not isinstance (cache_dir, str):\n\t\tcache_dir = str (cache_dir)\n\n\tif not os.path.exists (cache_dir):\n\t\tos.makedirs (cache_dir)\n\n\t# Get eTag to add to filename, if it exists.\n\tif url.startswith (\"s3://\"):\n\t\tetag = s3_etag (url, proxies=proxies)\n\telse:\n\t\ttry:\n\t\t\tresponse = requests.head (url, allow_redirects=True, proxies=proxies)\n\t\t\tif response.status_code != 200:\n\t\t\t\tetag = None\n\t\t\telse:\n\t\t\t\tetag = response.headers.get (\"ETag\")\n\t\texcept EnvironmentError:\n\t\t\tetag = None\n\n\tif sys.version_info[0] == 2 and etag is not None:\n\t\tetag = etag.decode ('utf-8')\n\tfilename = url_to_filename (url, etag)\n\n\t# get cache path to put the file\n\tcache_path = os.path.join (cache_dir, filename)\n\n\t# If we don't have a connection (etag is None) and can't identify the file\n\t# try to get the last downloaded one\n\tif not os.path.exists (cache_path) and etag is None:\n\t\tmatching_files = fnmatch.filter (os.listdir (cache_dir), filename + '.*')\n\t\tmatching_files = list (filter (lambda s: not s.endswith ('.json'), matching_files))\n\t\tif matching_files:\n\t\t\tcache_path = os.path.join (cache_dir, matching_files[-1])\n\n\tif not os.path.exists (cache_path) or force_download:\n\t\t# Download to temporary file, then copy to cache dir once finished.\n\t\t# Otherwise you get corrupt cache entries if the download gets interrupted.\n\t\twith tempfile.NamedTemporaryFile () as temp_file:\n\t\t\tlogger.info (\"%s not found in cache or force_download set to True, downloading to %s\", url, temp_file.name)\n\n\t\t\t# GET file object\n\t\t\tif url.startswith (\"s3://\"):\n\t\t\t\ts3_get (url, temp_file, proxies=proxies)\n\t\t\telse:\n\t\t\t\thttp_get (url, temp_file, proxies=proxies)\n\n\t\t\t# we are copying the file before closing it, so flush to avoid truncation\n\t\t\ttemp_file.flush ()\n\t\t\t# shutil.copyfileobj() starts at the current position, so go to the start\n\t\t\ttemp_file.seek (0)\n\n\t\t\tlogger.info (\"copying %s to cache at %s\", temp_file.name, cache_path)\n\t\t\twith open (cache_path, 'wb') as cache_file:\n\t\t\t\tshutil.copyfileobj (temp_file, cache_file)\n\n\t\t\tlogger.info (\"creating metadata file for %s\", cache_path)\n\t\t\tmeta = {'url': url, 'etag': etag}\n\t\t\tmeta_path = cache_path + '.json'\n\t\t\twith open (meta_path, 'w') as meta_file:\n\t\t\t\toutput_string = json.dumps (meta)\n\t\t\t\tif sys.version_info[0] == 2 and isinstance (output_string, str):\n\t\t\t\t\toutput_string = unicode (output_string, 'utf-8') # The beauty of python 2\n\t\t\t\tmeta_file.write (output_string)\n\n\t\t\tlogger.info (\"removing temp file %s\", temp_file.name)\n\n\treturn cache_path",
"def _fetch_lzma_file(origin: str, filename: str):\n # Read and decompress in approximately megabyte chunks.\n chunk_size = 2**20\n decompressor = lzma.LZMADecompressor()\n with urllib.request.urlopen(origin) as in_stream, tf.io.gfile.GFile(\n filename, 'wb'\n ) as out_stream:\n length = in_stream.headers.get('content-length')\n if length is not None:\n total_size = int(length)\n else:\n total_size = None\n download_chunk = in_stream.read(chunk_size)\n with tqdm.tqdm(\n total=total_size, desc=f'Downloading {url_basename(origin)}'\n ) as progbar:\n while download_chunk:\n progbar.update(len(download_chunk))\n out_stream.write(decompressor.decompress(download_chunk))\n download_chunk = in_stream.read(chunk_size)",
"def download_url(url, fd, handle=None):\n return _librepo.download_url(handle, url, fd)",
"def download(url, path=None, overwrite=False, sha1_hash=None):\n if path is None:\n fname = url.split('/')[-1]\n else:\n path = os.path.expanduser(path)\n if os.path.isdir(path):\n fname = os.path.join(path, url.split('/')[-1])\n else:\n fname = path\n\n if overwrite or not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)):\n dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n print('Downloading %s from %s...'%(fname, url))\n r = requests.get(url, stream=True)\n if r.status_code != 200:\n raise RuntimeError(\"Failed downloading url %s\"%url)\n total_length = r.headers.get('content-length')\n with open(fname, 'wb') as f:\n if total_length is None: # no content length header\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n else:\n total_length = int(total_length)\n for chunk in tqdm(r.iter_content(chunk_size=1024),\n total=int(total_length / 1024. + 0.5),\n unit='KB', unit_scale=False, dynamic_ncols=True):\n f.write(chunk)\n\n if sha1_hash and not check_sha1(fname, sha1_hash):\n raise UserWarning('File {} is downloaded but the content hash does not match. ' \\\n 'The repo may be outdated or download may be incomplete. ' \\\n 'If the \"repo_url\" is overridden, consider switching to ' \\\n 'the default repo.'.format(fname))\n\n return fname",
"def get_from_cache(url, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n # Get eTag to add to filename, if it exists.\n if url.startswith(\"s3://\"):\n etag = s3_etag(url)\n else:\n response = requests.head(url, allow_redirects=True)\n if response.status_code != 200:\n raise IOError(\"HEAD request failed for url {} with status code {}\"\n .format(url, response.status_code))\n etag = response.headers.get(\"ETag\")\n\n filename = url_to_filename(url, etag)\n\n # get cache path to put the file\n cache_path = os.path.join(cache_dir, filename)\n\n if not os.path.exists(cache_path):\n # Download to temporary file, then copy to cache dir once finished.\n # Otherwise you get corrupt cache entries if the download gets interrupted.\n with tempfile.NamedTemporaryFile() as temp_file:\n logger.info(\"%s not found in cache, downloading to %s\", url, temp_file.name)\n\n # GET file object\n if url.startswith(\"s3://\"):\n s3_get(url, temp_file)\n else:\n http_get(url, temp_file)\n\n # we are copying the file before closing it, so flush to avoid truncation\n temp_file.flush()\n # shutil.copyfileobj() starts at the current position, so go to the start\n temp_file.seek(0)\n\n logger.info(\"copying %s to cache at %s\", temp_file.name, cache_path)\n with open(cache_path, 'wb') as cache_file:\n shutil.copyfileobj(temp_file, cache_file)\n\n logger.info(\"creating metadata file for %s\", cache_path)\n meta = {'url': url, 'etag': etag}\n meta_path = cache_path + '.json'\n with open(meta_path, 'w', encoding=\"utf-8\") as meta_file:\n json.dump(meta, meta_file)\n\n logger.info(\"removing temp file %s\", temp_file.name)\n\n return cache_path",
"def cached_file(\n path_or_repo_id: Union[str, os.PathLike],\n filename: str,\n cache_dir: Optional[Union[str, os.PathLike]] = None,\n force_download: bool = False,\n resume_download: bool = False,\n proxies: Optional[Dict[str, str]] = None,\n token: Optional[Union[bool, str]] = None,\n revision: Optional[str] = None,\n local_files_only: bool = False,\n subfolder: str = \"\",\n repo_type: Optional[str] = None,\n user_agent: Optional[Union[str, Dict[str, str]]] = None,\n _raise_exceptions_for_missing_entries: bool = True,\n _raise_exceptions_for_connection_errors: bool = True,\n _commit_hash: Optional[str] = None,\n **deprecated_kwargs,\n):\n use_auth_token = deprecated_kwargs.pop(\"use_auth_token\", None)\n if use_auth_token is not None:\n warnings.warn(\n \"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers.\", FutureWarning\n )\n if token is not None:\n raise ValueError(\"`token` and `use_auth_token` are both specified. Please set only the argument `token`.\")\n token = use_auth_token\n\n # Private arguments\n # _raise_exceptions_for_missing_entries: if False, do not raise an exception for missing entries but return\n # None.\n # _raise_exceptions_for_connection_errors: if False, do not raise an exception for connection errors but return\n # None.\n # _commit_hash: passed when we are chaining several calls to various files (e.g. when loading a tokenizer or\n # a pipeline). If files are cached for this commit hash, avoid calls to head and get from the cache.\n if is_offline_mode() and not local_files_only:\n logger.info(\"Offline mode: forcing local_files_only=True\")\n local_files_only = True\n if subfolder is None:\n subfolder = \"\"\n\n path_or_repo_id = str(path_or_repo_id)\n full_filename = os.path.join(subfolder, filename)\n if os.path.isdir(path_or_repo_id):\n resolved_file = os.path.join(os.path.join(path_or_repo_id, subfolder), filename)\n if not os.path.isfile(resolved_file):\n if _raise_exceptions_for_missing_entries:\n raise EnvironmentError(\n f\"{path_or_repo_id} does not appear to have a file named {full_filename}. 
Checkout \"\n f\"'https://huggingface.co/{path_or_repo_id}/{revision}' for available files.\"\n )\n else:\n return None\n return resolved_file\n\n if cache_dir is None:\n cache_dir = TRANSFORMERS_CACHE\n if isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n if _commit_hash is not None and not force_download:\n # If the file is cached under that commit hash, we return it directly.\n resolved_file = try_to_load_from_cache(\n path_or_repo_id, full_filename, cache_dir=cache_dir, revision=_commit_hash, repo_type=repo_type\n )\n if resolved_file is not None:\n if resolved_file is not _CACHED_NO_EXIST:\n return resolved_file\n elif not _raise_exceptions_for_missing_entries:\n return None\n else:\n raise EnvironmentError(f\"Could not locate {full_filename} inside {path_or_repo_id}.\")\n\n user_agent = http_user_agent(user_agent)\n try:\n # Load from URL or cache if already cached\n resolved_file = hf_hub_download(\n path_or_repo_id,\n filename,\n subfolder=None if len(subfolder) == 0 else subfolder,\n repo_type=repo_type,\n revision=revision,\n cache_dir=cache_dir,\n user_agent=user_agent,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n token=token,\n local_files_only=local_files_only,\n )\n except GatedRepoError as e:\n raise EnvironmentError(\n \"You are trying to access a gated repo.\\nMake sure to request access at \"\n f\"https://huggingface.co/{path_or_repo_id} and pass a token having permission to this repo either \"\n \"by logging in with `huggingface-cli login` or by passing `token=<your_token>`.\"\n ) from e\n except RepositoryNotFoundError as e:\n raise EnvironmentError(\n f\"{path_or_repo_id} is not a local folder and is not a valid model identifier \"\n \"listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a token \"\n \"having permission to this repo either by logging in with `huggingface-cli login` or by passing \"\n \"`token=<your_token>`\"\n ) from e\n except RevisionNotFoundError as e:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists \"\n \"for this model name. Check the model page at \"\n f\"'https://huggingface.co/{path_or_repo_id}' for available revisions.\"\n ) from e\n except LocalEntryNotFoundError as e:\n # We try to see if we have a cached version (not up to date):\n resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision)\n if resolved_file is not None and resolved_file != _CACHED_NO_EXIST:\n return resolved_file\n if not _raise_exceptions_for_missing_entries or not _raise_exceptions_for_connection_errors:\n return None\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this file, couldn't find it in the\"\n f\" cached files and it looks like {path_or_repo_id} is not the path to a directory containing a file named\"\n f\" {full_filename}.\\nCheckout your internet connection or see how to run the library in offline mode at\"\n \" 'https://huggingface.co/docs/transformers/installation#offline-mode'.\"\n ) from e\n except EntryNotFoundError as e:\n if not _raise_exceptions_for_missing_entries:\n return None\n if revision is None:\n revision = \"main\"\n raise EnvironmentError(\n f\"{path_or_repo_id} does not appear to have a file named {full_filename}. 
Checkout \"\n f\"'https://huggingface.co/{path_or_repo_id}/{revision}' for available files.\"\n ) from e\n except HTTPError as err:\n # First we try to see if we have a cached version (not up to date):\n resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision)\n if resolved_file is not None and resolved_file != _CACHED_NO_EXIST:\n return resolved_file\n if not _raise_exceptions_for_connection_errors:\n return None\n\n raise EnvironmentError(f\"There was a specific connection error when trying to load {path_or_repo_id}:\\n{err}\")\n\n return resolved_file",
"def retrieve(\n url,\n known_hash,\n fname=None,\n path=None,\n processor=None,\n downloader=None,\n progressbar=False,\n):\n if path is None:\n path = os_cache(\"pooch\")\n if fname is None:\n fname = unique_file_name(url)\n # Create the local data directory if it doesn't already exist and make the\n # path absolute.\n path = cache_location(path, env=None, version=None)\n make_local_storage(path)\n\n full_path = path.resolve() / fname\n action, verb = download_action(full_path, known_hash)\n\n if action in (\"download\", \"update\"):\n get_logger().info(\n \"%s data from '%s' to file '%s'.\",\n verb,\n url,\n str(full_path),\n )\n\n if downloader is None:\n downloader = choose_downloader(url, progressbar=progressbar)\n\n stream_download(url, full_path, known_hash, downloader, pooch=None)\n\n if known_hash is None:\n get_logger().info(\n \"SHA256 hash of downloaded file: %s\\n\"\n \"Use this value as the 'known_hash' argument of 'pooch.retrieve'\"\n \" to ensure that the file hasn't changed if it is downloaded again\"\n \" in the future.\",\n file_hash(str(full_path)),\n )\n\n if processor is not None:\n return processor(str(full_path), action, None)\n\n return str(full_path)",
"def get_from_cache(cls, target_filename):\n is_cached = cls.is_remote_cached(target_filename)\n if is_cached:\n cache = cls.CACHE_BACKEND()\n cache.download(is_cached, target_filename)\n logger.debug('File %r was downloaded from %r', target_filename, cls.CACHE_BACKEND)\n else:\n target_filename = None\n return target_filename",
"def download_dataset_from_url(dataset_url_md5, name, to_path):\n # Prevent concurrent FileExistsError\n try:\n if not os.path.exists(to_path):\n os.mkdir(to_path)\n except Exception:\n pass\n\n dataset_url = dataset_url_md5[\"url\"]\n dataset_md5 = dataset_url_md5[\"md5\"]\n\n dataset_filepath = os.path.join(to_path, name)\n\n if os.path.exists(dataset_filepath):\n local_file_md5 = get_file_md5(dataset_filepath)\n if local_file_md5 == dataset_md5:\n return dataset_filepath\n else:\n print(f\"Local dataset {name} is broken, ready to re-download.\")\n\n print(f'Downloading dataset: {dataset_url} to {dataset_filepath}')\n urllib.request.urlretrieve(dataset_url, dataset_filepath)\n\n if not os.path.exists(dataset_filepath):\n raise IOError(f\"Failed to download dataset from {dataset_url}\")\n return dataset_filepath",
"def downloadfile(self):\n req = requests.get(self.url, stream=True)\n mdsha256 = hashlib.sha256()\n with gzip.open(self.file_path, \"wb\") as gfile:\n for line in req.iter_lines():\n if line:\n gfile.write(line + b\"\\n\")\n mdsha256.update(line + b\"\\n\")\n\n with open(self.sha_file_name, \"wb\") as sfile:\n sfile.write(mdsha256.digest())\n\n sha256 = mdsha256.digest()\n if self.sha256 != sha256:\n self.sha256 = sha256\n print(\"File updated!\")\n else:\n print(\"File not updated!\")",
"def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None):\n\tif cache_dir is None:\n\t\tcache_dir = PYTORCH_TRANSFORMERS_CACHE\n\tif sys.version_info[0] == 3 and isinstance (url_or_filename, Path):\n\t\turl_or_filename = str (url_or_filename)\n\tif sys.version_info[0] == 3 and isinstance (cache_dir, Path):\n\t\tcache_dir = str (cache_dir)\n\n\tparsed = urlparse (url_or_filename)\n\n\tif parsed.scheme in ('http', 'https', 's3'):\n\t\t# URL, so get it from the cache (downloading if necessary)\n\t\treturn get_from_cache (url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies)\n\telif os.path.exists (url_or_filename):\n\t\t# File, and it exists.\n\t\treturn url_or_filename\n\telif parsed.scheme == '':\n\t\t# File, but it doesn't exist.\n\t\traise EnvironmentError (\"file {} not found\".format (url_or_filename))\n\telse:\n\t\t# Something unknown\n\t\traise ValueError (\"unable to parse {} as a URL or as a local path\".format (url_or_filename))",
"def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None",
"def get_file(url, file_name=None):\n cache_dir = os.path.join(os.path.expanduser(\"~\"), \".jhML\")\n\n if file_name is None:\n file_name = url[url.rfind('/') + 1:]\n file_path = os.path.join(cache_dir, file_name)\n\n if not os.path.exists(cache_dir):\n os.mkdir(cache_dir)\n\n if os.path.exists(file_path):\n return file_path\n\n print(\"Downloading: \" + file_name)\n try:\n urllib.request.urlretrieve(url, file_path, show_progress)\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(file_path):\n os.remove(file_path)\n raise\n print(\" Done\")\n\n return file_path",
"def cache_download(url, path):\n # Prep cache path and make necessary dirs\n cache_path = os.path.join(CAMD_CACHE, path)\n\n # Download and write file\n if not os.path.isfile(cache_path):\n makedirs_p(os.path.split(cache_path)[0])\n r = requests.get(url, stream=True)\n total_size = int(r.headers.get('content-length', 0))\n block_size = 1024 # 1 Kibibyte\n t = tqdm(total=total_size, unit='iB', unit_scale=True)\n with open(cache_path, 'wb') as f:\n for data in r.iter_content(block_size):\n t.update(len(data))\n f.write(data)",
"def cached_path(url_or_filename, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE\n if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in ('http', 'https', 's3'):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == '':\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))",
"def install_from_zip(url, filename, path=os.getcwd(), filesha=None):\n\n if (os.path.isfile(os.path.join(path, filename))\n and filesha is not None\n and sha256sum(filename) == filesha):\n log.info('File ' + filename + ' found in ' + path)\n return\n\n cache_dir = config.cache_dir\n zip_file_name = os.path.basename(url)\n zip_file_path = os.path.join(cache_dir, zip_file_name)\n\n if zip_file_name not in os.listdir(cache_dir):\n log.info('Downloading ' + filename + ' to ' + zip_file_path)\n urllib.request.urlretrieve(url, zip_file_path)\n\n with zipfile.ZipFile(zip_file_path, 'r') as zip_obj:\n log.info('Extracting ' + filename + ' to ' + path)\n zip_obj.extract(filename, path=path)",
"def cache_file(url, prefix):\n cache_filepath = _get_cached_filepath(\n prefix=prefix,\n url=url,\n )\n # If the file exists, return path.\n if os.path.isfile(cache_filepath):\n logger.info('Returning cached file for {}.'.format(url))\n return cache_filepath\n # If the file does not exist, download and return path.\n else:\n r = requests.get(url, verify=False)\n\n with open(cache_filepath, 'wb') as f:\n f.write(r.content)\n\n logger.info('Caching file for {}.'.format(url))\n return cache_filepath",
"def get_from_cache(url, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n # Get eTag to add to filename, if it exists.\n if url.startswith(\"s3://\"):\n etag = s3_etag(url)\n else:\n response = requests.head(url, allow_redirects=True)\n if response.status_code != 200:\n raise IOError(\"HEAD request failed for url {} with status code {}\".format(url, response.status_code))\n etag = response.headers.get(\"ETag\")\n\n filename = url_to_filename(url, etag)\n\n # get cache path to put the file\n cache_path = os.path.join(cache_dir, filename)\n\n if not os.path.exists(cache_path):\n # Download to temporary file, then copy to cache dir once finished.\n # Otherwise you get corrupt cache entries if the download gets interrupted.\n with tempfile.NamedTemporaryFile() as temp_file:\n logger.info(\"%s not found in cache, downloading to %s\", url, temp_file.name)\n\n # GET file object\n if url.startswith(\"s3://\"):\n s3_get(url, temp_file)\n else:\n http_get(url, temp_file)\n\n # we are copying the file before closing it, so flush to avoid truncation\n temp_file.flush()\n # shutil.copyfileobj() starts at the current position, so go to the start\n temp_file.seek(0)\n\n logger.info(\"copying %s to cache at %s\", temp_file.name, cache_path)\n with open(cache_path, \"wb\") as cache_file:\n shutil.copyfileobj(temp_file, cache_file)\n\n logger.info(\"creating metadata file for %s\", cache_path)\n meta = {\"url\": url, \"etag\": etag}\n meta_path = cache_path + \".json\"\n with open(meta_path, \"w\", encoding=\"utf-8\") as meta_file:\n json.dump(meta, meta_file)\n\n logger.info(\"removing temp file %s\", temp_file.name)\n\n return cache_path",
"def download_file_cached(file_url: str, location: str) -> None:\n\titem = os.path.basename(location)\n\n\tlocal = cache_find(item)\n\n\tif local is None:\n\t\t# Cached item doesn't exist\n\t\tcache_create()\n\t\tdownload_file(file_url, \"Cached/\" + item)\n\t\tcopy_file(\"Cached/\" + item, location)\n\t\treturn\n\n\t# Copy file from cache to location\n\tuux.show_debug(\"Cache hit for \" + item)\n\tcopy_file(local, location)",
"def _getFile(url, cachedFile=True, return_filename=False):\n assert url, \"WHY are you trying to load an empty string url?!?! Nothing good will come of this! In fact, I will assure that! %s\" % (url)\n md5 = hashlib.md5(url).hexdigest()\n filename = os.path.join(config.WEB_CACHE_DIR, md5)\n if os.path.exists(filename) and cachedFile:\n ret = open(filename, 'r').read()\n else:\n opener = urllib.FancyURLopener()\n ret = opener.open(url).read()\n o = open(filename, 'wb') # had to open in binary mode so PIL's Image.Open() function would work\n o.write(ret)\n o.close()\n if return_filename:\n return filename\n else:\n return ret",
"def maybe_download(url: str,\n cache_dir: Optional[str] = None,\n progress_: Callable[[int], Iterator[int]] = progress) -> str:\n # TODO(wuke): Avoid race conditions when downloading the same file from\n # different threads/processes at the same time.\n if cache_dir is None:\n cache_dir = default_cache_dir()\n os.makedirs(cache_dir, exist_ok=True)\n path = os.path.join(cache_dir,\n os.path.basename(urllib.parse.urlparse(url).path))\n if os.path.exists(path):\n log(f'Reusing cached file {path!r}')\n else:\n log(f'Downloading {url!r} to {path!r}')\n with open(path + '.partial', 'wb') as fo:\n r = requests.get(url, stream=True)\n r.raise_for_status()\n length = int(r.headers['content-length'])\n block_size = 1 << 18\n for _ in progress_((length + block_size - 1) // block_size):\n fo.write(r.raw.read(block_size))\n os.rename(path + '.partial', path)\n return path"
] | [
"0.7127393",
"0.69456184",
"0.6733968",
"0.66419476",
"0.6417536",
"0.63657403",
"0.6318456",
"0.63171995",
"0.62680537",
"0.6157976",
"0.61561304",
"0.61438686",
"0.61410165",
"0.6120135",
"0.60910845",
"0.60345376",
"0.60229146",
"0.5977921",
"0.59734684",
"0.59687644",
"0.59445983",
"0.5944508",
"0.5937387",
"0.5918953",
"0.5910881",
"0.589486",
"0.58808994",
"0.5879698",
"0.58783084",
"0.5855376"
] | 0.74226344 | 0 |
Transform the information of the timetriggered frame to a string | def __str__(self):
return_text = "Time-Triggered Frame information =>\n"
return_text += " Sender id : " + str(self.__sender_id) + "\n"
return_text += " Receivers ids : " + str(self.__receivers_id) + "\n"
return_text += " Path : " + str(self.__paths) + "\n"
return_text += " End_to_End : " + str(self.__end_to_end_delay) + " nanoseconds\n"
return_text += " Period : " + str(self.__period) + " nanoseconds\n"
return_text += " Starting : " + str(self.__starting_time) + " nanoseconds\n"
return_text += " Deadline : " + str(self.__deadline) + " nanoseconds\n"
return_text += " Size : " + str(self.__size) + " bytes"
return return_text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __str__(self):\n\n nframes = len(self.frames)\n if nframes == 0:\n return \"\"\n elif nframes == 1:\n frame, = self.frames\n return str(frame)\n else:\n frames = sorted(self.frames)\n start = prev = frames[0] # First frame.\n step = None\n subranges = []\n for end in frames[1:]: # Frame starting from the second in the list.\n\n if step is None: # Step is still none.\n step = end - prev # Find and set step.\n\n if prev + step != end: # If the sequence is broken.\n subranges.append((start, prev, step)) # Create a subrange.\n step = None # Reset step.\n start = end # Re-start start.\n prev = end # The next previous.\n\n else:\n subranges.append((start, end, step))\n\n return \", \".join(format_subrange(start, end, step) for (start, end, step) in subranges)",
"def _generate_bvh_frame_string(self,euler_frames, frame_time):\n\n frame_parameter_string = \"MOTION\\n\"\n frame_parameter_string += \"Frames: \" + str(len(euler_frames)) + \"\\n\"\n frame_parameter_string += \"Frame Time: \" + str(frame_time) + \"\\n\"\n for frame in euler_frames:\n frame_parameter_string += ' '.join([str(f) for f in frame])\n frame_parameter_string += '\\n'\n\n return frame_parameter_string",
"def _make_title(self, ind):\n start = self.df_event_time.loc[ind, 'time']\n date = np.datetime_as_string(start.astype('<M8[ns]'), unit='s')\n start_ns = start - (start // 10**9) * 10**9\n end = self.df_event_time.loc[ind, 'endtime']\n end_ns = end - start + start_ns\n return ''.join((f'##Event {ind} from run {self.run_id}\\n',\n f'##Recorded at ({date[:10]} {date[10:]}) UTC ',\n f'{start_ns} ns - {end_ns} ns'))",
"def get_timeframes_info_as_strings(tframes):\n employers_str = ' | '.join(ef.employer for ef in tframes)\n timeframes_str = ' | '.join(f'{get_date_string(ef.start)} - {get_date_string(ef.end)}' for ef in tframes)\n return employers_str, timeframes_str",
"def output(self):\n if self.after_sunrise:\n return \"%02d:%02d:%02dR\" % self.time\n if self.after_sunset:\n return \"%02d:%02d:%02dT\" % self.time\n return \"%02d:%02d:%02d\" % self.time",
"def telemetry_to_string(self, telemetry):\n _log_line = \"%s,%s,%d,%.5f,%.5f,%.1f,%.1f,%s,%.3f\\n\" % (\n telemetry['datetime'],\n telemetry['id'],\n telemetry['frame'],\n telemetry['lat'],\n telemetry['lon'],\n telemetry['alt'],\n telemetry['temp'],\n telemetry['type'],\n telemetry['freq_float'])\n\n # TODO: Add Aux data, if it exists.\n\n return _log_line",
"def _get_timestamp() -> str:\n\n dt = timezone(\"UTC\").localize(datetime.utcnow()).strftime(\"%b. %d, %Y#%H:%M UTC\")\n date, time = dt.split(\"#\")\n return f\"Event Timestamp: 📅 {date} 🕒 {time}\"",
"def coverage_time_str(info_df: DataFrame) -> str:\n start = attribute_value(info_df, \"time_coverage_start\")\n start_dt = parse_time(start)\n\n now = datetime.now()\n now = now.replace(hour=0, minute=0, second=0, microsecond=0)\n\n if start_dt < now:\n start = now.isoformat() + \"Z\"\n end = attribute_value(info_df, \"time_coverage_end\")\n\n return f\"[({start}):1:({end})]\"",
"def timeStamp():\n import time\n return str(time.strftime(\"%a %d %b %Y %I:%M:%S %p\"))",
"def _live_title(self, fps):\n\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n return 'Live %d ms %.2f FPS' % (et, fps)",
"def get_at_as_string(self):\n\n return self.at.strftime(\"%Y-%m-%dT%H:%M:%S.000Z\")",
"def get_entry_string(self):\n return f\"{self.get_time_string()} {self.mode} {self.radar}\"",
"def frame_data(self) -> str:\n pass",
"def report(self) -> str:\n return REPORT_TIMEFRAME.format(self.name,\n self.times_run,\n self.average_runtime)",
"def _timestamp(self):\n\n retval = []\n\n if self.log_level >= _Log.DEBUG:\n retval.append('%f: ' % (time.time() - self.start_time,))\n\n return ''.join(retval)",
"def ffmpeg_frame_string(filename, frame_time=None, frame_number=None):\n if frame_number is not None:\n # If specified by number, convert to time\n frame_rate = get_video_params(filename)[2]\n use_frame_time = (old_div(frame_number, float(frame_rate))) - .001\n use_frame_time = old_div(np.floor(use_frame_time * 1000), 1000.)\n \n elif frame_time is not None:\n frame_rate = get_video_params(filename)[2]\n use_frame_time = frame_time - (old_div(1., (2 * frame_rate)))\n \n else:\n raise ValueError(\"must specify frame by time or number\")\n \n use_frame_string = '%0.3f' % use_frame_time\n return use_frame_string",
"def __str__(self):\n out_str = \"\\n\".join(`\"%.5f, %.5f, %.1f, %s, %s\" % (point[0], point[1], point[2], point[3], point[4])` for point in self.__traectory_list)\n return \"\\'x, y, altitude, capture time, capture date'\\n\"+out_str",
"def toString(self):\r\n if self.mesgType == MULTIPLEXER_FRAME_NOT_INIT:\r\n raise AttributeError, \"Frame is not yet initialized!\"\r\n \r\n # Create header\r\n frameHeader = MULTIPLEXER_FRAME_DIVIDER + str(self.mesgType) + MULTIPLEXER_FRAME_DIVIDER + str(self.contentLength) + \\\r\n MULTIPLEXER_FRAME_DIVIDER + str(self.referenceID) + MULTIPLEXER_FRAME_DIVIDER\r\n \r\n # Determine variable header size\r\n headerSize = str(len(frameHeader)).rjust(MULTIPLEXER_FRAME_HEADER_DIGITS,\"0\")\r\n \r\n if len(headerSize) > MULTIPLEXER_FRAME_HEADER_DIGITS:\r\n raise AttributeError, \"Frame Header too large! Max:\"+ MULTIPLEXER_FRAME_HEADER_DIGITS+ \" Actual:\"+ len(headerSize)\r\n \r\n return headerSize + frameHeader + self.content",
"def __str__(self):\n return str(self.time)+\" \" + \" \".join(ParticlePhaseCoordinates.__str__(self))",
"def time_form(gdf):\n gdf['time'] = gdf['time'].dt.strftime(\"%Y-%m-%dT%H:%M:%S\")\n \n return gdf",
"def __str__(self) -> str:\n\n # Get current time\n t = timer()\n # Length of label field, calculated from max label length\n fldlen = [len(lbl) for lbl in self.t0] + [\n len(self.default_label),\n ]\n lfldln = max(fldlen) + 2\n # Header string for table of timers\n s = f\"{'Label':{lfldln}s} Accum. Current\\n\"\n s += \"-\" * (lfldln + 25) + \"\\n\"\n # Construct table of timer details\n for lbl in sorted(self.t0):\n td = self.td[lbl]\n if self.t0[lbl] is None:\n ts = \" Stopped\"\n else:\n ts = f\" {(t - self.t0[lbl]):.2e} s\" % (t - self.t0[lbl]) # type: ignore\n s += f\"{lbl:{lfldln}s} {td:.2e} s {ts}\\n\"\n\n return s",
"def rrsigtime2string(rrsig_time):\n return time.strftime(\"%Y-%m-%d-%H:%M\", time.gmtime(rrsig_time))",
"def get_frame_time(self, f):\n return f * self.get_frame_duration()",
"def _get_debug_message(self):\r\n if DEBUG:\r\n return 'fps = %d' % self.timer.get_fps()",
"def frames_to_tc(frames):\n ffps = 25.0\n\n # Number of frames in an hour\n frames_per_hour = int(round(ffps * 60 * 60))\n # Number of frames in a day - timecode rolls over after 24 hours\n frames_per_24_hours = frames_per_hour * 24\n # Number of frames per ten minutes\n frames_per_10_minutes = int(round(ffps * 60 * 10))\n # Number of frames per minute is the round of the framerate * 60 minus\n # the number of dropped frames\n frames_per_minute = int(round(ffps) * 60)\n\n frame_number = frames - 1\n\n if frame_number < 0:\n # Negative time. Add 24 hours.\n frame_number += frames_per_24_hours\n\n # If frame_number is greater than 24 hrs, next operation will rollover\n # clock\n frame_number %= frames_per_24_hours\n\n ifps = 25.0\n frs = frame_number % ifps\n secs = (frame_number // ifps) % 60\n mins = ((frame_number // ifps) // 60) % 60\n hrs = (((frame_number // ifps) // 60) // 60)\n\n return \"%02d:%02d:%02d.%02d\" % (hrs,\n mins,\n secs,\n frs)",
"def to_str(self):\n return self.PATTERN % (self.hours, self.minutes, self.seconds, self.milliseconds)",
"def Beat_disp(self):\n return ' '.join(str(x+self.offset) for x in self.beats)",
"def encode(self):\r\n tint = long(self.time)\r\n tfrac = long((self.time - tint)*1000000)\r\n return struct.pack(Format.Event, tsec, tfrac, self.eventType,\r\n self.eventCode, self.eventValue)",
"def info(cls):\n return 'Snapshot (i.e. hydro variables at given time) plotting module.'",
"def get_time_string(self):\n return f\"{self.year} {self.month:02} \" \\\n f\"{self.start_day:02} {self.start_hour:02} 00 {self.get_duration():6}\""
] | [
"0.6260433",
"0.61221725",
"0.6112356",
"0.6092692",
"0.6030933",
"0.60286427",
"0.59961176",
"0.591236",
"0.5886073",
"0.5884544",
"0.5821253",
"0.58184284",
"0.5814035",
"0.57295614",
"0.57094294",
"0.5665555",
"0.565353",
"0.5652871",
"0.56451994",
"0.56060076",
"0.5603432",
"0.5593597",
"0.55397075",
"0.55224186",
"0.5516471",
"0.5514761",
"0.5509764",
"0.5509646",
"0.5505968",
"0.5502446"
] | 0.65640455 | 0 |
Set the sender id of the frame | def __set_sender_id(self, sender_id):
if not isinstance(sender_id, int):
raise TypeError('It has to be an integer identifier')
if sender_id < 0:
raise ValueError('There are not negative identifiers')
self.__sender_id = sender_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sender(self, sender):\n\n self._sender = sender",
"def sender(self, sender):\n\n self._sender = sender",
"def sender(self, sender):\n\n self._sender = sender",
"def sender(self, sender):\n\n self._sender = sender",
"def sender(self, sender):\n\n self._sender = sender",
"def sender_name(self, sender_name):\n\n self._sender_name = sender_name",
"def __get_sender_id(self):\n return self.__sender_id",
"def sender(self, sender: str):\n if sender is None:\n raise ValueError(\"Invalid value for `sender`, must not be `None`\") # noqa: E501\n\n self._sender = sender",
"def sender(self, sender: Address) -> None:\n enforce(\n isinstance(sender, str), f\"Sender must be string. Found '{type(sender)}'\"\n )\n self._sender = sender",
"def SenderId(self):\n return self._sender_id",
"def set_id(self, player_id):\n pass",
"def thread_originator_id(self, thread_originator_id):\n\n self._thread_originator_id = thread_originator_id",
"def setId(self, *args):\n return _libsbml.Event_setId(self, *args)",
"def __init__(self, sender):\r\n\t\tself.sender = sender",
"def set_id(self, id):\n self.id = id\n print(\"self id = \" + str(self.id))",
"def setId(self, *args):\n return _libsbml.FluxBound_setId(self, *args)",
"def set_window_id(self, window_id):\r\n self.window_id = window_id",
"def origin_id(self, origin_id):\n\n self._origin_id = origin_id",
"def owner_id(self, owner_id):\n self._owner_id = owner_id",
"def button_id(self, button_id):\n\n self._button_id = button_id",
"def setFocusId(*args):",
"def setFocusId(*args):",
"def setFocusId(*args):",
"def setFocusId(*args):",
"def _set_id(self):\n raise NotImplementedError()",
"def setId(self, *args):\n return _libsbml.Reaction_setId(self, *args)",
"def set_player_id(self, player_id):\n self.player_id = player_id",
"def setReactionId(self, *args):\n return _libsbml.ReactionGlyph_setReactionId(self, *args)",
"def sender_order(self, sender_order):\n\n self._sender_order = sender_order",
"def _set_id(self, value):\n pass"
] | [
"0.6736208",
"0.6736208",
"0.6736208",
"0.6736208",
"0.6736208",
"0.6245707",
"0.61775434",
"0.61548287",
"0.6074172",
"0.6006215",
"0.5950996",
"0.5842727",
"0.57942617",
"0.5655139",
"0.5590423",
"0.5571542",
"0.54551816",
"0.544673",
"0.5442492",
"0.5391776",
"0.53906125",
"0.53906125",
"0.53906125",
"0.53906125",
"0.53862834",
"0.53826034",
"0.53575885",
"0.53504646",
"0.5343964",
"0.5340266"
] | 0.73018974 | 0 |
Get the sender id of the frame | def __get_sender_id(self):
return self.__sender_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def SenderId(self):\n return self._sender_id",
"def get_channel_id(event):\n return event.source.sender_id",
"def sender(self) -> str:\n return self._sender",
"def showsender(self):\n return self.sender",
"def sender(self):\n return self._sender",
"def sender(self) -> Address:\n return self._sender",
"def SenderScreenName(self):\n return self._sender_screen_name",
"def msg_id(self):\n return struct.unpack('<H', self.pkt.payload[0:2])[0]",
"def getSender(self):\n\n if self in WebSocketRouter.nodemap:\n return WebSocketRouter.nodemap[self]\n elif self not in WebSocketRouter.usermap:\n WebSocketRouter.usermap[self] = str(uuid4())\n debug(\"Added user py id: %s uuid: %s\" % \\\n (str(id(self)), WebSocketRouter.usermap[self]))\n return WebSocketRouter.usermap[self]",
"def sender(self):\n l = self.link\n if l and l.is_sender:\n return l\n else:\n return None",
"def get_sender(doc):\n key_id = doc.find(\".//{%s}sig\" % NAMESPACE).get(\"key_id\")\n return urlsafe_b64decode(key_id).decode(\"utf-8\")",
"def get_sender(cls, message):\r\n if message is not None:\r\n if isinstance(message, PlatformMessage):\r\n return message.sender\r\n assert isinstance(message, (list, tuple)), \"Message is expected to be a list or a tuple\"\r\n assert len(message) >= 4, \"Message's length expected to be at least 4\"\r\n assert message[0] == PlatformMessage._signature, \"Message's signature is incorrect\"\r\n return message[1]\r\n return None",
"def sent_id(self):\n return self._sent_id",
"def GetCallerName(num_frame=1):\n frame = sys._getframe(num_frame + 1) # pylint: disable=protected-access\n return inspect.getframeinfo(frame, 1)[2]",
"def sender(self):\n key, alt = ('Sender', 'From') if not self.resent else \\\n ('Resent-Sender', 'Resent-From')\n value = self.get(key) or self.get(alt)\n _, addr = getaddresses([value])[0]\n return addr",
"def __get_receivers_id(self):\n return self.__receivers_id",
"def getMessageID(self):\n return self._payload[1]",
"def sandbox_id_for_message(self, msg_or_event):\n return msg_or_event['sandbox_id']",
"def outgoing_caller_id(self, sid):\r\n return numbers.OutgoingCallerId(self, sid)",
"def getCallerName(self,frameLevel=1):\n self.getCallerParams(frameLevel)\n result=self.callerName\n return result",
"def _get_frame_name(self, frame):\n if isinstance(frame, str):\n name = frame\n frame_obj = None\n else:\n name = frame.name\n frame_obj = frame\n return name, frame_obj",
"def sender(self) -> str:",
"def _get_frame_index(self, frame):\n if isinstance(frame, cf.CoordinateFrame):\n frame = frame.name\n #frame_names = [getattr(item[0], \"name\", item[0]) for item in self._pipeline]\n frame_names = [step.frame if isinstance(step.frame, str) else step.frame.name for step in self._pipeline]\n return frame_names.index(frame)",
"def get_id(self):\n if hasattr(self, \"_thread_id\"):\n return self._thread_id\n for id, thread in threading._active.items():\n if thread is self:\n return id",
"def get_id(self):\n if hasattr(self, \"_thread_id\"):\n return self._thread_id\n for id, thread in threading._active.items():\n if thread is self:\n return id",
"def _get_sender(self, sender):\n if isinstance(sender, tuple):\n return \"%s <%s>\" % sender, sender[0], sender[1]\n else:\n return sender, sender, sender",
"def get_frame(frame):\n\n return int.from_bytes(frame, byteorder='big')",
"def _get_sender_key(self, outer_message, aad, plaintext, request_id):\n return self.sender_key",
"def frame_idx(self) -> int:\n pass",
"def get_frame_id(self, ar_tags):\n frame_string = None\n while not frame_string:\n _print_choose_frame_text(ar_tags)\n tag_id = int(raw_input('>> ').split()[0])\n if tag_id in [x.id for x in ar_tags] or tag_id is -1:\n frame_string = gripper_wrapper.DEFAULT_FRAME if tag_id is -1 else \"ar_marker_%d\" % tag_id\n else:\n print \"illegal frame\"\n return frame_string"
] | [
"0.72898585",
"0.66977465",
"0.6695593",
"0.6613553",
"0.66016334",
"0.6535047",
"0.60609436",
"0.60208005",
"0.5981284",
"0.5972186",
"0.58671016",
"0.58098495",
"0.5786878",
"0.5773325",
"0.5759419",
"0.57203734",
"0.5719647",
"0.57100904",
"0.5695645",
"0.5668261",
"0.5645822",
"0.5630374",
"0.5610462",
"0.5603895",
"0.5603895",
"0.56030107",
"0.5599975",
"0.5589984",
"0.55865103",
"0.5584772"
] | 0.76059324 | 0 |
Set the list of receiver ids | def __set_receivers_id(self, receivers_id):
if not isinstance(receivers_id, list):
raise TypeError('Receivers id should be a list')
if not all(isinstance(receiver_id, int) for receiver_id in receivers_id): # Check if all elements are int
raise TypeError('All elements in the receivers id list should be integer')
if any(receiver_id < 0 for receiver_id in receivers_id): # If any elements is negative
raise ValueError('An element is negative, there can not be negative ids')
self.__receivers_id = receivers_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __get_receivers_id(self):\n return self.__receivers_id",
"def ids(self, ids):\n self._ids = ids",
"def setId(self, *args):\n return _libsbml.ListOfMembers_setId(self, *args)",
"def set_ids(self, item_list):\n self._reset_sequence()\n for item in item_list:\n key = self.nondup_key_for_item(item)\n item[self.id_key] = self.ids.get(key) or self._get_next_id()",
"def setUserIDRefs( self, text ):\n self.user_id_list= text.split()",
"def setSiteids(self):\n self.siteids = []\n for site in self.sites:\n self.siteids.append(site.siteid)",
"def set_receiver(self, receiver):\n self.receiver = receiver",
"def update_recipients_list(recipient_list, id_data):\n\n email_sock = s.socket(s.AF_INET, s.SOCK_DGRAM)\n email_sock.settimeout(1)\n request = json.dumps({'type': 'retrieve', 'device_id': id_data})\n email_address = \"\"\n try:\n email_sock.sendto(bytes(request, 'utf-8'), (c.DEVICE_MANAGER, 3210))\n reply, _ = email_sock.recvfrom(1024)\n reply_json = json.loads(reply)\n email_address = reply_json['email']\n except (s.gaierror, s.timeout) as e:\n logging.error(f'Failed to retrieve email for {id_data}. E:{str(e)}')\n finally:\n email_sock.close()\n\n if len(email_address) != 0:\n if not recipient_list:\n recipient_list = [email_address]\n else:\n recipient_list.append(email_address)\n\n return recipient_list",
"def cron_partner_ids(self):\n pass",
"def update_moderator_ids():\n moderator_emails_config = Registry.get_config_property(\n 'moderator_emails')\n if not moderator_emails_config:\n return []\n\n moderator_ids = []\n for email in moderator_emails_config.value:\n user_id = user_services.get_user_id_from_email(email)\n if user_id is not None:\n moderator_ids.append(user_id)\n else:\n raise Exception('Bad moderator email: %s' % email)\n return moderator_ids",
"def update_ids(self, value, destination_ids_by_source):\n return value",
"def setRecipients(self, *recipients):\n assert [r for r in recipients if r], \"setRecipients called with %r\" % (recipients,)\n self.recipients = recipients",
"def _set_id(self, value):\n pass",
"def __set_sender_id(self, sender_id):\n if not isinstance(sender_id, int):\n raise TypeError('It has to be an integer identifier')\n if sender_id < 0:\n raise ValueError('There are not negative identifiers')\n self.__sender_id = sender_id",
"def learn_ids(self, item_list):\n self._reset_sequence()\n for item in item_list:\n key = self.nondup_key_for_item(item)\n self.ids[key] = item[self.id_key]",
"def service_ids(self, service_ids):\n\n self._service_ids = service_ids",
"def search_among_agents(self, agent_ids):\n self._id_list = agent_ids",
"def _set_id(self):\n raise NotImplementedError()",
"def recipients(self) -> ty.List[str]:",
"def employee_ids_to_notify(self, employee_ids_to_notify):\n\n self._employee_ids_to_notify = employee_ids_to_notify",
"def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):\n test = set(partner_ids)\n mail_followers_obj = self.pool.get('mail.followers')\n subtype_obj = self.pool.get('mail.message.subtype')\n partner_obj = self.pool.get('res.partner')\n\n user_pid = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id\n if set(partner_ids) == set([user_pid]):\n try:\n self.check_access_rights(cr, uid, 'read')\n except (osv.except_osv, orm.except_orm):\n return\n else:\n self.check_access_rights(cr, uid, 'write')\n\n for record in self.browse(cr, SUPERUSER_ID, ids, context=context):\n existing_pids = set([f.id for f in record.message_follower_ids\n if f.id in partner_ids])\n new_pids = set(partner_ids) - existing_pids\n \n ### MOD START: if id is a customer, DO NOT ADD HIM\n for partner in partner_ids:\n if partner_obj.browse(cr, SUPERUSER_ID, partner, context=context).customer == True:\n new_pids -= set([partner])\n ### MOD STOP\n\n # subtype_ids specified: update already subscribed partners\n if subtype_ids and existing_pids:\n fol_ids = mail_followers_obj.search(cr, SUPERUSER_ID, [\n ('res_model', '=', self._name),\n ('res_id', '=', record.id),\n ('partner_id', 'in', list(existing_pids)),\n ], context=context)\n mail_followers_obj.write(cr, SUPERUSER_ID, fol_ids, {'subtype_ids': [(6, 0, subtype_ids)]}, context=context)\n # subtype_ids not specified: do not update already subscribed partner, fetch default subtypes for new partners\n elif subtype_ids is None:\n subtype_ids = subtype_obj.search(cr, uid, [\n ('default', '=', True),\n '|',\n ('res_model', '=', self._name),\n ('res_model', '=', False)\n ], context=context)\n # subscribe new followers\n for new_pid in new_pids:\n mail_followers_obj.create(cr, SUPERUSER_ID, {\n 'res_model': self._name,\n 'res_id': record.id,\n 'partner_id': new_pid,\n 'subtype_ids': [(6, 0, subtype_ids)],\n }, context=context)\n\n return True",
"def getIDs():",
"def security_list_ids(self, security_list_ids):\n self._security_list_ids = security_list_ids",
"def fdsid_list(self, fdsid_list):\n\n self._fdsid_list = fdsid_list",
"def setReceivers(self, xrec, zrec):\n xrec = ascontiguousarray(xrec, float64)\n zrec = ascontiguousarray(zrec, float64)\n nrec = min(len(xrec), len(zrec))\n if (len(xrec) != len(zrec)):\n print(\"Inconsistent array lengths\")\n xrecPointer = xrec.ctypes.data_as(POINTER(c_double))\n zrecPointer = zrec.ctypes.data_as(POINTER(c_double))\n ierr = c_int(1)\n self.fteik2d.fteik_solver2d_setReceivers64f(nrec,\n zrecPointer,\n xrecPointer,\n ierr)\n if (ierr.value != 0): \n print(\"Error setting receivers\")\n return -1\n self.nrec = nrec\n return 0",
"def resource_ids(self, resource_ids):\n\n self._resource_ids = resource_ids",
"def set_id(self, player_id):\n pass",
"def mailing_id(self, val: str):\n self._mailing_id = val",
"def external_ids(self, external_ids):\n\n self._external_ids = external_ids",
"def update_node_id(node: Element) -> None:\n new_ids: list[str] = []\n for node_id in node['ids']:\n new_id = self.fix_fragment('', node_id)\n if new_id not in new_ids:\n new_ids.append(new_id)\n node['ids'] = new_ids"
] | [
"0.68162084",
"0.61993223",
"0.6040532",
"0.5932464",
"0.5899451",
"0.58826494",
"0.5788359",
"0.57493365",
"0.57457477",
"0.57353175",
"0.5734101",
"0.57076067",
"0.55706054",
"0.54016364",
"0.5394554",
"0.52624714",
"0.525897",
"0.52536356",
"0.5247424",
"0.5245702",
"0.52455664",
"0.5226694",
"0.5214529",
"0.52108186",
"0.52104765",
"0.5189817",
"0.5184865",
"0.51820064",
"0.5165794",
"0.5147058"
] | 0.7947489 | 0 |
Get the list of receiver ids | def __get_receivers_id(self):
return self.__receivers_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_ids(self) -> List[str]:",
"def getIDs():",
"def receivers(self):\n keys = ('To', 'Cc', 'Bcc') if not self.resent else \\\n ('Resent-To', 'Resent-Cc', 'Resent-Bcc')\n vals = (v for v in (self.get(key) for key in keys) if v)\n return [addr for _, addr in getaddresses(vals)]",
"def recipients(self) -> ty.List[str]:",
"def remote_get_ids(self):\n return self.smultiengine.get_ids()",
"def get_resend_ids(self, reg_ids):\n if not reg_ids or len(reg_ids) == 0:\n return []\n num_incoming = len(reg_ids)\n num_results = len(self.my_json['results'])\n if num_incoming != num_results:\n print('expected number of incoming reg_ids: %i to equal number of results: %i' % (num_incoming, num_results))\n\n resends = self._get_resends()\n ids = []\n for (line_num, err) in resends:\n if line_num < num_incoming:\n ids.append(reg_ids[line_num])\n else:\n break\n return ids",
"def getFollowerIDs(self, screen_name):\n follower_ids = []\n for follower_id in tweepy.Cursor(self.api.followers_ids,id=screen_name).items():\n print follower_id\n follower_ids.append(follower_id)\n return follower_ids",
"def extract_receivers(self) -> Iterable[Account]:\n receivers: List[Account] = []\n for transaction in self.transactions_all:\n if transaction.receiver not in receivers:\n receivers.append(transaction.receiver)\n return receivers",
"def get_recordIds(self):\n record_ids = []\n for item in self.order_items:\n record_ids.append(item.get_recordId())\n \n return record_ids",
"def recipients(self) -> pulumi.Output[Sequence[int]]:\n return pulumi.get(self, \"recipients\")",
"def _get_all_subscribers_of_creator(self, user_id: str) -> List[str]:\n subscribers_model = user_models.UserSubscribersModel.get(\n user_id, strict=False)\n # TODO(#15621): The explicit declaration of type for ndb properties\n # should be removed. Currently, these ndb properties are annotated with\n # Any return type. Once we have proper return type we can remove this.\n if subscribers_model:\n subscriber_ids: List[str] = subscribers_model.subscriber_ids\n return subscriber_ids\n else:\n return []",
"def get_email_ids(self):\n if self.emails is None or self.emails == '':\n return []\n email_ids = self.emails.replace(' ', '')\n return email_ids.split(',')",
"def getIds(self) -> List[int]:\n return list(self.users.keys())",
"def get_id(self):\n id_num = []\n i = 0\n while True:\n serial_data = self.rfid_serial_port.read()\n data = serial_data.decode('utf-8')\n i = i + 1\n if i == 12:\n i = 0\n ID = \"\".join(map(str, id_num))\n return ID\n else:\n id_num.append(data)",
"def get_ids(self):\n return [item.id for item in self.items]",
"def get_recipients(self) -> List[Client]:\n\n index_list = [i for i in range(len(self.int_var_list)) if self.int_var_list[i].get() == 1]\n return [self.client_list[i] for i in index_list]",
"def __set_receivers_id(self, receivers_id):\n if not isinstance(receivers_id, list):\n raise TypeError('Receivers id should be a list')\n if not all(isinstance(receiver_id, int) for receiver_id in receivers_id): # Check if all elements are int\n raise TypeError('All elements in the receivers id list should be integer')\n if any(receiver_id < 0 for receiver_id in receivers_id): # If any elements is negative\n raise ValueError('An element is negative, there can not be negative ids')\n self.__receivers_id = receivers_id",
"def get_ids(self):\n return self._ids",
"def ids(self) -> Sequence[str]:\n return pulumi.get(self, \"ids\")",
"def ids(self) -> Sequence[str]:\n return pulumi.get(self, \"ids\")",
"def recipients(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:\n return pulumi.get(self, \"recipients\")",
"def get_service_ids(self) -> list[bluetooth.BluetoothUuid]:\n return [bluetooth.BluetoothUuid(i) for i in self.serviceIds()]",
"def _get_chat_id_list():\n cfg = read_config()\n cfg = cfg['notifier']['telegram_bot']\n return cfg.get('chat_id')",
"def get_all_party_id() -> List[int]:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select id from party order by id\"\n cursor.execute(query)\n data = cursor.fetchall()\n r_list = [x[0] for x in data]\n db.disconnect()\n return r_list",
"def getEventIds(self):\n eventIdsLst = []\n for event in self.eventsLst:\n eventIdsLst.append(event['id'])\n return eventIdsLst",
"def get_device_ids(self) -> list[bluetooth.BluetoothUuid]:\n return [bluetooth.BluetoothUuid(i) for i in self.deviceIds()]",
"def get_ids(self):\n return self.redis.hkeys(self.feed_items)",
"def returnIdEndereco(self):\r\n self.cursor.execute(\"SELECT ID FROM ENDERECO;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []",
"def cron_partner_ids(self):\n pass",
"def recipients(self):\n return self._recips"
] | [
"0.676375",
"0.6558249",
"0.64179915",
"0.64104015",
"0.6283956",
"0.626986",
"0.6142786",
"0.6132265",
"0.6117434",
"0.6115579",
"0.60932165",
"0.60912985",
"0.60734886",
"0.60161537",
"0.6016079",
"0.60060114",
"0.60022485",
"0.59980255",
"0.5988492",
"0.5988492",
"0.59827334",
"0.59379184",
"0.59363353",
"0.59357816",
"0.5921115",
"0.5879206",
"0.5867267",
"0.58659047",
"0.5836212",
"0.5827755"
] | 0.80000204 | 0 |
Get the starting time of the frame | def __get_starting_time(self):
return self.__starting_time | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_frame_time(self):\n return self.get_timings().frame_time",
"def getStartTime(self):\n return _osgAnimation.Animation_getStartTime(self)",
"def getStartTime(self):\n return _osgAnimation.Channel_getStartTime(self)",
"def start_time(self) -> float:\r\n ...",
"def start_time(self) -> float:\n return self._start_time",
"def start_time(self):\n return self.__start",
"def get_frame_clock(self): # real signature unknown; restored from __doc__\n pass",
"def startTime(self) -> float:\n try: return self.times[0]\n except IndexError: return 0.0",
"def getStartTime(self):\n return _osgAnimation.Vec4LinearSampler_getStartTime(self)",
"def getStartTime(self):\n return _osgAnimation.MatrixLinearSampler_getStartTime(self)",
"def start_time(self):\n return self.time_parser.start_time",
"def get_start_time(self):\n # Timezone and BST not accounted for. Always gives it as GMT.\n create_time = (os.path.getmtime(self.file_path))\n start_time = create_time - len(self.amplitude) / self.fs\n return datetime.fromtimestamp(start_time)",
"def initialTime(self):\n return self.params['t0']",
"def start_time(self):\n return RPR.GetAudioAccessorStartTime(self.id)",
"def getStartTime(self):\n assert not self.isWaitingToStart(), \"Too early to tell: %s\" % self\n return \"%s\" % self.__rawInfo.startTime",
"def start_time(self):\n # TODO: use pd.Timestamp instead\n return self.time[0].to_pydatetime()",
"def time(self):\n return self._begin",
"def getStartTime(self):\n return _osgAnimation.Vec2LinearSampler_getStartTime(self)",
"def getStartTime(self):\n return _osgAnimation.Vec3LinearSampler_getStartTime(self)",
"def start_time(self):\n return self._start_time",
"def start_time(self):\n return self._start_time",
"def start_time(self):\n return self._start_time",
"def start_time(self):\n return self._start_time",
"def start_time(self):\n return self._start_time",
"def start_time(self):\n return self._start_time",
"def start_time(self):\n return self._start_time",
"def start_time(self):\n return self._start_time",
"def start_time(self):\n pass",
"def start_time(self) -> str:\n return pulumi.get(self, \"start_time\")",
"def start_time(self) -> str:\n return pulumi.get(self, \"start_time\")"
] | [
"0.78929955",
"0.7614445",
"0.75264573",
"0.7499873",
"0.7396878",
"0.7367973",
"0.7346448",
"0.73315626",
"0.7275492",
"0.72647166",
"0.7260599",
"0.7249851",
"0.7245025",
"0.72314405",
"0.722858",
"0.72115505",
"0.7198226",
"0.7188492",
"0.71580946",
"0.712532",
"0.712532",
"0.712532",
"0.712532",
"0.712532",
"0.712532",
"0.712532",
"0.712532",
"0.7113169",
"0.7110801",
"0.7110801"
] | 0.7710456 | 1 |
Set the deadline of the frame in ns; it should be smaller than the period. If 0, the deadline is set equal to the period | def __set_deadline(self, deadline):
if not isinstance(deadline, int):
raise TypeError('The deadline should be an integer')
if deadline < 0:
raise ValueError('The deadline should be positive')
if deadline > self.__period:
raise ValueError('The deadline cannot be larger than the period')
if deadline == 0:
self.__deadline = self.__period
else:
self.__deadline = deadline | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_setDeadline(self):\n self.session.setDeadline(13.5)\n self.assertEqual(13.5, self.session.getRemainingTime())",
"def test_deadlineChangesAsTimePasses(self):\n self.session.setDeadline(13.5)\n self.runtime.getTimeService().advance(2.0)\n self.assertEqual(11.5, self.session.getRemainingTime())",
"def get_deadline(self):\n min_time = self.introduction_time or 0\n return min_time + self.time_slot[0] * self.__period",
"def set_deadline(self, depth):\n dl_table = [30, 50, 60, 70, 80, 100, 100, 100, 100, 100]\n self.deadline = dl_table[int(depth/10)]",
"def current_effective_deadline(cls) -> float:",
"def deadline(self):\n\n if self.service and self.service.solution_time:\n return self.created + \\\n timedelta(hours=self.service.solution_time) - \\\n timedelta(seconds=self._time_on_hold)\n else:\n return None",
"def deadline(self) -> pd.Timestamp:\n if self.loading_preference:\n # The loading time can be spread out of multiple shifts\n logging.info(f\"Calculating: Spread Order Deadline\")\n return self._deadline_spread()\n else:\n # The loading time must be completed all on the same day\n logging.info(f\"Calculating: Same Day Order Deadline\")\n return self._deadline_same_day()",
"def __get_deadline(self):\n return self.__deadline",
"def test_notSetDeadline(self):\n self.assertEqual(None, self.session.getRemainingTime())",
"def test_setDeadlineTwice(self):\n self.session.setDeadline(10.0)\n self.session.setDeadline(9.0)\n decreased = self.session.getRemainingTime()\n self.session.setDeadline(11.0)\n still_decreased = self.session.getRemainingTime()\n self.assertEqual((decreased, still_decreased), (9.0, 9.0))",
"def to_deadline(timeout):\n return libruss.russ_to_deadline(timeout)",
"def schedule_deadline_time(self) -> str:\n return pulumi.get(self, \"schedule_deadline_time\")",
"def with_deadline(self, deadline):\n return ExponentialTimeout(\n initial=self._initial,\n maximum=self._maximum,\n multiplier=self._multiplier,\n deadline=deadline)",
"def test_timeout_pending(self):\n deadline = Deadline(MS)\n timeout = deadline.timeout()\n self.assertGreater(timeout, 0)\n self.assertLess(timeout, MS)",
"def test_update_verification_deadline_without_expiring_modes(self):\n verification_deadline = datetime(year=1915, month=5, day=7, tzinfo=pytz.utc)\n response, __ = self._get_update_response_and_expected_data(None, verification_deadline)\n\n assert response.status_code == 200\n assert VerificationDeadline.deadline_for_course(self.course.id) == verification_deadline",
"def test_resolveLowerDeadline(self):\n self.session.setDeadline(3.0)\n start = time()\n with self.assertRaises(Exception):\n self.session.resolve(\"unknown\", \"1.0\")\n self.assertAlmostEqual(time() - start, 3.0, delta=1)",
"def max_deadline(self):\n return self._max_deadline",
"def test_issue_edit_issue_deadline(self):\n pass",
"def __init__(self, period: float = 0.1, timeout: float = 30):\n self.period = period\n self.timeout = timeout",
"def test_remove_upgrade_deadline(self):\n # First create a deadline\n upgrade_deadline = datetime.now(pytz.utc) + timedelta(days=1)\n response, __ = self._get_update_response_and_expected_data(upgrade_deadline, None)\n assert response.status_code == 200\n verified_mode = CourseMode.verified_mode_for_course(self.course.id)\n assert verified_mode is not None\n assert verified_mode.expiration_datetime.date() == upgrade_deadline.date()\n\n # Now set the deadline to None\n response, __ = self._get_update_response_and_expected_data(None, None)\n assert response.status_code == 200\n\n updated_verified_mode = CourseMode.verified_mode_for_course(self.course.id)\n assert updated_verified_mode is not None\n assert updated_verified_mode.expiration_datetime is None",
"def active_deadline_seconds(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"active_deadline_seconds\")",
"def active_deadline_seconds(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"active_deadline_seconds\")",
"def set_timer(self, update: Update, context: CallbackContext) -> None:\n chat_id = update.message.chat_id\n try:\n due = int(context.user_data[\"duration\"])\n if due < 0:\n # update.message.reply_text('Sorry we can not go back to future!')\n return\n\n job_removed = self.remove_job_if_exists(str(chat_id), context)\n context.job_queue.run_once(\n self.command_stop_irrigation, due, context=chat_id, name=str(chat_id)\n )\n\n # text = 'Timer successfully set!'\n # if job_removed:\n # text += ' Old one was removed.'\n # update.message.reply_text(text)\n\n except (IndexError, ValueError):\n # update.message.reply_text('Usage: /set <seconds>')\n update.message.reply_text(\"Erro ao agendar o desligamento da irrigação 😞\")",
"def _deadline_spread(self) -> pd.Timestamp:\n shift_idx = 0\n remaining_work = self._lt_sec()\n workshift = self._get_departure_shift()\n # Continue working on order until no remaining work to meet deadline\n while self.compare_shift(workshift.rollback(shift_idx), remaining_work) <= 0:\n remaining_work -= workshift.duration\n logging.info(f\"Shift: {shift_idx}, Remaining: {remaining_work}\")\n shift_idx += 1\n\n return workshift.rollback(shift_idx).end_time - dt.timedelta(\n seconds=remaining_work\n )",
"def update_period(self):\n return 0.1",
"def to_timeout(deadline):\n return libruss.russ_to_timeout(deadline)",
"def SetTimeLeft(self, *args, **kwargs):\n pass",
"def deadline_decorated(*args, **kwargs):\n return IOLoop.current().add_timeout(\n self._deadline,\n functools.partial(func, *args, **kwargs)\n )",
"def time_to_resolve(self):\n\n now = timezone.now()\n\n if self.deadline:\n\n if self.deadline < now:\n\n return time.strftime(\n \"%H:%M late\",\n time.gmtime((now - self.deadline).seconds))\n else:\n return time.strftime(\n \"%H:%M\",\n time.gmtime((self.deadline - now).seconds))\n else:\n return None",
"def test_timeout_elapsed_no_exception(self):\n deadline = Deadline(-MS)\n timeout = deadline.timeout(raise_if_elapsed=False)\n self.assertGreater(timeout, -2 * MS)\n self.assertLess(timeout, -MS)"
] | [
"0.6961596",
"0.681887",
"0.67824566",
"0.6683734",
"0.66523653",
"0.62993157",
"0.61421096",
"0.6116758",
"0.6001748",
"0.5914177",
"0.5862476",
"0.5794072",
"0.57932574",
"0.5778166",
"0.56893766",
"0.5676566",
"0.5609137",
"0.56078696",
"0.5586334",
"0.55774784",
"0.5494957",
"0.5494957",
"0.54440105",
"0.5432554",
"0.5429198",
"0.5406225",
"0.5390196",
"0.5343674",
"0.5338318",
"0.5267708"
] | 0.70939225 | 0 |
Get the deadline of the frame in ns | def __get_deadline(self):
return self.__deadline | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def deadline(self):\n\n if self.service and self.service.solution_time:\n return self.created + \\\n timedelta(hours=self.service.solution_time) - \\\n timedelta(seconds=self._time_on_hold)\n else:\n return None",
"def get_deadline(self):\n min_time = self.introduction_time or 0\n return min_time + self.time_slot[0] * self.__period",
"def current_effective_deadline(cls) -> float:",
"def get_frame_time(self):\n return self.get_timings().frame_time",
"def schedule_deadline_time(self) -> str:\n return pulumi.get(self, \"schedule_deadline_time\")",
"def to_deadline(timeout):\n return libruss.russ_to_deadline(timeout)",
"def time_remaining(self):\n with self._lock:\n deadline = self._expiration_manager.deadline()\n return max(0.0, deadline - time.time())",
"def get_timeout(self) -> int:",
"def get_frame_clock(self): # real signature unknown; restored from __doc__\n pass",
"def max_deadline(self):\n return self._max_deadline",
"def timeout_time(self):\n if self.start_time is None:\n return None\n return self.start_time + self.timeout",
"def time_to_resolve(self):\n\n now = timezone.now()\n\n if self.deadline:\n\n if self.deadline < now:\n\n return time.strftime(\n \"%H:%M late\",\n time.gmtime((now - self.deadline).seconds))\n else:\n return time.strftime(\n \"%H:%M\",\n time.gmtime((self.deadline - now).seconds))\n else:\n return None",
"def active_deadline_seconds(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"active_deadline_seconds\")",
"def active_deadline_seconds(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"active_deadline_seconds\")",
"def deadline(self) -> pd.Timestamp:\n if self.loading_preference:\n # The loading time can be spread out of multiple shifts\n logging.info(f\"Calculating: Spread Order Deadline\")\n return self._deadline_spread()\n else:\n # The loading time must be completed all on the same day\n logging.info(f\"Calculating: Same Day Order Deadline\")\n return self._deadline_same_day()",
"def get_frame_duration(self):\n return self._frame_duration",
"def get_timeout(self):\n return self.timeout",
"def test_deadlineChangesAsTimePasses(self):\n self.session.setDeadline(13.5)\n self.runtime.getTimeService().advance(2.0)\n self.assertEqual(11.5, self.session.getRemainingTime())",
"def get_duration(self):\n duration_ns = self.stream.InitialTimeToWaitGet()\n duration_ns += self.stream.NumberOfFramesGet() * self.stream.InterFrameGapGet()\n return datetime.timedelta(seconds=duration_ns / 1e9)",
"def to_timeout(deadline):\n return libruss.russ_to_timeout(deadline)",
"def gettimeout(self):\r\n return self._timeout",
"def get_frame_time(self, f):\n return f * self.get_frame_duration()",
"def FlowStatResponseTimeOut(self):\n\t\treturn self._get_attribute('flowStatResponseTimeOut')",
"def queued_time(self):\r\n return (self.node_monitor_launch_time - self.node_monitor_submit_time)",
"def remaining_ms():",
"def duration(self):\n return (self.fcip_doc[\"latest_timestamp\"] - self.fcip_doc[\"packet_timestamps\"][0])",
"def get_wait_time(self) -> int:\n next_ts = self.get_next_timestamp()\n if next_ts is None:\n return max(0, self.min_wait)\n return min((next_ts - parser.parse(self.event['timestamp'])).seconds, self.max_wait)",
"def test_setDeadline(self):\n self.session.setDeadline(13.5)\n self.assertEqual(13.5, self.session.getRemainingTime())",
"def heartbeat_time(self):\n if self._lasthb is not None:\n return (now() - self._lasthb).seconds\n return 0.0",
"def time_left(self):\n t=self.transport\n return (t.stoptime or t.get_length())-t.get_time()"
] | [
"0.71374065",
"0.7088324",
"0.69267964",
"0.6588213",
"0.6578111",
"0.65595996",
"0.6538972",
"0.648789",
"0.6469537",
"0.6462406",
"0.63980615",
"0.6368143",
"0.631343",
"0.631343",
"0.6300549",
"0.6265253",
"0.62152845",
"0.6122565",
"0.60784006",
"0.6046691",
"0.5992835",
"0.5942206",
"0.59096754",
"0.5878874",
"0.58762807",
"0.58621705",
"0.58517575",
"0.5849019",
"0.58481336",
"0.58460987"
] | 0.7362362 | 0 |
Evaluate a query and save the result. Output is saved either to a file deduced from the query inside a target directory (current working directory by default), or to target_file (if specified). Returns a state. | def evaluate_and_save(
query, target_directory=None, target_file=None, target_resource_directory=None
):
return get_context().evaluate_and_save(
query,
target_directory=target_directory,
target_file=target_file,
target_resource_directory=target_resource_directory,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query_into_file(self, query, fname=\"\", fields=None, parameters=None):\n target_url = self.build_query(query, fields=fields, parameters=parameters)\n\n with urllib.request.urlopen(target_url) as url:\n content = url.read()\n\n with open(fname, 'wb') as ofs:\n ofs.write(content)",
"def run(\n self,\n query=\"*\",\n destination=\"exports/\",\n overwrite=False,\n batchsize=None,\n *args,\n **kwargs\n ):\n if not batchsize:\n batchsize = self.batchsize\n for docbatch in self._process_by_batch(\n self._retrieve(query), batchsize=batchsize\n ):\n self.save(docbatch, destination=destination, *args, **kwargs)\n if self.fileobj:\n self.fileobj.close()",
"def compute_ground_truth(query_filename, target_path, physical_db_name):\r\n\r\n db_connection = DBConnection(db=physical_db_name)\r\n\r\n # read all queries\r\n with open(query_filename) as f:\r\n queries = f.readlines()\r\n\r\n csv_rows = []\r\n for query_no, query_str in enumerate(queries):\r\n logger.debug(f\"Computing ground truth for cardinality query {query_no}: {query_str}\")\r\n query_str = query_str.strip()\r\n cardinality_true = db_connection.get_result(query_str)\r\n\r\n csv_rows.append({'query_no': query_no,\r\n 'query': query_str,\r\n 'cardinality_true': cardinality_true})\r\n\r\n save_csv(csv_rows, target_path)",
"def eval(\n self,\n processed_data_dir: Path,\n output_result_dir: Path,\n ) -> NoReturn:\n pass",
"def get_query_output(self):\n\n return self.query_output_file",
"def set_query_output(self, path):\n\n file = f'sql_query_R{str(self.time_span).replace(\".\", \"_\")} ({str(self.date_time).replace(\":\",\"_\")}).csv'\n self.query_output_file = path_inc(path, file)",
"def query(self, query, *, output_file=None, output_format=\"votable\", verbose=False):\n if not verbose:\n with warnings.catch_warnings():\n commons.suppress_vo_warnings()\n warnings.filterwarnings(\"ignore\", category=u.UnitsWarning)\n job = self._tap.launch_job(query=query, output_file=output_file, output_format=output_format,\n verbose=False, dump_to_file=output_file is not None)\n else:\n job = self._tap.launch_job(query=query, output_file=output_file, output_format=output_format,\n verbose=True, dump_to_file=output_file is not None)\n return job.get_results()",
"def fire(self):\n job = TripleQuery(self.query, self.inputpaths, self.outputpath)\n try:\n job.run()\n logger.debug(\"TripleQuery run. Setting status to done.\")\n self.status = 'done'\n except Exception:\n logger.error(\"Caught exception in TripleQuery. Setting status to fail and deleting output.\")\n dfs.delete(self.outputpath)\n self.status = 'fail'",
"def run_check(output_file,\n documents,\n queries,\n results,\n ext=\".xhtml\",\n formula_bags=False,\n keep_words=True,\n keep_math=True\n ):\n with open(output_file, \"w+\") as out:\n analyzer = Analyzer(formula_bags=formula_bags,\n keep_words=keep_words,\n keep_math=keep_math)\n queries = Queries(queries).get_queries()\n results = Results(results)\n print(\"{},{},{},{},{},{},{},{},{},{},{}\".format(\"Query\",\n \"Document\",\n \"Doc-Length\",\n \"Ranking\",\n \"Span\",\n \"Min-Span\",\n \"Normalized-Span\",\n \"Normalized-Min-Span\",\n \"Min-Distance\",\n \"Ave-Distance\",\n \"Max-Distance\"),\n file=out)\n undefined_docs = []\n for q in tqdm(range(0, len(queries))):\n query = queries[q]\n for doc in results.documents_for_query(query):\n try:\n document = Document(os.path.join(documents, doc + ext))\n (tf_dic, __) = document.lookup_dictionaries(analyzer)\n relevant = lookup_relevant(results.find_score(query, doc))\n try:\n dist = calculate_distances(query, tf_dic)\n doc_length = sum([len(tf_dic[key])\n for key in tf_dic.keys()])\n print(\"{},{},{},{},{}\".format(query,\n document,\n doc_length,\n relevant,\n \",\".join([str(d)\n for d in dist])),\n file=out)\n except DistancesUndefinedException:\n undefined_docs.append((document, relevant, query))\n except FileNotFoundError:\n print(\"Error in opening document: {}\".format(doc))\n print(\"Documents with undefined Distances\")\n for doc in undefined_docs:\n print(\"{}:{}:{}\".format(doc[2], doc[0], doc[1]))",
"def writeSmtQueryToFile(self, location):\n try:\n smtQueryFileHandler = open(location, \"w\")\n except EnvironmentError as e:\n errMsg = (\"Error writing the SMT query to a file \"\n \"located at %s: %s\" % (location, e))\n raise GameTimeError(errMsg)\n else:\n with smtQueryFileHandler:\n smtQueryFileHandler.write(self.getSmtQuery())",
"def execute(input_file, op_exec):\n if op_exec == \"ts2db\":\n # print(prep.ts2db(input_file, None))\n return prep.ts2db(input_file)",
"def SaveResult(self, result, result_path):\n\n # Create / get results file in the local directory \"\"\"\n db = self.GetResultFile()\n\n if db is not None:\n debug('result : {}'.format(result))\n\n # Check if file exists\n try:\n o = db[str(result_path + '/' + result.GetName())]\n except KeyError:\n print(\"No object in {}/{}\".format(result_path, result.GetName()))\n o = None\n\n if o is not None:\n print(\"Replacing {}/{}\".format(result_path, result.GetName()))\n del o\n\n db[str(result_path + '/' + result.GetName())] = result\n if db[str(result_path + '/' + result.GetName())] is not None:\n print(\"+++result {}/{} adopted\".format(result_path, result.GetName()))\n\n else:\n error(\"Could not adopt result {}\".format(result.GetName()))\n db.close()\n return\n\n else:\n error(\"Error creating result file\")\n db.close()\n return\n\n db.close()",
"def _store_results(user_cfg: Dict, run_cfg: Dict, results: pd.DataFrame, epoch: int):\n if \"eval_dir\" in user_cfg:\n store_dir = user_cfg[\"eval_dir\"]\n store_dir.mkdir(exist_ok=True, parents=True)\n else:\n store_dir = user_cfg[\"run_dir\"]\n\n if run_cfg[\"no_static\"]:\n file_name = store_dir / f\"lstm_no_static_seed{run_cfg['seed']}_epoch_{epoch}.p\"\n else:\n if run_cfg[\"concat_static\"]:\n file_name = store_dir / f\"lstm_seed{run_cfg['seed']}_epoch_{epoch}.p\"\n else:\n file_name = store_dir / f\"ealstm_seed{run_cfg['seed']}_epoch_{epoch}.p\"\n\n with (file_name).open(\"wb\") as fp:\n pickle.dump(results, fp)\n\n print(f\"Sucessfully store results at {file_name}\")",
"def call(self, *args):\n self.formula.to_file(self.output_file)",
"def save(self):\n output = self.prepare_results()\n\n override_name = output[\"config\"][\"sysconfig\"].get(\"output_filename\", None)\n scenario_name = (\n override_name if override_name else output[\"config\"][\"scenario\"][\"name\"]\n )\n filename = f\"{scenario_name}_{output['timestamp']}.json\"\n log.info(\n \"Saving evaluation results to path \"\n f\"{self.scenario_output_dir}/{filename} \"\n \"inside container.\"\n )\n output_path = os.path.join(self.scenario_output_dir, filename)\n with open(output_path, \"w\") as f:\n json_utils.dump(output, f)\n if os.path.getsize(output_path) > 2**27:\n log.warning(\n \"Results json file exceeds 128 MB! \"\n \"Recommend checking what is being recorded!\"\n )",
"def run_query_and_save_results(client, query, res_dataset, res_table, timeout = 60):\n # Delete the results table if it exists\n delete_bq_table(client, res_dataset, res_table)\n # Run the query and write results to table\n print('\\nRunning query and writing to table %s.%s:\\n%s\\n' % (res_dataset, res_table, query))\n job = client.write_to_table(query, res_dataset, res_table, allow_large_results = True, \n create_disposition = 'CREATE_IF_NEEDED', write_disposition = 'WRITE_EMPTY')\n client.wait_for_job(job, timeout)",
"def store_result(result: dict, filepath: str):\n\n raise NotImplementedError",
"def evaluate(self, evaluator: DialogEvaluator, output_path: str = None):\r\n if output_path is not None:\r\n os.makedirs(output_path, exist_ok=True)\r\n return evaluator(self, output_path)",
"def run(self, file_path: str, save_file_path: str, inner_separator: str = None, outer_separator: str = None,\n query_format: QueryFormat = QueryFormat.CROSS_PRODUCT) \\\n -> Dict[int, Dict[str, List[str or Tuple[str, str]]]]:\n\n if query_format is None: query_format = QueryFormat.CROSS_PRODUCT\n\n command_dict, file_type, inner_separator, outer_separator = self.read_file(file_path=file_path,\n inner_separator=inner_separator,\n outer_separator=outer_separator,\n query_format=query_format)\n solution_dict = self.create_solution_dict(command_dict=command_dict, file_type=file_type,\n inner_separator=inner_separator)\n self.save_solution_dict(solution_dict=solution_dict, save_file_path=save_file_path)\n\n return solution_dict",
"def main():\n\n global final_dictionary\n global final_doc_set\n\n input_query = input(\"Please enter query for search: \")\n\n # Retrieving positional inverted index for query terms\n final_dictionary = fetch_dictionary(input_query.lower()) # Query is converted to lowercase as pre-process step\n\n #The final set of document IDs is retrieved below\n fetch_posting_list(input_query)\n sc = tf_idf_score()\n output = fetch_document_contents(input_query, sc)\n print(output)\n output_file = open(RESULT_FILE, 'a')\n output_file.write(output)\n output_file.write('\\n##############################################################\\n')\n output_file.close()\n\n print(\"Query results also appended to file: {0}\".format(RESULT_FILE))",
"def fetch_inspect_data(filename, output, db_url=None):\n r2dt.write_training_data(filename, db_url, output)",
"def __call__(self, result_path=None, log_path=None,\r\n *args, **kwargs):\r\n result = self.getResult(*args, **kwargs)\r\n if log_path:\r\n self.writeLog(log_path)\r\n if result_path:\r\n self.writeResult(result_path, result)\r\n else:\r\n return result",
"def save_evaluations(session, database, bad_evals):\n # Create result directory and go in it\n os.makedirs('result', exist_ok=True)\n os.chdir('result')\n # Create database file\n conn = create_connection(database)\n # Gather all the project ids and names, this is a very very slow process\n projects = get_projects(session)\n\n print(f'saving results in results\\\\{database}...')\n # Save each bad eval in the database file\n for bad_eval in bad_evals:\n # project_name is set here and not in create_bad_eval to make program faster in\n # case there are no bad evaluations. (less requests, less processing)\n bad_eval.project_name = get_project_name(projects, bad_eval.project_id)\n insert_evaluation(conn, bad_eval.sql_tuple())\n del bad_eval\n\n print('results saved !')",
"def eval_result(self, jdata):\n with current_app.app_context():\n out_msg = helpers.file_result(jdata)\n jdata['status_f'] = \"Complete\"\n if jdata['status'] == 2 or jdata['status'] == 3:\n jdata['status_f'] = \"Error\"\n db.session.query(File).filter(File.sha1 == jdata[\"sha1\"]).update({\n File.status_f: jdata['status_f'],\n File.score: jdata['score'],\n File.exec_time: jdata['exec_time'],\n #File.date_b: jdata['server_time'],\n #File.date_b: dtime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3].datetime(),\n File.date_b: dtime.now(),\n File.message: out_msg,\n File.results: json.dumps(jdata)\n #File.results: jdata\n #File.results: {}\n })\n db.session.commit()\n return self.update_state(state='SUCCESS')",
"def evaluate(self) -> None:\n eval_results = {'segmentation': self.evaluate_segmentation()}\n if self.task == 'tracking':\n eval_results['tracking'] = self.evaluate_tracking()\n self.save_result(eval_results)",
"def exec_query_athena(self, query, bucket, output_location):\n response = self.athena_client.start_query_execution(\n QueryString=query,\n ResultConfiguration={\n 'OutputLocation': output_location\n }\n )\n\n execution_id = response['QueryExecutionId']\n result = self.athena_client.get_query_execution(QueryExecutionId=execution_id)\n output_key = (result['QueryExecution']['ResultConfiguration']['OutputLocation']).split('s3://' + bucket + '/')\n\n while result['QueryExecution']['Status']['State'] == 'RUNNING':\n result = self.athena_client.get_query_execution(QueryExecutionId=execution_id)\n\n try:\n obj = self.s3_client.get_object(Bucket=bucket, Key=output_key[1])\n df = pd.read_csv(io.BytesIO(obj['Body'].read()))\n\n # File cleansing on S3\n self.s3_client.delete_object(Bucket=bucket, Key=output_key[1])\n self.s3_client.delete_object(Bucket=bucket, Key=output_key[1] + '.metadata')\n\n except Exception as e:\n df = pd.DataFrame()\n df = df.fillna(0)\n\n\n return df",
"def run(output, path):\n\n # Derive path to dbfile\n dbfile = os.path.join(path, \"articles.sqlite\")\n\n # Stream text from database to file\n Export.stream(dbfile, output)",
"def write_data_to_result_table(self, id_column, path_src,\n path_result, size_of_result_file):\n self.cursor.execute(\"\"\"INSERT INTO result_files\n VALUES('1','2','3','4');\"\"\")",
"def _store_test_result(ptfhost):\n logger.info(\"Copying file from folder: {0} to folder: {1}\".format(\n\t\tSAI_TEST_REPORT_TMP_DIR_ON_PTF, \n\t\tSAI_TEST_REPORT_DIR_ON_PTF))\n ptfhost.shell(\"cp {0}/*.* {1}/\".format(\n\t\tSAI_TEST_REPORT_TMP_DIR_ON_PTF, \n\t\tSAI_TEST_REPORT_DIR_ON_PTF))",
"def dump_evaluation(self, output_path: str) -> None:\n if not self._evaluation_done:\n self.start_evaluation()\n\n file_to_dump = os.path.join(output_path, 'evaluation.json')\n LOGGER.info(f'dump evaluation results to {file_to_dump}')\n with open(file_to_dump, 'w') as fp:\n json.dump(self.eval_dict, fp)"
] | [
"0.6271955",
"0.5941349",
"0.58987206",
"0.5728236",
"0.5728066",
"0.56052387",
"0.5449336",
"0.5394666",
"0.52985036",
"0.52504694",
"0.52476597",
"0.5220453",
"0.5208357",
"0.5203255",
"0.5198881",
"0.51611763",
"0.5131378",
"0.5129826",
"0.51099676",
"0.5095326",
"0.5094822",
"0.5078805",
"0.50747395",
"0.5071826",
"0.50542206",
"0.5034609",
"0.5033531",
"0.49969634",
"0.49927643",
"0.4990137"
] | 0.8222652 | 0 |
Evaluate a string template; replace all queries by their values. Queries in the template are delimited by prefix and sufix. Queries should evaluate to strings and should not cause errors. | def evaluate_template(template: str, prefix="$", sufix="$"):
return get_context().evaluate_template(template, prefix=prefix, sufix=sufix) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def replacements(input_str, query, replace=\"\", num=0):\n check_parentheses = re.findall(\"\\([^()]*\\)\", query)\n check_replacement = re.findall(r\"\\\\[0-9]+\", replace)\n check_replacement = sorted([int(match[1:]) for match in check_replacement])\n if check_replacement and check_replacement[-1] > len(check_parentheses):\n raise AttributeError(\"There are more replacement match values specified than query parenthesized groups\")\n\n if num < 0:\n if check_replacement:\n for indx in sorted(range(check_replacement[-1]), reverse=True):\n indx += 1\n replace = re.sub(r\"\\\\%s\" % indx, r\"\\\\%s\" % (indx + 1), replace)\n right_replace = \"\\\\%s\" % (len(check_replacement) + 2)\n else:\n right_replace = \"\\\\2\"\n leftmost = str(input_str)\n new_str = str(input_str)\n rightmost = \"\"\n hash_to_split_on = \"UPNFSZ7FQ6RBhfFzwt0Cku4Yr1n2VvwVUG7x97G7\"\n for _ in range(abs(num)):\n if leftmost == \"\":\n break\n new_str = re.sub(r\"(.*)%s(.*)\" % query,\n r\"\\1%s%s%s\" % (hash_to_split_on, replace, right_replace), leftmost, 1)\n new_str = new_str.split(hash_to_split_on)\n if len(new_str) == 2:\n leftmost = new_str[0]\n rightmost = new_str[1] + rightmost\n new_str = leftmost + rightmost\n else:\n new_str = leftmost + rightmost\n break\n else:\n new_str = re.sub(query, replace, input_str, num)\n\n return new_str",
"def reformulate_query(s):\n words = tokenize(s)\n tags = [tag for _, tag in pos_tag(words)]\n\n if tags[-1] == '.':\n words.pop()\n\n # what/who questions\n if tags[0] in set(['WP', 'WDT']):\n if tags[1] in set(['VBZ', 'VBD', 'VBP']):\n if tags[-1] is not 'IN':\n exact_query = '{0}\\s*{1}\\s*{2}'.format(' '.join(words[2:]),\n '(?:\\(.*\\))?', words[1])\n inexact_query = '{0} {1}'.format(' '.join(words[2:]), words[1])\n return exact_query, inexact_query\n return s, s",
"def _string_subst_partial(self, val):\n def repl(m):\n k = m.group('id')\n replacement = self.bib_database.strings[k.lower()] if k.lower() in self.bib_database.strings else k\n pre = '\"' if m.group('pre') != '\"' else ''\n post = '\"' if m.group('post') != '\"' else ''\n return pre + replacement + post\n\n logger.debug('Substitute string definitions inside larger expressions')\n if '#' not in val:\n return val\n\n # TODO?: Does not match two subsequent variables or strings, such as \"start\" # foo # bar # \"end\" or \"start\" # \"end\".\n # TODO: Does not support braces instead of quotes, e.g.: {start} # foo # {bar}\n # TODO: Does not support strings like: \"te#s#t\"\n return self.replace_all_re.sub(repl, val)",
"def reparam(string_, dictionary):\n dictionary = dictionary.copy() # eval mucks with it\n # disable builtins to avoid risk for remote code exection.\n dictionary['__builtins__'] = object()\n vals = []\n result = []\n for live, chunk in _interpolate(string_):\n if live:\n v = eval(chunk, dictionary)\n result.append(sqlquote(v))\n else: \n result.append(chunk)\n return SQLQuery.join(result, '')",
"def materialize(template, substitutions):\n\n script_str = template\n for param, value in substitutions.items():\n script_str = re.sub(param, str(value), script_str)\n\n return script_str",
"def update_placeholders(self, format_string, placeholders):\n # Tokenize the format string and process them\n output = []\n for token in self.tokens(format_string):\n if token.group(\"key\") in placeholders:\n output.append(\n \"{{{}{}}}\".format(placeholders[token.group(\"key\")], token.group(\"format\"))\n )\n continue\n elif token.group(\"command\"):\n # update any placeholders used in commands\n commands = parse_qsl(token.group(\"command\"), keep_blank_values=True)\n # placeholders only used in `if`\n if \"if\" in [x[0] for x in commands]:\n items = []\n for key, value in commands:\n if key == \"if\":\n # we have to rebuild from the parts we have\n condition = Condition(value)\n variable = condition.variable\n if variable in placeholders:\n variable = placeholders[variable]\n # negation via `!`\n not_ = \"!\" if not condition.default else \"\"\n condition_ = condition.condition or \"\"\n # if there is no condition then there is no\n # value\n if condition_:\n value_ = condition.value\n else:\n value_ = \"\"\n value = \"{}{}{}{}\".format(not_, variable, condition_, value_)\n if value:\n items.append(f\"{key}={value}\")\n else:\n items.append(key)\n\n # we cannot use urlencode because it will escape things\n # like `!`\n output.append(r\"\\?{} \".format(\"&\".join(items)))\n continue\n value = token.group(0)\n output.append(value)\n return \"\".join(output)",
"def parse_string(\n raw_string: Text,\n variables_mapping: VariablesMapping,\n functions_mapping: FunctionsMapping,\n) -> Any:\n try:\n match_start_position = raw_string.index(\"$\", 0)\n parsed_string = raw_string[0:match_start_position]\n except ValueError:\n parsed_string = raw_string\n return parsed_string\n\n while match_start_position < len(raw_string):\n\n # Notice: notation priority\n # $$ > ${func($a, $b)} > $var\n\n # search $$\n dollar_match = dolloar_regex_compile.match(raw_string, match_start_position)\n if dollar_match:\n match_start_position = dollar_match.end()\n parsed_string += \"$\"\n continue\n\n # search function like ${func($a, $b)}\n func_match = function_regex_compile.match(raw_string, match_start_position)\n if func_match:\n func_name = func_match.group(1)\n func = get_mapping_function(func_name, functions_mapping)\n\n func_params_str = func_match.group(2)\n function_meta = parse_function_params(func_params_str)\n args = function_meta[\"args\"]\n kwargs = function_meta[\"kwargs\"]\n parsed_args = parse_data(args, variables_mapping, functions_mapping)\n parsed_kwargs = parse_data(kwargs, variables_mapping, functions_mapping)\n\n try:\n func_eval_value = func(*parsed_args, **parsed_kwargs)\n except Exception as ex:\n logger.error(\n f\"call function error:\\n\"\n f\"func_name: {func_name}\\n\"\n f\"args: {parsed_args}\\n\"\n f\"kwargs: {parsed_kwargs}\\n\"\n f\"{type(ex).__name__}: {ex}\"\n )\n raise\n\n func_raw_str = \"${\" + func_name + f\"({func_params_str})\" + \"}\"\n if func_raw_str == raw_string:\n # raw_string is a function, e.g. \"${add_one(3)}\", return its eval value directly\n return func_eval_value\n\n # raw_string contains one or many functions, e.g. \"abc${add_one(3)}def\"\n parsed_string += str(func_eval_value)\n match_start_position = func_match.end()\n continue\n\n # search variable like ${var} or $var\n var_match = variable_regex_compile.match(raw_string, match_start_position)\n if var_match:\n var_name = var_match.group(1) or var_match.group(2)\n var_value = get_mapping_variable(var_name, variables_mapping)\n\n if f\"${var_name}\" == raw_string or \"${\" + var_name + \"}\" == raw_string:\n # raw_string is a variable, $var or ${var}, return its value directly\n return var_value\n\n # raw_string contains one or many variables, e.g. \"abc${var}def\"\n parsed_string += str(var_value)\n match_start_position = var_match.end()\n continue\n\n curr_position = match_start_position\n try:\n # find next $ location\n match_start_position = raw_string.index(\"$\", curr_position + 1)\n remain_string = raw_string[curr_position:match_start_position]\n except ValueError:\n remain_string = raw_string[curr_position:]\n # break while loop\n match_start_position = len(raw_string)\n\n parsed_string += remain_string\n\n return parsed_string",
"def expr(s):\n if isinstance(s, Expr): return s\n if isnumber(s): return Expr(s)\n ## Replace the alternative spellings of operators with canonical spellings\n s = s.replace('==>', '>>').replace('<==', '<<')\n s = s.replace('<=>', '%').replace('=/=', '^')\n ## Replace a symbol or number, such as 'P' with 'Expr(\"P\")'\n s = re.sub(r'([a-zA-Z0-9_.]+)', r'Expr(\"\\1\")', s)\n ## Now eval the string. (A security hole; do not use with an adversary.)\n return eval(s, {'Expr':Expr})",
"def evaluate_math(query):\n # Final result\n evaluated_query = []\n\n math_expr = re.compile(r'(\\d+([.]\\d+)*|[+\\-/*])+\\d([.]\\d+)*$')\n\n for q in query:\n if math_expr.match(q):\n evaluated_query += [str(eval(q))]\n else:\n evaluated_query += [q]\n\n return evaluated_query",
"def sql(self):\n rule_specs = []\n\n patterns = {}\n pattern_specs = []\n\n # It's safe to unpack `self.get_rules` because it can only\n # return A) an empty list or B) a list of two-tuples with two elements in\n # them (the path and the rule for each query directive).\n for path, rule in self.rules:\n # Don't parse if this is not a properly registered rule type.\n if not self.is_rule(rule):\n pass\n rule_type = rule['_rule_type']\n sql_tuple = self.sql_generators[rule_type](path, rule)\n if sql_tuple is not None:\n rule_specs.append(sql_tuple)\n\n # The check on 'pattern' here allows us to apply a pattern filter on top of others\n if 'pattern' in rule:\n match_multiple = (rule['_rule_type'] == 'containment_multiple')\n for pattern in self.split_search_pattern(rule['pattern']):\n sql_tuple = FilterTree.text_similarity_filter(path, pattern, match_multiple)\n # add to the list of rules generated for this pattern (one per field)\n patterns.setdefault(pattern, []).append(sql_tuple)\n\n rule_string = ' AND '.join([rule[0] for rule in rule_specs])\n\n pattern_rules = patterns.values()\n pattern_strings = []\n\n # check if any of the fields for this string pattern match\n for rule_list in pattern_rules:\n pattern_strings.append(' OR '.join([rule[0] for rule in rule_list]))\n pattern_specs += rule_list\n\n # check that record has a match for all of the string patterns in some field\n pattern_string = '(' + ') AND ('.join(pattern_strings) + ')' if pattern_strings else ''\n\n if rule_string != '' and pattern_string != '':\n filter_string = '(' + (' AND ('.join([rule_string, pattern_string])) + ')' + ')'\n elif rule_string != '' or pattern_string != '':\n filter_string = '(' + ''.join([rule_string, pattern_string]) + ')'\n else:\n filter_string = ''\n\n # flatten the rule_paths\n rule_paths_first = ([rule[1] for rule in rule_specs] +\n [rule[1] for rule in pattern_specs])\n rule_paths = [item for sublist in rule_paths_first\n for item in sublist]\n\n outcome = (filter_string, tuple(rule_paths))\n return outcome",
"def eval_postfix(s):\n stack = Stack()\n \n s = s.split()\n for i in s:\n \tif operator(i) == False:\n \t\tstack.push(int(i))\n \telse:\n \t\tb = stack.pop()\n \t\ta = stack.pop()\n \t\tresult = evaluate(a, i, b)\n \t\tstack.push(result)\n return stack.pop()",
"def process_query(s):\n query = re.sub(r'[!\\'()|&:\\x00<>]', ' ', s).strip()\n if query:\n query = re.sub(r'\\s+', ' & ', query)\n # Support prefix search on the last word. A tsquery of 'toda:*' will\n # match against any words that start with 'toda', which is good for\n # search-as-you-type.\n query += ':*'\n return query",
"def parser(string, queryset):\n QueryObjects.D = {}\n QueryObjects.B = []\n QueryObjects.IND = 0\n QueryObjects.TEMP_FIELD = None\n\n algebra = boolean.BooleanAlgebra()\n query_list = lexer(string)\n query_string = ' '.join(query_list)\n qs = algebra.parse(query_string)\n\n if QueryObjects.TEMP_FIELD:\n queryset = queryset.annotate(**QueryObjects.TEMP_FIELD)\n QueryObjects.TEMP_FIELD = None\n\n locals().update(QueryObjects.D.items())\n query = str(qs)\n query = eval(query)\n queryset = queryset.filter(query)\n return queryset",
"def FillForm(string_for_substitution, dictionary_of_vars):\n return_string = string_for_substitution\n for i in re.findall(\"//%%(.*)%%//\", string_for_substitution):\n return_string = re.sub(\"//%%\" + i + \"%%//\", dictionary_of_vars[i],\n return_string)\n return return_string",
"def replacevals(self, stmt):\n if 'materialize' in stmt:\n stmt = self.process_materialize(stmt)\n if 'listagg' in stmt:\n stmt = process_aggregates(stmt)\n if 'select USER, table_name' in stmt and stmt.count('UNION') == 3:\n return \"select user,table_name,preference from ingest_test\"\n if '.nextval from dual' in stmt and 'connect by' in stmt:\n self.num = int(stmt[stmt.rfind('<') + 1:])\n return None\n for k, v in self.repl.items():\n stmt = stmt.replace(k, v)\n return stmt",
"def evaluate_infix(string):\n return postfix(infix_to_postfix(string))",
"def interpolate_replacements(text, expr_replacements):\n if not expr_replacements:\n return [text]\n\n replacement_strings = list(expr_replacements.keys())\n splitter = re.compile(\n \"({0})\".format(\"|\".join(re.escape(r) for r in replacement_strings))\n )\n split_text = [p for p in splitter.split(text) if p]\n return [expr_replacements.get(t, t) for t in split_text]",
"def preprocess_query(self, input_ids, prefix):\n\n input_strings = self.generator_tokenizer.batch_decode(input_ids, skip_special_tokens=False)\n\n # handle prefix for T5\n if isinstance(self.generator_tokenizer, T5Tokenizer):\n for i, s in enumerate(input_strings):\n if not s.startswith(prefix):\n logger.warning(\"T5 prefix mismatch in {}\".format(s))\n if len(input_strings[i]) <= len(prefix):\n input_strings[i] = \"\"\n else:\n input_strings[i] = input_strings[i][len(prefix) :]\n\n retriever_inputs = self.question_encoder_tokenizer.batch_encode_plus(\n input_strings,\n return_tensors=\"pt\",\n padding=True,\n truncation=True,\n )\n\n return retriever_inputs[\"input_ids\"].to(input_ids.device), input_strings",
"def string_strip(string, isql=True, remove_space=True):\n if not string:\n return string\n if isql:\n for regex in isqlsubs:\n string = re.sub(regex, \"\", string)\n for pattern, replacement in self.substitutions:\n string= re.compile(pattern, re.M).sub(replacement, string)\n if remove_space:\n string = space_strip(string)\n return string",
"def execute_all_templated(self, conn, query, *values, system=False):\n query_filled = query.format(*values)\n self.last_query=query_filled if not system else self.last_query\n queries = [item for item in query_filled.split(\";\") if item != \"\"]\n results = []\n if len(queries) > 1 and not system:\n self.health = Health.SICK\n self.message = \"SQL injection detected\"\n for query in queries:\n response = conn.execute(query + \";\")\n results.extend([item for item in response])\n return results",
"def _parse_string(\n value_expr: str, target_expr: str, ref_parts: List[str],\n a_type: mapry.String, auto_id: mapry.py.generate.AutoID) -> str:\n uid = auto_id.next_identifier()\n\n return _PARSE_STRING_TPL.render(\n uid=uid,\n value_expr=value_expr,\n ref_parts=ref_parts,\n target_expr=target_expr,\n a_type=a_type).rstrip(\"\\n\")",
"def subst(self, value, filter=None):\n\n if isinstance(value, Literal):\n return value._value\n elif isinstance(value, tuple):\n return tuple(self.subst(i, filter) for i in value)\n elif isinstance(value, list):\n return list(self.subst(i, filter) for i in value)\n elif isinstance(value, dict):\n return {i: self.subst(value[i], filter) for i in value}\n elif isinstance(value, StringTypes):\n def subfn(mo):\n var = mo.group(0)\n\n if var == \"$$\":\n return \"$\"\n\n # Apply variable filters\n parts = var[2:-1].split(\"|\")\n value = self.evaluate(parts[0])\n\n if len(parts) > 1:\n # Filters supplied directly\n for part in parts[1:]:\n if len(part) == 0:\n # Empty filter can be used to disable auto filter\n continue\n else:\n value = self.callfilter(part, value)\n elif filter:\n # Use auto-filter if specified\n for part in filter.split(\"|\"):\n value = self.callfilter(part, value)\n\n return value\n return re.sub(r\"\\$\\$|\\$\\(.*?\\)\", subfn, value)\n else:\n return value",
"def value(self, vars={}):\n self.__validateTemplateVariables(vars)\n\n # resolving variables values\n resolvedTemplate = self.inputString()\n for varName, varValue in vars.items():\n resolvedTemplate = resolvedTemplate.replace(\n ('{' + varName + '}'),\n self.__escapeTemplateTokens(varValue)\n )\n\n # resolving function values\n finalResolvedTemplate = \"\"\n for templatePart in resolvedTemplate.split(\"(\"):\n\n endIndex = templatePart.find(')')\n if endIndex != -1:\n\n # processing the expression only when it has not been\n # evaluated yet, otherwise return it from the cache.\n # Potentially we could add support for \"<expression>\" rather\n # than \"(expression)\" to tell to avoid this cache. However, the\n # default behaviour should be to always cache it (never change it)\n # otherwise it could side effect in expressions that create\n # new versions...\n rawExpression = templatePart[:endIndex]\n\n # this is a special token that allows to pass the parent path\n # to an expression, replacing it with the parent path at this point.\n rawExpression = rawExpression.replace(\n \"<parentPath>\",\n self.__escapeTemplateTokens(finalResolvedTemplate.replace(\"/!\", \"/\"), 0)\n )\n\n if rawExpression not in self.__expressionValueCache:\n # replacing any reserved token from the result of the expression\n self.__expressionValueCache[rawExpression] = self.__escapeTemplateTokens(\n ExpressionEvaluator.parseRun(\n rawExpression\n )\n )\n\n expressionValue = self.__expressionValueCache[rawExpression]\n finalResolvedTemplate += expressionValue + templatePart[endIndex + 1:]\n else:\n finalResolvedTemplate += templatePart\n\n # resolving required path levels\n if \"/!\" in finalResolvedTemplate:\n finalPath = []\n for pathLevel in self.__escapeTemplateTokens(finalResolvedTemplate, 0).split(os.sep):\n if pathLevel.startswith(\"!\"):\n finalPath.append(pathLevel[1:])\n resolvedPath = os.sep.join(finalPath)\n if not os.path.exists(resolvedPath):\n raise RequiredPathNotFoundError(\n 'Template contains a path marked as required:\\n\"{0}\"\\n\\nThis error is caused because the target path does not exist in the file system:\\n{1}'.format(\n pathLevel,\n resolvedPath\n )\n )\n\n else:\n finalPath.append(pathLevel)\n finalResolvedTemplate = os.sep.join(finalPath)\n\n # restoring all the espaped tokens to the original value\n finalResolvedTemplate = self.__escapeTemplateTokens(finalResolvedTemplate, 0)\n\n return finalResolvedTemplate",
"def substitute(self):\n\n n_chars = len(self.literal)\n term = ['' for i in range(n_chars)]\n\n for i in range(n_chars):\n if self.literal[i] in self.bindings:\n term[i] = self.bindings[self.literal[i]]\n else:\n term[i] = self.literal[i]\n\n return (''.join(term))",
"def query_from_strings(\n query_cls: Type[query.CollectionQuery],\n model_cls: Type[Model],\n prefixes: Dict,\n query_parts: Collection[str],\n) -> query.Query:\n subqueries = []\n for part in query_parts:\n subqueries.append(construct_query_part(model_cls, prefixes, part))\n if not subqueries: # No terms in query.\n subqueries = [query.TrueQuery()]\n return query_cls(subqueries)",
"def lexer(string): # TODO: refactor\n parsedlist = []\n parsedstring = ''\n leftbcounter = 0\n rightbcounter = 0\n qcounter = 0\n for index, a in enumerate(string):\n if qcounter == 2:\n if a.isalpha():\n qcounter = 1\n else:\n qcounter = 0\n if a == '(':\n leftbcounter += 1\n if a == ')':\n rightbcounter += 1\n if a == \"'\" and leftbcounter == rightbcounter:\n qcounter += 1\n if a != ' ' and leftbcounter == rightbcounter \\\n and qcounter == 0:\n parsedstring += a\n if index+1 == len(string):\n parsedlist.append(parsedstring)\n parsedstring = ''\n elif leftbcounter != rightbcounter:\n parsedstring += a\n elif qcounter > 0:\n parsedstring += a\n if index+1 == len(string):\n parsedlist.append(parsedstring)\n parsedstring = ''\n else:\n parsedlist.append(parsedstring)\n parsedstring = ''\n if leftbcounter != rightbcounter:\n raise BadRequest()\n bl = []\n sl = []\n counter = 0\n for index, query in enumerate(parsedlist, 1):\n if query == \"and\" or query == \"or\" or query == \"not\":\n if sl:\n bl.append(sl)\n bl.append([query])\n counter = 0\n sl = []\n continue\n sl.append(query)\n counter += 1\n if index == len(parsedlist) and sl:\n bl.append(sl)\n # i later added a third nested list to seperate AND and OR\n query_list = []\n al = []\n counter = 0\n for index, grouped_query in enumerate(bl, 1):\n if grouped_query[0] == \"or\":\n query_list.append(al)\n query_list.append([grouped_query])\n counter = 0\n al = []\n continue\n al.append(grouped_query)\n counter += 1\n if index == len(bl):\n query_list.append(al)\n\n for x in query_list:\n for y in x:\n if y[0] == 'and' or y[0] == 'or' or y[0] == 'not':\n QueryObjects.B.append(y[0])\n continue\n if y[0][0] == '(' and y[0][-1] == ')':\n QueryObjects.B.append(y[0][0])\n lexer(y[0][1:-1])\n QueryObjects.B.append(y[0][-1])\n else:\n QueryObjects.IND += 1\n n = 'arg' + str(QueryObjects.IND)\n QueryObjects.D[n] = query_mapping(y, QueryObjects.IND)[\"query\"]\n QueryObjects.B.append(n)\n return QueryObjects.B",
"def postfix_eval(input_str):\n\n \"\"\"Input argument: a string containing a postfix expression where tokens \n are space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns the result of the expression evaluation. \n Raises an PostfixFormatException if the input is not well-formed\"\"\"\n if input_str is None: raise PostfixFormatException\n # create list of operands and operators\n term_list = input_str.split()\n # initialize stack large enough to contain all operands\n operand_stack = Stack(2*len(term_list)//3+1)\n # iterate over term_list\n for term in term_list:\n # check for operatorm, evaluate operators on A & B if True\n if operator_present(term) is True:\n if operand_stack.size()<2: \n raise PostfixFormatException(\"Insufficient operands\")\n B = operand_stack.pop()\n A = operand_stack.pop()\n operand_stack.push(\n calculate(\n A, # A\n B, # B\n term) # operator\n )\n # check for operand, push to stack if True\n elif operand_present(term) is True:\n operand_stack.push(term)\n else: raise PostfixFormatException(\"Invalid token\")\n if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n return operand_stack.pop()",
"def infix_to_postfix(input_str): # postfix requires that all operators proceed after the two operands that they work on\n\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n if input_str is None: raise ValueError\n # Split input string\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # Create output list, will be fed to postfix_eval() at end\n output_list = []\n # initialize stack large enough to contain all operators\n operator_stack = Stack(len(term_list)//3+1)\n for term in term_list:\n # check for operand, if present append to output list\n if operand_present(term) is True:\n output_list.append(term)\n # check for operator\n elif operator_present(term) or term == '(' or term == ')':\n #if operand_stack.size()<2: \n # raise PostfixFormatException(\"Insufficient operands\")\n # Check for open parentheses\n if term == '(': operator_stack.push(term)\n # Check for closing parentheses, pop stack until open parentheses found\n elif term == ')':\n while 1:\n token = operator_stack.pop()\n if token != '(': \n output_list.append(token)\n else: break\n # Otherwise push to stack but pop any higher/equal order operators\n else:\n sort_operators(term, operator_stack, output_list)\n #print(operator_stack.peek())\n #else: raise PostfixFormatException(\"Invalid token\")\n #if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str",
"def substitute(self, field_values):\n\n\t\tdef substituteFunc(mo):\n\t\t\tname = mo.group('braced')\n\t\t\tif name is not None:\n\t\t\t\tif name in field_values:\n\t\t\t\t\treturn str(field_values[name])\n\t\t\t\telse:\n\t\t\t\t\treturn self._DELIMITER + '{' + name + '}'\n\n\t\t\t# regexp could catch either 'braced' or 'escaped' substring\n\t\t\t# if it is not 'braced', it is 'escaped'\n\t\t\treturn self._DELIMITER\n\n\t\treturn self._PATTERN.sub(substituteFunc, self._template)",
"def prefix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n return (\"\")\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n track = len(split_list) - 1\n while track >= 0:\n new_val = split_list[track].lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit():\n stack.push(split_list[track])\n track = track - 1\n elif split_list[track] in op_list:\n first = stack.pop()\n second = stack.pop()\n stack.push(first + \" \" + second + \" \" + split_list[track])\n track = track - 1\n else:\n break\n postfix = stack.pop()\n return postfix"
] | [
"0.59523755",
"0.57478",
"0.564346",
"0.56363636",
"0.5548136",
"0.54896957",
"0.5392804",
"0.534426",
"0.53307384",
"0.5292386",
"0.5263303",
"0.52400285",
"0.52348495",
"0.52257264",
"0.52243674",
"0.52145886",
"0.5206932",
"0.5192216",
"0.51901",
"0.515437",
"0.51394385",
"0.5136424",
"0.51330763",
"0.5130507",
"0.51181936",
"0.5103848",
"0.5100793",
"0.5097449",
"0.50972635",
"0.5089299"
] | 0.65676814 | 0 |
Iterator. Return opcode, data, pc, and new_pc at each step. | def get_opcodes(self, script, verify_minimal_data=False, pc=0):
while pc < len(script):
opcode, data, new_pc, is_ok = self.scriptStreamer.get_opcode(
script, pc, verify_minimal_data=verify_minimal_data)
yield opcode, data, pc, new_pc
pc = new_pc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __iter__(self):\n for i in self.ref:\n yield PythonBytecodeInPreproc(i)",
"def __iter__(self) -> Iterator[packets.Packet]:\n for packet in self._packets:\n yield packet\n for pointer in self._packet_pointers:\n yield pointer.get()",
"def instruction_iter(self):\n for ins in self.instructions:\n yield ins",
"def items():\n for i in self._iter_restrict(zeros, ones):\n yield self.pcdata[i]",
"def udp_iterator(pc):\n\tfor time,pkt in pc:\n\t\teth = dpkt.ethernet.Ethernet(pkt)\n\t\tif eth.type == dpkt.ethernet.ETH_TYPE_IP:\n\t\t ip = eth.data\n\t\t # if the IP protocol is UDP, process it further\n\t\t if ip.p == dpkt.ip.IP_PROTO_UDP :\n\t\t\tudp = ip.data\n\t\t\tyield( ip.src, udp.sport, ip.dst, udp.dport, udp.data )",
"def _next(self):\n i = 0\n while i < self.size:\n yield self.data[i]\n i += 1",
"def items():\n sub = {PC_ZERO: PC_ONE, PC_ONE: PC_ZERO, PC_DC: PC_DC}\n for item in self.pcdata:\n yield sub[item]",
"def __iter__(self):\n yield from self.qc_mol\n yield from self.br_mol\n yield from self.pc_mol",
"def pd_iterate(env: Environment, func: Block) -> Tuple[List[PdObject], PdObject]:\n acc: List[PdObject] = []\n seen: Set[PdKey] = set()\n while True:\n obj = env.peek()\n key = pykey(obj)\n if key in seen:\n env.pop()\n return (acc, obj)\n\n acc.append(obj)\n seen.add(key)\n func(env)",
"def next():",
"def next():",
"def __iter__(self):\n cursor = self._front\n while not cursor is None:\n yield cursor.data\n cursor = cursor.next",
"def nextIter(self):\n\t\tpass",
"def runIntcode(program):\n\n pc = 0\n\n while program[pc] != 99:\n command = program[pc]\n reg1 = program[program[pc + 1]]\n reg2 = program[program[pc + 2]]\n dest = program[pc + 3]\n\n if command == 1:\n print (pc, \" (add) \", reg1, \" \", reg2, \" -> \", dest)\n program[dest] = reg1 + reg2\n\n if command == 2:\n print (pc, \" (mul) \", reg1, \" \", reg2, \" -> \", dest)\n program[dest] = reg1 * reg2\n\n pc = pc + 4\n\n return program",
"def __next__(self):\n return [next(self.vdc)] + next(self.S)",
"def __next__(self) -> dict:\n batches = {}\n terminations = 0\n for iterator in self.iterators:\n \n try:\n data, target = next(iterator)\n batches[data.location] = (data, target)\n\n except (TypeError, AttributeError) as e:\n logging.warning(f\"Dangling pointer detected! Skipping operation... Error: {e}\")\n \n except StopIteration:\n terminations += 1\n\n # Every cached iterator has been iterated through completely\n if terminations == len(self.iterators):\n raise StopIteration\n\n return batches",
"def step(self):\n start_pc = self.pc\n decoded_opcode = self.decode_opcode(self.eat_pc(True))\n print('Step from PC=%d (%s)' % (start_pc, decoded_opcode))\n opcode = decoded_opcode[0]\n if 1 == opcode or 2 == opcode:\n self.add_multiply_instruction(decoded_opcode)\n elif 3 == opcode:\n self.input_instruction(decoded_opcode)\n elif 4 == opcode:\n self.output_instruction(decoded_opcode)\n elif 5 == opcode or 6 == opcode:\n self.jump_instruction(decoded_opcode)\n elif 7 == opcode or 8 == opcode:\n self.compare_instruction(decoded_opcode)\n elif 99 == opcode:\n self.halt_instruction(decoded_opcode)\n else:\n print('Unknown opcode: ', opcode)\n raise AssertionError('!')",
"def iterator(self):\n yield",
"def __iter__(self):\n for b in self.dl: \n yield to_device(b, self.device) # yield pauses the execution, not store values in memory, forgets about them once iterated\n # no need to remove batch of data from device, done automatically",
"def process(self, data_itr):\n for data in data_itr:\n self.update(data)\n while True:\n try:\n out = self.next()\n yield out\n except StopIteration:\n break",
"def __iter__(self):\r\n self.pointer = 0\r\n return self",
"def __iter__(self):\n # return self.options[:self.idx] + self.options[self.idx:]\n for op in self.queue():\n yield op",
"def enumerate(self):\n # go through the container and tile in sync\n for index, value in zip(self.tile, self.data):\n # hand the {index} and the corresponding value to the caller\n yield index, value\n # all done\n return",
"def __iter__(self):\n self.current = self.start\n self.returned = 0\n return self",
"def step(self):\r\n if self.pc < 0 or self.pc >= len(self.instructions):\r\n raise Exception('Index out of bounds for pc.')\r\n ins, x, *optionals = self.instructions[self.pc].split()\r\n y = optionals[0] if len(optionals) > 0 else None\r\n if ins == 'set':\r\n self.registers[x] = self._get_value(y)\r\n elif ins == 'sub':\r\n self.registers[x] -= self._get_value(y)\r\n elif ins == 'mul':\r\n self.registers[x] *= self._get_value(y)\r\n self.mul_counter += 1\r\n elif ins == 'jnz':\r\n if self._get_value(x) != 0:\r\n # -1 since we always add 1 to self.pc later in the method.\r\n self.pc += self._get_value(y) - 1\r\n else:\r\n raise Exception('Unable to parse instruction: ' + self.instructions[self.pc])\r\n self.pc += 1",
"def __iter__(self):\n cur = self.head\n while cur is not None:\n yield cur.data\n cur = cur.next",
"def items(self):\n cdef sym.SymbolTableIterator* it = new sym.SymbolTableIterator(self.table[0])\n try:\n while not it.Done():\n yield (it.Symbol().decode('utf8'), it.Value())\n it.Next()\n finally:\n del it",
"def __iter__(self):\n return zip(self._phases, self.data)",
"def __iter__(self):\n for i in self.loopindices:\n pid = self.frametracks.particle.values[i]\n yield pid, self.neighbors(pid)",
"def read_pc(self):\n\n self.pc = self.read_stack() << 8 | self.read_stack()"
] | [
"0.5994231",
"0.5834547",
"0.5778902",
"0.56972027",
"0.56266713",
"0.55741405",
"0.5524681",
"0.5516168",
"0.53814715",
"0.5375429",
"0.5375429",
"0.53654444",
"0.53418607",
"0.53404146",
"0.53236276",
"0.5272017",
"0.523277",
"0.52307564",
"0.52223295",
"0.5202266",
"0.51867974",
"0.5174437",
"0.5167858",
"0.5153779",
"0.5147669",
"0.51375026",
"0.51362836",
"0.5134577",
"0.51119494",
"0.51050204"
] | 0.6933305 | 0 |
Disassemble the given script. Returns a list of opcodes. | def opcode_list(self, script):
opcodes = []
new_pc = 0
try:
for opcode, data, pc, new_pc in self.get_opcodes(script):
opcodes.append(self.disassemble_for_opcode_data(opcode, data))
except ScriptError:
opcodes.append(binascii.hexlify(script[new_pc:]).decode("utf8"))
return opcodes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def disassemble(self, script):\n return ' '.join(self.opcode_list(script))",
"def dis_to_instructions(disasm):\n\tline_num = None\n\tinstructions = []\n\tfor line in disasm.split(\"\\n\"):\n\t\tmatch = re.search(\n\t\t\tr\"( ?(?P<line_num>\\d+)[ >]+)?(?P<offset>\\d+) (?P<opname>[A-Z_]+)(?:\\s+(?P<arg>\\d+)(?: \\((?P<argval>.+)\\))?)?\",\n\t\t\tline\n\t\t)\n\t\tif match is not None:\n\t\t\tif match[\"line_num\"]:\n\t\t\t\tline_num = int(match[\"line_num\"])\n\t\t\toffset = int(match[\"offset\"])\n\t\t\topname = match[\"opname\"]\n\t\t\tif match[\"arg\"] is not None:\n\t\t\t\targ = int(match[\"arg\"])\n\t\t\telse:\n\t\t\t\targ = None\n\t\t\tif opname == \"EXTENDED_ARG\":\n\t\t\t\tcontinue\n\t\t\targval = match[\"argval\"]\n\t\t\tinstructions.append(Instruction(line_num, offset, opname, arg, argval))\n\treturn instructions",
"def dis_all(self, code, address, count=0):\n dis_gen = self.cs.disasm(code, address, count=count)\n return [asm for asm in dis_gen]",
"def do_disassemble(self, args):\n if len(args) != 0:\n args = args.split(' ')\n self.u_start = self.ParseAddressExpr(args[0])\n self.u_size = self.ParseAddressExpr(args[1]) if len(args) > 1 else 0x20\n skip = False\n else:\n # Skip the first instruction if we reuse the last address.\n skip = True\n\n if not self.reader.IsValidAddress(self.u_start):\n print(\"Address %s is not contained within the minidump!\" % (\n self.reader.FormatIntPtr(self.u_start)))\n return\n lines = self.reader.GetDisasmLines(self.u_start, self.u_size)\n if len(lines) == 0:\n print(\"Address %s could not be disassembled!\" % (\n self.reader.FormatIntPtr(self.u_start)))\n print(\" Could not disassemble using %s.\" % OBJDUMP_BIN)\n print(\" Pass path to architecture specific objdump via --objdump?\")\n return\n for line in lines:\n if skip:\n skip = False\n continue\n print(FormatDisasmLine(self.u_start, self.heap, line))\n # Set the next start address = last line\n self.u_start += lines[-1][0]\n print()",
"def dis_lite_all(self, code, address):\n dis_gen = self.cs.disasm_lite(code, address)\n return [asm for asm in dis_gen]",
"def dis(x=None):\n result = []\n if x is None:\n distb()\n return\n if isinstance(x, types.InstanceType):\n x = x.__class__\n if hasattr(x, 'im_func'):\n x = x.im_func\n if hasattr(x, 'func_code'):\n x = x.func_code\n if hasattr(x, '__dict__'):\n items = x.__dict__.items()\n items.sort()\n for name, x1 in items:\n if isinstance(x1, _have_code):\n result.append(\"Disassembly of %s:\" % name)\n try:\n dis(x1)\n except TypeError, msg:\n result.append(\"Sorry:\", msg)\n result.append('\\n')\n elif hasattr(x, 'co_code'):\n result.extend(disassemble(x))\n elif isinstance(x, str):\n result.extend(disassemble_string(x))\n else:\n raise TypeError, \\\n \"don't know how to disassemble %s objects\" % \\\n type(x).__name__\n return result",
"def dis(self, code, address, count=0):\n dis_gen = self.cs.disasm(code, address, count=count)\n return dis_gen",
"def assemble(\n code: str, base_address: int = DISASSEMBLY_DEFAULT_BASE_ADDRESS\n) -> list[Instruction]:\n arch = cemu.core.context.architecture\n\n #\n # Compile the entire given code\n #\n bytecode, assembled_insn_count = arch.ks.asm(code, as_bytes=True, addr=base_address)\n if not bytecode or assembled_insn_count == 0:\n raise cemu.errors.AssemblyException(\"Not instruction compiled\")\n\n assert isinstance(bytecode, bytes)\n\n #\n # Decompile it and return the stuff\n #\n insns = disassemble(bytecode, base=base_address)\n dbg(f\"{insns=}\")\n return insns",
"def get_instructions(asm_file):\n instructions = []\n with open(asm_file, 'r') as f:\n \n for line in f:\n instruction = line.split('\\n')\n instruction = remove_comment_white_space(instruction)\n instructions.append(instruction)\n \n return instructions",
"def decompile(*args):\n return _ida_hexrays.decompile(*args)",
"def disassemble(co):\n code = co.co_code\n n = len(code)\n i = 0\n extended_arg = 0\n result = []\n while i < n:\n op = code[i]\n\n curi = i\n i = i+1\n if op >= dis.HAVE_ARGUMENT:\n # Python 2\n # oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg\n oparg = code[i] + code[i+1] * 256 + extended_arg\n extended_arg = 0\n i = i+2\n if op == dis.EXTENDED_ARG:\n # Python 2\n #extended_arg = oparg*65536L\n extended_arg = oparg*65536\n else:\n oparg = None\n\n # print(opcode.opname[op])\n\n opv = globals()[opcode.opname[op].replace('+', '_')](co, curi, i, op, oparg)\n\n result.append(opv)\n\n return result",
"def disassemble(\n raw_data: bytes, count: int = -1, base: int = DISASSEMBLY_DEFAULT_BASE_ADDRESS\n) -> list[Instruction]:\n arch = cemu.core.context.architecture\n insns: list[Instruction] = []\n for idx, ins in enumerate(arch.cs.disasm(raw_data, base)):\n insn = Instruction(ins.address, ins.mnemonic, ins.op_str, ins.bytes)\n insns.append(insn)\n if idx == count:\n break\n\n dbg(f\"{insns=}\")\n return insns",
"def getDisassembledCode(self,filename, delimeter='\\n', bits='32bit'):\n iterable = self.diassemble(filename,bits)\n\n assembly_code = ''\n for i in iterable:\n # To avoid TypeError: a bytes-like object is required, not 'str'\n (address, mnemonic, op_str) = i.address, i.mnemonic, i.op_str\n assembly_code += (\"0x%s:\\t%s\\t%s\"% (address, mnemonic, op_str))+delimeter\n\n #print(\"filename:\" + filename + \"\\ncode:\\n\" + assembly_code)\n return assembly_code",
"def _decompile(*args):\n return _ida_hexrays._decompile(*args)",
"def recursive_disassemble(self, start_addresses):\n self.instr_queue = deque()\n for address in start_addresses:\n self.instr_queue.append(address)\n\n if self.use_trace_file == True:\n self.insert_branches_from_trace_file(self.instr_queue)\n limit = 0\n disassembled = 0\n while len(self.instr_queue) > 0:\n instr_addr = self.instr_queue.popleft()\n if self.instr_graph.is_analysed(instr_addr) == True or self.mem_dump.is_it_in_module(instr_addr) == False:\n continue\n disassembled += 1\n instr = RPE_instruction()\n buf = self.mem_dump.read_memory(instr_addr, 15)\n try:\n dec_instr = self.instr_decoder.disasm(buf, instr_addr).next()\n except StopIteration:\n continue\n\n instr.address = instr_addr\n instr.decoded_instr = dec_instr\n self.instr_graph.add_node(instr)\n branch_dsts = self.get_branch_destinations(instr)\n for dst_addr in branch_dsts:\n self.instr_graph.add_edge(instr, dst_addr, 3)\n self.instr_queue.append(dst_addr)\n\n self.handle_branch_destination(instr)\n self.handle_memory_reads(instr)\n\n print 'Disassembled in total: ' + `disassembled`\n print 'number of nodes in graph: ' + `(len(self.instr_graph.graph))`\n print 'Instructions to relocate: ' + `(len(self.instructions_to_relocate))`\n print 'Number of functions in import table: ' + `(self.import_table.number_of_functions())`\n relocate_set = set(self.instructions_to_relocate)\n print 'Size of set to relocate: ' + `(len(relocate_set))`\n #for instr_address in relocate_set:\n # print '%x\\t' % instr_address",
"def disassemble(self, data, start_address=0):\n return _opcodes.disassemble(self._ptr, data, start_address)",
"def reverse(script):\n\n offset = 0\n RCES = [] # Reversed, compressed edit script\n\n for edit in script:\n command, start, stop = edit\n if command == \"i\":\n RCES.append( ('d', start + offset, stop + offset) )\n offset = offset - (stop - start) - 1\n elif command == \"d\":\n RCES.append( ('i', start + offset, stop + offset) )\n offset = offset + (stop - start) + 1\n else:\n raise \"Unknown Command\", \"Unexpected command %s\" % command\n\n return RCES",
"def disassemble_instruction(self, instruction):\n if not util.is_integer(instruction):\n raise TypeError('Expected instruction to be an integer.')\n\n buf_size = self.MAX_BUF_SIZE\n buf = (ctypes.c_char * buf_size)()\n res = self._dll.JLINKARM_DisassembleInst(ctypes.byref(buf), buf_size, instruction)\n if res < 0:\n raise errors.JLinkException('Failed to disassemble instruction.')\n\n return ctypes.string_at(buf).decode()",
"def getDisassembledCode(self,filename, delimeter='\\n', bits='32bit'):\n\n return self.diassemble(filename,bits).replace(\"\\n\",delimeter)",
"def disassemble(co, lasti=-1):\n result = []\n code = co.co_code\n labels = findlabels(code)\n linestarts = dict(findlinestarts(co))\n n = len(code)\n i = 0\n extended_arg = 0\n free = None\n while i < n:\n c = code[i]\n op = ord(c)\n if i in linestarts:\n if i > 0:\n result.append()\n result.append(\"%3d\" % linestarts[i], )\n else:\n result.append(' ', )\n\n if i == lasti:\n result.append('-->', )\n else:\n result.append(' ', )\n if i in labels:\n result.append('>>', )\n else:\n result.append(' ', )\n result.append(repr(i).rjust(4), )\n result.append(opname[op].ljust(20), )\n i = i + 1\n if op >= HAVE_ARGUMENT:\n oparg = ord(code[i]) + ord(code[i + 1]) * 256 + extended_arg\n extended_arg = 0\n i = i + 2\n try:\n if op == EXTENDED_ARG:\n extended_arg = oparg * 65536L\n result.append(repr(oparg).rjust(5), )\n if op in hasconst:\n result.append('(' + repr(co.co_consts[oparg]) + ')', )\n elif op in hasname:\n result.append('(' + co.co_names[oparg] + ')', )\n elif op in hasjrel:\n result.append('(to ' + repr(i + oparg) + ')', )\n elif op in haslocal:\n result.append('(' + co.co_varnames[oparg] + ')', )\n elif op in hascompare:\n result.append('(' + cmp_op[oparg] + ')', )\n elif op in hasfree:\n if free is None:\n free = co.co_cellvars + co.co_freevars\n result.append('(' + free[oparg] + ')', )\n except KeyError:\n result.append(oparg)\n result.append()\n return result",
"def disasm_dump(bin, addr):\n return cache.access((bin,addr), lambda x: disasm_work(*x))",
"def dis(x=None):\n if x is None:\n distb()\n return\n if isinstance(x, types.InstanceType):\n x = x.__class__\n if hasattr(x, 'im_func'):\n x = x.im_func\n if hasattr(x, 'func_code'):\n x = x.func_code\n if hasattr(x, '__dict__'):\n items = x.__dict__.items()\n items.sort()\n for name, x1 in items:\n if isinstance(x1, _have_code):\n print \"Disassembly of %s:\" % name\n try:\n dis(x1)\n except TypeError, msg:\n print \"Sorry:\", msg\n print\n elif hasattr(x, 'co_code'):\n disassemble(x)\n elif isinstance(x, str):\n disassemble_string(x)\n else:\n raise TypeError, \\\n \"don't know how to disassemble %s objects\" % \\\n type(x).__name__",
"def get_disasm_line( ea ):\r\n\top1 = ua_outop2( ea, 0, 0 )\t\r\n\top2 = ua_outop2( ea, 1, 0 )\r\n\top3 = ua_outop2( ea, 2, 0 )\r\n\tif op1 == None:\r\n\t\top1 = \"\"\r\n\telse:\r\n\t\top1 = idaline_to_string( op1 )\r\n\tif op2 == None:\r\n\t\top2 = \"\"\r\n\telse:\r\n\t\top2 = idaline_to_string( op2 )\r\n\tif op3 == None:\r\n\t\top3 = \"\"\r\n\telse:\r\n\t\top3 = idaline_to_string( op3 )\r\n\tret = [ ea, ua_mnem( ea ), op1, op2, op3 ]\r\n\treturn ret",
"def get_opcodes(self, script, verify_minimal_data=False, pc=0):\n while pc < len(script):\n opcode, data, new_pc, is_ok = self.scriptStreamer.get_opcode(\n script, pc, verify_minimal_data=verify_minimal_data)\n yield opcode, data, pc, new_pc\n pc = new_pc",
"def decompile_many(*args):\n return _ida_hexrays.decompile_many(*args)",
"def get_insn_disasm(insn):\n\tglobal RE_SPACES\n\tdisasm = idc.GetDisasmEx(insn.ea, idc.GENDSM_FORCE_CODE)\n\treturn RE_SPACES.sub(' ', disasm.replace('\\t', ' '))",
"def disassemble_code(self, dso_path, start_addr, addr_len):\n # 1. Find real path.\n real_path = find_real_dso_path(dso_path, self.binary_cache_path)\n if real_path is None:\n return None\n\n # 2. Get path of objdump.\n arch = self.readelf.get_arch(real_path)\n if arch == 'unknown':\n return None\n objdump_path = self.objdump_paths.get(arch)\n if not objdump_path:\n objdump_path = find_tool_path('objdump', self.ndk_path, arch)\n if not objdump_path:\n log_exit(\"Can't find objdump. Please set ndk path with --ndk_path option.\")\n self.objdump_paths[arch] = objdump_path\n\n # 3. Run objdump.\n args = [objdump_path, '-dlC', '--no-show-raw-insn',\n '--start-address=0x%x' % start_addr,\n '--stop-address=0x%x' % (start_addr + addr_len),\n real_path]\n try:\n subproc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n (stdoutdata, _) = subproc.communicate()\n stdoutdata = bytes_to_str(stdoutdata)\n except:\n return None\n\n if not stdoutdata:\n return None\n result = []\n for line in stdoutdata.split('\\n'):\n line = line.rstrip() # Remove '\\r' on Windows.\n items = line.split(':', 1)\n try:\n addr = int(items[0], 16)\n except ValueError:\n addr = 0\n result.append((line, addr))\n return result",
"def extract(input_data: str) -> list:\n instructions = list()\n for instruction in input_data.split('\\n'):\n op, arg = instruction.split(' ')\n arg = int(arg)\n assert op in ('acc', 'jmp', 'nop')\n instructions.append(Instruction(op, arg))\n return instructions",
"def render_disassembly(self, data, addr):\n disasm = list(self.cap.disasm(data, addr))\n leaders = set([addr])\n blocks = {}\n\n # Hack to force local scoping of global names:\n self.memory = {}\n\n for i in disasm:\n if capstone.x86.X86_GRP_JUMP in i.groups:\n leaders.add(i.operands[0].value.imm)\n leaders.add(i.address + i.size)\n\n leaders = sorted(leaders)\n for baddr in leaders:\n if baddr in self.sections or baddr in self.symbols:\n continue\n blocks[baddr] = 'block' + str(len(blocks) + 1)\n\n d = [] # Textual Disassembly\n a = [] # Instruction Addresses\n\n for i in disasm:\n if i.address in self.symbols:\n d.append(self.symbols[i.address].name + ':')\n a.append(i.address)\n elif i.address in self.sections:\n d.append(self.sections[i.address].name + ':')\n a.append(i.address)\n elif i.address in blocks:\n d.append(blocks[i.address] + ':')\n a.append(i.address)\n\n if i.mnemonic == 'nop':\n continue\n\n ops = ', '.join([self.render_operand(i, o, blocks) for o in i.operands])\n d.append((' %-7s %s' % (i.mnemonic, ops)).rstrip())\n a.append(i.address)\n\n if i.address >= leaders[-1] and is_terminal(i):\n break\n return d, a",
"def dis_lite(self, code, address):\n dis_gen = self.cs.disasm_lite(code, address)\n return dis_gen"
] | [
"0.7977805",
"0.6308526",
"0.6110257",
"0.5981193",
"0.57591164",
"0.5738575",
"0.5584899",
"0.557888",
"0.5507902",
"0.5493241",
"0.549239",
"0.5449774",
"0.5375824",
"0.53389883",
"0.5307862",
"0.52669376",
"0.52389264",
"0.5218232",
"0.5153286",
"0.5099107",
"0.50559896",
"0.5054593",
"0.503201",
"0.5004134",
"0.49864253",
"0.49222285",
"0.491924",
"0.48554093",
"0.48479554",
"0.4838673"
] | 0.71088725 | 1 |
Disassemble the given script. Returns a string. | def disassemble(self, script):
return ' '.join(self.opcode_list(script)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def do_disassemble(self, args):\n if len(args) != 0:\n args = args.split(' ')\n self.u_start = self.ParseAddressExpr(args[0])\n self.u_size = self.ParseAddressExpr(args[1]) if len(args) > 1 else 0x20\n skip = False\n else:\n # Skip the first instruction if we reuse the last address.\n skip = True\n\n if not self.reader.IsValidAddress(self.u_start):\n print(\"Address %s is not contained within the minidump!\" % (\n self.reader.FormatIntPtr(self.u_start)))\n return\n lines = self.reader.GetDisasmLines(self.u_start, self.u_size)\n if len(lines) == 0:\n print(\"Address %s could not be disassembled!\" % (\n self.reader.FormatIntPtr(self.u_start)))\n print(\" Could not disassemble using %s.\" % OBJDUMP_BIN)\n print(\" Pass path to architecture specific objdump via --objdump?\")\n return\n for line in lines:\n if skip:\n skip = False\n continue\n print(FormatDisasmLine(self.u_start, self.heap, line))\n # Set the next start address = last line\n self.u_start += lines[-1][0]\n print()",
"def disassemble_instruction(self, instruction):\n if not util.is_integer(instruction):\n raise TypeError('Expected instruction to be an integer.')\n\n buf_size = self.MAX_BUF_SIZE\n buf = (ctypes.c_char * buf_size)()\n res = self._dll.JLINKARM_DisassembleInst(ctypes.byref(buf), buf_size, instruction)\n if res < 0:\n raise errors.JLinkException('Failed to disassemble instruction.')\n\n return ctypes.string_at(buf).decode()",
"def disassemble(filepath):\n if not filepath.endswith('.ll'):\n command = [shared.LLVM_DIS, '-o=-', filepath] + shared.LLVM_DIS_OPTS\n with temp_files.get('.ll') as out: ret = subprocess.call(command, stdout=out)\n if ret != 0: raise RuntimeError('Could not disassemble %s.' % filepath)\n filepath = out.name\n return filepath",
"def decompile(*args):\n return _ida_hexrays.decompile(*args)",
"def decompile(self, filepath, output_dir):\n uncompyle6.main.main(output_dir, output_dir, [os.path.basename(filepath)], None)",
"def getDisassembledCode(self,filename, delimeter='\\n', bits='32bit'):\n\n return self.diassemble(filename,bits).replace(\"\\n\",delimeter)",
"def _decompile(*args):\n return _ida_hexrays._decompile(*args)",
"def decompilation(file_name):\n if sys.version_info[:2] == (3, 3):\n file_name = os.path.basename(unpyc3(file_name))\n elif sys.version_info[0] in (1, 2):\n file_name = os.path.basename(uncompyle_decompilation(file_name))\n else:\n print(\"It's impossible to decompile: {0}\".format(os.path.basename(file_name)))\n\n return\n\n print(\"\\nPython source code file: {0}\".format(file_name))",
"def _DisassembleFunc(self, symbol, elf_path=None, use_pager=None,\n to_file=None):\n assert not symbol.IsGroup()\n assert symbol.address and symbol.section_name == models.SECTION_TEXT\n assert not symbol.IsDelta(), ('Cannot disasseble a Diff\\'ed symbol. Try '\n 'passing .before_symbol or .after_symbol.')\n size_info = self._SizeInfoForSymbol(symbol)\n container = symbol.container\n elf_path = self._ElfPathForSymbol(size_info, container, elf_path)\n # Always use Android NDK's objdump because llvm-objdump does not print\n # the target of jump instructions, which is really useful.\n output_directory_finder = self._output_directory_finder\n if not output_directory_finder.Tentative():\n output_directory_finder = path_util.OutputDirectoryFinder(\n any_path_within_output_directory=elf_path)\n if output_directory_finder.Tentative():\n # Running objdump from an output directory means that objdump can\n # interleave source file lines in the disassembly.\n objdump_pwd = output_directory_finder.Finalized()\n else:\n # If we do not know/guess the output directory, run from any directory 2\n # levels below src since it is better than a random cwd (because usually\n # source file paths are relative to an output directory two levels below\n # src and start with ../../).\n objdump_pwd = path_util.FromToolsSrcRoot('tools', 'binary_size')\n\n arch = readelf.ArchFromElf(elf_path)\n objdump_path = path_util.GetDisassembleObjDumpPath(arch)\n args = [\n os.path.relpath(objdump_path, objdump_pwd),\n '--disassemble',\n '--source',\n '--line-numbers',\n '--demangle',\n '--start-address=0x%x' % symbol.address,\n '--stop-address=0x%x' % symbol.end_address,\n os.path.relpath(elf_path, objdump_pwd),\n ]\n\n # pylint: disable=unexpected-keyword-arg\n proc = subprocess.Popen(args,\n stdout=subprocess.PIPE,\n encoding='utf-8',\n cwd=objdump_pwd)\n lines = itertools.chain(('Showing disassembly for %r' % symbol,\n 'Command: %s' % ' '.join(args)),\n (l.rstrip() for l in proc.stdout))\n _WriteToStream(lines, use_pager=use_pager, to_file=to_file)\n proc.kill()",
"def disassemble(self, data, start_address=0):\n return _opcodes.disassemble(self._ptr, data, start_address)",
"def _get_cleaned_script(self, script):\n script = self._add_uuids(script)\n splitted_script = script.split('\\n')\n code_lines = []\n for line in splitted_script:\n if line.strip() != '':\n code_lines.append(line)\n for i, line in enumerate(code_lines):\n if line[0] != '\\t' and line[0] != ' ':\n code_lines[i] = '\\n' + line\n script = '\\n'.join(code_lines) + '\\n'\n return script",
"def get_insn_disasm(insn):\n\tglobal RE_SPACES\n\tdisasm = idc.GetDisasmEx(insn.ea, idc.GENDSM_FORCE_CODE)\n\treturn RE_SPACES.sub(' ', disasm.replace('\\t', ' '))",
"def getDisassembledCode(self,filename, delimeter='\\n', bits='32bit'):\n iterable = self.diassemble(filename,bits)\n\n assembly_code = ''\n for i in iterable:\n # To avoid TypeError: a bytes-like object is required, not 'str'\n (address, mnemonic, op_str) = i.address, i.mnemonic, i.op_str\n assembly_code += (\"0x%s:\\t%s\\t%s\"% (address, mnemonic, op_str))+delimeter\n\n #print(\"filename:\" + filename + \"\\ncode:\\n\" + assembly_code)\n return assembly_code",
"def _magic_g(self, s):\n s = s.strip()\n if idapy._d is None:\n print \"Please select a dump first. Example:\"\n print \"sel t2i\"\n return\n a = addr_from_magic_string(s)\n show_disasm(idapy._d, a, a+80)",
"def disasm_dump(bin, addr):\n return cache.access((bin,addr), lambda x: disasm_work(*x))",
"def dis(self, code, address, count=0):\n dis_gen = self.cs.disasm(code, address, count=count)\n return dis_gen",
"def dis(x=None):\n if x is None:\n distb()\n return\n if isinstance(x, types.InstanceType):\n x = x.__class__\n if hasattr(x, 'im_func'):\n x = x.im_func\n if hasattr(x, 'func_code'):\n x = x.func_code\n if hasattr(x, '__dict__'):\n items = x.__dict__.items()\n items.sort()\n for name, x1 in items:\n if isinstance(x1, _have_code):\n print \"Disassembly of %s:\" % name\n try:\n dis(x1)\n except TypeError, msg:\n print \"Sorry:\", msg\n print\n elif hasattr(x, 'co_code'):\n disassemble(x)\n elif isinstance(x, str):\n disassemble_string(x)\n else:\n raise TypeError, \\\n \"don't know how to disassemble %s objects\" % \\\n type(x).__name__",
"def dis_lite(self, code, address):\n dis_gen = self.cs.disasm_lite(code, address)\n return dis_gen",
"def dis(x=None):\n result = []\n if x is None:\n distb()\n return\n if isinstance(x, types.InstanceType):\n x = x.__class__\n if hasattr(x, 'im_func'):\n x = x.im_func\n if hasattr(x, 'func_code'):\n x = x.func_code\n if hasattr(x, '__dict__'):\n items = x.__dict__.items()\n items.sort()\n for name, x1 in items:\n if isinstance(x1, _have_code):\n result.append(\"Disassembly of %s:\" % name)\n try:\n dis(x1)\n except TypeError, msg:\n result.append(\"Sorry:\", msg)\n result.append('\\n')\n elif hasattr(x, 'co_code'):\n result.extend(disassemble(x))\n elif isinstance(x, str):\n result.extend(disassemble_string(x))\n else:\n raise TypeError, \\\n \"don't know how to disassemble %s objects\" % \\\n type(x).__name__\n return result",
"def test_extract():\n python = uflash.hexlify(TEST_SCRIPT)\n result = uflash.embed_hex(uflash._RUNTIME, python)\n extracted = uflash.extract_script(result)\n assert extracted == TEST_SCRIPT.decode('utf-8')",
"def diassemble(self,filename, bits='32bit'):\n mode = bits.replace(\"bit\",\"\")\n diasm = subprocess.check_output(['lib/ZydisDisasm',\"-\"+mode, filename])\n return diasm.decode(\"utf-8\")",
"def test_extract_sandwiched():\n python = uflash.hexlify(TEST_SCRIPT)\n python_hex_lines = python.split('\\n')\n python_sandwiched = [python_hex_lines[0]] + \\\n [':10DFE000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF41'] + \\\n python_hex_lines[1:] + [':10E50000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF1B']\n result = uflash.embed_hex(uflash._RUNTIME, '\\n'.join(python_sandwiched))\n extracted = uflash.extract_script(result)\n assert extracted == TEST_SCRIPT.decode('utf-8')",
"def disassemble_code(self, dso_path, start_addr, addr_len):\n # 1. Find real path.\n real_path = find_real_dso_path(dso_path, self.binary_cache_path)\n if real_path is None:\n return None\n\n # 2. Get path of objdump.\n arch = self.readelf.get_arch(real_path)\n if arch == 'unknown':\n return None\n objdump_path = self.objdump_paths.get(arch)\n if not objdump_path:\n objdump_path = find_tool_path('objdump', self.ndk_path, arch)\n if not objdump_path:\n log_exit(\"Can't find objdump. Please set ndk path with --ndk_path option.\")\n self.objdump_paths[arch] = objdump_path\n\n # 3. Run objdump.\n args = [objdump_path, '-dlC', '--no-show-raw-insn',\n '--start-address=0x%x' % start_addr,\n '--stop-address=0x%x' % (start_addr + addr_len),\n real_path]\n try:\n subproc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n (stdoutdata, _) = subproc.communicate()\n stdoutdata = bytes_to_str(stdoutdata)\n except:\n return None\n\n if not stdoutdata:\n return None\n result = []\n for line in stdoutdata.split('\\n'):\n line = line.rstrip() # Remove '\\r' on Windows.\n items = line.split(':', 1)\n try:\n addr = int(items[0], 16)\n except ValueError:\n addr = 0\n result.append((line, addr))\n return result",
"def daast_from_str(src, filename='<str>', args=None):\n try:\n dt = Parser(filename, args)\n rawast = parse(src, filename)\n dt.visit(rawast)\n sys.stderr.write(\"%s compiled with %d errors and %d warnings.\\n\" %\n (filename, dt.errcnt, dt.warncnt))\n if dt.errcnt == 0:\n return dt.program\n except SyntaxError as e:\n sys.stderr.write(\"%s:%d:%d: SyntaxError: %s\" % (e.filename, e.lineno,\n e.offset, e.text))\n return None",
"def recursive_disassemble(self, start_addresses):\n self.instr_queue = deque()\n for address in start_addresses:\n self.instr_queue.append(address)\n\n if self.use_trace_file == True:\n self.insert_branches_from_trace_file(self.instr_queue)\n limit = 0\n disassembled = 0\n while len(self.instr_queue) > 0:\n instr_addr = self.instr_queue.popleft()\n if self.instr_graph.is_analysed(instr_addr) == True or self.mem_dump.is_it_in_module(instr_addr) == False:\n continue\n disassembled += 1\n instr = RPE_instruction()\n buf = self.mem_dump.read_memory(instr_addr, 15)\n try:\n dec_instr = self.instr_decoder.disasm(buf, instr_addr).next()\n except StopIteration:\n continue\n\n instr.address = instr_addr\n instr.decoded_instr = dec_instr\n self.instr_graph.add_node(instr)\n branch_dsts = self.get_branch_destinations(instr)\n for dst_addr in branch_dsts:\n self.instr_graph.add_edge(instr, dst_addr, 3)\n self.instr_queue.append(dst_addr)\n\n self.handle_branch_destination(instr)\n self.handle_memory_reads(instr)\n\n print 'Disassembled in total: ' + `disassembled`\n print 'number of nodes in graph: ' + `(len(self.instr_graph.graph))`\n print 'Instructions to relocate: ' + `(len(self.instructions_to_relocate))`\n print 'Number of functions in import table: ' + `(self.import_table.number_of_functions())`\n relocate_set = set(self.instructions_to_relocate)\n print 'Size of set to relocate: ' + `(len(relocate_set))`\n #for instr_address in relocate_set:\n # print '%x\\t' % instr_address",
"def source_to_code(self, data, path):\n\t\treturn _call_with_frames_removed(compile, data, path, 'exec', dont_inherit=True)",
"def deletescript(self, name):\n code, data = self.__send_command(\n \"DELETESCRIPT\", [name.encode(\"utf-8\")])\n if code == \"OK\":\n return True\n return False",
"def strip_script(soup):\r\n for script in soup.find_all('script'):\r\n soup.script.decompose()",
"def Replicate():\r\n\tcode = [] \t\t\t\t\t\t#list for saving malicious code\r\n\r\n\twith open(sys.argv[0],'r') as f: \t \t\t#read and save running script\r\n\t\tlines=f.readlines()\t\t\r\n\t\t\r\n\tvirusArea = False\r\n\tfor line in lines:\t\t\t\t\t#iterate through script and find virus part\r\n\t\tif line == '### VIRUS START ###\\n':\r\n\t\t\tvirusArea=True\r\n\t\tif virusArea:\r\n\t\t\tcode.append(line)\r\n\t\tif line == '### VIRUS END ###\\n':\r\n\t\t\tbreak\r\n\t\t\t\r\n\tpythonScripts = glob.glob('*.py') + glob.glob('*.pyw')#find all python scripts in local directory\r\n\r\n\tfor script in pythonScripts:\t\t\t\t#iterate through all found scripts\r\n\r\n\t\twith open(script,'r') as f:\t\t\t#read script\r\n\t\t\tscriptCode = f.readlines()\r\n\t\tinfected=False\r\n\t\t\r\n\t\tfor line in scriptCode:\t\t\t#check if its already infected\r\n\t\t\tif line == '### VIRUS START ###\\n':\r\n\t\t\t\tinfected= True\r\n\t\t\t\tbreak\r\n\t\tif not infected:\t\t\t\t#if not infected append virus code\r\n\t\t\tfinalCode=[]\r\n\t\t\tfinalCode.extend(code)\r\n\t\t\tfinalCode.extend('\\n')\r\n\t\t\tfinalCode.extend(scriptCode)\r\n\t\t\t\r\n\t\t\twith open(script,'w') as f:\r\n\t\t\t\tf.writelines(finalCode)",
"def _test():\n if sys.argv[1:]:\n if sys.argv[2:]:\n sys.stderr.write(\"usage: python dis.py [-|file]\\n\")\n sys.exit(2)\n fn = sys.argv[1]\n if not fn or fn == \"-\":\n fn = None\n else:\n fn = None\n if fn is None:\n f = sys.stdin\n else:\n f = open(fn)\n source = f.read()\n if fn is not None:\n f.close()\n else:\n fn = \"<stdin>\"\n code = compile(source, fn, \"exec\")\n dis(code)"
] | [
"0.6129356",
"0.5955644",
"0.5793927",
"0.5649976",
"0.55970746",
"0.5520527",
"0.54753274",
"0.54259163",
"0.53937024",
"0.5353539",
"0.5237331",
"0.52210057",
"0.52193767",
"0.5176129",
"0.5160617",
"0.514914",
"0.50190306",
"0.5009232",
"0.49686974",
"0.4957609",
"0.4934007",
"0.48871088",
"0.4869204",
"0.486767",
"0.48424003",
"0.48330605",
"0.48111007",
"0.48044035",
"0.4791913",
"0.47786155"
] | 0.7294682 | 0 |
Keepalive topic publisher; publishes to the broker every 30 sec. | def thread_function(client):
threading.Timer(30.0, thread_function).start()
client.publish("serverCommand/keepalive", "0")
print("Message Sent. (keepalive)") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n # pylint: disable=too-many-locals\n parser = init_parser()\n options = parser.parse_args()\n\n host = options.host\n port = options.port\n keepalive = options.keepalive\n client_id = options.client_id\n topic = options.topic\n qos = options.qos\n filename = options.file\n interval = options.interval\n min_interval = options.min_interval\n max_interval = options.max_interval\n prompt_to_send = options.prompt_to_send\n\n client = mqtt.Client(client_id)\n client.on_connect = on_connect\n client.on_disconnect = on_disconnect\n client.on_publish = on_publish\n client.on_log = on_log\n client.connect(host, port, keepalive)\n client.loop_start()\n\n publish_time = 0\n\n with open(filename) as file_object:\n message = file_object.readline().rstrip()\n while message:\n interval = random.randint(min_interval, max_interval)\n current_time = int(time.time() + 0.5)\n used_time = current_time - publish_time\n if used_time < interval:\n time.sleep(interval - used_time)\n\n publish_time = int(time.time() + 0.5)\n message = message.replace(\"{DATETIME}\", str(publish_time))\n if prompt_to_send:\n print(\"press enter to send next message.\")\n if PY2:\n raw_input() # (only a python 3 error) pylint: disable=undefined-variable\n else:\n input()\n mqtt_message_info = client.publish(topic, message, qos=qos)\n print(\"Publish: %s has return code %i, %s\" % (mqtt_message_info.mid, mqtt_message_info.rc, mqtt.error_string(mqtt_message_info.rc)))\n\n if mqtt_message_info.rc != mqtt.MQTT_ERR_SUCCESS:\n raise ValueError(mqtt.error_string(mqtt_message_info.rc))\n\n if not mqtt_message_info.is_published():\n print(\"Waiting for publish.\")\n mqtt_message_info.wait_for_publish()\n\n message = file_object.readline().rstrip()\n\n client.disconnect()\n print(\"Done\")",
"def keepAliveReceived(self):",
"def perform_action(self):\n logger.info(\"Now sending a keepalive to the primary\")\n self.connection_handler.send_message(\"I am still alive, client: {num}\".format(num=self.uuid))\n time.sleep(5)",
"def test_NWK_04():\n # imports\n import paho.mqtt.client as mqtt\n import time\n\n client = mqtt.Client()\n mqtt_helper = MQTTHelper(client)\n\n # mqtt client setup\n client.username_pw_set(USERNAME, PASSWORD)\n client.connect(HOST, PORT)\n client.subscribe(TOPIC, 0)\n client.loop_start()\n\n msg: str = \"This is a test publish\"\n client.publish(TOPIC, msg)\n\n time.sleep(1)\n\n # TODO: add on_publish handler in MQTT_Helper and assert publish was successful\n assert 1 == 1\n\n # cleanup\n client.loop_stop()\n client.unsubscribe(TOPIC)\n client.disconnect()",
"def on_publish(self, mqtt_client, userdata, mid):\n logging.debug(\"DEBUG - publish ack received\")",
"def publish():\n while True:\n mqttClient.reconnect()\n\n energy_data = getEnergyUsage()\n wats = float(energy_data['power_mw']) / 1000\n wat_hours = float(energy_data['total_wh'])\n\n sentPayload(name=\"power\", site=\"bathroom\", value=wats)\n sentPayload(name=\"energy_total\", site=\"bathroom\", value=wat_hours)\n\n time.sleep(updateInterval)",
"def publisher():\r\n pub = rospy.Publisher('tibbling', Int16, queue_size=1)\r\n rospy.init_node('publisher', anonymous = True)\r\n rate = rospy.Rate(20) #Sets rate at 20 Hz\r\n k = 0 #Number to be sent\r\n n = 4 #The increment for k with each loop\r\n\r\n while not rospy.is_shutdown():\r\n k += n #After 20 ms, send k + n to topic\r\n rospy.loginfo(k) #Used for troubleshooting\r\n pub.publish(k)\r\n rate.sleep()",
"def publish_mqtt_msg(topic, mqtt_msg):\n\n MQTT_HOST = settings.MQTT_HOST\n MQTT_PORT = settings.MQTT_PORT\n MQTT_KEEPALIVE_INTERVAL = settings.MQTT_KEEPALIVE_INTERVAL\n\n MQTT_TOPIC = topic\n\n MQTT_MSG = json.dumps(mqtt_msg)\n\n \"\"\" Celery task to create a password for the user \"\"\"\n\n celery_task.delay(MQTT_MSG)\n\n def on_publish(client, userdata, mid):\n print(\"Message Published...\")\n\n def on_connect(client, userdata, flags, rc):\n client.subscribe(MQTT_TOPIC)\n client.publish(MQTT_TOPIC, MQTT_MSG)\n\n def on_message(client, userdata, msg):\n print(msg.topic)\n print(msg.payload)\n payload = json.loads(msg.payload)\n print(payload['sepalWidth'])\n client.disconnect()\n\n mqttc = mqtt.Client()\n mqttc.on_publish = on_publish\n mqttc.on_connect = on_connect\n mqttc.on_message = on_message\n\n mqttc.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)",
"def publish( self, topic, data, qos = 1, retain = False ):\n logging.info( \"Publishing to topic %s\" %topic )\n self.client.publish( topic, data, qos = qos, retain = retain )",
"def on_next(self, msg):\n # publish the message to the topics\n retain = msg.retain if hasattr(msg, 'retain') else False\n for (topic, qos) in self.topics:\n self.client.publish(topic, msg, qos, retain)",
"def __producer__(self):\n import time\n i = 0\n while True:\n self.publish( i )\n i += 1\n time.sleep(1)",
"def slot_keepalive_timer(self, _sender, _data):\r\n if self.connected:\r\n #self.debug(\"### sending keepalive\")\r\n self._try_send_raw(\"2::\")",
"def subscribe_to_ticks_publisher(topic):\n ConfigFile = \"../config/kuber.conf\"\n config = configparser.ConfigParser()\n config.read(ConfigFile)\n\n zmq_conf = config['ZMQ CONFIGURATION']\n publish_port = zmq_conf['publish_port']\n\n print(\"Subscribing to topic %s at %s\" % (topic, publish_port))\n sub = TopicSubscriber()\n\n try: \n sub.init(topic, publish_port)\n except Exception as e:\n print(\"\"\"\n Subscriber init failed: {}\n \"\"\".format(e))\n sys.exit(0)\n\n # Return the subscriber context.\n return sub",
"async def keep_alive(self):\n self._keepalive = True\n while True:\n await gen.sleep(self.KEEPALIVE_INTERVAL)\n if not self._keepalive:\n return\n try:\n # lines that start with : are comments\n # and should be ignored by event consumers\n self.write(\":keepalive\\n\\n\")\n await self.flush()\n except StreamClosedError:\n return",
"def publish(self, topic:str, data:bytes) -> None:\n\t\tself.mqttClient.publish(topic, data)",
"def publish(self, topic, msg):\n\t\tself.topic = topic\n\t\tself.msg = msg \n\t\tself.client.publish(self.topic, self.msg)",
"def keepalive(self) -> None:",
"def publish_mqtt_message(self, topic: str, payload: str) -> bool:\n self.log.error(\n \"This functionality is not enabled yet since it is unclear \"\n \"whether this CSC will be responsible for this or if this will be \"\n \"done via the HVAC software user interface.\"\n )\n return False\n # msg_info = self.client.publish(topic=topic, payload=payload)\n # return msg_info.is_published()",
"def keepalive(self):\n return f'PersistentKeepalive = {self._peer.keepalive}'",
"def publish(self, topic, msg):\n formatted_msg = json.dumps(msg)\n self.client.publish(topic, formatted_msg) # json converting cause of mqtt's data transfer limit.",
"def publish(self, topic, payload):\n complete_topic = \"{}/{}\".format(self._base_topic, topic)\n self._client.publish(complete_topic, payload, qos=2)\n logger.info(\"On topic %s published: %s\", complete_topic, payload)",
"def on_publish(client, userdata, mid):\n print(\"Message Published.\")",
"def test_NWK_03():\n\n # imports\n import paho.mqtt.client as mqtt\n import time\n\n # backend setup\n client = mqtt.Client()\n mqtt_helper = MQTTHelper(client)\n\n # mqtt client setup\n client.username_pw_set(USERNAME, PASSWORD)\n client.connect(HOST, PORT)\n client.subscribe(TOPIC, 0)\n client.loop_start()\n\n # sleep to allow network traffic\n time.sleep(1)\n\n # test that the client subscribed\n assert mqtt_helper.on_subscribe_last_client == client\n\n # cleanup\n client.loop_stop()\n client.unsubscribe(TOPIC)\n client.disconnect()",
"def send_discovery_msg(client):\n global last_discovery\n passed = datetime.now() - last_discovery\n if passed.seconds > 59:\n log.debug(\"Sending discovery message ...\")\n client.publish('fbp', payload=json.dumps(discovery_message).encode('utf-8'), qos=0)\n last_discovery = datetime.now()",
"def publish_status(client):\n client.publish(config.topic_get, payload=getlight())",
"def setup(self, dictParams):\n global connack\n # we dont use globals here now - instead use config.py which is imported where needed to \n # give cross-module scope\n #global globalScope.incomingMessageBuffer\n global broker_public_key\n \n hostname = dictParams[\"sess\"]\n name = dictParams[\"dest\"]\n port = dictParams[\"port\"]\n protocol = dictParams[\"protoVer\"]\n broker = dictParams[\"broker\"]\n # this is a Singleton\n self.receiver = dictParams[\"RcvdMsg\"]\n timeout = dictParams[\"timeout\"]\n oldtimestamp = dictParams[\"oldtimestamp\"]\n anotherParam = dictParams[\"anotherParam\"]\n \n self.brokerName = broker\n\n # Setting clean_session = False means that subsciption information and \n # queued messages are retained after the client disconnects. It is suitable\n # in an environment where disconnects are frequent.\n # client_id is any unique identifier so our own fqdn should be alright to use\n mqtt_client = mqtt.Client(protocol=self.protocol, client_id=self.myName, clean_session=False)\n mqtt_client.on_connect = self.on_connect\n mqtt_client.on_message = self.on_message\n mqtt_client.on_publish = self.on_publish\n mqtt_client.on_disconnect = self.on_disconnect\n mqtt_client.on_subscribe = self.on_subscribe\n mqtt_client.on_unsubscribe = self.on_unsubscribe\n mqtt_client.on_log = self.on_log\n \n \n # Set the LWT\n # If the client disconnects without calling disconnect, the broker will\n # publish this message on its behalf\n # retain should be set to true\n mqtt_client.will_set(self.STATUS, \n self.status_message(STATUS_DISCONNECTED_UNGRACE), \n qos=QosType.FLAG_QOS_ATMOSTONCE.value, retain=True) \n\n # Connect to the broker\n # keepalive is maximum number of seconds allowed between communications\n # with the broker. If no other messages are sent, the client will send a\n # ping request at this interval\n # set it high for devices that are disconnected for small periods of time, also for debugging\n # set it extremely high for devices that are out to sea for days\n keepalive=1800\n try:\n logging.info('Attempting to connect to broker at ' + self.brokerName)\n mqtt_client.connect(self.brokerName, self.port, keepalive)\n except:\n logging.error(\"ERROR - could not connect to broker at \" + self.brokerName)\n return False\n else:\n logging.info(\"INFO - all OK - no problem on attempt to connect to broker at \" + self.brokerName) \n \n \n # Force function to block until connack is sent from the broker, or timeout\n connack = False\n start_time = time.time()\n while not connack:\n time.sleep(0.1)\n mqtt_client.loop()\n \n if time.time() - start_time > float(timeout):\n raise MqttTimeOutError(\"The program timed out while trying to connect to the broker!\")\n break\n \n # When connected, subscribe to the relevant channels\n mqtt_client.subscribe([(self.PUBLIC, 1), (self.PROTECTED, 1),\n (self.PRIVATE, 1), (self.PINGREQ, 1),\n (self.PINGACK, 1), (self.HANDSHAKE, 1)\n ])\n \n self.client = mqtt_client\n \n # init the globalScope.incomingMessageBuffer - this is now done in the config.py\n \n \n # Do a blocking call\n broker_public_key = None\n self.client.publish(self.STATUS, \n self.status_message(STATUS_CONNECTED), \n qos=QosType.FLAG_QOS_ATLEASTONCE.value)\n\n # TODO dont know what this does and it was getting stuck in the loop so i got rid of it \n# while self.broker_public_key == None:\n# time.sleep(0.1)\n# mqtt_client.loop()\n# # Check the message buffer\n# if globalScope.incomingMessageBuffer != []:\n# for message in globalScope.incomingMessageBuffer:\n# if message.topic == 
self.HANDSHAKE:\n# # Check whether it is a broker key message.\n# try:\n# payload = json.loads(message.payload.decode())\n## disabled for now\n## self.broker_public_key = payload['public_key']\n## print(self.broker_public_key)\n# self.broker_public_key = json.loads(message.payload.decode())\n# except:\n# pass\n# globalScope.incomingMessageBuffer = []\n \n\n # Start the loop. This method is preferable to repeatedly calling loop\n # since it handles reconnections automatically. It is non-blocking and \n # handles interactions with the broker in the background.\n logging.debug('DEBUG - Starting loop')\n try:\n# mqtt_client.loop()\n self.client.loop_start()\n except:\n logging.error(\"ERROR - failure of loop_start\")\n \n return True",
"def broker_null(self, data):\n\n print(\"Heartbeat\")\n #TODO: Reset heartbeat timer or something like that",
"def on_publish(client: mqtt.Client, userdata: Any, mid: int) -> None:\n logging.info(f\"Successfully published a message: mid={mid}\")",
"def publish(topic, message):\n if DEBUG:\n print(\"Publish: '\" + message + \"' (topic: '\" + topic + \"')\")\n DATA[\"client\"].publish(topic, message)",
"def keepalive_received(self, peer, timestamp):\n\n if peer.msg_recv_stat['Keepalives'] == 1:\n # do something with the connection establish event\n pass\n\n if CONF.message.write_keepalive:\n # write bgp message\n self.write_msg(\n peer=peer.factory.peer_addr,\n timestamp=timestamp,\n msg_type=4,\n msg={\"msg\": None}\n )"
] | [
"0.6194819",
"0.6047085",
"0.60348225",
"0.60193574",
"0.5993197",
"0.5971651",
"0.59388137",
"0.59254557",
"0.5893863",
"0.57715595",
"0.5753941",
"0.5746216",
"0.57367104",
"0.5735936",
"0.56993383",
"0.5693414",
"0.5682998",
"0.5676654",
"0.566459",
"0.56511974",
"0.56461155",
"0.56422335",
"0.56321484",
"0.563049",
"0.56283516",
"0.5624085",
"0.5614636",
"0.5613877",
"0.5609863",
"0.5609193"
] | 0.6438521 | 0 |
Read the config XML, convert it to a string, and send it to the MQTT broker. | def send_config_xml_to_broker(self):
xmlObject = xml.dom.minidom.parse("config_setup.xml")
pretty_xml_as_string = xmlObject.toprettyxml()
self.client.publish("users/everyone/inbox/server/deviceList", pretty_xml_as_string)
print("XML config sent.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_mqtt(self, config):\n\t\tif \"mqtt\" in config:\n\t\t\tself._mqtt = config[\"mqtt\"]\n\t\tif not \"host\" in self._mqtt:\n\t\t\traise ValueError(\"MQTT host not set\")\n\t\tif not \"port\" in self._mqtt:\n\t\t\traise ValueError(\"MQTT port not set\")\n\t\tif not \"user\" in self._mqtt:\n\t\t\traise ValueError(\"MQTT user not set\")\n\t\tif not \"password\" in self._mqtt:\n\t\t\traise ValueError(\"MQTT password not set\")\n\t\tif not \"topic\" in self._mqtt:\n\t\t\traise ValueError(\"MQTT topic not set\")\n\t\tif not \"qos\" in self._mqtt:\n\t\t\tself._mqtt[\"qos\"] = 0\n\t\tif not \"retain\" in self._mqtt:\n\t\t\tself._mqtt[\"retain\"] = False",
"def testCreateConfigGet(self):\n\t\txml = \"\"\"<iq to=\"pubsub.asdf\" type=\"set\" id=\"E\" from=\"fritzy@asdf/87292ede-524d-4117-9076-d934ed3db8e7\"><pubsub xmlns=\"http://jabber.org/protocol/pubsub\"><create node=\"testnode2\" /><configure><x xmlns=\"jabber:x:data\" type=\"submit\"><field var=\"FORM_TYPE\" type=\"hidden\"><value>http://jabber.org/protocol/pubsub#node_config</value></field><field var=\"pubsub#node_type\" type=\"list-single\" label=\"Select the node type\"><value>leaf</value></field><field var=\"pubsub#title\" type=\"text-single\" label=\"A friendly name for the node\" /><field var=\"pubsub#deliver_notifications\" type=\"boolean\" label=\"Deliver event notifications\"><value>1</value></field><field var=\"pubsub#deliver_payloads\" type=\"boolean\" label=\"Deliver payloads with event notifications\"><value>1</value></field><field var=\"pubsub#notify_config\" type=\"boolean\" label=\"Notify subscribers when the node configuration changes\" /><field var=\"pubsub#notify_delete\" type=\"boolean\" label=\"Notify subscribers when the node is deleted\" /><field var=\"pubsub#notify_retract\" type=\"boolean\" label=\"Notify subscribers when items are removed from the node\"><value>1</value></field><field var=\"pubsub#notify_sub\" type=\"boolean\" label=\"Notify owners about new subscribers and unsubscribes\" /><field var=\"pubsub#persist_items\" type=\"boolean\" label=\"Persist items in storage\" /><field var=\"pubsub#max_items\" type=\"text-single\" label=\"Max # of items to persist\"><value>10</value></field><field var=\"pubsub#subscribe\" type=\"boolean\" label=\"Whether to allow subscriptions\"><value>1</value></field><field var=\"pubsub#access_model\" type=\"list-single\" label=\"Specify the subscriber model\"><value>open</value></field><field var=\"pubsub#publish_model\" type=\"list-single\" label=\"Specify the publisher model\"><value>publishers</value></field><field var=\"pubsub#send_last_published_item\" type=\"list-single\" label=\"Send last published item\"><value>never</value></field><field var=\"pubsub#presence_based_delivery\" type=\"boolean\" label=\"Deliver notification only to available users\" /></x></configure></pubsub></iq>\"\"\"\n\t\tiq = self.ps.Iq(None, self.ps.ET.fromstring(xml))\n\t\tconfig = iq['pubsub']['configure']['config']\n\t\tself.failUnless(config.getValues() != {})",
"async def send_config_req(self):\n if not self.connected:\n return\n\n data = bytearray(7)\n data[0] = M_START\n data[1] = 5 # len of msg\n data[2] = mtypes[BMTS_CONFIG_REQ][0]\n data[3] = mtypes[BMTS_CONFIG_REQ][1]\n data[4] = mtypes[BMTS_CONFIG_REQ][2]\n data[5] = 0x77 # known value\n data[6] = M_END\n\n self.writer.write(data)\n await self.writer.drain()",
"async def test_get_config(self):\n disable_auto_linking = random_bool()\n monitor_mode = random_bool()\n auto_led = random_bool()\n deadman = random_bool()\n topic = f\"ack.{GET_IM_CONFIGURATION}\"\n\n topic_item = TopicItem(\n topic,\n {\n \"disable_auto_linking\": disable_auto_linking,\n \"monitor_mode\": monitor_mode,\n \"auto_led\": auto_led,\n \"deadman\": deadman,\n },\n 0,\n )\n modem = ModemBase()\n send_topics([topic_item])\n await asyncio.sleep(0.1)\n\n assert modem.disable_auto_linking == disable_auto_linking\n assert modem.monitor_mode == monitor_mode\n assert modem.auto_led == auto_led\n assert modem.deadman == deadman\n\n assert modem.configuration[DISABLE_AUTO_LINKING].value == disable_auto_linking\n assert modem.configuration[MONITOR_MODE].value == monitor_mode\n assert modem.configuration[AUTO_LED].value == auto_led\n assert modem.configuration[DEADMAN].value == deadman\n\n assert modem.configuration[DISABLE_AUTO_LINKING].new_value is None\n assert modem.configuration[MONITOR_MODE].new_value is None\n assert modem.configuration[AUTO_LED].new_value is None\n assert modem.configuration[DEADMAN].new_value is None",
"def __init__(self, config: ConfigType) -> None:\n\n # We don't import on the top because some integrations\n # should be able to optionally rely on MQTT.\n import paho.mqtt.client as mqtt # pylint: disable=import-outside-toplevel\n\n if (protocol := config.get(CONF_PROTOCOL, DEFAULT_PROTOCOL)) == PROTOCOL_31:\n proto = mqtt.MQTTv31\n elif protocol == PROTOCOL_5:\n proto = mqtt.MQTTv5\n else:\n proto = mqtt.MQTTv311\n\n if (client_id := config.get(CONF_CLIENT_ID)) is None:\n # PAHO MQTT relies on the MQTT server to generate random client IDs.\n # However, that feature is not mandatory so we generate our own.\n client_id = mqtt.base62(uuid.uuid4().int, padding=22)\n transport = config.get(CONF_TRANSPORT, DEFAULT_TRANSPORT)\n self._client = mqtt.Client(client_id, protocol=proto, transport=transport)\n\n # Enable logging\n self._client.enable_logger()\n\n username: str | None = config.get(CONF_USERNAME)\n password: str | None = config.get(CONF_PASSWORD)\n if username is not None:\n self._client.username_pw_set(username, password)\n\n if (\n certificate := get_file_path(CONF_CERTIFICATE, config.get(CONF_CERTIFICATE))\n ) == \"auto\":\n certificate = certifi.where()\n\n client_key = get_file_path(CONF_CLIENT_KEY, config.get(CONF_CLIENT_KEY))\n client_cert = get_file_path(CONF_CLIENT_CERT, config.get(CONF_CLIENT_CERT))\n tls_insecure = config.get(CONF_TLS_INSECURE)\n if transport == TRANSPORT_WEBSOCKETS:\n ws_path: str = config.get(CONF_WS_PATH, DEFAULT_WS_PATH)\n ws_headers: dict[str, str] = config.get(CONF_WS_HEADERS, DEFAULT_WS_HEADERS)\n self._client.ws_set_options(ws_path, ws_headers)\n if certificate is not None:\n self._client.tls_set(\n certificate,\n certfile=client_cert,\n keyfile=client_key,\n tls_version=ssl.PROTOCOL_TLS_CLIENT,\n )\n\n if tls_insecure is not None:\n self._client.tls_insecure_set(tls_insecure)",
"def read_mqtt_config():\n with open(join(env.get('XDG_CONFIG_HOME',\n join(expanduser('~'), '.config')),\n 'mosquitto_pub')) as f:\n d = dict(line.replace('-', '').split()\n for line in f.read().splitlines())\n return dict(host=d['h'],\n port=d['p'],\n username=d['username'],\n password=d['pw'])",
"def setup(self, dictParams):\n global connack\n # we dont use globals here now - instead use config.py which is imported where needed to \n # give cross-module scope\n #global globalScope.incomingMessageBuffer\n global broker_public_key\n \n hostname = dictParams[\"sess\"]\n name = dictParams[\"dest\"]\n port = dictParams[\"port\"]\n protocol = dictParams[\"protoVer\"]\n broker = dictParams[\"broker\"]\n # this is a Singleton\n self.receiver = dictParams[\"RcvdMsg\"]\n timeout = dictParams[\"timeout\"]\n oldtimestamp = dictParams[\"oldtimestamp\"]\n anotherParam = dictParams[\"anotherParam\"]\n \n self.brokerName = broker\n\n # Setting clean_session = False means that subsciption information and \n # queued messages are retained after the client disconnects. It is suitable\n # in an environment where disconnects are frequent.\n # client_id is any unique identifier so our own fqdn should be alright to use\n mqtt_client = mqtt.Client(protocol=self.protocol, client_id=self.myName, clean_session=False)\n mqtt_client.on_connect = self.on_connect\n mqtt_client.on_message = self.on_message\n mqtt_client.on_publish = self.on_publish\n mqtt_client.on_disconnect = self.on_disconnect\n mqtt_client.on_subscribe = self.on_subscribe\n mqtt_client.on_unsubscribe = self.on_unsubscribe\n mqtt_client.on_log = self.on_log\n \n \n # Set the LWT\n # If the client disconnects without calling disconnect, the broker will\n # publish this message on its behalf\n # retain should be set to true\n mqtt_client.will_set(self.STATUS, \n self.status_message(STATUS_DISCONNECTED_UNGRACE), \n qos=QosType.FLAG_QOS_ATMOSTONCE.value, retain=True) \n\n # Connect to the broker\n # keepalive is maximum number of seconds allowed between communications\n # with the broker. If no other messages are sent, the client will send a\n # ping request at this interval\n # set it high for devices that are disconnected for small periods of time, also for debugging\n # set it extremely high for devices that are out to sea for days\n keepalive=1800\n try:\n logging.info('Attempting to connect to broker at ' + self.brokerName)\n mqtt_client.connect(self.brokerName, self.port, keepalive)\n except:\n logging.error(\"ERROR - could not connect to broker at \" + self.brokerName)\n return False\n else:\n logging.info(\"INFO - all OK - no problem on attempt to connect to broker at \" + self.brokerName) \n \n \n # Force function to block until connack is sent from the broker, or timeout\n connack = False\n start_time = time.time()\n while not connack:\n time.sleep(0.1)\n mqtt_client.loop()\n \n if time.time() - start_time > float(timeout):\n raise MqttTimeOutError(\"The program timed out while trying to connect to the broker!\")\n break\n \n # When connected, subscribe to the relevant channels\n mqtt_client.subscribe([(self.PUBLIC, 1), (self.PROTECTED, 1),\n (self.PRIVATE, 1), (self.PINGREQ, 1),\n (self.PINGACK, 1), (self.HANDSHAKE, 1)\n ])\n \n self.client = mqtt_client\n \n # init the globalScope.incomingMessageBuffer - this is now done in the config.py\n \n \n # Do a blocking call\n broker_public_key = None\n self.client.publish(self.STATUS, \n self.status_message(STATUS_CONNECTED), \n qos=QosType.FLAG_QOS_ATLEASTONCE.value)\n\n # TODO dont know what this does and it was getting stuck in the loop so i got rid of it \n# while self.broker_public_key == None:\n# time.sleep(0.1)\n# mqtt_client.loop()\n# # Check the message buffer\n# if globalScope.incomingMessageBuffer != []:\n# for message in globalScope.incomingMessageBuffer:\n# if message.topic == 
self.HANDSHAKE:\n# # Check whether it is a broker key message.\n# try:\n# payload = json.loads(message.payload.decode())\n## disabled for now\n## self.broker_public_key = payload['public_key']\n## print(self.broker_public_key)\n# self.broker_public_key = json.loads(message.payload.decode())\n# except:\n# pass\n# globalScope.incomingMessageBuffer = []\n \n\n # Start the loop. This method is preferable to repeatedly calling loop\n # since it handles reconnections automatically. It is non-blocking and \n # handles interactions with the broker in the background.\n logging.debug('DEBUG - Starting loop')\n try:\n# mqtt_client.loop()\n self.client.loop_start()\n except:\n logging.error(\"ERROR - failure of loop_start\")\n \n return True",
"def mqttConnect(self):\n clientId = 'projects/{}/locations/{}/registries/{}/devices/{}'.format(self.projectId, \n self.cloudRegion, \n self.registryId, \n self.deviceId)\n mqttc = mqtt.Client(client_id = clientId)\n \n # With Google Cloud IoT Core, the username field is ignored, and the\n # password field is used to transmit a JWT to authorize the device.\n mqttc.username_pw_set(\n username='unused',\n password=self.create_jwt())\n\n # Enable SSL/TLS support.\n mqttc.tls_set(ca_certs=self.caCert, tls_version=ssl.PROTOCOL_TLSv1_2) \n self.blogger.info('Starting connection to: {0}:{1}'.format(self.mqttHost, self.mqttPort))\n mqttc.on_connect = self.connectCallBack\n mqttc.on_message = self.processMessage\n mqttc.on_publish = self.publishedMessageCallBack\n mqttc.connect(self.mqttHost, port=self.mqttPort, keepalive=60)\n try:\n mqttc.subscribe(self.configTopic, qos=self.QoS)\n self.blogger.debug('Subscribed to config topic: {}'.format(self.configTopic))\n mqttc.subscribe(self.commandTopic, qos=self.QoS)\n self.blogger.debug('Subscribed to command topic: {}'.format(self.commandTopic))\n mqttc.subscribe(self.eventTopic, qos=self.QoS)\n self.blogger.debug('Subscribed to event topic: {}'.format(self.eventTopic))\n self.messageToPublish = '{\"thingy\":\"ready\"}'\n# self.publishMessage(self.eventTopic, QoS)\n except Exception as e:\n self.blogger.error('subscription failed for reason: {0}'.format(e))\n\n return mqttc",
"def connect(self, mqtt_client, discovery_prefix=\"homeassistant\", node_id=None):\n self.discovery_prefix = discovery_prefix\n self.node_id = node_id\n self.client = mqtt_client\n if self.device_type == 'switch':\n self.client.message_callback_add(self.command_topic, self._on_command)\n self.client.subscribe(self.command_topic)\n\n self.client.publish(self.config_topic, json.dumps(self.config), retain=self.retain)\n logger.debug(\"Connected to broker, sent config to {}\".format(self.config_topic))",
"def sendConfig(self, config, filename=''):\n if self.ioLoopInst is not None:\n cmd = {'cmd': 'setConfig', 'value': config, 'filename': filename}\n self._sendMessageToWeb(cmd)\n else:\n print(\"sendConfig: \" + filename)",
"def send_config(self, configdict):\n self.config = configdict\n self.send_to_backend('config', configdict=configdict)",
"def test_repr_config(self) -> None:\n self.assertEqual(\n repr(self.config), \"TMConfiguration('q2', TMTape('abcdefghij', '.', 2))\"\n )\n self.assertEqual(\n repr(self.config2),\n \"MTMConfiguration('q1', (TMTape('abcdefghij', '.', 2), \"\n + \"TMTape('klmnopq', '.', 5)))\",\n )",
"def send_mqtt(self, data_type, data):\n try:\n client = mqtt.Client(\"rpi1_qnas\")\n client.on_connect = self.on_connect\n client.on_message = self.on_message\n client.connect(MQTT_BROKER_ADDRESS)\n client.loop_start()\n client.publish(MQTT_TOPIC + \"/{}\".format(data_type), data)\n client.disconnect()\n client.loop_stop()\n except Exception:\n msg = \"{} \\nMQTT error\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n self.loggers[\"log_stdout\"].warning(msg)\n self.loggers[\"log_errors\"].warning(msg)\n self.verbose(msg)",
"def init_mqtt_client(self):\n self.mqtt_client = Client() # create client object\n self.mqtt_client.username_pw_set(self.mqtt_user, self.mqtt_password)\n print(\"Connecting to the MQTT broker\", self.host_name, \".\")\n self.mqtt_client.connect(self.host_name, self.host_port)\n\n def on_message(client, userdata, msg):\n \"\"\" callback function to process mqtt messages \"\"\"\n message_type = msg.topic.split(\"/\")[-1]\n message = str(msg.payload.decode(\"utf-8\"))\n print(\"\\nreceived message on topic \" + msg.topic + \": \" + message)\n\n # The message should contain 3 things:\n # either <field/config>, parameter_name, new_value\n # or <field/config>, parameter_name, client_id\n if len(message.split(\",\")) != 3:\n print(\"Bad message structure\")\n return 0\n\n # React to custom topics. Should be implemented in a concrete class depending on the behaviour to simulate.\n self.custom_mqtt_reaction(msg.topic, message)\n\n # The client wants to change the value of a parameter\n if message_type == \"change\":\n request_type, parameter_name, new_value = message.split(\",\")\n if request_type == \"config\" and parameter_name in self.get_parameters_list():\n self.set_parameter_value(parameter_name, new_value)\n elif request_type == \"field\" and parameter_name in self.get_fields_list():\n self.set_field_value(parameter_name, new_value)\n\n # The client requests the value of a parameter\n elif message_type == \"request\":\n request_type, parameter_name, client_id = message.split(\",\")\n\n # Fake latency\n sleep(float(self.get_parameter_value(\"response_latency\")) / 1000)\n\n # ask for a configuration parameter\n if request_type == \"config\":\n print(\"request for a configuration parameter\")\n if parameter_name in self.get_parameters_list():\n self.mqtt_client.publish(self.base_topic + \"/answer/\" + client_id,\n self.get_parameter_value(parameter_name))\n else:\n self.mqtt_client.publish(self.base_topic + \"/answer/\" + client_id,\n \"no such parameter\")\n\n # ask for a field\n elif request_type == \"field\":\n print(\"request for a field\")\n if parameter_name in self.get_fields_list():\n client.publish(self.base_topic + \"/answer/\" + client_id,\n self.get_field_value(parameter_name))\n else:\n self.mqtt_client.publish(self.base_topic + \"/answer/\" + client_id,\n \"no such field\")\n\n self.mqtt_client.on_message = on_message # bind function to callback\n\n building, floor, room, type, name = self.base_parameters.values()\n\n topics = [\n building + \"/\" + floor + \"/\" + room + \"/\" + type + \"/\" + name + \"/+\",\n building + \"/\" + floor + \"/\" + room + \"/\" + type + \"/All/+\",\n building + \"/\" + floor + \"/\" + room + \"/All/All/+\",\n building + \"/\" + floor + \"/All/\" + type + \"/All/+\",\n building + \"/\" + floor + \"/All/All/All/+\",\n building + \"/All/All/\" + type + \"/All/+\",\n building + \"/All/All/All/All/+\",\n \"All/All/All/\" + type + \"/All/+\",\n \"All/All/All/All/All/+\"\n ]\n for topic in topics:\n print(\"Subscribing to the topic \" + topic)\n self.mqtt_client.subscribe(topic)\n\n self.mqtt_client.loop_start() # start loop to process received messages",
"async def test_set_config(self):\n set_log_levels(logger_topics=True)\n\n disable_auto_linking = random_bool()\n monitor_mode = random_bool()\n auto_led = random_bool()\n deadman = random_bool()\n topic = f\"ack.{SET_IM_CONFIGURATION}\"\n topic_item = TopicItem(\n topic,\n {\n \"disable_auto_linking\": disable_auto_linking,\n \"monitor_mode\": monitor_mode,\n \"auto_led\": auto_led,\n \"deadman\": deadman,\n },\n 0.1,\n )\n\n modem = ModemBase()\n reset_config(modem, disable_auto_linking, monitor_mode, auto_led, deadman)\n\n send_topics([topic_item])\n await modem.async_set_configuration(\n disable_auto_linking, monitor_mode, auto_led, deadman\n )\n await asyncio.sleep(0.1)\n\n assert modem.configuration[DISABLE_AUTO_LINKING].value == disable_auto_linking\n assert modem.configuration[MONITOR_MODE].value == monitor_mode\n assert modem.configuration[AUTO_LED].value == auto_led\n assert modem.configuration[DEADMAN].value == deadman\n\n assert modem.configuration[DISABLE_AUTO_LINKING].new_value is None\n assert modem.configuration[MONITOR_MODE].new_value is None\n assert modem.configuration[AUTO_LED].new_value is None\n assert modem.configuration[DEADMAN].new_value is None",
"def connect_to_broker(self):\n\n self.client_edge = mqtt.Client(\"P2\", transport=self.tTransport)\n self.client_edge.username_pw_set(username=\"dreamlabanveshak\", password=\"dream119\")\n self.client_edge.on_publish = self.on_publish_edge\n self.client_edge.connect(self.broker_address, port=self.tPort, keepalive=60, bind_address=\"\")\n\n self.client_vertex = mqtt.Client(\"P3\", transport=self.tTransport)\n self.client_vertex.username_pw_set(username=\"dreamlabanveshak\", password=\"dream119\")\n self.client_vertex.on_publish = self.on_publish_vertex\n self.client_vertex.connect(self.broker_address, port=self.tPort, keepalive=60, bind_address=\"\")\n\n self.client_path_topic = mqtt.Client(\"P13\", transport=self.tTransport)\n self.client_path_topic.username_pw_set(username=\"dreamlabanveshak\", password=\"dream119\")\n self.client_path_topic.on_publish = self.on_publish_path_topic\n self.client_path_topic.connect(self.broker_address, port=self.tPort, keepalive=60, bind_address=\"\")\n\n self.client_path_traffic_topic = mqtt.Client(\"P14\", transport=self.tTransport)\n self.client_path_traffic_topic.username_pw_set(username=\"dreamlabanveshak\", password=\"dream119\")\n self.client_path_traffic_topic.on_publish = self.on_publish_path_traffic_topic\n self.client_path_traffic_topic.connect(self.broker_address, port=self.tPort, keepalive=60, bind_address=\"\")\n\n self.client_traffic_color_topic = mqtt.Client(\"P21\", transport=self.tTransport)\n self.client_traffic_color_topic.username_pw_set(username=\"dreamlabanveshak\", password=\"dream119\")\n self.client_traffic_color_topic.on_publish = self.on_publish_path_traffic_topic\n self.client_traffic_color_topic.connect(self.broker_address, port=self.tPort, keepalive=60, bind_address=\"\")",
"def write_config(self):\n cfg = {\n 'ALERT_API_KEY':self.api_key,\n 'APP_NAME':self.title,\n 'alertes':self.alertes\n }\n write_conf(self.CONF_FILE,cfg)",
"def gather_configuration(self, config):\n config['log']['logging_level'] = self.logDisplay.get_logging_level()\n\n # MIDI\n config['midi']['winch_midi_input'] = self.winchMidiInputCombo.current_item()\n config['midi']['midi_output'] = self.midiOutputCombo.current_item()\n\n # OSC\n addr, port = self.oscListenerConfig.get_OSC_port()\n config['osc']['listener_addr'] = addr\n config['osc']['listener_port'] = str(port)\n addr, port = self.oscSenderConfig.get_OSC_port()\n config['osc']['sender_addr'] = addr\n config['osc']['sender_port'] = str(port)\n\n # DMX\n config['dmx']['dmx_output_serial_port'] = self.dmxSelect.current_item()\n\n # winches\n for i, winchSelect in enumerate(self.winchSelects):\n key = \"winch_%d_output_serial_port\" % (i+1)\n config['winches'][key] = winchSelect.current_item()\n\n return",
"def setup_mqtt_client(mqtt_conf, mqtt_client):\n\n if mqtt_conf['TLS']['enable']:\n logger.info(\"TLS Setup for Broker\")\n logger.info(\"checking TLS_Version\")\n tls = mqtt_conf['TLS']['tls_version']\n if tls == 'tlsv1.2':\n tlsVersion = ssl.PROTOCOL_TLSv1_2\n elif tls == \"tlsv1.1\":\n tlsVersion = ssl.PROTOCOL_TLSv1_1\n elif tls == \"tlsv1\":\n tlsVersion = ssl.PROTOCOL_TLSv1\n else:\n logger.info(\"Unknown TLS version - ignoring\")\n tlsVersion = None\n if not mqtt_conf['TLS']['insecure']:\n\n logger.info(\"Searching for Certificates in certdir\")\n CERTS_DIR = mqtt_conf['TLS']['certs']['certdir']\n if os.path.isdir(CERTS_DIR):\n logger.info(\"certdir exists\")\n CA_CERT_FILE = os.path.join(CERTS_DIR, mqtt_conf['TLS']['certs']['cafile'])\n CERT_FILE = os.path.join(CERTS_DIR, mqtt_conf['TLS']['certs']['certfile'])\n KEY_FILE = os.path.join(CERTS_DIR, mqtt_conf['TLS']['certs']['keyfile'])\n\n mqtt_client.tls_set(ca_certs=CA_CERT_FILE, certfile=CERT_FILE, keyfile=KEY_FILE, cert_reqs=ssl.CERT_REQUIRED, tls_version=tlsVersion)\n else:\n logger.error(\"certdir does not exist.. check path\")\n sys.exit()\n else:\n mqtt_client.tls_set(ca_certs=None, certfile=None, keyfile=None, cert_reqs=ssl.CERT_NONE, tls_version=tlsVersion)\n mqtt_client.tls_insecure_set(True)\n \n if mqtt_conf['username'] and mqtt_conf['password']:\n logger.info(\"setting username and password for Broker\")\n mqtt_client.username_pw_set(mqtt_conf['username'], mqtt_conf['password'])\n \n return mqtt_client",
"def publish_mqtt_msg(topic, mqtt_msg):\n\n MQTT_HOST = settings.MQTT_HOST\n MQTT_PORT = settings.MQTT_PORT\n MQTT_KEEPALIVE_INTERVAL = settings.MQTT_KEEPALIVE_INTERVAL\n\n MQTT_TOPIC = topic\n\n MQTT_MSG = json.dumps(mqtt_msg)\n\n \"\"\" Celery task to create a password for the user \"\"\"\n\n celery_task.delay(MQTT_MSG)\n\n def on_publish(client, userdata, mid):\n print(\"Message Published...\")\n\n def on_connect(client, userdata, flags, rc):\n client.subscribe(MQTT_TOPIC)\n client.publish(MQTT_TOPIC, MQTT_MSG)\n\n def on_message(client, userdata, msg):\n print(msg.topic)\n print(msg.payload)\n payload = json.loads(msg.payload)\n print(payload['sepalWidth'])\n client.disconnect()\n\n mqttc = mqtt.Client()\n mqttc.on_publish = on_publish\n mqttc.on_connect = on_connect\n mqttc.on_message = on_message\n\n mqttc.connect(MQTT_HOST, MQTT_PORT, MQTT_KEEPALIVE_INTERVAL)",
"def on_message(self, client, userdata, msg):\n self.log.info(u\"==> Received subscribed MQTT message: %s = %s\" % (msg.topic, msg.payload))\n for deviceid in self.devicelist: # {'70' : {'name': 'Temp Atelier', 'topic': 'domogik/maison/ateliertemp', 'type' : 'mqtt.sensor_temperature', 'qos' : 0}}\n if msg.topic == self.devicelist[deviceid][\"topic\"]:\n if self.devicelist[deviceid][\"type\"] in self.numberSensors:\n if not self.is_number(msg.payload):\n self.log.error(u\"### MQTT message '%s' for device '%s' not return a number: '%s'\" % (msg.topic, self.devicelist[deviceid][\"name\"], msg.payload))\n return\n elif self.devicelist[deviceid][\"type\"] in self.boolSensors:\n if msg.payload.lower() not in self.boolStrValue:\n self.log.error(u\"### MQTT message '%s' for device '%s' not return a binary: '%s'\" % (msg.topic, self.devicelist[deviceid][\"name\"], msg.payload))\n return\n msg.payload = self.boolStrValue[msg.payload.lower()]\n \n self.send(deviceid, msg.payload.decode('utf-8', 'ignore')) # Erreur \"UnicodeDecodeError: 'ascii' codec can't decode byte\" avec prevision pluie !",
"def publish_mqtt(client, sensor_data, options, topics, file_handle, verbose=False):\n\n hum = sensor_data.humidity + options.hoffset\n\n temp_C = sensor_data.temperature + options.toffset\n temp_F = 9.0/5.0 * temp_C + 32\n temp_K = temp_C + 273.15\n\n press_A = sensor_data.pressure + options.poffset\n\n # https://www.sandhurstweather.org.uk/barometric.pdf\n if options.elevation > SEALEVEL_MIN:\n # option one: Sea Level Pressure = Station Pressure / e ** -elevation / (temperature x 29.263)\n #press_S = press_A / math.exp( - elevation / (temp_K * 29.263))\n # option two: Sea Level Pressure = Station Pressure + (elevation/9.2)\n press_S = press_A + (options.elevation/9.2)\n else:\n press_S = press_A\n\n curr_datetime = datetime.datetime.now()\n\n if verbose:\n str_datetime = curr_datetime.strftime(\"%Y-%m-%d %H:%M:%S\")\n print(\"{0}: temperature: {1:.1f} F, humidity: {2:.1f} %RH, pressure: {3:.2f} hPa, sealevel: {4:.2f} hPa\".\n format(str_datetime, temp_F, hum, press_A, press_S), file=file_handle)\n file_handle.flush()\n\n if options.format == \"flat\":\n temperature = str(round(temp_F, 1))\n humidity = str(round(hum, 1))\n pressure = str(round(press_A, 2))\n pressure_sealevel = str(round(press_S, 2))\n\n client.publish(topics.temperature, temperature)\n client.publish(topics.humidity, humidity)\n client.publish(topics.pressure, pressure)\n\n if options.elevation > SEALEVEL_MIN:\n client.publish(topics.sealevel_pressure, pressure_sealevel)\n\n else:\n data = {}\n\n data[options.section] = {}\n data[options.section]['Humidity'] = round(hum, 1)\n data[options.section]['Temperature'] = round(temp_F, 1)\n data[options.section]['Pressure'] = round(press_A, 2)\n if options.elevation > SEALEVEL_MIN:\n data[options.section]['Sealevel'] = round(press_S, 2)\n\n data['TempUnit'] = 'F'\n data['Time'] = curr_datetime.replace(microsecond=0).isoformat()\n\n #json_data = json.dumps(data)\n client.publish(options.root_topic + '/SENSOR', json.dumps(data))\n\n return",
"def save_conf(self):\r\n self.sendAndRecv(\"SAVECONF\\r\\n\")",
"async def websocket_lovelace_save_config(\n hass: HomeAssistant,\n connection: websocket_api.ActiveConnection,\n msg: dict[str, Any],\n config: LovelaceStorage,\n) -> None:\n await config.async_save(msg[\"config\"])",
"def test_readConfiguration(self):\n self.createSettingsFile()\n testSave = ConfigurationWindow()\n\n self.assertEqual(str(testSave.FieldLabelServer.text()),\n '172.19.51.133')\n self.assertEqual(str(testSave.FieldLabelPort.text()),\n '25345')\n self.assertEqual(str(testSave.FieldLabelUDPIpSend.text()),\n '172.19.51.145')\n self.assertEqual(str(testSave.FieldLabelUDPPortSend.text()),\n '57009')\n self.assertEqual(str(testSave.FieldLabelUDPIPReceive.text()),\n '127.0.0.1')\n self.assertEqual(str(testSave.FieldLabelUDPPortRececeive.text()),\n '1234')\n self.assertEqual(str(testSave.FieldLabelTCPIPSend.text()),\n '127.0.0.1')\n self.assertEqual(str(testSave.FieldLabelTCPPortSend.text()),\n '1234')\n self.assertEqual(str(testSave.FieldLabelTCPIPReceive.text()),\n '127.0.0.1')\n self.assertEqual(str(testSave.FieldLabelTCPPortRececeive.text()),\n '4321')",
"def translate_msg_from_adaptor(neid, msg_type, opdata): # noqa: E501\n converted_msg = None\n if msg_type == \"edit_config\":\n root = etree.Element(\"config\", nsmap={None: \"urn:ietf:params:xml:ns:netconf:base:1.0\"}) # add the config element\n converted_msg = parse_keyvalue_all(opdata, root)\n return 'it work'",
"def configure(self, config_name, action, contents):\n config = self.default_config.copy()\n config.update(contents)\n\n _log.debug(\"Configuring Agent\")\n\n try:\n setting1 = int(config[\"setting1\"])\n setting2 = config[\"setting2\"]\n except ValueError as e:\n _log.error(\"ERROR PROCESSING CONFIGURATION: {}\".format(e))\n return\n\n for x in self.setting2:\n self._create_subscriptions(str(x))\n print(str(x))",
"def send_service_config(self, honeypotids, config):\n req = {\"type\": \"set_settings\", \n \"from\": self.network.mc_id,\n \"to\": honeypotids,\n \"settings\": config}\n expect_dict = {\"type\": \"hp_settings\"}\n msg_list = self.send_receive(req, honeypotids, expect_dict)\n answer = {}\n for msg in msg_list:\n answer[msg[\"from\"]] = msg[\"settings\"]\n return answer",
"def config():",
"def config():"
] | [
"0.6071265",
"0.606536",
"0.55985576",
"0.5453353",
"0.54124284",
"0.5389315",
"0.5340085",
"0.53380466",
"0.530493",
"0.5208637",
"0.5197179",
"0.5167025",
"0.513544",
"0.51308423",
"0.51290375",
"0.51168764",
"0.5085436",
"0.50713146",
"0.504145",
"0.50359666",
"0.5025919",
"0.50226337",
"0.501956",
"0.5002826",
"0.49917975",
"0.49899557",
"0.49832207",
"0.4978781",
"0.49681282",
"0.49681282"
] | 0.7361652 | 0 |
Identifies if an object is numeric, that is, int, float or bool. | def isNumeric(obj):
return isinstance(obj, (int, float, bool)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_numeric(obj):\n return isinstance(obj, (int, float, complex))",
"def isNumeric(obj):\n # type: (Any) -> bool\n return isinstance(obj, numbers.Number)",
"def is_numeric(space, w_obj):\n if w_obj.tp in [space.tp_float, space.tp_int]:\n return space.w_True\n if w_obj.tp == space.tp_str:\n return space.newbool(w_obj.is_really_valid_number(space))\n return space.w_False",
"def is_numeric(value):\n return isinstance(value, int) or isinstance(value, float)",
"def isnumeric(self):\n return isnumeric(self)",
"def is_numeric(number):\n\n if isinstance(number, bool):\n return False\n elif isinstance(number, int) or isinstance(number, float):\n return True\n else:\n return False",
"def has_numeric_type(obj: _std_typing.Any) -> bool:\n return (not has_vector_type(obj)) and (not has_string_type(obj))",
"def is_numeric(self) -> bool:\n return False",
"def is_numeric (self) :\n\n return self.__isnumeric__",
"def is_numeric_type(self):\n row_type = self.get_type()\n is_numeric = row_type in (\n 'hidden decimal',\n 'decimal',\n 'hidden integer',\n 'integer',\n 'int',\n 'range',\n )\n return is_numeric",
"def is_numeric(x):\n if isinstance(x, NUMBER_TYPES):\n return True\n elif isinstance(x, np.ndarray):\n return x.dtype.type not in NUMPY_NON_TYPES\n return False",
"def is_numeric(val):\n if \\\n isinstance(val, int) or \\\n isinstance(val, float):\n return True\n elif \\\n isinstance(val, str) and \\\n val.isdigit():\n return True\n else:\n return False",
"def is_numeric(value):\n return any([\n type(value) is str and value.isnumeric(),\n hasattr(value, 'is_integer') and value.is_integer(),\n type(value) is int,\n ])",
"def ISNUMBER(value):\n return isinstance(value, numbers.Number)",
"def _has_numeric_or_bool(self) -> bool:\n dtypes: Set[str] = set(self._data.keys())\n return 'i' in dtypes or 'f' in dtypes or 'b' in dtypes",
"def isNumber(x):\n return isinstance(x, (int, float))",
"def isnum(value):\n\n try:\n return bool(isinstance(value, (float, int)))\n except RuntimeError:\n return False",
"def is_number(obj):\n try:\n complex(obj) # for int, long, float and complex\n except ValueError:\n return False\n\n return True",
"def isnumeric(number):\n try:\n float(number)\n return True\n except (TypeError, ValueError):\n return False",
"def is_number(self, value):\n if isinstance(value, (int, float, long, complex)): # noqa\n return True\n return False",
"def is_number(x):\n if isinstance(x, (int, float)):\n return True\n else:\n return False",
"def isNumber(x):\n\treturn type(x) in [int, float]",
"def is_number(value):\n\n return isinstance(value, (int, long, float))",
"def is_numerable(self):\n return (self.is_unknown or self.is_byte or self.is_word\n or self.is_dword or self.is_qword)",
"def could_be_number(val):\n if val == None:\n return False\n\n if isinstance(val, (float, int, long)):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n n = float(val)\n if not isinstance(n, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False",
"def _is_number(value):\n try:\n float(value)\n return True\n except (TypeError, ValueError):\n return False",
"def is_number(n):\n return isinstance(n, (int, float))",
"def isnumeric(a):\n if not _is_unicode(a):\n raise TypeError(\"isnumeric is only available for Unicode strings and arrays\")\n return _vec_string(a, bool_, 'isnumeric')",
"def is_number(s: Union[str, int, float]):\n if isinstance(s, str) and s.lower() == \"nan\":\n return True\n try:\n float(s)\n return True\n except ValueError:\n return False",
"def is_number(s: Any) -> bool:\n try:\n int(s)\n return True\n except ValueError:\n pass\n\n try:\n float(s)\n return True\n except ValueError:\n pass\n\n return False"
] | [
"0.8730025",
"0.8449824",
"0.83024406",
"0.8183594",
"0.80185515",
"0.7984723",
"0.78311867",
"0.77982944",
"0.7680896",
"0.7653552",
"0.75546384",
"0.7527674",
"0.7505374",
"0.74033344",
"0.7371881",
"0.73513925",
"0.7326164",
"0.73183155",
"0.73108757",
"0.7299548",
"0.72880125",
"0.72752184",
"0.7222098",
"0.72206885",
"0.719314",
"0.71758264",
"0.71755654",
"0.716814",
"0.71641254",
"0.7134992"
] | 0.8853641 | 0 |
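A quick usage sketch of the helper documented in the row above (illustrative only, not part of the dataset): because bool is a subclass of int in Python, True and False also pass this check.

def isNumeric(obj):
    """Identifies if an object is numeric, that is, int, float or bool."""
    return isinstance(obj, (int, float, bool))

assert isNumeric(3) and isNumeric(2.5) and isNumeric(True)
assert not isNumeric("3")             # strings are rejected
assert not isNumeric(complex(1, 0))   # complex is not accepted by this variant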
Operator overloading. Generate an expression for addition. | def __add__(self, other):
if not (isNumeric(other) or isinstance(other, Expression)):
error_msg = (
f'Invalid expression during addition to {self}: [{other}]'
)
raise excep.biogemeError(error_msg)
return Plus(self, other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __add__(self, other: Any) -> ColumnOperators:\n return self.operate(add, other)",
"def plus(self, a, b):\n return a + b",
"def __radd__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(other, self)",
"def math_add():\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n return str(add(a, b))",
"def addExpr( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"addExpr: \", tok)\n\tleft = term( )\n\ttok = tokens.peek( )\n\twhile tok == \"+\" or tok == \"-\":\n\t\ttokens.next()\n\t\tright = term( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left",
"def __radd__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(add, other)",
"def addition(self):\n\t\treturn lambda anything: self.__class__(\n\t\t\t(self[:], disj, checked_proposition(anything)[:])\n\t\t)",
"def test_evaluate_add_expression(self):\n value = self.evaluate_common(\"2M add 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Decimal, \"Expected Decimal\")\n self.assertTrue(value.value == 4, \"Expected 4\")\n value = self.evaluate_common(\"2D add 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 4.0, \"Expected 4\")\n value = self.evaluate_common(\"2F add 2D\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 4.0, \"Expected 4\")\n value = self.evaluate_common(\"2 add 2L\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int64, \"Expected Int64\")\n self.assertTrue(value.value == 4, \"Expected 4\")\n try:\n value = self.evaluate_common(\"2 add '2'\")\n self.fail(\"String promotion to int\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"2 add null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int32, \"Expected Int32\")\n self.assertTrue(value.value is None, \"Expected None\")",
"def addition(a, b):\n return a + b",
"def ADD (self, n1, n2):",
"def addition(a, b):\n pass",
"def calculate_expression(number1, number2, operator):\n\n if operator == '+':\n return number1 + number2\n elif operator == '-':\n return number1 - number2\n elif operator == '*':\n return number1 * number2",
"def calc(operand_1, operand_2):\n return operand_1 + operand_2",
"def calc(operand_1, operand_2):\n return operand_1 + operand_2",
"def __add__(self,that):\n return self.__opExpand2(that,np.add)",
"def addition(a, b):\r\n\r\n result = a + b\r\n return result",
"def __iadd__(self, other):\n\n return self + other",
"def convert_elementwise_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def convert_addn(node, **kwargs):\n return create_basic_op_node('Sum', node, kwargs)",
"def add( a, b ):\n return a + b",
"def __add__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'add')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(self.tf + other.tf, self.type_name, provenance)\n else:\n # hopefully a constant\n provenance = NQExprProvenance(\n operation='add',\n inner=self.provenance,\n args=(None, other),\n other=NQExprProvenance(operation='constant'))\n return self.context.as_nql(self.tf + other, self.type_name, provenance)",
"def add(a, b):\n c = Calculator()\n result = c.add(a, b)\n click.echo('{} + {} = {}'.format(a, b, result))",
"def calc(operand_1, operand_2):\n\n return operand_1 + operand_2",
"def addition(a,b):\n return a+b",
"def __add__(self, value):\r\n if isinstance(value, (int, dec.Decimal)):\r\n return self.__class__(self._real + value, self._imag)\r\n elif isinstance(value, self.__class__):\r\n return self.__class__(self._real + value._real, self._imag + value._imag)\r\n raise TypeError(\r\n 'unsupported operand type(s) for +: {!r} and {!r}'.format(\r\n self.__class__.__name__, value.__class__.__name__\r\n )\r\n )",
"def add(self, a, b):\n return a + b",
"def __add__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Add, value)\n return out",
"def add(lhs, rhs):\n return _make.add(lhs, rhs)",
"def add(x, y):\n\n return x + y",
"def __add__(self, other):\n\n return self._binary_elementwise_op(other, np.add)"
] | [
"0.7368789",
"0.72432524",
"0.7233358",
"0.7125167",
"0.7071412",
"0.6955811",
"0.6865642",
"0.68313384",
"0.68223995",
"0.6804402",
"0.6791067",
"0.6787343",
"0.6785731",
"0.6785731",
"0.67737544",
"0.6763604",
"0.6743341",
"0.6740774",
"0.67383057",
"0.6683524",
"0.66759837",
"0.66643786",
"0.6645709",
"0.6641556",
"0.6635464",
"0.6626003",
"0.66106755",
"0.66097736",
"0.6591463",
"0.6586191"
] | 0.7279723 | 1 |
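For context on the row above (illustrative sketch only): the __add__ overload does not compute a sum, it returns a new node of an expression tree. The real code depends on biogeme-style Expression / Plus classes and excep.biogemeError, which are only approximated here.

class Expression:
    def __add__(self, other):
        # Reject operands that are neither numeric nor expressions.
        if not isinstance(other, (int, float, bool, Expression)):
            raise TypeError(f'Invalid expression during addition to {self}: [{other}]')
        return Plus(self, other)

class Plus(Expression):
    def __init__(self, left, right):
        self.left = left
        self.right = right

# Expression() + 2 now builds a Plus node instead of evaluating anything.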
Operator overloading. Generate an expression for unary minus. | def __neg__(self):
return UnaryMinus(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unary_operator(op):\n # Only negate is currently supported for all our possible input types.\n valid_ops = {'-'}\n if op not in valid_ops:\n raise ValueError(\"Invalid unary operator %s.\" % op)\n\n def unary_operator(self):\n # This can't be hoisted up a scope because the types returned by\n # unary_op_return_type aren't defined when the top-level function is\n # invoked.\n if isinstance(self, NumericalExpression):\n return NumExprFactor(\n \"{op}({expr})\".format(op=op, expr=self._expr),\n self.inputs,\n )\n else:\n return NumExprFactor(\"{op}x_0\".format(op=op), (self,))\n\n unary_operator.__doc__ = \"Unary Operator: '%s'\" % op\n return unary_operator",
"def visit_Unary(self, node):\n op = node.op.type\n if op == PLUS:\n return +self.visit(node.expr)\n elif op == MINUS:\n return -self.visit(node.expr)",
"def test02_unary_math_operators(self):\n\n import _cppyy\n number = _cppyy.gbl.number\n\n n = number(20)\n n += number(10)\n n -= number(10)\n n *= number(10)\n n /= number(2)\n assert n == number(100)\n\n nn = -n;\n assert nn == number(-100)",
"def __neg__(self) -> ColumnOperators:\n return self.operate(neg)",
"def _negation_op(spec, expression):",
"def visit_UnaryOp(self, node):\n token = node.token\n if token.type == PLUS:\n return self.visit(node.right)\n if token.type == MINUS:\n return -1 * self.visit(node.right)\n self.raise_error()",
"def minus(self, a, b):\n return a - b",
"def visit_UnaryOp(self, node):\n self.generic_visit(node)\n if isinstance(node.operand, ast.Num):\n # Don't transform negations of numeric literals. Just treat them\n # as literals.\n return node\n return to_call(self.op_to_function(node.op), [node.operand])",
"def unary(op, v):\n if op == \"+\":\n return v\n if op == \"-\":\n return -v\n if op.lower() == \"not\":\n return not(v)\n raise Exception(\"unary op not implemented\")",
"def __neg__(self):\n return type(self)(self.parent(), self._simplify(-self._express))",
"def __neg__(self):\n return self.__mul__(-1)",
"def subtract(lhs, rhs):\n return _make.subtract(lhs, rhs)",
"def neg(a):\n return -a;",
"def __neg__(self):\n return (-1)*self",
"def convert_negative(node, **kwargs):\n return create_basic_op_node('Neg', node, kwargs)",
"def convert_minus_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Sub', **kwargs)",
"def make_positive(expression: Expr) -> Expr:\n if expression.op == '~':\n new_expression = Expr(expression.args[0].op, *expression.args[0].args)\n return new_expression\n return expression",
"def __neg__(self):\n return self.coeff_mul(-1)",
"def __sub__(self, other):\n return self + other.__neg__()",
"def __sub__(self, other):\n return self + other.__neg__()",
"def gen_unop(self, expr: expressions.UnaryOperator):\n if expr.op in [\"x++\", \"x--\", \"--x\", \"++x\"]:\n # Increment and decrement in pre and post form\n # Determine increment or decrement:\n op = expr.op[1]\n pre = expr.op[0] == \"x\"\n value = self.gen_inplace_mutation(expr, op, pre)\n elif expr.op == \"*\":\n value = self.gen_expr(expr.a, rvalue=True)\n assert expr.lvalue\n elif expr.op == \"&\":\n assert expr.a.lvalue\n value = self.gen_expr(expr.a, rvalue=False)\n elif expr.op in [\"-\", \"~\"]:\n a = self.gen_expr(expr.a, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n value = self.emit(ir.Unop(expr.op, a, \"unop\", ir_typ))\n elif expr.op in [\"!\"]:\n value = self.gen_condition_to_integer(expr)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value",
"def cg_inline_unary(self, fn):\n if fn == 'neg':\n op = '-'\n elif fn == 'not':\n op = '!'\n else:\n raise ValueError(f\"Unknown unary operator: {fn}\")\n self.asm(unindent(f\"\"\"\n @SP\n AM=M-1 // SP--\n D={op}M // D = MEM[SP]\n {self._cg_push_D}\n \"\"\"))",
"def convert_rminus_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Sub', **kwargs)",
"def __neg__(self):\n return 0 - self",
"def pauli_represent_minus_plus(e):\n # XXX: todo, make sure that new operators inherit labels\n return expression_tree_transform(\n e, [(lambda e: isinstance(e, SigmaX),\n lambda e: SigmaMinus() + SigmaPlus()),\n (lambda e: isinstance(e, SigmaY),\n lambda e: I * SigmaMinus() - I * SigmaPlus())]\n )",
"def test_unary_op_support():\n check_peval_expression(\"+(2)\", {}, \"2\", fully_evaluated=True, expected_value=2)\n check_peval_expression(\"-(-3)\", {}, \"3\", fully_evaluated=True, expected_value=3)\n check_peval_expression_bool(\"not 0\", {}, True)\n check_peval_expression(\"~(-4)\", {}, \"3\", fully_evaluated=True, expected_value=3)",
"def __neg__(self) -> 'SInt':\r\n return self.complement()",
"def neg(self, a):\n return -a",
"def __neg__(self):\n return self[::-1].complement",
"def add_subtract(statement):\r\n operators = list(filter(lambda x: x in ('+', '-'), statement))\r\n index = statement.index(operators[0])\r\n\r\n # Find operands\r\n op1, op2 = find_operands(statement, index)\r\n\r\n # Perform operation\r\n if operators[0] == '+':\r\n result = op1 + op2\r\n elif operators[0] == '-':\r\n result = op1 - op2\r\n\r\n # Replace operator and operands with result\r\n remove_and_replace(statement, index, result)\r\n\r\n return statement"
] | [
"0.7384949",
"0.7325116",
"0.7280399",
"0.7254818",
"0.71499866",
"0.71430916",
"0.6979477",
"0.6833238",
"0.6832739",
"0.6772833",
"0.66956836",
"0.65985936",
"0.6595696",
"0.65774065",
"0.65257376",
"0.6504867",
"0.6501448",
"0.64852464",
"0.6480339",
"0.6480339",
"0.64526474",
"0.64509785",
"0.64163905",
"0.63729864",
"0.6361572",
"0.6355979",
"0.63531137",
"0.63358235",
"0.6327072",
"0.63047314"
] | 0.7887373 | 0 |
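The unary-minus row above follows the same expression-tree pattern; a hedged sketch of the assumed UnaryMinus node, extending the Expression sketch given after the addition row:

class UnaryMinus(Expression):
    def __init__(self, child):
        self.child = child

# With Expression.__neg__ defined as `return UnaryMinus(self)`, writing -x for
# an Expression x produces a UnaryMinus node rather than a numeric result.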
Recursively extract the variables appearing in the expression, and store them in a dictionary. | def dictOfVariables(self):
s = {}
for e in self.children:
d = e.dictOfVariables()
s = dict(s, **d)
return s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def eval(self):\n vars = {}\n for line in self._instrs:\n yield_result = False\n save_result = None\n yield_match = re.match(r\"yield (.*)\", line)\n if yield_match:\n expr = yield_match.group(1)\n yield_result = True\n var_match = re.match(r\"\\$([a-z0-9]+) = (.*)\", line)\n if var_match:\n save_result = var_match.group(1)\n expr = var_match.group(2)\n value = self._eval_composite(vars, expr, save_result)\n if yield_result:\n return value\n if not save_result is None:\n vars[save_result] = value",
"def get_variables_binds(self, predicate, bound_variables=None, variables_binds=None, recursion_level=1):\n\n # print(\"EXPLORING\", recursion_level, predicate, variables_binds)\n\n # Set of bound variables in predicate body\n if bound_variables is None:\n bound_variables = set()\n\n # Possible binds\n if variables_binds is None:\n variables_binds = [{}]\n\n recursion_level -= 1\n\n new_possible_binds = []\n\n for body_clause in predicate.body:\n adornments = self.compute_adornments(body_clause.parameters, bound_variables)\n\n # For each fact search if we can match every bound variable and assign free ones\n if body_clause.name in self._facts:\n for fact in self._facts[body_clause.name]:\n possible_binds = self.check_fact_with_adornment(fact, body_clause, adornments, variables_binds)\n if len(possible_binds):\n # A fact matched, we add variables binds to sup\n new_possible_binds.extend(possible_binds)\n\n # if len(new_possible_binds):\n # variables_binds = new_possible_binds\n\n if recursion_level > 0:\n # For each rule\n if body_clause.name in self._rules:\n for applicable_rule in self._rules[body_clause.name]:\n\n n_bound_variables = set()\n n_variables_binds = [{}]\n\n for index, argument in enumerate(body_clause.parameters):\n rule_corresponding_parameter = applicable_rule.head.parameters[index]\n\n if rule_corresponding_parameter.is_constant():\n if argument.is_constant():\n if rule_corresponding_parameter.value != argument.value:\n break\n else:\n if adornments[index]:\n if argument.is_constant():\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = argument.value\n elif argument.name in bound_variables and argument.name in variables_binds[0]:\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = variables_binds[0][argument.name]\n\n applicable_predicate_binds = self.get_variables_binds(applicable_rule, n_bound_variables, n_variables_binds, recursion_level)\n for n_bind in applicable_predicate_binds:\n adapted_bind = self.substitute_variable_names(n_bind, applicable_rule.head, body_clause)\n new_possible_binds.extend(adapted_bind)\n\n if len(new_possible_binds):\n variables_binds = new_possible_binds.copy()\n new_possible_binds.clear()\n else:\n variables_binds = [{}]\n\n new_possible_binds_no_duplicates = self.remove_duplicate_binds(variables_binds)\n\n if len(new_possible_binds_no_duplicates):\n yield new_possible_binds_no_duplicates",
"def _get_var_vals(item, context, global_only=False):\n\n import procedures\n import statements\n\n # Get all the variables.\n\n # Vars on RHS.\n var_visitor = var_in_expr_visitor(context)\n item.accept(var_visitor, no_embedded_loops=False)\n var_names = var_visitor.variables\n\n # Vars on LHS.\n lhs_visitor = lhs_var_visitor()\n item.accept(lhs_visitor, no_embedded_loops=False)\n lhs_var_names = lhs_visitor.variables\n \n # Handle member access expressions.\n var_names = var_names.union(lhs_var_names)\n tmp = set()\n for var in var_names:\n tmp.add(var)\n if (\".\" in var):\n tmp.add(var[:var.index(\".\")])\n var_names = tmp\n\n # Handle With variables if needed.\n if (context.with_prefix_raw is not None):\n var_names.add(safe_str_convert(context.with_prefix_raw))\n \n # Get a value for each variable.\n r = {}\n zero_arg_funcs = set()\n for var in var_names:\n\n # Don't try to convert member access expressions that involve\n # method calls to Python variables. These should be handled\n # later as actual calls.\n if (\"(\" in var):\n continue\n\n # Do we already know the variable value? \n val = None\n orig_val = None\n try:\n\n # Try to get the current value.\n val = context.get(var, global_only=global_only)\n orig_val = val\n \n # We have been kind of fuzzing the distinction between global and\n # local variables, so tighten down on globals only by just picking\n # up global variables that appear on the RHS but not LHS.\n if (global_only and (var in lhs_var_names)):\n continue\n \n # Do not set function arguments to new values.\n # Do not set loop index variables to new values.\n if ((val == \"__FUNC_ARG__\") or\n (val == \"__ALREADY_SET__\") or\n (val == \"__LOOP_VAR__\")):\n continue\n \n # Function definitions are not valid values.\n if isinstance(val, (VbaLibraryFunc, procedures.Function, procedures.Sub, statements.External_Function)):\n\n # Don't use the function definition as the value.\n val = None\n \n # 0 arg func calls should only appear on the RHS\n if (var not in lhs_var_names):\n zero_arg_funcs.add(var)\n\n # Don't treat these function calls as variables and\n # assign initial values to them.\n context.set(\"__ORIG__\" + var, orig_val, force_local=True)\n context.set(\"__ORIG__\" + var, orig_val, force_global=True)\n continue\n\n # 'inf' is not a valid value.\n val_str = None\n try:\n val_str = safe_str_convert(val).strip()\n except UnicodeEncodeError:\n val_str = filter(isprint, val).strip()\n if ((val_str == \"inf\") or\n (val_str == \"-inf\")):\n val = None\n\n # 'NULL' is not a valid value.\n if (val_str == \"NULL\"):\n val = None\n\n # Weird bug.\n if (\"core.vba_library.run_function\" in val_str):\n val = 0\n \n # Unedfined variable.\n except KeyError:\n if global_only:\n continue\n\n # Got a valid value for the variable?\n if (val is None):\n\n # Variable is not defined. 
Try to infer the type based on how it is used.\n #print \"TOP LOOK TYPE: \" + safe_str_convert(var)\n var_type, certain_of_type = _infer_type(var, item, context)\n #print (var_type, certain_of_type)\n if (var_type == \"INTEGER\"):\n val = \"NULL\"\n if certain_of_type:\n #print \"SET TYPE INT\"\n #print var\n val = 0\n context.set_type(var, \"Integer\")\n elif (var_type == \"STRING\"):\n val = \"\"\n if certain_of_type:\n context.set_type(var, \"String\")\n else:\n log.warning(\"Type '\" + safe_str_convert(var_type) + \"' of var '\" + safe_str_convert(var) + \"' not handled.\" + \\\n \" Defaulting initial value to \\\"NULL\\\".\")\n val = \"NULL\"\n\n # Rename some vars that overlap with python builtins.\n var = utils.fix_python_overlap(var)\n \n # Save the variable value.\n r[var] = val\n\n # Save the regex pattern if this is a regex object.\n if (safe_str_convert(val) == \"RegExp\"):\n if (context.contains(\"RegExp.pattern\")):\n pval = to_python(context.get(\"RegExp.pattern\"), context)\n if (pval.startswith('\"')):\n pval = pval[1:]\n if (pval.endswith('\"')):\n pval = pval[:-1]\n r[var + \".Pattern\"] = pval\n if (context.contains(\"RegExp.global\")):\n gval = to_python(context.get(\"RegExp.global\"), context)\n gval = gval.replace('\"', \"\")\n if (gval == \"True\"):\n gval = True\n if (gval == \"False\"):\n gval = False\n r[var + \".Global\"] = gval\n \n # Mark this variable as being set in the Python code to avoid\n # embedded loop Python code generation stomping on the value.\n context.set(var, \"__ALREADY_SET__\", force_local=True)\n context.set(var, \"__ALREADY_SET__\", force_global=True)\n \n # Save the original value so we know it's data type for later use in JIT\n # code generation.\n if (orig_val is None):\n orig_val = val\n context.set(\"__ORIG__\" + var, orig_val, force_local=True)\n context.set(\"__ORIG__\" + var, orig_val, force_global=True)\n \n # Done.\n return (r, zero_arg_funcs)",
"def extract_variables(expected_variables, _request):\n extracted_variables = {}\n for variable in expected_variables:\n form_var = _request.form.get(variable)\n args_var = _request.args.get(variable)\n if form_var and args_var:\n extracted_variables[variable] = [form_var, args_var]\n else:\n extracted_variables[variable] = form_var if form_var else args_var\n return extracted_variables",
"def context(template):\n\n return {\n v.key: v.read()\n for v in [Variable(name) for name in extract_variables(template)]\n }",
"def getVariables(self)->Dict[str,str]:\n pass",
"def variables(s):\n result = set([])\n def walk(s):\n if is_variable(s):\n result.add(s)\n else:\n for arg in s.args:\n walk(arg)\n walk(s)\n return result",
"def get_variables(self):\n local_variables = self._design.GetVariables(\n )+self._design.GetPostProcessingVariables()\n return {lv: self.get_variable_value(lv) for lv in local_variables}",
"def parse_vars(items):\n return dict((parse_var(item) for item in items))",
"def query_variables(md):\n\n # save as dictionaries with searchers as keys\n x_searchers = {}\n b_target = {}\n\n t_max = 0\n\n for var in md.getVars():\n my_var_name = var.varName\n my_var_value = var.x\n # print('%s %g' % (my_var_name, my_var_value))\n\n if 'x' in my_var_name:\n s = int(my_var_name[2:my_var_name.find(\",\")])\n v = int(my_var_name[my_var_name.find(\",\") + 1:my_var_name.rfind(\",\")])\n t = int(my_var_name[my_var_name.rfind(\",\") + 1:-1])\n\n # print('%s = %f ' % (my_var_name, my_var_value))\n x_searchers[(s, v, t)] = my_var_value\n\n if t > t_max:\n t_max = t\n\n elif 'beta' in my_var_name and '_s' not in my_var_name:\n # print('%s %g' % (my_var_name, my_var_value))\n # remember: b[0] is probability of capture\n v = int(my_var_name[5:my_var_name.find(\",\")])\n t = int(my_var_name[my_var_name.find(\",\") + 1:my_var_name.rfind(\"]\")])\n b_target[(v, t)] = my_var_value\n\n # make sure x is binary\n x_searchers = enforce_binary(x_searchers, t_max)\n b_target = enforce_sum_1(b_target, t_max)\n\n # x_searchers[(s, v, t)] and b_target[(v, t)]\n return x_searchers, b_target",
"def extract_variables(content: Any) -> Set:\n if isinstance(content, (list, set, tuple)):\n variables = set()\n for item in content:\n variables = variables | extract_variables(item)\n return variables\n\n elif isinstance(content, dict):\n variables = set()\n for key, value in content.items():\n variables = variables | extract_variables(value)\n return variables\n\n elif isinstance(content, str):\n return set(regex_findall_variables(content))\n\n return set()",
"def scanvars(reader, frame, locals):\n import tokenize\n import keyword\n vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__\n for ttype, token, start, end, line in tokenize.generate_tokens(reader):\n if ttype == tokenize.NEWLINE:\n break\n if ttype == tokenize.NAME and token not in keyword.kwlist:\n if lasttoken == '.':\n if parent is not __UNDEF__:\n value = getattr(parent, token, __UNDEF__)\n vars.append((prefix + token, prefix, value))\n else:\n where, value = lookup(token, frame, locals)\n vars.append((token, where, value))\n elif token == '.':\n prefix += lasttoken + '.'\n parent = value\n else:\n parent, prefix = None, ''\n lasttoken = token\n return vars",
"def process_expr(expr, get, key, val):\n for node in ast.walk(ast.parse(expr.strip(), mode='eval')):\n name = get(node)\n if name not in symbols:\n result.append((name, key, val))",
"def build_expression_tree(Omega, rewrites):\n class Node:\n def __init__(self):\n self.before = []\n self.expr = None\n self.var = None\n def ht(self):\n return reduce(lambda x, y: x + y,\n [x.ht() for x in self.before], 1)\n nodes = {}\n for expr, v in Omega:\n n = Node()\n n.var = v\n n.expr = expr\n nodes[v] = n\n for _, v in Omega:\n if v in rewrites:\n n = nodes[v]\n r = rewrites[v]\n for _, v2 in Omega:\n if r.has(v2):\n n.before.append(nodes[v2])\n\n return nodes",
"def get_variables_func(arguments, exclude):\n names = [name for name in arguments.keys() if name not in exclude]\n return lambda obj: {name: getattr(obj, name) for\n name in names}",
"def dictOfVariables(self):\n return {x.name: x for x in self.variables}",
"def get_variables(self):\n return {VariableString(s): self.get_variable_value(s) for s in self._project.GetVariables()}",
"def _get_path_variables(\n path: Tuple[str, ...], variables: FrozenVariableDict\n) -> MutableVariableDict:\n path_variables = {}\n\n for collection in variables:\n collection_variables = variables[collection]\n for name in path:\n if name not in collection_variables:\n collection_variables = None\n break\n collection_variables = collection_variables[name]\n\n if collection_variables is not None:\n path_variables[collection] = unfreeze(collection_variables)\n\n return path_variables",
"def visit_varname(self, node, children):\n # Return only dict nodes\n return {'type':'var','val':str(node)}",
"def expression_depth(expr):\n \n return depth_helper(expr, 0, set())",
"def _map_state_vars_and_eqs(self):\n\n def get_used_eqs_and_state_vars(eq_to_expand, equations):\n \"\"\" Returns used equations and state vars for a given equation\n\n :param eq_to_expand: list containing equations to recurse over and expand definitions for\n note: expecting equations in [(lhs, rhs)] form.\n :param equations: set of equations to look for definitions in.\n :return: set of equations and set of used state vars.\n \"\"\"\n used_state_vars = set()\n for eq in eq_to_expand:\n for v in eq[1].atoms(Derivative) | eq[1].free_symbols:\n if v in self._model.state_vars:\n used_state_vars.add(v)\n elif v not in [e[0] for e in eq_to_expand]:\n eq_to_expand.extend(filter(lambda e: e[0] == v, equations))\n return set(eq_to_expand), used_state_vars\n\n for i, deriv in enumerate(self._model.y_derivatives):\n equations, used_state_vars = \\\n get_used_eqs_and_state_vars([(d.lhs, d.rhs) for d in self._derivative_equations if d.lhs == deriv],\n set(map(lambda e: (e.lhs, e.rhs), self._derivative_equations)))\n\n # get all the variables used in jacobian matrix entry and all variables used to define them\n used_jacobian_vars, used_jacobian_state_vars = \\\n get_used_eqs_and_state_vars([(None, self._jacobian_matrix[i, i])], set(self._jacobian_equations))\n\n for sv in self._formatted_state_vars:\n sv.setdefault('in_evaluate_y_derivative', []).append(sv['sympy_var'] in used_state_vars)\n sv.setdefault('in_evaluate_partial_derivative', []).append(sv['sympy_var'] in used_jacobian_state_vars)\n\n for eq in self._vars_for_template['y_derivative_equations']:\n self.eq_in_evaluate_y_derivative(eq, equations)\n\n for je in self._vars_for_template['jacobian_equations']:\n self.eq_in_evaluate_partial_derivative(je, used_jacobian_vars)",
"def _parse_rec (self, node):\n if node['type'] == 'ObjectExpression':\n _ret = {}\n for prop in node['properties']:\n _ret.update({prop['key']['value']: self._parse_rec(prop['value'])})\n return _ret\n if node['type'] == 'Literal':\n return node['value']",
"def extract_fields(band_expression):\n if isinstance(band_expression, string_types):\n root_expr = parse_expression(band_expression)\n else:\n root_expr = band_expression\n return [\n node.id\n for parent, node in parent_walk(root_expr)\n if isinstance(node, _ast.Name) and not (\n isinstance(parent, _ast.Call) and parent.func == node\n )\n ]",
"def getVar(tree):\n if(tree.data == \"string_expression\"):\n if(tree.children[0].data == \"string\"):\n return tree.children[0].children[0]\n elif(tree.children[0].data == \"variable\"):\n return getValue(tree.children[0].children[0])\n elif(tree.children[0].data == \"string_expression\"):\n # if the child is a string expression apply getVar again on the child\n if(len(tree.children)== 2):\n return getVar(tree.children[0])+getVar(tree.children[1])\n return getVar(tree.children[0])\n elif(tree.data == \"integer\"):\n return evalInteger(tree) \n \n elif(tree.data == \"string_list\"):\n return getStringInterior(tree.children[0],[])\n return \"ERROR\"",
"def getvar(obj):\n class VarDict(dict):\n \"\"\"wrapper of var dict\"\"\"\n def __getitem__(self, key):\n # expression may be set a var in this dict\n if key in self:\n return super(VarDict, self).__getitem__(key)\n if hastag(obj, key):\n return gettag(obj, key)\n # maybe some build-in object\n try:\n return eval(key, {}, {})\n except:\n return False\n\n return VarDict()",
"def _parse_jinja2_variables(meta_yaml: str) -> dict:\n meta_yaml_lines = meta_yaml.splitlines()\n env = jinja2.Environment()\n parsed_content = env.parse(meta_yaml)\n all_nodes = list(parsed_content.iter_child_nodes())\n\n jinja2_exprs = {}\n jinja2_vals = {}\n for i, n in enumerate(all_nodes):\n if isinstance(n, jinja2.nodes.Assign) and isinstance(\n n.node,\n jinja2.nodes.Const,\n ):\n if _config_has_key_with_selectors(jinja2_vals, n.target.name):\n # selectors!\n\n # this block runs if we see the key for the\n # first time\n if n.target.name in jinja2_vals:\n # we need to adjust the previous key\n # first get the data right after the key we have\n jinja2_data = (\n all_nodes[jinja2_vals[n.target.name][1] + 1].nodes[0].data\n )\n\n # now pull out the selector and reset the key\n selector_re = SELECTOR_RE.match(jinja2_data)\n if selector_re is not None:\n selector = selector_re.group(1)\n new_key = n.target.name + CONDA_SELECTOR + selector\n jinja2_vals[new_key] = jinja2_vals[n.target.name]\n del jinja2_vals[n.target.name]\n\n # now insert this key - selector is the next thing\n jinja2_data = all_nodes[i + 1].nodes[0].data\n selector_re = SELECTOR_RE.match(jinja2_data)\n if selector_re is not None:\n selector = selector_re.group(1)\n new_key = n.target.name + CONDA_SELECTOR + selector\n jinja2_vals[new_key] = (n.node.value, i)\n else:\n jinja2_vals[n.target.name] = (n.node.value, i)\n else:\n jinja2_vals[n.target.name] = (n.node.value, i)\n elif isinstance(n, jinja2.nodes.Assign):\n if isinstance(n.target, jinja2.nodes.Tuple):\n for __n in n.target.items:\n jinja2_exprs[__n.name] = meta_yaml_lines[n.lineno - 1]\n else:\n jinja2_exprs[n.target.name] = meta_yaml_lines[n.lineno - 1]\n\n # we don't need the indexes into the jinja2 node list anymore\n for key, val in jinja2_vals.items():\n jinja2_vals[key] = jinja2_vals[key][0]\n\n return jinja2_vals, jinja2_exprs",
"def _get_module_variables(\n path: Tuple[str, ...],\n variables: FrozenVariableDict,\n all_paths: Set[Tuple[str, ...]],\n) -> Tuple[MutableVariableDict, Any]:\n module_variables = _get_path_variables(path, variables)\n submodule_variables: Any = {collection: {} for collection in module_variables}\n all_keys = set(\n key for collection in module_variables.values() for key in collection\n )\n\n for key in all_keys:\n submodule_path = path + (key,)\n if submodule_path in all_paths:\n for collection in module_variables:\n if key in module_variables[collection]:\n submodule_variables[collection][key] = module_variables[\n collection\n ].pop(key)\n\n return module_variables, submodule_variables",
"def all_subexpressions_with_context_information(e : Exp, context : Context, pool : Pool = RUNTIME_POOL) -> [(Exp, Context, Pool)]:\n return _Shredder(context, pool).visit(e)",
"def variables(self) -> VariableDict:\n if self.scope is None:\n raise ValueError(\"Can't access variables on unbound modules\")\n return self.scope.variables()",
"def get_numbers_operators(text: str, var_dict: dict, svar_dict:dict) -> (list, list, dict):\n\n\n # Define regex to extract all numbers in a string, as well as placeholders for intermediate results.\n # These placeholders start with a character, followed by a sequence of characters and numbers.\n # Use re.findall method to get a list of all numbers from the string.\n variables_regex = r\"((?<=[\\+\\-\\*\\/\\^\\,])|^)\\s*[\\+\\-]?\\s*(\\d+\\.?\\d*(e-?\\d+)?|[A-Za-z]+[A-Za-z0-9]*)\"\n var_list = re.findall(variables_regex, text)\n var_list = [i[1] for i in var_list]\n\n # Create dynamic view objects of the keys in var_dict and svar_dict.\n var_dict_keys = var_dict.keys() # returns DYNAMIC view object\n svar_dict_keys = svar_dict.keys()\n\n # Loop over var_list to assign variables to numbers and to copy saved variables from svar_dict to var_dict.\n for idx, entry in enumerate(var_list):\n # Do nothing if an entry is already stored in var_dict\n if not entry in var_dict_keys:\n # Check if entry is contained in svar_dict\n if not entry in svar_dict_keys:\n var_list[idx] = float(entry)\n else:\n var_list[idx] = svar_dict[entry]\n else:\n var_list[idx] = var_dict.pop(entry)\n\n \n operator_string = re.sub(variables_regex, '', text)\n operator_list = [i for i in operator_string if i !=' ']\n\n # Return both lists and the dictionairy.\n return var_list, operator_list, var_dict"
] | [
"0.5945532",
"0.5885752",
"0.58202237",
"0.5805298",
"0.57318467",
"0.5723188",
"0.5675114",
"0.56344885",
"0.56314737",
"0.55876845",
"0.5575075",
"0.5556912",
"0.55559593",
"0.55430675",
"0.55181867",
"0.55167776",
"0.5486167",
"0.5456266",
"0.5422159",
"0.53869724",
"0.52988744",
"0.52983606",
"0.5284948",
"0.52803004",
"0.52742434",
"0.5246554",
"0.51975507",
"0.5197334",
"0.5192101",
"0.5171632"
] | 0.63879704 | 0 |
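The recursion in the row above bottoms out in leaf nodes that override dictOfVariables; a small sketch of the assumed leaf class (names approximate the biogeme-style API and continue the Expression sketch above; they are not the actual implementation):

class Variable(Expression):
    def __init__(self, name):
        self.name = name
        self.children = []

    def dictOfVariables(self):
        # Leaf case: a variable contributes itself under its own name.
        return {self.name: self}

# Composite nodes keep the recursive version shown in the row: merge the
# dictionaries returned by every child with dict(s, **d).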
Recursively extract the random variables appearing in the expression, and store them in a dictionary. | def dictOfRandomVariables(self):
s = {}
for e in self.children:
d = e.dictOfRandomVariables()
s = dict(s, **d)
return s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query_variables(md):\n\n # save as dictionaries with searchers as keys\n x_searchers = {}\n b_target = {}\n\n t_max = 0\n\n for var in md.getVars():\n my_var_name = var.varName\n my_var_value = var.x\n # print('%s %g' % (my_var_name, my_var_value))\n\n if 'x' in my_var_name:\n s = int(my_var_name[2:my_var_name.find(\",\")])\n v = int(my_var_name[my_var_name.find(\",\") + 1:my_var_name.rfind(\",\")])\n t = int(my_var_name[my_var_name.rfind(\",\") + 1:-1])\n\n # print('%s = %f ' % (my_var_name, my_var_value))\n x_searchers[(s, v, t)] = my_var_value\n\n if t > t_max:\n t_max = t\n\n elif 'beta' in my_var_name and '_s' not in my_var_name:\n # print('%s %g' % (my_var_name, my_var_value))\n # remember: b[0] is probability of capture\n v = int(my_var_name[5:my_var_name.find(\",\")])\n t = int(my_var_name[my_var_name.find(\",\") + 1:my_var_name.rfind(\"]\")])\n b_target[(v, t)] = my_var_value\n\n # make sure x is binary\n x_searchers = enforce_binary(x_searchers, t_max)\n b_target = enforce_sum_1(b_target, t_max)\n\n # x_searchers[(s, v, t)] and b_target[(v, t)]\n return x_searchers, b_target",
"def dictOfRandomVariables(self):\n return dict()",
"def dictOfVariables(self):\n s = {}\n for e in self.children:\n d = e.dictOfVariables()\n s = dict(s, **d)\n return s",
"def variables(self):\n for name in self._nodes:\n if isinstance(self._nodes[name], RandomVariable):\n yield name",
"def dictOfRandomVariables(self):\n return {self.name: self}",
"def eval(self):\n vars = {}\n for line in self._instrs:\n yield_result = False\n save_result = None\n yield_match = re.match(r\"yield (.*)\", line)\n if yield_match:\n expr = yield_match.group(1)\n yield_result = True\n var_match = re.match(r\"\\$([a-z0-9]+) = (.*)\", line)\n if var_match:\n save_result = var_match.group(1)\n expr = var_match.group(2)\n value = self._eval_composite(vars, expr, save_result)\n if yield_result:\n return value\n if not save_result is None:\n vars[save_result] = value",
"def context(template):\n\n return {\n v.key: v.read()\n for v in [Variable(name) for name in extract_variables(template)]\n }",
"def variables(s):\n result = set([])\n def walk(s):\n if is_variable(s):\n result.add(s)\n else:\n for arg in s.args:\n walk(arg)\n walk(s)\n return result",
"def find_template_variables(code):\n return re.findall(re_template_var, code)",
"def extract_variables(expected_variables, _request):\n extracted_variables = {}\n for variable in expected_variables:\n form_var = _request.form.get(variable)\n args_var = _request.args.get(variable)\n if form_var and args_var:\n extracted_variables[variable] = [form_var, args_var]\n else:\n extracted_variables[variable] = form_var if form_var else args_var\n return extracted_variables",
"def _get_vars(symbol: Union[str, int]) -> str:\n if isinstance(symbol, str):\n return {\n 'circle': 'var b1=n.round(t,2);',\n 'square': 'var b1=n.round(t,2);',\n 'diamond': 'var b1=n.round(t*1.3,2);',\n 'hexagram': 'var b1=n.round(t,2);var b2=n.round(t/2,2);var b3=n.round(t*Math.sqrt(3)/2,2);'\n }[symbol]\n return {\n 37: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 38: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 39: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 40: 'var d1=n.round(t*1.2,2);var d2=n.round(t*1.6,2);var d3=n.round(t*0.8,2);',\n 34: 'var d1=n.round(t,2);',\n 33: 'var d1=n.round(t*1.4,2);',\n 35: 'var d1=n.round(t*1.2,2);var d2=n.round(t*0.85,2);',\n 36: 'var d1=n.round(t/2,2);var d2=n.round(t,2);'\n }[symbol]",
"def get_variables_binds(self, predicate, bound_variables=None, variables_binds=None, recursion_level=1):\n\n # print(\"EXPLORING\", recursion_level, predicate, variables_binds)\n\n # Set of bound variables in predicate body\n if bound_variables is None:\n bound_variables = set()\n\n # Possible binds\n if variables_binds is None:\n variables_binds = [{}]\n\n recursion_level -= 1\n\n new_possible_binds = []\n\n for body_clause in predicate.body:\n adornments = self.compute_adornments(body_clause.parameters, bound_variables)\n\n # For each fact search if we can match every bound variable and assign free ones\n if body_clause.name in self._facts:\n for fact in self._facts[body_clause.name]:\n possible_binds = self.check_fact_with_adornment(fact, body_clause, adornments, variables_binds)\n if len(possible_binds):\n # A fact matched, we add variables binds to sup\n new_possible_binds.extend(possible_binds)\n\n # if len(new_possible_binds):\n # variables_binds = new_possible_binds\n\n if recursion_level > 0:\n # For each rule\n if body_clause.name in self._rules:\n for applicable_rule in self._rules[body_clause.name]:\n\n n_bound_variables = set()\n n_variables_binds = [{}]\n\n for index, argument in enumerate(body_clause.parameters):\n rule_corresponding_parameter = applicable_rule.head.parameters[index]\n\n if rule_corresponding_parameter.is_constant():\n if argument.is_constant():\n if rule_corresponding_parameter.value != argument.value:\n break\n else:\n if adornments[index]:\n if argument.is_constant():\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = argument.value\n elif argument.name in bound_variables and argument.name in variables_binds[0]:\n n_bound_variables.add(rule_corresponding_parameter.name)\n n_variables_binds[0][rule_corresponding_parameter.name] = variables_binds[0][argument.name]\n\n applicable_predicate_binds = self.get_variables_binds(applicable_rule, n_bound_variables, n_variables_binds, recursion_level)\n for n_bind in applicable_predicate_binds:\n adapted_bind = self.substitute_variable_names(n_bind, applicable_rule.head, body_clause)\n new_possible_binds.extend(adapted_bind)\n\n if len(new_possible_binds):\n variables_binds = new_possible_binds.copy()\n new_possible_binds.clear()\n else:\n variables_binds = [{}]\n\n new_possible_binds_no_duplicates = self.remove_duplicate_binds(variables_binds)\n\n if len(new_possible_binds_no_duplicates):\n yield new_possible_binds_no_duplicates",
"def query_and_print_variables(md):\n\n # save x variable as dictionary with keys (s, v, t)\n x_searchers = {}\n # save beta variable as dictionary with keys (v, t)\n b_target = {}\n\n for var in md.getVars():\n my_var_name = var.varName\n my_var_value = var.x\n print('%s %g' % (my_var_name, my_var_value))\n\n if 'x' in my_var_name:\n s = int(my_var_name[2])\n v = int(my_var_name[4])\n t = int(my_var_name[6])\n\n if my_var_value >= 0.5:\n x_searchers[(s, v, t)] = 1\n else:\n x_searchers[(s, v, t)] = 0\n\n elif 'beta' in my_var_name:\n # print('%s %g' % (my_var_name, my_var_value))\n # remember b[0] is probability of capture\n v = int(my_var_name[5])\n t = int(my_var_name[7])\n b_target[v, t] = my_var_value\n\n obj = md.getObjective()\n print(obj.getValue())\n\n return x_searchers, b_target",
"def randomize_variables(self, samples):\r\n variables = samples.split('@')[0].split(',')\r\n numsamples = int(samples.split('@')[1].split('#')[1])\r\n sranges = zip(*map(lambda x: map(float, x.split(\",\")),\r\n samples.split('@')[1].split('#')[0].split(':')))\r\n ranges = dict(zip(variables, sranges))\r\n\r\n out = []\r\n for _ in range(numsamples):\r\n var_dict = {}\r\n # ranges give numerical ranges for testing\r\n for var in ranges:\r\n # TODO: allow specified ranges (i.e. integers and complex numbers) for random variables\r\n value = random.uniform(*ranges[var])\r\n var_dict[str(var)] = value\r\n out.append(var_dict)\r\n return out",
"def visit_varname(self, node, children):\n # Return only dict nodes\n return {'type':'var','val':str(node)}",
"def extract_variables(content: Any) -> Set:\n if isinstance(content, (list, set, tuple)):\n variables = set()\n for item in content:\n variables = variables | extract_variables(item)\n return variables\n\n elif isinstance(content, dict):\n variables = set()\n for key, value in content.items():\n variables = variables | extract_variables(value)\n return variables\n\n elif isinstance(content, str):\n return set(regex_findall_variables(content))\n\n return set()",
"def Get_FIREXAQ_variable_dict():\n firex_vars = { 'EOH' : {\n 'firex' : 'C2H5OH_TOGA_APEL',\n 'gc' : 'SpeciesConc_EOH',\n 'conv' : False,\n 'scale' : 1e12},\n 'CH4' : {\n 'firex' : 'CH4_DACOM_DISKIN',\n 'gc' : 'SpeciesConc_CH4',\n 'conv' : False,\n 'scale' : 1e9},\n #'HNO3-NO3' : {\n # 'firex' : 'HNO3+submicron-NO3_SAGA_DIBB',\n # 'gc' : ['HNO3','NIT','NITD1','NITD2'],\n # 'conv' : False,\n # 'scale' : 1e12},\n 'C2H6' : {\n 'firex' : 'Ethane_WAS_BLAKE',\n 'gc' : 'SpeciesConc_C2H6',\n 'conv' : False,\n 'scale' : 1e12},\n 'C3H8' : {\n 'firex' : 'Propane_WAS_BLAKE',\n 'gc' : 'SpeciesConc_C3H8',\n 'conv' : False,\n 'scale' : 1e12},\n 'BENZ' : {\n 'firex' : 'Benzene_WAS_BLAKE',\n 'gc' : 'SpeciesConc_BENZ',\n 'conv' : False,\n 'scale' : 1e12},\n 'TOLU' : {\n 'firex' : 'Toluene_WAS_BLAKE',\n 'gc' : 'SpeciesConc_TOLU',\n 'conv' : False,\n 'scale' : 1e12},\n 'HNO2' : {\n 'firex' : 'HNO2_NOAACIMS_VERES',\n 'gc' : 'SpeciesConc_HNO2',\n 'conv' : False,\n 'scale' : 1e12 },\n 'NH4' : {\n 'firex' : 'NH4_ug/m3_DIBB',\n 'gc' : 'SpeciesConc_NH4',\n 'conv' : True,\n 'scale' : 1e12,\n 'mm' : 18.04},\n 'SO4' : {\n 'firex' : 'SO4_ug/m3_DIBB',\n 'gc' : 'SpeciesConc_SO4',\n 'conv' : True,\n 'scale' : 1e9,\n 'mm' : 96.06},\n 'NIT-all' : {\n 'firex' : 'NO3_ug/m3_DIBB',\n 'gc' : ['NIT','NITs','NITD1','NITD2','NITD3','NITD4'],\n 'conv' : True,\n 'scale' : 1e9,\n 'mm' : 62.0049 },\n 'NITa' : {\n 'firex' : 'NO3_ug/m3_DIBB',\n 'gc' : ['NIT','NITD1','NITD2'],\n 'conv' : True,\n 'scale' : 1e9,\n 'mm' : 62.0049},\n # 'HNO2' : {\n # 'firex' : 'HNO2_NOAACIMS_VERES',\n # 'gc' : 'SpeciesConc_HNO2',\n # 'conv' : False,\n # 'scale' : 1e12 },\n 'NH3' : {\n 'firex' : 'NH3_UIOPTR_ppbV_WISTHALER',\n 'gc' : 'SpeciesConc_NH3',\n 'conv' : False,\n 'scale' : 1e9 },\n 'HNO3' : {\n 'firex' : 'HNO3-1Hz_CIT_WENNBERG',\n 'gc' : 'SpeciesConc_HNO3' ,\n 'conv' : False,\n 'scale' : 1e12 },\n 'O3' : {\n 'firex' : 'O3_CL_RYERSON',\n 'gc' : 'SpeciesConc_O3' ,\n 'conv' : False,\n 'scale' : 1e9 },\n 'NO' : {\n 'firex' : 'NO_CL_RYERSON',\n 'gc' : 'SpeciesConc_NO' ,\n 'conv' : False,\n 'scale' : 1e9 },\n 'NO2' : {\n 'firex' : 'NO2_CL_RYERSON',\n 'gc' : 'SpeciesConc_NO2' ,\n 'conv' : False,\n 'scale' : 1e9 },\n 'NOx' : {\n 'firex' : 'NOx',\n 'gc' : 'NOx' ,\n 'conv' : False,\n 'scale' : 1e9 },\n }\n '''\n firex_vars = { 'NOy' : {\n 'firex' : 'NOy_CL_RYERSON',\n 'gc' : ['NO', 'NO2', 'PAN', 'HNO3', 'PPN', 'R4N2', 'N2O5', 'HNO4', 'BrNO2',\n 'BrNO3', 'MPN', 'PROPNN', 'NO3', 'HNO2', 'IONO', 'IONO2',\n 'INO', 'ClNO2', 'ClNO3'] ,\n 'conv' : False,\n 'scale' : 1e9 },\n 'CO' : {\n 'firex' : 'CO_DACOM_DISKIN',\n 'gc' : 'SpeciesConc_CO' ,\n 'conv' : False,\n 'scale' : 1e9 }\n }\n '''\n return firex_vars",
"def sample(self):\n assignments = {}\n for v in self.all_variables():\n for _ in range(gc.max_satisfy_tries):\n assignments[v] = v.sample()\n if self.check(assignments):\n break\n assert len(assignments) == len(self.all_variables())\n return assignments",
"def get_values(self):\n\n variable_values = {}\n\n # Do first of all the first ones, as these are special cases.\n for edge in self.node_levels[0]:\n variable_values[edge] = self._random()\n\n # Now loop through the rest, excluding the first level\n for edges in self.node_levels[1:]:\n for edge in edges:\n parents = self.adj_inv[edge]\n partial_values = [self.weights[(parent, edge)]*variable_values[parent]\n for parent in parents]\n\n variable_values[edge] = sum(partial_values)\n\n return variable_values",
"def variable_dicts(self):\n \n def get_variable_text(rtf_file):\n \"Returns a list of variable_texts for each variable\"\n st='Pos. = '\n return rtf_file.split(st)[1:]\n \n def get_variable_name(variable_text):\n st='Variable = '\n b=variable_text.split(st)[1]\n return b[b.find(' ')+1:b.find('\\t')]\n \n def find_pos(rtf):\n a=rtf\n b=a\n return b[b.find(' ')+1:b.find('\\t')]\n \n def find_variable_label(rtf):\n try:\n a=rtf\n b=a.split('Variable label = ')[1]\n return b[b.find(' ')+1:b.find('\\\\par')]\n except IndexError:\n return None\n \n def find_variable_type(rtf):\n if not 'This variable is ' in rtf: return ''\n a=rtf\n b=a.split('This variable is ')[1]\n i1=b.find(' ')+1\n i2=i1+b[i1:].find('}')\n return b[i1:i2]\n \n def find_SPSS_measurement_level(rtf):\n if not 'the SPSS measurement level is ' in rtf: return ''\n a=rtf\n b=a.split('the SPSS measurement level is ')[1]\n i1=b.find(' ')+1\n i2=i1+b[i1:].find('\\\\par')\n return b[i1:i2]\n \n def find_SPSS_user_missing_values(rtf):\n if not 'SPSS user missing values = ' in rtf: return dict()\n a=rtf\n d=a.split('SPSS user missing values = ')\n if len(d)<2: return None\n e=d[1]\n i1=e.find(' ')+1\n i2=i1+e[i1:].find('\\\\par')\n f=e[i1:i2]\n g=f.split(' ')\n i=' '.join([g[0],g[2],g[4]])\n return i\n \n def find_value_labels(rtf):\n if not 'Value = ' in rtf: return dict()\n a=rtf\n d=a.split('Value = ')[1:]\n z={}\n for e in d:\n value=e[e.find(' ')+1:e.find('\\t')]\n value=float(value)\n f=e.split('Label = ')[1]\n label=f[f.find(' ')+1:f.find('\\\\par')]\n z[value]=label\n #print(z)\n return z\n \n variable_texts=get_variable_text(self.rtf)\n #pprint(variable_texts[0:2])\n \n result=[]\n for variable_text in variable_texts:\n d={'pos':find_pos(variable_text),\n 'variable':get_variable_name(variable_text),\n 'variable_label':find_variable_label(variable_text),\n 'variable_type':find_variable_type(variable_text),\n 'SPSS_measurement_level':find_SPSS_measurement_level(variable_text),\n 'SPSS_user_missing_values':find_SPSS_user_missing_values(variable_text),\n 'value_labels':find_value_labels(variable_text) \n }\n result.append(d)\n \n return result",
"def scanvars(reader, frame, locals):\n import tokenize\n import keyword\n vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__\n for ttype, token, start, end, line in tokenize.generate_tokens(reader):\n if ttype == tokenize.NEWLINE:\n break\n if ttype == tokenize.NAME and token not in keyword.kwlist:\n if lasttoken == '.':\n if parent is not __UNDEF__:\n value = getattr(parent, token, __UNDEF__)\n vars.append((prefix + token, prefix, value))\n else:\n where, value = lookup(token, frame, locals)\n vars.append((token, where, value))\n elif token == '.':\n prefix += lasttoken + '.'\n parent = value\n else:\n parent, prefix = None, ''\n lasttoken = token\n return vars",
"def parse_vars(items):\n return dict((parse_var(item) for item in items))",
"def _parse_jinja2_variables(meta_yaml: str) -> dict:\n meta_yaml_lines = meta_yaml.splitlines()\n env = jinja2.Environment()\n parsed_content = env.parse(meta_yaml)\n all_nodes = list(parsed_content.iter_child_nodes())\n\n jinja2_exprs = {}\n jinja2_vals = {}\n for i, n in enumerate(all_nodes):\n if isinstance(n, jinja2.nodes.Assign) and isinstance(\n n.node,\n jinja2.nodes.Const,\n ):\n if _config_has_key_with_selectors(jinja2_vals, n.target.name):\n # selectors!\n\n # this block runs if we see the key for the\n # first time\n if n.target.name in jinja2_vals:\n # we need to adjust the previous key\n # first get the data right after the key we have\n jinja2_data = (\n all_nodes[jinja2_vals[n.target.name][1] + 1].nodes[0].data\n )\n\n # now pull out the selector and reset the key\n selector_re = SELECTOR_RE.match(jinja2_data)\n if selector_re is not None:\n selector = selector_re.group(1)\n new_key = n.target.name + CONDA_SELECTOR + selector\n jinja2_vals[new_key] = jinja2_vals[n.target.name]\n del jinja2_vals[n.target.name]\n\n # now insert this key - selector is the next thing\n jinja2_data = all_nodes[i + 1].nodes[0].data\n selector_re = SELECTOR_RE.match(jinja2_data)\n if selector_re is not None:\n selector = selector_re.group(1)\n new_key = n.target.name + CONDA_SELECTOR + selector\n jinja2_vals[new_key] = (n.node.value, i)\n else:\n jinja2_vals[n.target.name] = (n.node.value, i)\n else:\n jinja2_vals[n.target.name] = (n.node.value, i)\n elif isinstance(n, jinja2.nodes.Assign):\n if isinstance(n.target, jinja2.nodes.Tuple):\n for __n in n.target.items:\n jinja2_exprs[__n.name] = meta_yaml_lines[n.lineno - 1]\n else:\n jinja2_exprs[n.target.name] = meta_yaml_lines[n.lineno - 1]\n\n # we don't need the indexes into the jinja2 node list anymore\n for key, val in jinja2_vals.items():\n jinja2_vals[key] = jinja2_vals[key][0]\n\n return jinja2_vals, jinja2_exprs",
"def get_numbers_operators(text: str, var_dict: dict, svar_dict:dict) -> (list, list, dict):\n\n\n # Define regex to extract all numbers in a string, as well as placeholders for intermediate results.\n # These placeholders start with a character, followed by a sequence of characters and numbers.\n # Use re.findall method to get a list of all numbers from the string.\n variables_regex = r\"((?<=[\\+\\-\\*\\/\\^\\,])|^)\\s*[\\+\\-]?\\s*(\\d+\\.?\\d*(e-?\\d+)?|[A-Za-z]+[A-Za-z0-9]*)\"\n var_list = re.findall(variables_regex, text)\n var_list = [i[1] for i in var_list]\n\n # Create dynamic view objects of the keys in var_dict and svar_dict.\n var_dict_keys = var_dict.keys() # returns DYNAMIC view object\n svar_dict_keys = svar_dict.keys()\n\n # Loop over var_list to assign variables to numbers and to copy saved variables from svar_dict to var_dict.\n for idx, entry in enumerate(var_list):\n # Do nothing if an entry is already stored in var_dict\n if not entry in var_dict_keys:\n # Check if entry is contained in svar_dict\n if not entry in svar_dict_keys:\n var_list[idx] = float(entry)\n else:\n var_list[idx] = svar_dict[entry]\n else:\n var_list[idx] = var_dict.pop(entry)\n\n \n operator_string = re.sub(variables_regex, '', text)\n operator_list = [i for i in operator_string if i !=' ']\n\n # Return both lists and the dictionairy.\n return var_list, operator_list, var_dict",
"def get_variables_func(arguments, exclude):\n names = [name for name in arguments.keys() if name not in exclude]\n return lambda obj: {name: getattr(obj, name) for\n name in names}",
"def get_variables(self):\n local_variables = self._design.GetVariables(\n )+self._design.GetPostProcessingVariables()\n return {lv: self.get_variable_value(lv) for lv in local_variables}",
"def gen_noise_apply_map(self):\n eval_rnd_map = {}\n for k, v in self.rnd_map.items():\n objs = k.replace(' ', '')\n objs = objs.split(',')\n eval_v = eval(v)\n for obj in objs:\n eval_rnd_map[obj] = eval_v\n return eval_rnd_map",
"def getVariables(self)->Dict[str,str]:\n pass",
"def variables_referenced(text):\n return set(substitution_pattern.findall(text))",
"def read_vars(self, vars):\n fields = {}\n for var in vars:\n try:\n fields[var] = Variable(self.template, var)[:]\n except:\n if var == 'NN':\n fields[var] = self.brunt_vaisalla()\n elif var == 'KE':\n fields[var] = self.kinetic_energy()\n elif var == 'Ep':\n fields[var] = self.potential_energy()\n elif var == 'none':\n fields[var] = np.ones(self.params['global_shape'])\n elif var == 'APE':\n fields[var] = self.available_potential_energy()\n elif var == 'Eb':\n fields[var] = self.background_potential_energy()\n elif var == 'test':\n fields[var] = self.test()\n elif var == 'p_mean':\n fields[var] = self.mean_pressure()\n elif var == 'Q_times_z':\n fields[var] = self.E_2()\n elif var == 'br_times_z':\n fields[var] = self.E_1()\n elif var == 'phi_z':\n fields[var] = self.buoyancy_flux()\n elif var == 'phi_b':\n fields[var] = self.buoyancy_forcing()\n elif var == 'pr':\n fields[var] = self.backgroud_pressure()\n\n if var == 'u':\n fields[var] = fields[var]/self.params['dx']\n elif var == 'v':\n fields[var] = fields[var]/self.params['dy']\n elif var == 'w':\n fields[var] = fields[var]/self.params['dz']\n\n return fields"
] | [
"0.5932875",
"0.5881359",
"0.58104384",
"0.5771592",
"0.57176113",
"0.56042373",
"0.557",
"0.54678077",
"0.54553556",
"0.5453515",
"0.5445234",
"0.54076046",
"0.539915",
"0.5379454",
"0.53549975",
"0.53479403",
"0.5346211",
"0.53291714",
"0.5299673",
"0.52857965",
"0.52682525",
"0.52258515",
"0.5219252",
"0.5212118",
"0.52043515",
"0.5199856",
"0.51913875",
"0.5169805",
"0.5148711",
"0.51462"
] | 0.6830504 | 0 |
Check if the expression contains an expression of type t. Typically, this would be used to check that a MonteCarlo expression contains a bioDraws expression. | def embedExpression(self, t):
if self.getClassName() == t:
return True
for e in self.children:
if e.embedExpression(t):
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def contains(self, expr):\n # NOTE: when multiplying out series a lot of queries like\n # O(...).contains(a*x**b) with many a and few b are made.\n # Separating out the independent part allows for better caching.\n c, m = expr.as_coeff_mul(*self.variables)\n if m != ():\n return self._contains(Mul(*m))\n else:\n # Mul(*m) == 1, and O(1) treatment is somewhat peculiar ...\n # some day this else should not be necessary\n return self._contains(expr)",
"def has_expression(self):\n return self._expression is not None",
"def contain_op(self, expr):\n return expr in self.table.inv",
"def is_type_expression(self, expr: Expression, top_level: bool=True) -> bool:\n # Assignment of TypeVar(...) are passed through\n if (isinstance(expr, CallExpr) and\n isinstance(expr.callee, NameExpr) and\n expr.callee.name == 'TypeVar'):\n return True\n elif isinstance(expr, EllipsisExpr):\n return not top_level\n elif isinstance(expr, NameExpr):\n if expr.name in ('True', 'False'):\n return False\n elif expr.name == 'None':\n return not top_level\n else:\n return True\n elif isinstance(expr, IndexExpr) and isinstance(expr.base, NameExpr):\n if isinstance(expr.index, TupleExpr):\n indices = expr.index.items\n else:\n indices = [expr.index]\n if expr.base.name == 'Callable' and len(indices) == 2:\n args, ret = indices\n if isinstance(args, EllipsisExpr):\n indices = [ret]\n elif isinstance(args, ListExpr):\n indices = args.items + [ret]\n else:\n return False\n return all(self.is_type_expression(i, top_level=False) for i in indices)\n else:\n return False",
"def is_in(cls, s, t):\n\n assert cls.is_selector(s)\n assert cls.is_selector(t)\n\n s_exp = set(cls.expand(s))\n if s_exp == set([()]):\n return True\n t_exp = set(cls.expand(t))\n if s_exp.issubset(t_exp):\n return True\n else:\n return False",
"def tree_contains(T, x):",
"def contains_expr(self, *args):\n return _ida_hexrays.citem_t_contains_expr(self, *args)",
"def is_expression(self):\r\n return conf.lib.clang_isExpression(self)",
"def tree_contains(T, x):\n if T.label == x:\n return True\n for c in T:\n if tree_contains(c, x):\n return True\n return False",
"def isMatched(expr):\n pass",
"def test_expression_contains(self):\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with integers\n expression = BooleanExpression(\"NORMAL\", models.Network.id.in_([1, 3, 4]))\n value = expression.evaluate(KeyedTuple([{\"id\": 1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.id in [1, 3, 4] with models.Network.id=1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.id.in_([1, 3, 4]))\n value = expression.evaluate(KeyedTuple([{\"id\": 2}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.id in [1, 3, 4] with models.Network.id=2\")\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with string\n expression = BooleanExpression(\"NORMAL\", models.Network.label.in_([\"network_1\", \"network_3\", \"network_4\"]))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_1\"}], [\"networks\"]))\n self.assertTrue(value, \"\"\"models.Network.label in [\"network_1\", \"network_3\", \"network_4\"] with models.Network.label=\"network_1\" \"\"\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.label.in_([\"network_1\", \"network_3\", \"network_4\"]))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\"}], [\"networks\"]))\n self.assertFalse(value, \"\"\"models.Network.label in [\"network_1\", \"network_3\", \"network_4\"] with models.Network.label=\"network_1\" \"\"\")",
"def is_(t, x):\n return type(x) is t",
"def tree_contains2(T, x):\n return T.label == x or any(map(lambda tree: tree_contains(tree, x), T))",
"def test_empty_tree_contains(empty_t):\n empty_t.contains(1) == \"An empty tree has no values.\"",
"def isMatched(expr):\n S = Stack()\n n = len(expr)\n\n for i in range (0,n):\n \tsymb = expr[i] #next symbol\n \t# print(symb)\n\n \tif symb in ['{','(','[']:\n \t\tS.Push(symb)\n\n \telif symb in ['}',')',']']:\n\n \t\tif S.isEmpty():\n \t\t\treturn False\n \t\tif S.Top() == '{' and symb == '}':\n \t\t\tS.Pop()\n \t\telif S.Top() == '(' and symb == ')':\n \t\t\tS.Pop()\n \t\telif S.Top() == '[' and symb == ']':\n \t\t\tS.Pop()\n\n \telse:\n \t\tcontinue\n\n if S.isEmpty():\n \treturn True\n else:\n \treturn False\n\n # \telif symb in range(48,58):\n # \t\tcontinue\n\n # \telif symb in ['+','-','*','/','%']:\n # \t\tcontinue\n\n # \telse:\n # \t\tprint(\"Error\") \n # \t\treturn 0",
"def test_expected_type(val, exp_type):\n\n if not isinstance(val, exp_type):\n return False",
"def type_check_expr(value, kind):\n if not isinstance(kind, type):\n raise TypeError(\"%s is not a type\" % kind)\n if not isinstance(value, kind):\n msg = \"expression value has invalid type '%s'\"\n raise TypeError(msg % type(value).__name__)",
"def is_used_as_expression(item):\n # note: this is not accurate because of the last statement of a program\n # but intended\n return not is_used_as_statement(item)",
"def notpexpr(*disallowed_heads):\n return some(lambda x: not (\n isinstance(x, HyExpression) and\n x and\n isinstance(x[0], HySymbol) and\n x[0] in disallowed_heads))",
"def no_operators(expression):\n OPERATORS = set('+-*/')\n for i in expression:\n if i in OPERATORS:\n return True\n raise NotValidExpression('Not a valid expression, no operators')",
"def contains_operator(self, *args):\n return _ida_hexrays.cexpr_t_contains_operator(self, *args)",
"def isNodeType(self, t):\n return isinstance(self, t)",
"def _contains_op(spec):",
"def needs_root_task(expr: Any) -> bool:\n if not isinstance(expr, TaskExpression) or isinstance(expr, SchedulerExpression):\n return True\n\n return any(\n isinstance(arg, TaskExpression) for arg in iter_nested_value((expr.args, expr.kwargs))\n )",
"def time_invariant(self, expr=None):\n if expr is None:\n return all(self.time_invariant(v) for v in self.values())\n\n if any(i in expr.free_symbols for i in self.time_indices):\n return False\n queue = [expr.rhs] if expr.is_Equality else [expr]\n while queue:\n item = queue.pop()\n temporaries = []\n for i in retrieve_terminals(item):\n if any(j in i.free_symbols for j in self.time_indices):\n # Definitely not time-invariant\n return False\n if i in self:\n # Go on with the search\n temporaries.append(i)\n elif isinstance(i, Dimension):\n # Go on with the search, as /i/ is not a time dimension\n continue\n elif not i.base.function.is_SymbolicData:\n # It didn't come from the outside and it's not in self, so\n # cannot determine if time-invariant; assume time-varying\n return False\n queue.extend([self[i].rhs for i in temporaries if self[i].rhs != item])\n return True",
"def test_evaluate_is_of_expression(self):\n value = self.evaluate_common(\"isof(2D,'Edm.Double')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"isof(2M,'Edm.Double')\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"isof(2,'Edm.Double')\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"isof(2.0D,'Edm.Single')\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"isof('x','Edm.String')\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"isof(X'DEAD','Edm.String')\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"isof(false or true,'Edm.Boolean')\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"isof(null,'Edm.String')\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"isof('Edm.String')\")\n self.assertTrue(value.value is False, \"Expected False\")",
"def contains_insn(self, *args):\n return _ida_hexrays.cexpr_t_contains_insn(self, *args)",
"def HasType(t):\n return Or(search(field='types', method=IN, value=t),\n search(field='subtypes', method=IN, value=t),\n search(field='supertypes', method=IN, value=t))",
"def is_a(self, t):\n return isinstance(self._, t)",
"def is_lval(t):\n if not t:\n return False\n i = iter(t)\n if i.next() not in IDENTIFIER_START:\n return False\n return all(e in IDENTIFIER_PART for e in i)"
] | [
"0.6354421",
"0.6215181",
"0.61420554",
"0.61094767",
"0.6092375",
"0.6010836",
"0.5990448",
"0.58116657",
"0.5651406",
"0.5560447",
"0.5551944",
"0.5544172",
"0.5540352",
"0.55251205",
"0.5510958",
"0.5440789",
"0.54013324",
"0.53979707",
"0.5379893",
"0.5332278",
"0.5304918",
"0.52597237",
"0.5252362",
"0.525091",
"0.5233631",
"0.5231769",
"0.52317333",
"0.52230585",
"0.5209466",
"0.5198293"
] | 0.65743965 | 0 |
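For context, a minimal self-contained sketch of the recursive containment check that embedExpression performs; the Node, MonteCarlo, and bioDraws classes here are illustrative stand-ins, not the actual Biogeme classes.

class Node:
    """Illustrative expression node with a class-name based containment check."""

    def __init__(self, children=None):
        self.children = children or []

    def getClassName(self):
        return type(self).__name__

    def embedExpression(self, t):
        # True if this node, or any node below it, has class name t.
        if self.getClassName() == t:
            return True
        return any(child.embedExpression(t) for child in self.children)


class MonteCarlo(Node):
    pass


class bioDraws(Node):
    pass


expr = MonteCarlo(children=[Node(children=[bioDraws()])])
print(expr.embedExpression('bioDraws'))  # True
print(expr.embedExpression('Variable'))  # False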
Count the number of times the PanelLikelihoodTrajectory is used in the formula. It should trigger an error if it is used more than once. | def countPanelTrajectoryExpressions(self):
nbr = 0
for e in self.children:
nbr += e.countPanelTrajectoryExpressions()
return nbr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def countPanelTrajectoryExpressions(self):\n return 1 + self.child.countPanelTrajectoryExpressions()",
"def num_trials(self):",
"def num_trajs(self):\n return len(list(self.run_traj_idx_tuples()))",
"def count(self, trace):\n return len(trace)",
"def num_run_trajs(self, run_idx):\n return len(self._h5['{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES)])",
"def count(self):\n # TODO not implemented yet\n return 0",
"def Points_Counting(self):\n return len(self.__traectory_list)",
"def num_injectors(self):\n injectors = self.info_wells.groupby('well_type').get_group('inj')\n return injectors['well'].nunique()",
"def number_of_iterations(self) -> int:\n pass",
"def _get_observation_count(self):\n observation_count = 0\n for sequence in self.seq_list:\n observation_count += sequence.shape[0] \n \n return observation_count",
"def count():",
"def _count_explores(self) -> int:\n explore_count = 0\n for model in self.project.models:\n explore_count += len(model.explores)\n return explore_count",
"def n_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1",
"def n_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1",
"def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)",
"def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)",
"def count(self):\n\n raise NotImplementedError",
"def getNumberOfTraces(self) -> int:\n\n if not self.debug:\n self.myFieldFox.write(\"CALC:PAR:COUN?\")\n ret = self.myFieldFox.read()\n else:\n ret = 4\n return ret",
"def _linear_count(self, empty_registers):\n return self._m * math.log(self._m / empty_registers)",
"def count(self):\n nreq, nres = 0, 0\n for entry in self.__history:\n if entry.oreq is not None:\n nreq += 1\n if entry.ores is not None:\n nres += 1\n return nreq, nres",
"def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return count",
"def number_of_loc_changes(self) -> int:\n raise NotImplementedError('not implemented')",
"def call_count(self):\n return len(self.mock_calls)",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def count_parms(self):\n min_freq = self.get_high_pass_index()\n rejection = self.rejection_at(np.arange(min_freq, self.nf))\n if rejection.ndim < 2:\n return np.sum(rejection)\n else:\n return np.sum(rejection, axis=1)",
"def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)",
"def count_explorations():\n return exp_models.ExplorationModel.get_exploration_count()"
] | [
"0.7567535",
"0.65474737",
"0.64351207",
"0.63596004",
"0.6308121",
"0.6222953",
"0.61417466",
"0.6131484",
"0.6076631",
"0.60758704",
"0.606734",
"0.60238755",
"0.60216856",
"0.60216856",
"0.5999179",
"0.5999179",
"0.5995761",
"0.5994588",
"0.5986202",
"0.5981513",
"0.59426993",
"0.5937496",
"0.5909628",
"0.5885994",
"0.5885994",
"0.5885994",
"0.5885994",
"0.58828",
"0.5869653",
"0.58682054"
] | 0.72264415 | 1 |
Count the number of times the PanelLikelihoodTrajectory is used in the formula. | def countPanelTrajectoryExpressions(self):
return 1 + self.child.countPanelTrajectoryExpressions() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def countPanelTrajectoryExpressions(self):\n nbr = 0\n for e in self.children:\n nbr += e.countPanelTrajectoryExpressions()\n return nbr",
"def num_trajs(self):\n return len(list(self.run_traj_idx_tuples()))",
"def num_trials(self):",
"def count(self, trace):\n return len(trace)",
"def n_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1",
"def n_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1",
"def num_run_trajs(self, run_idx):\n return len(self._h5['{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES)])",
"def Points_Counting(self):\n return len(self.__traectory_list)",
"def _get_observation_count(self):\n observation_count = 0\n for sequence in self.seq_list:\n observation_count += sequence.shape[0] \n \n return observation_count",
"def getNumberOfTraces(self) -> int:\n\n if not self.debug:\n self.myFieldFox.write(\"CALC:PAR:COUN?\")\n ret = self.myFieldFox.read()\n else:\n ret = 4\n return ret",
"def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)",
"def n_tracers(self):\n # Extract parameters\n pzs = self.params[0]\n return len(pzs)",
"def count():",
"def nPos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 and self.prob.Y[i] == 1:\n running_total += 1\n return running_total if running_total > 0 else 1",
"def count_params(model):\n param_count = np.sum([np.prod(p.size()) for p in model.parameters()])\n return param_count",
"def count(self):\n # TODO not implemented yet\n return 0",
"def _count_explores(self) -> int:\n explore_count = 0\n for model in self.project.models:\n explore_count += len(model.explores)\n return explore_count",
"def count_parameters(model):\r\n count = 0\r\n for parameter in list(model.parameters()):\r\n subcount = 1\r\n for size in list(parameter.size()):\r\n subcount *= size\r\n count += subcount\r\n return count",
"def count_explorations():\n return exp_models.ExplorationModel.get_exploration_count()",
"def num_injectors(self):\n injectors = self.info_wells.groupby('well_type').get_group('inj')\n return injectors['well'].nunique()",
"def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return count",
"def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)",
"def count_parms(self):\n min_freq = self.get_high_pass_index()\n rejection = self.rejection_at(np.arange(min_freq, self.nf))\n if rejection.ndim < 2:\n return np.sum(rejection)\n else:\n return np.sum(rejection, axis=1)",
"def tally(self):\n return self.count",
"def count(self):\n nreq, nres = 0, 0\n for entry in self.__history:\n if entry.oreq is not None:\n nreq += 1\n if entry.ores is not None:\n nres += 1\n return nreq, nres",
"def _linear_count(self, empty_registers):\n return self._m * math.log(self._m / empty_registers)",
"def num_polys(self):\n ret_val = self._num_polys()\n return ret_val",
"def count_parameters(model):\n return sum(p.numel() for p in model.parameters())",
"def Numtrials(self):\n\t\treturn self._get_attribute('numtrials')",
"def params_count(model):\n return np.sum([p.numel() for p in model.parameters()]).item()"
] | [
"0.78097856",
"0.66617143",
"0.6469263",
"0.644151",
"0.63895994",
"0.63895994",
"0.6379835",
"0.6366051",
"0.6280522",
"0.6234562",
"0.6233104",
"0.6233104",
"0.6197536",
"0.6165541",
"0.6160118",
"0.6159054",
"0.6153425",
"0.6145203",
"0.6144707",
"0.6135027",
"0.6134179",
"0.6115405",
"0.61071366",
"0.60884494",
"0.6066353",
"0.60563195",
"0.6049363",
"0.60493416",
"0.6040977",
"0.60203105"
] | 0.77726966 | 1 |
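A similar standalone sketch of the counting pattern used by the two countPanelTrajectoryExpressions variants above: a generic node sums the counts found in its children, while the trajectory-like wrapper adds one for itself. The class names are illustrative only, not the Biogeme originals.

class Expr:
    def __init__(self, children=None):
        self.children = children or []

    def countPanelTrajectoryExpressions(self):
        # Generic node: sum the counts found in the children.
        return sum(c.countPanelTrajectoryExpressions() for c in self.children)


class PanelLikelihoodTrajectory(Expr):
    def __init__(self, child):
        super().__init__([child])
        self.child = child

    def countPanelTrajectoryExpressions(self):
        # Count this occurrence plus any nested occurrences.
        return 1 + self.child.countPanelTrajectoryExpressions()


formula = Expr([PanelLikelihoodTrajectory(Expr()), Expr()])
print(formula.countPanelTrajectoryExpressions())  # 1; a value above 1 would signal misuse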
The signature is a string characterizing an expression. It is designed to be communicated to C++, so that the expression can be reconstructed in that environment.
if self.uniqueId is None:
error_msg = (
f'No id has been defined for elementary expression '
f'{self.name}.'
)
raise excep.biogemeError(error_msg)
if self.variableId is None:
error_msg = f'No id has been defined for variable {self.name}.'
raise excep.biogemeError(error_msg)
signature = f'<{self.getClassName()}>'
signature += f'{{{id(self)}}}'
signature += f'"{self.name}",{self.uniqueId},{self.variableId}'
return [signature.encode()] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def signature(function: model.Function) -> str:\n return str(function.signature)",
"def signature(self, p_int): # real signature unknown; restored from __doc__\n return \"\"",
"def getSignature(self):\n if self.uniqueId is None:\n error_msg = (\n f'No id has been defined for elementary '\n f'expression {self.name}.'\n )\n raise excep.biogemeError(error_msg)\n if self.rvId is None:\n error_msg = (\n f'No id has been defined for random variable {self.name}.'\n )\n raise excep.biogemeError(error_msg)\n\n signature = f'<{self.getClassName()}>'\n signature += f'{{{id(self)}}}'\n signature += f'\"{self.name}\",{self.uniqueId},{self.rvId}'\n return [signature.encode()]",
"def _op_sig(op, arity): # pylint: disable-msg=invalid-name\n return op + '/' + str(arity)",
"def getSignature(self):\n if self.uniqueId is None:\n error_msg = (\n f'No id has been defined for elementary '\n f'expression {self.name}.'\n )\n raise excep.biogemeError(error_msg)\n if self.drawId is None:\n error_msg = f'No id has been defined for draw {self.name}.'\n raise excep.biogemeError(error_msg)\n signature = f'<{self.getClassName()}>'\n signature += f'{{{id(self)}}}'\n signature += f'\"{self.name}\",{self.uniqueId},{self.drawId}'\n return [signature.encode()]",
"def keyify(expr):\n # the type of an applied function is an instance of UndefinedFunction.\n if not isinstance(expr, (sy.Symbol, sy.Derivative)) and \\\n not isinstance(expr.__class__, UndefinedFunction):\n raise TypeError(\"Expected symbol, derivative or applied function; got {} {}\".format(type(expr), expr))\n\n if isinstance(expr, sy.Derivative):\n expr = symutil.canonize_derivative(expr) # we assume at least C^k continuity.\n\n return symutil.strip_function_arguments(expr)",
"def input_signature(self):\n return self._function_spec.input_signature",
"def getSignature(self):\n listOfSignatures = []\n listOfSignatures += self.keyExpression.getSignature()\n for i, e in self.dictOfExpressions.items():\n listOfSignatures += e.getSignature()\n signature = '<{}>'.format(self.getClassName())\n signature += '{{{}}}'.format(id(self))\n signature += '({})'.format(len(self.dictOfExpressions))\n signature += ',{}'.format(id(self.keyExpression))\n for i, e in self.dictOfExpressions.items():\n signature += f',{i},{id(e)}'\n listOfSignatures += [signature.encode()]\n return listOfSignatures",
"def getSignature(self):\n if self.uniqueId is None:\n error_msg = (\n f'No id has been defined for elementary '\n f'expression {self.name}.'\n )\n raise excep.biogemeError(error_msg)\n if self.betaId is None:\n raise excep.biogemeError(\n f'No id has been defined for parameter {self.name}.'\n )\n\n signature = f'<{self.getClassName()}>'\n signature += f'{{{id(self)}}}'\n signature += (\n f'\"{self.name}\"[{self.status}],{self.uniqueId},{self.betaId}'\n )\n return [signature.encode()]",
"def signature(function):\n pass",
"def format_method_signature(self, locals, code):\n\n res = \"\"\n is_args = code.co_flags & 4\n is_kwargs = code.co_flags & 8\n total_args = code.co_argcount\n if is_args:\n total_args += 1\n if is_kwargs:\n total_args += 1\n for i in xrange(total_args):\n varname = code.co_varnames[i]\n\n if is_args and is_kwargs and i == total_args - 2:\n varname = \"*\" + varname\n elif is_args and is_kwargs and i == total_args - 1:\n varname = \"**\" + varname\n elif is_args and i == total_args - 1:\n varname = \"*\" + varname\n elif is_kwargs and i == total_args - 1:\n varname = \"**\" + varname\n if res == \"\":\n res = varname\n else:\n res += \", \" + varname\n\n return \"(%s)\" % res",
"def signature(function):\n\tdesc = inspect.getargspec(function)\n\tif desc[3]:\n\t\tldefault = len(desc[3])\n\t\tdefault = desc[3]\n\t\tsign = ','.join(desc[0][:-ldefault])\n\telse:\n\t\tldefault = 0\n\t\tdefault=[]\n\t\tsign = ','.join(desc[0])\t\n\tfor n,v in zip(desc[0][-ldefault:],default):\n\t\tsign += ','+n+\"=\"+str(v)\t\n\tif desc[1]:\n\t\tsign +=',*'+desc[1]\n\tif desc[2]:\n\t\tsign +=',**'+desc[2]\t\n\tif sign and sign[0]==',': sign = sign[1:]\n\treturn sign",
"def callsignature(function):\n\tdesc = inspect.getargspec(function)\n\tsign = ','.join(desc[0])\n\tif desc[1]:\n\t\tsign +=',*'+desc[1]\n\tif desc[2]:\n\t\tsign +=',**'+desc[2]\t\n\tif sign and sign[0]==',': sign = sign[1:]\n\treturn sign",
"def _function_sig_key(name: str, *args: Any, **kwargs: Any) -> int:\n function_sig = name\n for arg in args:\n function_sig += str(arg)\n for _, value in kwargs.items():\n function_sig += str(value)\n\n return hash(function_sig)",
"def chrf_signature(args, numrefs):\n\n # Abbreviations for the signature\n abbr = {\n 'test': 't',\n 'lang': 'l',\n 'numchars': 'n',\n 'space': 's',\n 'case': 'c',\n 'numrefs': '#',\n 'version': 'v',\n 'origlang': 'o',\n 'subset': 'S',\n }\n\n signature = {'version': VERSION,\n 'space': args.chrf_whitespace,\n 'numchars': args.chrf_order,\n 'numrefs': numrefs,\n 'case': 'lc' if args.lc else 'mixed'}\n\n if args.test_set is not None:\n signature['test'] = args.test_set\n\n if args.langpair is not None:\n signature['lang'] = args.langpair\n\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x, signature[x]) for x in sorted(signature.keys())])\n\n return sigstr",
"def cFormal(self):\n if not self.type:\n return self.name # special case for '...'\n else:\n arr = self.array or ''\n pointers = self.pointers or ''\n return \"%s %s%s%s\" % (self.type, pointers, self.name, arr)",
"def getSignature(self):\n signature = f'<{self.getClassName()}>'\n signature += f'{{{id(self)}}}'\n signature += f',{self.value}'\n return [signature.encode()]",
"def handle_signature(self, sig, signode):\n if self.options.get('op'):\n fullname = self.options['op']\n name = sig\n arglist = []\n signode['fullname'] = fullname\n op_parse_arglist(signode, sig, fullname)\n return fullname, ''\n\n m = jme_sig_re.match(sig)\n if m is None:\n raise ValueError\n name, arglist = m.groups()\n\n fullname = name\n\n signode['fullname'] = fullname\n\n signode += addnodes.desc_name(name, name)\n if not arglist:\n if self.needs_arglist():\n # for callables, add an empty parameter list\n signode += addnodes.desc_parameterlist()\n return fullname, ''\n\n _pseudo_parse_arglist(signode, arglist)\n return fullname, ''",
"def expression(self):\n\n result = u\"{}({}\".format(self.function.lower(),\n self.metric_name)\n\n if self.dimensions_str:\n result += u\"{{{}}}\".format(self.dimensions_str)\n\n if self.deterministic:\n result += u\", deterministic\"\n\n if self.period:\n result += u\", {}\".format(str(self.period))\n\n result += u\")\"\n\n result += u\" {} {}\".format(self.operator,\n str(self.threshold))\n\n if self.periods:\n result += u\" times {}\".format(str(self.periods))\n\n return result",
"def call_spec_string():\n # pylint: disable=protected-access\n frame = sys._getframe(1)\n argvals = inspect.getargvalues(frame)\n if argvals.args[0] == 'self':\n return inspect.formatargvalues(argvals.args[1:], *argvals[1:])\n else:\n return inspect.formatargvalues(*argvals)",
"def type_signature(self) -> computation_types.Type:\n raise NotImplementedError",
"def signature(request) -> str:\n return get_test_data(request, __name__, \"signature\", \"r\")",
"def intern(string): # real signature unknown; restored from __doc__\n return \"\"",
"def _get_signature(self):\n if hasattr(self, '_signature'):\n return self._signature\n fullargspec = inspect.getargspec(self.callable)\n argspec = fullargspec[0]\n assert argspec[0:2] == ['self', 'req'] or argspec[0] == 'req', \\\n 'Invalid argspec %s for %s' % (argspec, self.name)\n while argspec and (argspec[0] in ('self', 'req')):\n argspec.pop(0)\n argspec.reverse()\n defaults = fullargspec[3]\n if not defaults:\n defaults = []\n else:\n defaults = list(defaults)\n args = []\n sig = []\n for sigcand in self.xmlrpc_signatures():\n if len(sig) < len(sigcand):\n sig = sigcand\n sig = list(sig)\n for arg in argspec:\n if defaults:\n value = defaults.pop()\n if type(value) is str:\n if '\"' in value:\n value = \"'%s'\" % value\n else:\n value = '\"%s\"' % value\n arg += '=%s' % value\n args.insert(0, RPC_TYPES[sig.pop()] + ' ' + arg)\n self._signature = '%s %s(%s)' % (RPC_TYPES[sig.pop()], self.name, ', '.join(args))\n return self._signature",
"def get_signature(self):\n return \" \".join(self.segments[-1].unixtext.replace(\n u\"\\n\", \" \").strip().split())",
"def signature(self, params):\n string = ''.join(key + params[key] for key in sorted(params.keys()))\n return md5(string + self.cfg('secret'))",
"def seriesSignature(self) -> str:\r\n\r\n return self.__series_signature",
"def __str__(self):\n\n def args_with_defaults(args, defaults):\n \"\"\"\n Args to string, with defaults inserted where appropriate\n\n :param args: arguments\n :type args: ``list``\n :param defaults: default value of arguments\n :type defaults: ``list``\n\n :return: string representation of the signature arguments\n :rtype: ``str``\n \"\"\"\n\n def argument(arg, default):\n \"\"\"\n Arg=Default pair if Default is present\n\n :param arg: argument name\n :type arg: ``str``\n :param default: default value for argument\n :type default: ``object``\n\n :return: string representation\n :rtype: ``str``\n \"\"\"\n return \"{0}={1}\".format(arg, default) if default else arg\n\n return \", \".join(\n reversed(\n [\n argument(arg, default)\n for arg, default in zip_longest(\n reversed(args), reversed(defaults)\n )\n ]\n )\n )\n\n args = \"\".join(\n [\n args_with_defaults(self.args, self.defaults),\n \", *{0}\".format(self.varargs) if self.varargs else \"\",\n \", **{0}\".format(self.keywords) if self.keywords else \"\",\n ]\n )\n\n return \"{0}({1})\".format(self.name, args)",
"def handle_signature(self, sig, signode):\n m = jme_sig_re.match(sig)\n if m is None:\n raise ValueError\n name, _ = m.groups()\n signode['fullname'] = name\n signode += addnodes.desc_name(name, name)\n return name, ''",
"def format_args(self, **kwargs: Any) -> str:\n decl = self.declaration\n\n # The logic allows this to be used for both function like and non\n # function like macros.\n # 'SOME_DEFINE'.partition('(')\n # >>> 'SOME_DEFINE', '', ''\n #\n # 'FUNCTION_LIKE(_a, _b)'.partition('(')\n # >>> 'FUNCTION_LIKE', '(', '_a, _b)'\n _, part, args = decl.partition(\"(\")\n return part + args"
] | [
"0.6619273",
"0.6473422",
"0.6309802",
"0.61847234",
"0.6073295",
"0.6059989",
"0.60226774",
"0.5935619",
"0.5928081",
"0.5888818",
"0.58887196",
"0.5830866",
"0.5825439",
"0.58098805",
"0.5744874",
"0.57229364",
"0.5679679",
"0.56364614",
"0.5612615",
"0.5531022",
"0.55234027",
"0.551099",
"0.5474382",
"0.5467594",
"0.5429843",
"0.5408151",
"0.5388439",
"0.5378443",
"0.5355992",
"0.53551173"
] | 0.6510526 | 1 |
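To make the string format concrete, a small sketch that assembles the same kind of signature for a hypothetical variable; the numeric ids and the name 'income' are made up for illustration.

def build_signature(class_name, object_id, name, unique_id, variable_id):
    # Mirrors the pieces concatenated by getSignature above.
    signature = f'<{class_name}>'
    signature += f'{{{object_id}}}'
    signature += f'"{name}",{unique_id},{variable_id}'
    return [signature.encode()]


print(build_signature('Variable', 140230511, 'income', 3, 1))
# [b'<Variable>{140230511}"income",3,1']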
Test the count function. | def test_count(self):
self._test_count_func(count) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_own_count(self):\n self._test_count_func(it_count)",
"def count():",
"def count() -> int:\n pass",
"def test_count_0(self):\n self.assertEqual(count(0), 0, 'Between 0 and 0, there is 0 lucky numbers.')",
"def test_count_10(self):\n value: int = 10\n result: int = 2\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_abcdee():\n assert part_01.count_for('abcdee', 2) == 1\n assert part_01.count_for('abcdee', 3) == 0",
"def test_count_66(self):\n value: int = 66\n result: int = 18\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def test_counter(self):\n self.assertEqual(self._n_registered, 1)",
"def test_count_666(self):\n value: int = 666\n result: int = 264\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_20(self):\n value: int = 20\n result: int = 4\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_9(self):\n value: int = 9\n result: int = 2\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_5(self):\n value: int = 5\n result: int = 0\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_bababc():\n assert part_01.count_for('bababc', 2) == 1\n assert part_01.count_for('bababc', 3) == 1",
"def test_abcccd():\n assert part_01.count_for('abcccd', 2) == 0\n assert part_01.count_for('abcccd', 3) == 1",
"def test_count_1719(self):\n value: int = 2645\n result: int = 1113\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_72(self):\n value: int = 72\n result: int = 21\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def testArticleCount(self):\n\n self.articleCount(17)",
"def test_count_173(self):\n value: int = 173\n result: int = 55\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_abbcde():\n assert part_01.count_for('abbcde', 2) == 1\n assert part_01.count_for('abbcde', 3) == 0",
"def test_count_2645(self):\n value: int = 1719\n result: int = 723\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_new_count(self):\n self.assertEqual(2, self.alice_storage.new_count)\n self.assertEqual(3, self.bob_storage.new_count)\n self.assertEqual(0, self.carol_storage.new_count)\n self.assertEqual(0, self.anonymous_storage.new_count)",
"async def count(self, **kw):\n\n pass",
"def test_count_35(self):\n value: int = 35\n result: int = 6\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_counts(self):\n c = array([5,0,1,1,5,5])\n obs = counts(c)\n exp = array([1,2,0,0,0,3])\n self.assertEqual(obs, exp)\n d = array([2,2,1,0])\n obs = counts(d, obs)\n exp = array([2,3,2,0,0,3])\n self.assertEqual(obs, exp)",
"def test_all_count(self):\n self.assertEqual(2, self.alice_inbox.all_count)\n self.assertEqual(3, self.bob_inbox.all_count)\n self.assertEqual(0, self.carol_inbox.all_count)",
"def test_count_6_645_243(self):\n value: int = 6_645_243\n result: int = 3_615_948\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')"
] | [
"0.84164256",
"0.820292",
"0.7990008",
"0.78495294",
"0.7781927",
"0.76635265",
"0.76356196",
"0.7626337",
"0.7626337",
"0.7626337",
"0.7626337",
"0.7524166",
"0.7483875",
"0.74832076",
"0.7470555",
"0.746315",
"0.746018",
"0.7457858",
"0.7455038",
"0.7362271",
"0.7359845",
"0.73558474",
"0.7353885",
"0.7350373",
"0.7329401",
"0.7328461",
"0.7304646",
"0.7282343",
"0.7270739",
"0.7245517"
] | 0.9106443 | 0 |
Test own count implementation. | def test_own_count(self):
self._test_count_func(it_count) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_count(self):\n self._test_count_func(count)",
"def count():",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def count() -> int:\n pass",
"def test_count_0(self):\n self.assertEqual(count(0), 0, 'Between 0 and 0, there is 0 lucky numbers.')",
"def test_count_10(self):\n value: int = 10\n result: int = 2\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def testArticleCount(self):\n\n self.articleCount(17)",
"def test_counter(self):\n self.assertEqual(self._n_registered, 1)",
"def test_abcdee():\n assert part_01.count_for('abcdee', 2) == 1\n assert part_01.count_for('abcdee', 3) == 0",
"def test_count_66(self):\n value: int = 66\n result: int = 18\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def count(self):\n\n raise NotImplementedError",
"def test_new_count(self):\n self.assertEqual(2, self.alice_storage.new_count)\n self.assertEqual(3, self.bob_storage.new_count)\n self.assertEqual(0, self.carol_storage.new_count)\n self.assertEqual(0, self.anonymous_storage.new_count)",
"def count(self, value):\n # YOUR CODE HERE\n raise NotImplementedError()",
"def test_count_1719(self):\n value: int = 2645\n result: int = 1113\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_new_count(self):\n self.assertEqual(2, self.alice_inbox.new_count)\n self.assertEqual(3, self.bob_inbox.new_count)\n self.assertEqual(0, self.carol_inbox.new_count)",
"def count(self, value): # real signature unknown; restored from __doc__\n return 0",
"def test_count_666(self):\n value: int = 666\n result: int = 264\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def count(self, sub) -> int:\n pass",
"def test_count_20(self):\n value: int = 20\n result: int = 4\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_72(self):\n value: int = 72\n result: int = 21\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_5(self):\n value: int = 5\n result: int = 0\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_counts(self):\n c = array([5,0,1,1,5,5])\n obs = counts(c)\n exp = array([1,2,0,0,0,3])\n self.assertEqual(obs, exp)\n d = array([2,2,1,0])\n obs = counts(d, obs)\n exp = array([2,3,2,0,0,3])\n self.assertEqual(obs, exp)",
"def test_count_9(self):\n value: int = 9\n result: int = 2\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_abcccd():\n assert part_01.count_for('abcccd', 2) == 0\n assert part_01.count_for('abcccd', 3) == 1",
"def test_all_count(self):\n self.assertEqual(2, self.alice_inbox.all_count)\n self.assertEqual(3, self.bob_inbox.all_count)\n self.assertEqual(0, self.carol_inbox.all_count)",
"def test_count_2645(self):\n value: int = 1719\n result: int = 723\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def count(self):\n # TODO not implemented yet\n return 0"
] | [
"0.8753977",
"0.8241986",
"0.8033116",
"0.8033116",
"0.8033116",
"0.8033116",
"0.7945021",
"0.7638478",
"0.75200033",
"0.7499305",
"0.74754155",
"0.746561",
"0.7461913",
"0.7429032",
"0.7391608",
"0.73801625",
"0.73676854",
"0.7343682",
"0.73298603",
"0.73264694",
"0.7312503",
"0.73056406",
"0.730274",
"0.72964424",
"0.729305",
"0.7290248",
"0.7290042",
"0.72788095",
"0.7260048",
"0.72585493"
] | 0.8527373 | 1 |
Function to get all Tasseled Cap indices
def getTasseledCap(img):
"""Function to compute the Tasseled Cap transformation and return an image"""
coefficients = ee.Array([
[0.3037, 0.2793, 0.4743, 0.5585, 0.5082, 0.1863],
[-0.2848, -0.2435, -0.5436, 0.7243, 0.0840, -0.1800],
[0.1509, 0.1973, 0.3279, 0.3406, -0.7112, -0.4572],
[-0.8242, 0.0849, 0.4392, -0.0580, 0.2012, -0.2768],
[-0.3280, 0.0549, 0.1075, 0.1855, -0.4357, 0.8085],
[0.1084, -0.9022, 0.4120, 0.0573, -0.0251, 0.0238]
]);
bands=ee.List(['blue','green','red','nir','swir1','swir2'])
# Make an Array Image, with a 1-D Array per pixel.
arrayImage1D = img.select(bands).toArray()
# Make an Array Image with a 2-D Array per pixel, 6x1.
arrayImage2D = arrayImage1D.toArray(1)
componentsImage = ee.Image(coefficients).matrixMultiply(arrayImage2D).arrayProject([0]).arrayFlatten([['brightness', 'greenness', 'wetness', 'fourth', 'fifth', 'sixth']]).float();
# Get a multi-band image with TC-named bands.
return img.addBands(componentsImage);
def addTCAngles(img):
""" Function to add Tasseled Cap angles and distances to an image. Assumes image has bands: 'brightness', 'greenness', and 'wetness'."""
# Select brightness, greenness, and wetness bands
brightness = img.select('brightness');
greenness = img.select('greenness');
wetness = img.select('wetness');
# Calculate Tasseled Cap angles and distances
tcAngleBG = brightness.atan2(greenness).divide(math.pi).rename(['tcAngleBG']);
tcAngleGW = greenness.atan2(wetness).divide(math.pi).rename(['tcAngleGW']);
tcAngleBW = brightness.atan2(wetness).divide(math.pi).rename(['tcAngleBW']);
tcDistBG = brightness.hypot(greenness).rename(['tcDistBG']);
tcDistGW = greenness.hypot(wetness).rename(['tcDistGW']);
tcDistBW = brightness.hypot(wetness).rename(['tcDistBW']);
img = img.addBands(tcAngleBG).addBands(tcAngleGW).addBands(tcAngleBW).addBands(tcDistBG).addBands(tcDistGW).addBands(tcDistBW);
return img;
img = getTasseledCap(img)
img = addTCAngles(img)
return img | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addAllTasselCapIndices(self,img):\n\t\t\n\t\tdef getTasseledCap(img):\n\t\t\t\"\"\"Function to compute the Tasseled Cap transformation and return an image\"\"\"\n\t\t\t\n\t\t\tcoefficients = ee.Array([\n\t\t\t\t[0.3037, 0.2793, 0.4743, 0.5585, 0.5082, 0.1863],\n\t\t\t\t[-0.2848, -0.2435, -0.5436, 0.7243, 0.0840, -0.1800],\n\t\t\t\t[0.1509, 0.1973, 0.3279, 0.3406, -0.7112, -0.4572],\n\t\t\t\t[-0.8242, 0.0849, 0.4392, -0.0580, 0.2012, -0.2768],\n\t\t\t\t[-0.3280, 0.0549, 0.1075, 0.1855, -0.4357, 0.8085],\n\t\t\t\t[0.1084, -0.9022, 0.4120, 0.0573, -0.0251, 0.0238]\n\t\t\t]);\n\t\t\n\t\t\tbands=ee.List(['blue','green','red','nir','swir1','swir2'])\n\t\t\t\n\t\t\t# Make an Array Image, with a 1-D Array per pixel.\n\t\t\tarrayImage1D = img.select(bands).toArray()\n\t\t\n\t\t\t# Make an Array Image with a 2-D Array per pixel, 6x1.\n\t\t\tarrayImage2D = arrayImage1D.toArray(1)\n\t\t\n\t\t\tcomponentsImage = ee.Image(coefficients).matrixMultiply(arrayImage2D).arrayProject([0]).arrayFlatten([['brightness', 'greenness', 'wetness', 'fourth', 'fifth', 'sixth']]).float();\n\t \n\t\t\t# Get a multi-band image with TC-named bands.\n\t\t\treturn img.addBands(componentsImage);\t\n\t\t\t\n\t\t\t\n\t\tdef addTCAngles(img):\n\n\t\t\t\"\"\" Function to add Tasseled Cap angles and distances to an image. Assumes image has bands: 'brightness', 'greenness', and 'wetness'.\"\"\"\n\t\t\t\n\t\t\t# Select brightness, greenness, and wetness bands\t\n\t\t\tbrightness = img.select('brightness');\n\t\t\tgreenness = img.select('greenness');\n\t\t\twetness = img.select('wetness');\n\t \n\t\t\t# Calculate Tasseled Cap angles and distances\n\t\t\ttcAngleBG = brightness.atan2(greenness).divide(math.pi).rename(['tcAngleBG']);\n\t\t\ttcAngleGW = greenness.atan2(wetness).divide(math.pi).rename(['tcAngleGW']);\n\t\t\ttcAngleBW = brightness.atan2(wetness).divide(math.pi).rename(['tcAngleBW']);\n\t\t\ttcDistBG = brightness.hypot(greenness).rename(['tcDistBG']);\n\t\t\ttcDistGW = greenness.hypot(wetness).rename(['tcDistGW']);\n\t\t\ttcDistBW = brightness.hypot(wetness).rename(['tcDistBW']);\n\t\t\timg = img.addBands(tcAngleBG).addBands(tcAngleGW).addBands(tcAngleBW).addBands(tcDistBG).addBands(tcDistGW).addBands(tcDistBW);\n\t\t\t\n\t\t\treturn img;\n\t\n\t\timg = getTasseledCap(img)\n\t\timg = addTCAngles(img)\n\t\treturn img",
"def indices(self, _user=None):\n return [p.index for p in self.get_active_smallvariant_cases()]",
"def get_cv_indices(df, col, time_slices):\n return [\n ## get train and holdout indices for slice\n tuple(get_row_indices(df, col, slc[x]) for x in range(2))\n\n ## get indices for each slice\n for slc in time_slices\n ]",
"def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]",
"def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices",
"def get_indexes(self, dataset):\n\n for i in range(self.max_iters):\n index = random.randint(0, len(dataset))\n gt_bboxes_i = dataset.get_ann_info(index)['bboxes']\n if len(gt_bboxes_i) != 0:\n break\n\n return index",
"def occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices",
"def get_indexes(self):\n indexes = []\n for c in self.components:\n indexes.extend(c.get_indexes())\n return indexes",
"def mainIndices(self):\n return self.i1, self.i2",
"def _pick_triplets(len_data, n_triplets):\n\n n_samples = n_triplets * N_CAMS\n\n indices = np.zeros((n_samples), dtype = np.int)\n\n _indices = range(0, len_data, N_CAMS)\n\n for i in range(0, n_samples, N_CAMS):\n k = np.random.choice(_indices)\n indices[i] = k + 1 # Left\n indices[i + 1] = k # Center\n indices[i + 2] = k + 2 # Right\n\n return indices",
"def get_indices(self):\n selection_model = self.selectionModel()\n return selection_model.selectedRows()",
"def csm_indices(csm):\r\n return csm_properties(csm)[1]",
"def tc_index(*args):\n index = []\n x = check_tc_data(args[0])\n i = 0\n for line in args[0].Data.TCData.tc_data:\n i += 1\n if line != x[i - 1]:\n index.append(0)\n elif line == x[i - 1]:\n index.append(i)\n return index",
"def getLandmarkindices(self):\n return self.subsetindices",
"def inidices(self):\n return [Index(name, index) for name, index in self._data['indices'].iteritems()]",
"def get_indexes(self, dataset):\n\n indexs = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexs",
"def indices(self):\n return range(len(self))",
"def indices(self):\n return self.index.indices",
"def getIndices(schema_attr_levels):\n return np.array(list(schema_attr_levels.values())).flatten().tolist()",
"def selectCompatibleIndices(bigTimes, smallTimes):\r\n indices = []\r\n for idx, _ in enumerate(smallTimes):\r\n distances = (bigTimes - smallTimes[idx])**2\r\n def getValue(k):\r\n return distances[k]\r\n thisIndices = sorted(range(len(distances)), key=getValue)\r\n indices.append(thisIndices[0])\r\n return np.array(indices)",
"def get_indexes(self, variable, *args):\n\n return [get_subset_idxs(data, min, max)\n for data, (min, max) in args]",
"def get_indices(self):\r\n return self._indices",
"def test_indices(self, circuit):\n gate = jet.GateFactory.create(\"H\")\n circuit.append_gate(gate, wire_ids=[0])\n assert list(circuit.indices([0])) == [\"0-1\"]\n assert list(circuit.indices([1, 2, 3])) == [\"1-0\", \"2-0\", \"3-0\"]",
"def indices(self):\n return self._kbounded_partitions",
"def get_5index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==0]",
"def get_index_array(self):\n return self.region_pairs",
"def get_idxvals(self):\n input_rows = list()\n input_cols = list()\n for key in self.index:\n input_rows.append(key[0])\n input_cols.append(key[1])\n\n return list(OrderedSet(input_rows)), list(OrderedSet(input_cols))",
"def getIndices(self,img,covariates):\n\t\t\n\t\t# no need to add indices that are already there\n\t\tindices = self.removeDuplicates(covariates,img.bandNames().getInfo())\n\t\t\n\t\tfor item in indices:\n\t\t\timg = self.functionList[item](img)\n\n\t\treturn img",
"def weyl_stabilizer(self, index_set=None):\n\n if index_set is None:\n index_set = self.parent().cartan_type().index_set()\n alphavee = self.parent().coroot_lattice().basis()\n return [i for i in index_set if self.scalar(alphavee[i]) == 0]",
"def indices(self) -> np.ndarray:\n return self.impl.indices"
] | [
"0.6802761",
"0.6170096",
"0.6133901",
"0.60912454",
"0.5845652",
"0.5765843",
"0.5763669",
"0.57543373",
"0.5751262",
"0.5718671",
"0.56785035",
"0.56776994",
"0.56630826",
"0.5637315",
"0.56315714",
"0.56188244",
"0.55988926",
"0.5597698",
"0.5585836",
"0.55607986",
"0.5544925",
"0.5544195",
"0.55339146",
"0.55250394",
"0.5511644",
"0.55094254",
"0.5499057",
"0.5482031",
"0.5460003",
"0.5459655"
] | 0.6790514 | 1 |
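Outside Earth Engine, the same Tasseled Cap transform is just a matrix product. Below is a small NumPy sketch using the coefficient matrix from the code above, applied to a single made-up six-band reflectance pixel (blue, green, red, nir, swir1, swir2); the pixel values are assumptions for illustration only.

import numpy as np

coefficients = np.array([
    [0.3037, 0.2793, 0.4743, 0.5585, 0.5082, 0.1863],
    [-0.2848, -0.2435, -0.5436, 0.7243, 0.0840, -0.1800],
    [0.1509, 0.1973, 0.3279, 0.3406, -0.7112, -0.4572],
    [-0.8242, 0.0849, 0.4392, -0.0580, 0.2012, -0.2768],
    [-0.3280, 0.0549, 0.1075, 0.1855, -0.4357, 0.8085],
    [0.1084, -0.9022, 0.4120, 0.0573, -0.0251, 0.0238],
])

pixel = np.array([0.05, 0.08, 0.07, 0.40, 0.20, 0.10])  # illustrative values only
brightness, greenness, wetness, _, _, _ = coefficients @ pixel

# Angle and distance features, mirroring the argument order used above.
tc_angle_bg = np.arctan2(brightness, greenness) / np.pi
tc_dist_bg = np.hypot(brightness, greenness)
print(brightness, greenness, wetness, tc_angle_bg, tc_dist_bg)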
Function to compute the Tasseled Cap transformation and return an image | def getTasseledCap(img): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transform(self, previousimage):",
"def getTasseledCap(img):\n\t\t\t\n\t\t\tcoefficients = ee.Array([\n\t\t\t\t[0.3037, 0.2793, 0.4743, 0.5585, 0.5082, 0.1863],\n\t\t\t\t[-0.2848, -0.2435, -0.5436, 0.7243, 0.0840, -0.1800],\n\t\t\t\t[0.1509, 0.1973, 0.3279, 0.3406, -0.7112, -0.4572],\n\t\t\t\t[-0.8242, 0.0849, 0.4392, -0.0580, 0.2012, -0.2768],\n\t\t\t\t[-0.3280, 0.0549, 0.1075, 0.1855, -0.4357, 0.8085],\n\t\t\t\t[0.1084, -0.9022, 0.4120, 0.0573, -0.0251, 0.0238]\n\t\t\t]);\n\t\t\n\t\t\tbands=ee.List(['blue','green','red','nir','swir1','swir2'])\n\t\t\t\n\t\t\t# Make an Array Image, with a 1-D Array per pixel.\n\t\t\tarrayImage1D = img.select(bands).toArray()\n\t\t\n\t\t\t# Make an Array Image with a 2-D Array per pixel, 6x1.\n\t\t\tarrayImage2D = arrayImage1D.toArray(1)\n\t\t\n\t\t\tcomponentsImage = ee.Image(coefficients).matrixMultiply(arrayImage2D).arrayProject([0]).arrayFlatten([['brightness', 'greenness', 'wetness', 'fourth', 'fifth', 'sixth']]).float();\n\t \n\t\t\t# Get a multi-band image with TC-named bands.\n\t\t\treturn img.addBands(componentsImage);",
"def __compose_transformation(self):\n s = self.scale\n rotR = self.rotation\n t = self.translation\n T = np.eye(4)\n T[0:3, 3] = t\n R = np.eye(4)\n R[0:3, 0:3] = rotR\n M = T.dot(R)\n if s == 1:\n M = T.dot(R)\n else:\n S = np.eye(4)\n S[0:3, 0:3] = np.diag([s, s, s])\n M = T.dot(R).dot(S)\n return M",
"def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr",
"def make_t(self):\n self.img[1, 1:-1] = 1\n self.img[2:-1, self.l_i / 2] = 1\n self.img_name = 'T'",
"def addAllTasselCapIndices(self,img):\n\t\t\n\t\tdef getTasseledCap(img):\n\t\t\t\"\"\"Function to compute the Tasseled Cap transformation and return an image\"\"\"\n\t\t\t\n\t\t\tcoefficients = ee.Array([\n\t\t\t\t[0.3037, 0.2793, 0.4743, 0.5585, 0.5082, 0.1863],\n\t\t\t\t[-0.2848, -0.2435, -0.5436, 0.7243, 0.0840, -0.1800],\n\t\t\t\t[0.1509, 0.1973, 0.3279, 0.3406, -0.7112, -0.4572],\n\t\t\t\t[-0.8242, 0.0849, 0.4392, -0.0580, 0.2012, -0.2768],\n\t\t\t\t[-0.3280, 0.0549, 0.1075, 0.1855, -0.4357, 0.8085],\n\t\t\t\t[0.1084, -0.9022, 0.4120, 0.0573, -0.0251, 0.0238]\n\t\t\t]);\n\t\t\n\t\t\tbands=ee.List(['blue','green','red','nir','swir1','swir2'])\n\t\t\t\n\t\t\t# Make an Array Image, with a 1-D Array per pixel.\n\t\t\tarrayImage1D = img.select(bands).toArray()\n\t\t\n\t\t\t# Make an Array Image with a 2-D Array per pixel, 6x1.\n\t\t\tarrayImage2D = arrayImage1D.toArray(1)\n\t\t\n\t\t\tcomponentsImage = ee.Image(coefficients).matrixMultiply(arrayImage2D).arrayProject([0]).arrayFlatten([['brightness', 'greenness', 'wetness', 'fourth', 'fifth', 'sixth']]).float();\n\t \n\t\t\t# Get a multi-band image with TC-named bands.\n\t\t\treturn img.addBands(componentsImage);\t\n\t\t\t\n\t\t\t\n\t\tdef addTCAngles(img):\n\n\t\t\t\"\"\" Function to add Tasseled Cap angles and distances to an image. Assumes image has bands: 'brightness', 'greenness', and 'wetness'.\"\"\"\n\t\t\t\n\t\t\t# Select brightness, greenness, and wetness bands\t\n\t\t\tbrightness = img.select('brightness');\n\t\t\tgreenness = img.select('greenness');\n\t\t\twetness = img.select('wetness');\n\t \n\t\t\t# Calculate Tasseled Cap angles and distances\n\t\t\ttcAngleBG = brightness.atan2(greenness).divide(math.pi).rename(['tcAngleBG']);\n\t\t\ttcAngleGW = greenness.atan2(wetness).divide(math.pi).rename(['tcAngleGW']);\n\t\t\ttcAngleBW = brightness.atan2(wetness).divide(math.pi).rename(['tcAngleBW']);\n\t\t\ttcDistBG = brightness.hypot(greenness).rename(['tcDistBG']);\n\t\t\ttcDistGW = greenness.hypot(wetness).rename(['tcDistGW']);\n\t\t\ttcDistBW = brightness.hypot(wetness).rename(['tcDistBW']);\n\t\t\timg = img.addBands(tcAngleBG).addBands(tcAngleGW).addBands(tcAngleBW).addBands(tcDistBG).addBands(tcDistGW).addBands(tcDistBW);\n\t\t\t\n\t\t\treturn img;\n\t\n\t\timg = getTasseledCap(img)\n\t\timg = addTCAngles(img)\n\t\treturn img",
"def img_map_transforms(ts):\n # XXX TODO: unchecked textures give error of variable referenced before assignment XXX\n # POV-Ray \"scale\" is not a number of repetitions factor, but ,its\n # inverse, a standard scale factor.\n # 0.5 Offset is needed relatively to scale because center of the\n # scale is 0.5,0.5 in blender and 0,0 in POV\n # Strange that the translation factor for scale is not the same as for\n # translate.\n # TODO: verify both matches with other blender renderers / internal in previous versions.\n image_map_transforms = \"\"\n image_map_transforms = \"scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % (\n ts.scale[0],\n ts.scale[1],\n ts.scale[2],\n ts.offset[0],\n ts.offset[1],\n ts.offset[2],\n )\n # image_map_transforms = (\" translate <-0.5,-0.5,0.0> scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % \\\n # ( 1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # (0.5 / ts.scale.x) + ts.offset.x,\n # (0.5 / ts.scale.y) + ts.offset.y,\n # ts.offset.z))\n # image_map_transforms = (\n # \"translate <-0.5,-0.5,0> \"\n # \"scale <-1,-1,1> * <%.4g,%.4g,%.4g> \"\n # \"translate <0.5,0.5,0> + <%.4g,%.4g,%.4g>\" % \\\n # (1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # ts.offset.x,\n # ts.offset.y,\n # ts.offset.z)\n # )\n return image_map_transforms",
"def get_transform(center, scale, res, rot=0):\n # Generate transformation matrix\n h = 200 * scale\n t = np.zeros((3, 3))\n t[0, 0] = float(res[1]) / h\n t[1, 1] = float(res[0]) / h\n t[0, 2] = res[1] * (-float(center[0]) / h + .5)\n t[1, 2] = res[0] * (-float(center[1]) / h + .5)\n t[2, 2] = 1\n if not rot == 0:\n rot = -rot # To match direction of rotation from cropping\n rot_mat = np.zeros((3,3))\n rot_rad = rot * np.pi / 180\n sn,cs = np.sin(rot_rad), np.cos(rot_rad)\n rot_mat[0,:2] = [cs, -sn]\n rot_mat[1,:2] = [sn, cs]\n rot_mat[2,2] = 1\n # Need to rotate around center\n t_mat = np.eye(3)\n t_mat[0,2] = -res[1]/2\n t_mat[1,2] = -res[0]/2\n t_inv = t_mat.copy()\n t_inv[:2,2] *= -1\n t = np.dot(t_inv,np.dot(rot_mat,np.dot(t_mat,t)))\n return t",
"def transform_image(image, transform, mapping, alpha = 1, incr_x = 10, incr_y = 10):\r\n background = [255, 255, 255, 0]\r\n width, height = image.size\r\n image_in = np.array(image.convert(\"RGBA\"))\r\n image_out = [[background[:] for j in range(width)] for i in range(height)]\r\n transform_row = []\r\n for i in range(0, width + incr_x, incr_x):\r\n transform_row.append(transform(vec2(i, 0), mapping, alpha))\r\n for i in range(incr_y, height + incr_y, incr_y):\r\n p_ur = transform_row[0]\r\n p_lr = transform_row[0] = transform(vec2(0, i), mapping, alpha)\r\n for j in range(incr_x, width + incr_x, incr_x):\r\n p_ul = p_ur\r\n p_ll = p_lr\r\n p_ur = transform_row[j//incr_x]\r\n p_lr = transform_row[j//incr_x] = transform(vec2(j, i), mapping, alpha)\r\n a = p_ur - p_ul\r\n b = p_ll - p_ul\r\n det = a.x*b.y - a.y*b.x\r\n if (det != 0.0):\r\n for p in triangle(p_ul, p_ur, p_ll, width, height):\r\n c = p - p_ul\r\n rx = (b.y*c.x - b.x*c.y)/det\r\n ry = (a.x*c.y - a.y*c.x)/det\r\n image_out[p.y][p.x] = image_in[min(height - 1, max(0, round(i + (ry - 1)*incr_y)))][min(width - 1, max(0, round(j + (rx - 1)*incr_x)))]\r\n a = p_lr - p_ll\r\n b = p_lr - p_ur\r\n det = a.x*b.y - a.y*b.x\r\n if (det != 0.0):\r\n p_ulr = p_ur + p_ll - p_lr\r\n for p in triangle(p_ur, p_ll, p_lr, width, height):\r\n c = p - p_ulr\r\n rx = (b.y*c.x - b.x*c.y)/det\r\n ry = (a.x*c.y - a.y*c.x)/det\r\n image_out[p.y][p.x] = image_in[min(height - 1, max(0, round(i + (ry - 1)*incr_y)))][min(width - 1, max(0, round(j + (rx - 1)*incr_x)))]\r\n image_out = Image.fromarray(np.uint8(image_out))\r\n return image_out",
"def retarget_image(img, T, C, r, c):\n row, col = img.shape[:2]\n seam_path = optimal_path(T, C, r, c)\n img_final = img\n for i in seam_path:\n if i == 0:\n img_final, _ = seam_removal_horizontal(img_final)\n else:\n img_final, _ = seam_removal_vertical(img_final, [])\n return img_final",
"def addAllTasselCapIndices(self,img):\n\t\t\n\t\tdef getTasseledCap(img):\n\t\t\t\t\"\"\"Function to compute the Tasseled Cap transformation and return an image\"\"\"\n\t\t\t\t\n\t\t\tcoefficients = ee.Array([\n\t\t\t\t[0.3037, 0.2793, 0.4743, 0.5585, 0.5082, 0.1863],\n\t\t\t\t[-0.2848, -0.2435, -0.5436, 0.7243, 0.0840, -0.1800],\n\t\t\t\t[0.1509, 0.1973, 0.3279, 0.3406, -0.7112, -0.4572],\n\t\t\t\t[-0.8242, 0.0849, 0.4392, -0.0580, 0.2012, -0.2768],\n\t\t\t\t[-0.3280, 0.0549, 0.1075, 0.1855, -0.4357, 0.8085],\n\t\t\t\t[0.1084, -0.9022, 0.4120, 0.0573, -0.0251, 0.0238]\n\t\t\t]);\n\t\t\t\n\t\t\tbands=ee.List(['blue','green','red','nir','swir1','swir2'])\n\t\t\t\t\n\t\t\t# Make an Array Image, with a 1-D Array per pixel.\n\t\t\tarrayImage1D = img.select(bands).toArray()\n\t\t\t\n\t\t\t# Make an Array Image with a 2-D Array per pixel, 6x1.\n\t\t\tarrayImage2D = arrayImage1D.toArray(1)\n\t\t\t\n\t\t\tcomponentsImage = ee.Image(coefficients).matrixMultiply(arrayImage2D).arrayProject([0]).arrayFlatten([['brightness', 'greenness', 'wetness', 'fourth', 'fifth', 'sixth']]).float();\n\t\t \n\t\t\t# Get a multi-band image with TC-named bands.\n\t\t\treturn img.addBands(componentsImage);\t\n\t\t\t\t\n\t\t\t\t\n\t\tdef addTCAngles(img):\n\n\t\t\t\"\"\" Function to add Tasseled Cap angles and distances to an image. Assumes image has bands: 'brightness', 'greenness', and 'wetness'.\"\"\"\n\t\t\t\t\t\n\t\t\t# Select brightness, greenness, and wetness bands\t\n\t\t\tbrightness = img.select('brightness');\n\t\t\tgreenness = img.select('greenness');\n\t\t\twetness = img.select('wetness');\n\t \n\t\t\t# Calculate Tasseled Cap angles and distances\n\t\t\ttcAngleBG = brightness.atan2(greenness).divide(math.pi).rename(['tcAngleBG']);\n\t\t\ttcAngleGW = greenness.atan2(wetness).divide(math.pi).rename(['tcAngleGW']);\n\t\t\ttcAngleBW = brightness.atan2(wetness).divide(math.pi).rename(['tcAngleBW']);\n\t\t\ttcDistBG = brightness.hypot(greenness).rename(['tcDistBG']);\n\t\t\ttcDistGW = greenness.hypot(wetness).rename(['tcDistGW']);\n\t\t\ttcDistBW = brightness.hypot(wetness).rename(['tcDistBW']);\n\t\t\timg = img.addBands(tcAngleBG).addBands(tcAngleGW).addBands(tcAngleBW).addBands(tcDistBG).addBands(tcDistGW).addBands(tcDistBW);\n\t\t\t\t\n\t\t\treturn img;\n\t\t\n\t\t\n\t\timg = getTasseledCap(img)\n\t\timg = addTCAngles(img)\n\t\treturn img",
"def build_transform(self):\n cfg = self.cfg\n\n # we are loading images with OpenCV, so we don't need to convert them\n # to BGR, they are already! So all we need to do is to normalize\n # by 255 if we want to convert to BGR255 format, or flip the channels\n # if we want it to be in RGB in [0-1] range.\n if cfg.INPUT.TO_BGR255:\n to_bgr_transform = T.Lambda(lambda x: x * 255)\n else:\n to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n\n normalize_transform = T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n )\n\n transform = T.Compose(\n [\n T.ToPILImage(),\n T.Resize((256, 768)),\n T.ToTensor(),\n to_bgr_transform,\n normalize_transform,\n ]\n )\n return transform",
"def get_opt_translate(obj_img,\n back_img,\n back_center_x,\n back_center_y,\n obj_center_x,\n obj_center_y,\n prev_row_trans=0,\n prev_col_trans=0,\n is_erosion=False):\n width = obj_img.shape[0]\n obj_center_x = int(obj_center_x)\n obj_center_y = int(obj_center_y)\n curr_row_trans, curr_col_trans = prev_row_trans, prev_col_trans\n induce_x = int(back_center_x - obj_center_x + curr_col_trans)\n induce_y = int(back_center_y - obj_center_y + curr_row_trans)\n combine_img = back_img.copy()\n combine_img[induce_y:induce_y + width, induce_x:induce_x + width] -= obj_img\n neg_count = len(np.argwhere(combine_img < 0))\n if is_erosion:\n trans_amount = 4\n else:\n trans_amount = 8\n while trans_amount > 1:\n trans_amount = trans_amount / 2\n neg_count_1 = compute_img_diff(obj_img,\n back_img,\n induce_x,\n induce_y,\n trans_row=trans_amount,\n trans_col=0)\n neg_count_2 = compute_img_diff(obj_img,\n back_img,\n induce_x,\n induce_y,\n trans_row=(-trans_amount),\n trans_col=0)\n if neg_count_1 < neg_count_2:\n if neg_count_1 < neg_count:\n neg_count = neg_count_1\n curr_row_trans += trans_amount\n else:\n if neg_count_2 < neg_count:\n neg_count = neg_count_2\n curr_row_trans -= trans_amount\n\n induce_y = back_center_y - obj_center_y + curr_row_trans\n if is_erosion:\n trans_amount = 4\n else:\n trans_amount = 16\n while trans_amount > 1:\n trans_amount = trans_amount / 2\n neg_count_1 = compute_img_diff(obj_img,\n back_img,\n induce_x,\n induce_y,\n trans_row=0,\n trans_col=trans_amount)\n neg_count_2 = compute_img_diff(obj_img,\n back_img,\n induce_x,\n induce_y,\n trans_row=0,\n trans_col=(-trans_amount))\n if neg_count_1 < neg_count_2:\n if neg_count_1 < neg_count:\n neg_count = neg_count_1\n curr_col_trans += trans_amount\n else:\n if neg_count_2 < neg_count:\n neg_count = neg_count_2\n curr_col_trans -= trans_amount\n # print('Negative Pix Count Translation: %d.' % neg_count)\n # print(curr_row_trans, curr_col_trans)\n return curr_row_trans, curr_col_trans, neg_count",
"def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img",
"def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img",
"def build_transform(self):\n cfg = self.cfg\n\n # we are loading images with OpenCV, so we don't need to convert them\n # to BGR, they are already! So all we need to do is to normalize\n # by 255 if we want to convert to BGR255 format, or flip the channels\n # if we want it to be in RGB in [0-1] range.\n if cfg.INPUT.TO_BGR255:\n to_bgr_transform = T.Lambda(lambda x: x * 255)\n else:\n to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n\n normalize_transform = T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n )\n\n transform = T.Compose(\n [\n T.ToPILImage(),\n T.Resize(self.min_image_size),\n T.ToTensor(),\n to_bgr_transform,\n normalize_transform,\n ]\n )\n return transform",
"def build_transform(self):\n cfg = self.cfg\n\n # we are loading images with OpenCV, so we don't need to convert them\n # to BGR, they are already! So all we need to do is to normalize\n # by 255 if we want to convert to BGR255 format, or flip the channels\n # if we want it to be in RGB in [0-1] range.\n if cfg.INPUT.TO_BGR255:\n to_bgr_transform = T.Lambda(lambda x: x * 255)\n else:\n to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n\n normalize_transform = T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n )\n\n transform = T.Compose(\n [\n T.ToPILImage(),\n T.Resize(self.min_image_size),\n T.ToTensor(),\n to_bgr_transform,\n normalize_transform,\n ]\n )\n return transform",
"def build_transform(self):\n cfg = self.cfg\n\n # we are loading images with OpenCV, so we don't need to convert them\n # to BGR, they are already! So all we need to do is to normalize\n # by 255 if we want to convert to BGR255 format, or flip the channels\n # if we want it to be in RGB in [0-1] range.\n if cfg.INPUT.TO_BGR255:\n to_bgr_transform = T.Lambda(lambda x: x * 255)\n else:\n to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n\n normalize_transform = T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n )\n\n transform = T.Compose(\n [\n T.ToPILImage(),\n T.Resize(self.min_image_size),\n T.ToTensor(),\n to_bgr_transform,\n normalize_transform,\n ]\n )\n return transform",
"def preprocess(img, transformation = transforms.ToTensor(), to_size = (200,200)):\n if isinstance(img, np.ndarray):\n img = Image.fromarray(img)\n img.thumbnail(to_size)\n img = np.array(img)\n diff_x = np.ceil((to_size[0] - img.shape[0])/2).astype(int)\n diff_y = np.ceil((to_size[1] - img.shape[1])/2).astype(int)\n padded = np.pad(img/255.0 , pad_width= ((diff_x,diff_x),(diff_y,diff_y),(0,0)))\n return transformation(padded[0:to_size[0], 0:to_size[1]])",
"def get_transformation():\n return transforms.Compose([transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])",
"def getPreprocessingTransform(cls, modality):\n if modality == \"CT\":\n trans = [SlicerLoadImage(keys=[\"image\"]), AddChanneld(keys=[\"image\"]),\n Spacingd(keys=[\"image\"], pixdim=(1.5, 1.5, 2.0), mode=\"bilinear\"),\n Orientationd(keys=[\"image\"], axcodes=\"RAS\"),\n ScaleIntensityRanged(keys=[\"image\"], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),\n AddChanneld(keys=[\"image\"]),\n ToTensord(keys=[\"image\"]), ]\n return Compose(trans)\n elif modality == \"MRI\":\n trans = [SlicerLoadImage(keys=[\"image\"]), AddChanneld(keys=[\"image\"]),\n Spacingd(keys=[\"image\"], pixdim=(1.5, 1.5, 2.0), mode=\"bilinear\"),\n Orientationd(keys=[\"image\"], axcodes=\"LPS\"),\n Normalized(keys=[\"image\"]),\n AddChanneld(keys=[\"image\"]),\n ToTensord(keys=[\"image\"])]\n return Compose(trans)",
"def process(img):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) \n x_t = cv2.resize(img, (112, 160), interpolation=cv2.INTER_AREA)\n x_t = np.nan_to_num(x_t)\n x_t = cv2.Laplacian(x_t,cv2.CV_8U)\n\n return x_t.astype(np.uint8)",
"def calc_transform(src_, dst_):\n M_ = cv2.getPerspectiveTransform(src_, dst_)\n Minv_ = cv2.getPerspectiveTransform(dst_, src_)\n return M_, Minv_",
"def _get_transforms(self, image, mask, transforms):\n\n for i in transforms:\n\n if i == 'Horizontal_Flip':\n p = np.random.rand()\n if p > 0.5:\n image = cv2.flip(image, 1)\n mask = cv2.flip(mask, 1)\n if i == 'Brightness_adjust':\n p = np.random.rand()\n if p > 0.5:\n image = self.brightness_augment(image)\n\n return image, mask",
"def get_result_image(self):\n\n im_result = cv2.cvtColor(self.im_t, cv2.COLOR_GRAY2BGR)\n pcd_final = copy.deepcopy(self.pcd_s)\n\n pcd_final.transform(self.trans_final)\n np_final = np.asarray(pcd_final.points, np.int)\n\n for i in range(np_final.shape[0]):\n im_result = cv2.circle(\n im_result,\n (np_final[i, 1], np_final[i, 0]),\n 2,\n (0, 255, 0),\n -1,\n cv2.LINE_AA,\n )\n\n im_result = cv2.rectangle(im_result, (5, 12), (170, 38), (0, 0, 0), -1)\n # Draw rotation in image\n _, _, rotate = mat2rpy(self.trans_final)\n d_rotate = np.degrees(rotate)\n str_rotate = format(d_rotate, \".2f\") + \"[deg](CW)\"\n im_result = cv2.putText(\n im_result, str_rotate, (5, 30), 1, 1.25, (255, 255, 255), 2, cv2.LINE_AA\n )\n im_result = cv2.putText(\n im_result, str_rotate, (5, 30), 1, 1.25, (0, 255, 255), 1, cv2.LINE_AA\n )\n return im_result",
"def apply_model(self, original, t1, t2, resolution_scaling_factor=1):\n img = Image()\n img.time_stamp = t2\n\n if t1 == t2:\n img.initialize_with_image(original)\n return img\n\n calc_shift_fnc = self.calculate_shift\n orig_get_fnc = original.get\n interp_fnc = my_math.linear_interpolation\n\n def generate(y, x):\n \"\"\"Function describing the transformed image\"\"\"\n realy = y / resolution_scaling_factor\n realx = x / resolution_scaling_factor\n\n # move to time t2\n posy = y + calc_shift_fnc(realy, realx, t2, 0) - \\\n calc_shift_fnc(realy, realx, t1, 0)\n posx = x + calc_shift_fnc(realy, realx, t2, 1) - \\\n calc_shift_fnc(realy, realx, t1, 1)\n\n x_left = int(posx) # math.floor(pos[0])\n x_right = x_left + 1 # math.ceil(pos[0])\n y_down = int(posy) # math.floor(pos[1])\n y_up = y_down + 1 # math.ceil(pos[1])\n\n v11 = orig_get_fnc(y_down, x_left, resolution_scaling_factor)\n v12 = orig_get_fnc(y_down, x_right, resolution_scaling_factor)\n v21 = orig_get_fnc(y_up, x_left, resolution_scaling_factor)\n v22 = orig_get_fnc(y_up, x_right, resolution_scaling_factor)\n\n return interp_fnc(y_down, x_left, y_up, x_right, v11, v12, v21, v22,\n posy, posx)\n\n img.image_data = np.fromfunction(np.vectorize(generate),\n (original.shape()[0]*resolution_scaling_factor,\n original.shape()[1]*resolution_scaling_factor))\n\n if resolution_scaling_factor != 1:\n img.image_data = skimage.transform.resize(img.image_data,\n original.shape(),\n preserve_range=True)\n\n return img",
"def process_image(image, transpose = True):\n \n # TODO: Process a PIL image for use in a PyTorch model\n im = Image.open(image)\n\n #Resize to 256x256 thumbnail \n width, height = im.size\n if width < height:\n height = int(height * 256 / width)\n im = im.resize((256, height))\n else:\n width = int(width * 256 / height)\n im = im.resize((width, 256))\n \n #Set bounding box and crop\n width, height = im.size\n left = (width - 224) / 2\n top = (height - 224)/2\n right = (width + 224)/2\n bottom = (height + 224)/2\n \n im = im.crop((left, top, right, bottom))\n \n #Normalize color channel values\n im = np.array(im)\n im = im / 255\n \n #means = np.array([0.485, 0.456, 0.406])\n #sds = np.array([0.229, 0.224, 0.225])\n \n #im = (im - means) / sds \n \n #Transpose array\n if transpose:\n im = im.transpose((2, 0, 1))\n else:\n return im\n \n return im",
"def __call__(self, img: torch.Tensor) -> torch.Tensor:\n return self._trafo(img)",
"def transpose(im: Image) -> Image:\n return im.transpose(\n random.choice([Image.FLIP_TOP_BOTTOM, Image.FLIP_LEFT_RIGHT])\n )",
"def _image_transform(self, img, source, title):\n conf = source.conf[title]\n \n xmin = conf.get('xmin', 0)\n ymin = conf.get('ymin', 0)\n\n xmax = img.shape[-1] + xmin\n ymax = img.shape[-2] + ymin\n if \"xmax\" in conf:\n if(conf['xmax'] <= xmin):\n logging.warning(\"xmax <= xmin for title %s on %s. Ignoring xmax\", title, source.name())\n else:\n xmax = conf['xmax']\n if \"ymax\" in conf:\n if(conf['ymax'] <= ymin):\n logging.warning(\"ymax <= ymin for title %s on %s. Ignoring xmax\", title, source.name())\n else:\n ymax = conf['ymax']\n\n \n translate_transform = QtGui.QTransform().translate(ymin, xmin)\n\n # The order of dimensions in the scale call is (y,x) as in the numpy\n # array the last dimension corresponds to the x.\n scale_transform = QtGui.QTransform().scale((ymax-ymin)/img.shape[-2],\n (xmax-xmin)/img.shape[-1])\n \n #rotate_transform = QtGui.QTransform()\n #if source.data_type[title] == 'image':\n # if \"angle\" in conf:\n # rotate_transform = QtGui.QTransform(numpy.cos(conf[\"angle\"]), numpy.sin(conf[\"angle\"]), -numpy.sin(conf[\"angle\"]), numpy.cos(conf[\"angle\"]), 0, 0)\n\n transpose_transform = QtGui.QTransform()\n if source.data_type[title] == 'image':\n transpose_transform *= QtGui.QTransform(0, 1, 0,\n 1, 0, 0,\n 0, 0, 1)\n if(self.settingsWidget.ui.transpose.currentText() == 'Yes' or\n (self.settingsWidget.ui.transpose.currentText() == 'Auto' \n and \"transpose\" in conf)):\n transpose_transform *= QtGui.QTransform(0, 1, 0,\n 1, 0, 0,\n 0, 0, 1)\n \n transform = scale_transform * translate_transform * transpose_transform\n #transform = scale_transform * translate_transform * rotate_transform * transpose_transform\n \n # print('|%f %f %f|' % (transform.m11(), transform.m12(), transform.m13()))\n # print('|%f %f %f|' % (transform.m21(), transform.m22(), transform.m23()))\n # print('|%f %f %f|' % (transform.m31(), transform.m32(), transform.m33()))\n return transform"
] | [
"0.6478242",
"0.6399425",
"0.6175375",
"0.60631186",
"0.599424",
"0.58806276",
"0.5850955",
"0.58474565",
"0.5803862",
"0.57794195",
"0.57721114",
"0.5757488",
"0.5738938",
"0.57200027",
"0.57200027",
"0.5703677",
"0.5703677",
"0.5703677",
"0.57029206",
"0.56939614",
"0.5690659",
"0.56886953",
"0.56866646",
"0.568167",
"0.56769896",
"0.5672785",
"0.5659888",
"0.5634956",
"0.5628291",
"0.56279254"
] | 0.71848303 | 0 |
Function to add 30m SRTM elevation and derived slope, aspect, eastness, and northness to an image. Elevation is in meters, slope is between 0 and 90 deg, aspect is between 0 and 359 deg. Eastness and northness are unitless and are between -1 and 1. | def addTopography(self,img):
# Import SRTM elevation data
elevation = ee.Image("USGS/SRTMGL1_003");
# Calculate slope, aspect, and hillshade
topo = ee.Algorithms.Terrain(elevation);
# From aspect (a), calculate eastness (sin a), northness (cos a)
deg2rad = ee.Number(math.pi).divide(180);
aspect = topo.select(['aspect']);
aspect_rad = aspect.multiply(deg2rad);
eastness = aspect_rad.sin().rename(['eastness']).float();
northness = aspect_rad.cos().rename(['northness']).float();
# Add topography bands to image
topo = topo.select(['elevation','slope','aspect']).addBands(eastness).addBands(northness);
img = img.addBands(topo);
return img; | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_slope_aspect(slope, aspect, wkt, xform, fp, tmpdir):\n w, h = slope.shape\n \n try:\n handle, filename = mkstemp(dir=tmpdir, prefix='slope-aspect-', suffix='.tif')\n close(handle)\n \n driver = gdal.GetDriverByName('GTiff')\n gtiff_options = ['COMPRESS=JPEG', 'JPEG_QUALITY=95', 'INTERLEAVE=BAND']\n ds_both = driver.Create(filename, w, h, 2, gdal.GDT_Byte, gtiff_options)\n \n ds_both.SetGeoTransform(xform)\n ds_both.SetProjection(wkt)\n \n band_slope = ds_both.GetRasterBand(1)\n band_slope.SetRasterColorInterpretation(gdal.GCI_Undefined)\n band_slope.WriteRaster(0, 0, w, h, slope2bytes(slope).tostring())\n \n band_aspect = ds_both.GetRasterBand(2)\n band_aspect.SetRasterColorInterpretation(gdal.GCI_Undefined)\n band_aspect.WriteRaster(0, 0, w, h, aspect2bytes(aspect).tostring())\n \n ds_both.FlushCache()\n ds_both = None # GDAL is lame about actually writing data until this object is out of scope\n fp.write(open(filename, 'r').read())\n \n finally:\n unlink(filename)",
"def slope_lines(self,image):\r\n img_copy = image.copy()\r\n \r\n left_lines,right_lines=self.makeLeftRightline()\r\n left_line = np.mean(left_lines, axis=0)\r\n right_line = np.mean(right_lines, axis=0)\r\n\r\n poly_vertices = []\r\n order = [0,1,3,2]\r\n\r\n for slope, intercept in [left_line, right_line]:\r\n #getting height of image in y1\r\n rows, cols = image.shape[:2]\r\n y1= int(rows) \r\n #taking y2 upto 68% of y1\r\n y2= int(rows*0.68) \r\n #y=mx +c can be written as x=(y-c)/m\r\n x1=int((y1-intercept)/slope)\r\n x2=int((y2-intercept)/slope)\r\n poly_vertices.append((x1, y1))\r\n poly_vertices.append((x2, y2))\r\n\r\n # DRAWING LINES AND PATH ON THE IMAGE\r\n thickness_of_line=9\r\n color_of_line=[20, 255, 20]\r\n lines=np.array([[[x1,y1,x2,y2]]])\r\n for i in lines:\r\n for x1,y1,x2,y2 in i:\r\n cv2.line(img_copy, (x1, y1), (x2, y2), color_of_line, thickness_of_line)\r\n poly_vertices = [poly_vertices[i] for i in order]\r\n #filling polygon color\r\n cv2.fillPoly(img_copy, pts = np.array([poly_vertices],'int32'), color = (200,20,20))\r\n final_out=cv2.addWeighted(image,0.7,img_copy,0.4,0.)\r\n return final_out",
"def shadowingfunction_wallheight_23(a, vegdem, vegdem2, azimuth, altitude, scale, amaxvalue, bush, walls, aspect):\n\n if not walls.size:\n \"\"\" needs to be checked\n walls=ordfilt2(a,4,[0 1 0; 1 0 1; 0 1 0]);\n walls=walls-a;\n walls(walls<3)=0;\n sizex=size(a,1);%might be wrong\n sizey=size(a,2);\n dirwalls = filter1Goodwin_as_aspect_v3(walls,sizex,sizey,scale,a);\n aspect=dirwalls*pi/180;\n \"\"\"\n\n # conversion\n degrees = np.pi/180\n azimuth *= degrees\n altitude *= degrees\n \n # measure the size of the image\n\n sizex = np.shape(a)[0]\n sizey = np.shape(a)[1]\n \n # initialise parameters\n dx = 0\n dy = 0\n dz = 0\n \n sh = np.zeros((sizex, sizey))\n vbshvegsh = np.copy(sh)\n vegsh = np.copy(sh)\n f = np.copy(a)\n shvoveg = np.copy(vegdem) # for vegetation shadowvolume\n g = np.copy(sh)\n bushplant = bush > 1\n #wallbol = np.array([np.float(boolean) for row in walls > 0 for boolean in row])\n # wallbol = np.copy(sh)\n # wallbol[walls > 0] = 1.\n wallbol = (walls > 0).astype(float)\n wallbol[wallbol == 0] = np.nan\n\n pibyfour = np.pi/4\n threetimespibyfour = 3*pibyfour\n fivetimespibyfour = 5*pibyfour\n seventimespibyfour = 7*pibyfour\n sinazimuth = np.sin(azimuth)\n cosazimuth = np.cos(azimuth)\n tanazimuth = np.tan(azimuth)\n signsinazimuth = np.sign(sinazimuth)\n signcosazimuth = np.sign(cosazimuth)\n dssin = np.abs(1/sinazimuth)\n dscos = np.abs(1/cosazimuth)\n tanaltitudebyscale = np.tan(altitude)/scale\n\n tempvegdem = np.zeros((sizex, sizey))\n tempvegdem2 = np.zeros((sizex, sizey))\n temp = np.zeros((sizex, sizey))\n\n index = 0\n\n # main loop\n while (amaxvalue>=dz) and (np.abs(dx)<sizex) and (np.abs(dy)<sizey):\n if ((pibyfour <= azimuth) and (azimuth < threetimespibyfour)) or \\\n ((fivetimespibyfour <= azimuth) and (azimuth < seventimespibyfour)):\n dy = signsinazimuth*(index+1)\n dx = -1*signcosazimuth*np.abs(np.round((index+1)/tanazimuth))\n ds = dssin\n else:\n dy = signsinazimuth*np.abs(np.round((index+1)*tanazimuth))\n dx = -1*signcosazimuth*(index+1)\n ds = dscos\n\n # note: dx and dy represent absolute values while ds is an incremental value\n dz = ds*(index+1)*tanaltitudebyscale\n tempvegdem[0:sizex, 0:sizey] = 0\n tempvegdem2[0:sizex, 0:sizey] = 0\n temp[0:sizex, 0:sizey] = 0\n \n absdx = np.abs(dx)\n absdy = np.abs(dy)\n\n xc1 = int((dx+absdx)/2)\n xc2 = int(sizex+(dx-absdx)/2)\n yc1 = int((dy+absdy)/2)\n yc2 = int(sizey+(dy-absdy)/2)\n\n xp1 = -int((dx-absdx)/2)\n xp2 = int(sizex-(dx+absdx)/2)\n yp1 = -int((dy-absdy)/2)\n yp2 = int(sizey-(dy+absdy)/2)\n\n tempvegdem[int(xp1):int(xp2), int(yp1):int(yp2)] = vegdem[int(xc1):int(xc2), int(yc1):int(yc2)] - dz\n tempvegdem2[int(xp1):int(xp2), int(yp1):int(yp2)] = vegdem2[int(xc1):int(xc2), int(yc1):int(yc2)] - dz\n temp[int(xp1):int(xp2), int(yp1):int(yp2)] = a[int(xc1):int(xc2), int(yc1):int(yc2)] - dz\n\n f = np.max([f, temp], axis=0)\n #f = np.array([np.max(val) for val in zip(f, temp)])\n shvoveg = np.max([shvoveg, tempvegdem], axis=0)\n sh[f > a] = 1\n sh[f <= a] = 0 #Moving building shadow\n fabovea = (tempvegdem > a).astype(int) #vegdem above DEM\n gabovea = (tempvegdem2 > a).astype(int) #vegdem2 above DEM\n vegsh2 = fabovea - gabovea\n vegsh = np.max([vegsh, vegsh2], axis=0)\n vegsh[vegsh*sh > 0] = 0 # removing shadows 'behind' buildings\n vbshvegsh = np.copy(vegsh) + vbshvegsh\n\n # vegsh at high sun altitudes\n if index == 0:\n firstvegdem = np.copy(tempvegdem) - np.copy(temp)\n firstvegdem[firstvegdem <= 0] = 1000\n vegsh[firstvegdem < dz] = 1\n vegsh *= (vegdem2 > a)\n vbshvegsh = np.zeros((sizex, 
sizey))\n\n # Bush shadow on bush plant\n if np.max(bush) > 0 and np.max(fabovea*bush) > 0:\n tempbush = np.zeros((sizex, sizey))\n tempbush[int(xp1):int(xp2), int(yp1):int(yp2)] = bush[int(xc1):int(xc2), int(yc1):int(yc2)] - dz\n g = np.max([g, tempbush], axis=0)\n g = bushplant * g\n \n # if index<3 #removing shadowed walls 1\n # tempfirst(1:sizex,1:sizey)=0;\n # tempfirst(xp1:xp2,yp1:yp2)= a(xc1:xc2,yc1:yc2);\n # if index==1 # removing shadowed walls 2\n # tempwalls(1:sizex,1:sizey)=0;\n # tempwalls(xp1:xp2,yp1:yp2)= wallbol(xc1:xc2,yc1:yc2);\n # wallfirst=((tempwalls+wallbol).*wallbol)==2;\n # wallfirstaspect=aspect.*wallbol.*wallfirst;\n # wallfirstaspect(wallfirstaspect==0)=NaN;\n # wallfirstsun=(wallfirstaspect>azimuth-pi/2 & wallfirstaspect<azimuth+pi/2);\n # wallfirstshade=wallfirst-wallfirstsun;\n # end\n # end\n \n index += 1\n # imagesc(h),axis image,colorbar\n # Stopping loop if all shadows reached the ground\n # stopbuild=stopbuild==f;\n # imagesc(stopbuild),axis image,pause(0.3)\n # fin=find(stopbuild==0, 1);\n # stopbuild=f;\n # stopveg=stopveg==vegsh;\n # finveg=find(stopveg==0, 1);\n # stopveg=vegsh;\n # if isempty(fin) && isempty(finveg)\n # dz=amaxvalue+9999;\n # end\n\n # Removing walls in shadow due to selfshadowing\n azilow = azimuth - np.pi/2\n azihigh = azimuth + np.pi/2\n if azilow >= 0 and azihigh < 2*np.pi: # 90 to 270 (SHADOW)\n facesh = np.logical_or(aspect < azilow, aspect >= azihigh).astype(float) - wallbol + 1 # TODO check\n elif azilow < 0 and azihigh <= 2*np.pi: # 0 to 90\n azilow = azilow + 2*np.pi\n facesh = np.logical_or(aspect > azilow, aspect <= azihigh) * -1 + 1 # (SHADOW)\n elif azilow > 0 and azihigh >= 2*np.pi: # 270 to 360\n azihigh -= 2 * np.pi\n facesh = np.logical_or(aspect > azilow, aspect <= azihigh)*-1 + 1 # (SHADOW)\n\n sh = 1-sh\n vbshvegsh[vbshvegsh > 0] = 1\n vbshvegsh = vbshvegsh-vegsh\n \n if np.max(bush) > 0:\n g = g-bush\n g[g > 0] = 1\n g[g < 0] = 0\n vegsh = vegsh-bushplant+g\n vegsh[vegsh < 0] = 0\n\n vegsh[vegsh > 0] = 1\n shvoveg = (shvoveg-a) * vegsh #Vegetation shadow volume\n vegsh = 1-vegsh\n vbshvegsh = 1-vbshvegsh\n\n #removing walls in shadow\n # tempfirst=tempfirst-a;\n # tempfirst(tempfirst<2)=1;\n # tempfirst(tempfirst>=2)=0;\n \n shvo = f - a # building shadow volume\n\n facesun = np.logical_and(facesh + (walls > 0).astype(float) == 1, walls > 0).astype(float)\n #facesun = np.reshape(np.array([np.float(boolean) for row in facesun for boolean in row]), facesun.shape)\n\n wallsun = np.copy(walls-shvo)\n wallsun[wallsun < 0] = 0\n wallsun[facesh == 1] = 0 # Removing walls in \"self\"-shadow\n # wallsun(tempfirst = = 0) = 0# Removing walls in shadow 1\n # wallsun(wallfirstshade = = 1) = 0# Removing walls in shadow 2\n wallsh = np.copy(walls-wallsun)\n # wallsh(wallfirstshade = = 1) = 0\n # wallsh = wallsh+(wallfirstshade.*walls)\n #wallbol = np.reshape(np.array([np.float(boolean) for row in walls > 0 for boolean in row]), walls.shape)\n wallbol = (walls > 0).astype(float)\n\n wallshve = shvoveg * wallbol\n wallshve = wallshve - wallsh\n wallshve[wallshve < 0] = 0\n id = np.where(wallshve > walls)\n wallshve[id] = walls[id]\n wallsun = wallsun-wallshve # problem with wallshve only\n id = np.where(wallsun < 0)\n wallshve[id] = 0\n wallsun[id] = 0\n \n # subplot(2,2,1),imagesc(facesh),axis image ,colorbar,title('facesh')#\n # subplot(2,2,2),imagesc(wallsun,[0 20]),axis image, colorbar,title('Wallsun')#\n # subplot(2,2,3),imagesc(sh-vegsh*0.8), colorbar,axis image,title('Groundsh')#\n # subplot(2, 2, 4), imagesc(wallshve, 
[0 20]), axis image, colorbar, title('Wallshve')#\n return vegsh, sh, vbshvegsh, wallsh, wallsun, wallshve, facesh, facesun",
"def draw_lines(args, img, lines, color=[255, 0, 0], thickness=8):\n slopes = [ (line[0][3]-line[0][1])/(line[0][2]-line[0][0]) for line in lines]\n rights = [ [line, slope, line[0][1] - slope*line[0][0]] for line,slope in zip(lines, slopes) if slope > 0.0 ] # and slope < 0.5 and not np.isnan(slope) ]\n lefts = [ [line, slope, line[0][1] - slope*line[0][0]] for line,slope in zip(lines, slopes) if slope < 0.0 ] # and slope > -0.5 and not np.isnan(slope) ]\n #lefts[0] = [ [[x1,y1,x2,y2]] , slope , y_intercept ]\n\n y_mins = [ min(line[0][1],line[0][3]) for line in lines]\n y_min = min(y_mins)\n y_max = img.shape[0]\n\n log_new = [slopes, rights, lefts, y_mins, y_min, y_max]\n\n for lanes in [rights,lefts]:\n slope_mean = np.mean( [ lane[1] for lane in lanes ] )\n slope_std = np.std ( [ lane[1] for lane in lanes ] )\n if slope_std == 0:\n slope = slope_mean\n else:\n slope = np.mean( [ lane[1] for lane in lanes if lane[1] - slope_mean < 2*slope_std ] ) \n print()\n print('slope : {}'.format(slope))\n\n intercept_mean = np.mean( [ lane[2] for lane in lanes ] )\n intercept_std = np.std ( [ lane[2] for lane in lanes ] )\n if intercept_std == 0:\n intercept = intercept_mean\n else:\n intercept = np.mean( [ lane[2] for lane in lanes if lane[2] - intercept_mean < 2*intercept_std ] )\n print('intercept : {}'.format(intercept))\n \n x_min = int( ( y_min - intercept ) / slope ) \n x_max = int( ( y_max - intercept ) / slope )\n\n log_new.append(slope)\n log_new.append(intercept)\n log_new.append(x_min)\n log_new.append(x_max)\n\n cv2.line(img, (x_min, y_min), (x_max, y_max), color, thickness)\n\n try: \n log_line = pd.read_csv(args.path+args.csv_file, skiprows=[0], names=args.header)\n except:\n log_line = pd.DataFrame([ ], columns=args.header)\n finally:\n df = pd.DataFrame([ log_new ], columns=args.header)\n # update log: add new entry into the log\n result = pd.concat([log_line, df], ignore_index=True)\n result.to_csv(args.path+args.csv_file) #, index=False)",
"def compute_slope(self):\n\n # assign variables\n slope = 'slope'\n aspect = 'aspect'\n dx = 'dx'\n dy = 'dy'\n grow_slope = 'grow_slope'\n grow_aspect = 'grow_aspect'\n grow_dx = 'grow_dx'\n grow_dy = 'grow_dy'\n\n # compute slope and partial derivatives\n gscript.run_command(\n 'r.slope.aspect',\n elevation=self.elevation,\n slope=slope,\n dx=dx,\n dy=dy,\n overwrite=True)\n\n # grow border to fix edge effects of moving window computations\n gscript.run_command(\n 'r.grow.distance',\n input=slope,\n value=grow_slope,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{slope}={grow_slope}\".format(\n slope=slope,\n grow_slope=grow_slope),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=dx,\n value=grow_dx,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{dx}={grow_dx}\".format(\n dx=dx,\n grow_dx=grow_dx),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=dy,\n value=grow_dy,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{dy}={grow_dy}\".format(\n dy=dy,\n grow_dy=grow_dy),\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['grow_slope',\n 'grow_dx',\n 'grow_dy'],\n flags='f')\n\n return slope, dx, dy",
"def __init__(self, mapfile, camera=None, light=None,\n width=100.0, depth=100.0, height=10.0,\n divx=0, divy=0, ntiles=1.0, name=\"\",\n x=0.0, y=0.0, z=0.0, rx=0.0, ry=0.0, rz=0.0,\n sx=1.0, sy=1.0, sz=1.0, cx=0.0, cy=0.0, cz=0.0, smooth=True, cubic=False):\n super(ElevationMap, self).__init__(camera, light, name, x, y, z, rx, ry, rz,\n sx, sy, sz, cx, cy, cz)\n if mapfile[0] != '/':\n mapfile = sys.path[0] + '/' + mapfile\n if VERBOSE:\n print(\"Loading height map ...\", mapfile)\n\n if divx > 200 or divy > 200:\n print(\"... Map size can't be bigger than 200x200 divisions\")\n divx = 200\n divy = 200\n\n im = Image.open(mapfile)\n im = ImageOps.invert(im)\n ix, iy = im.size\n if (ix > 200 and divx == 0) or (divx > 0):\n if divx == 0:\n divx = 200\n divy = 200\n im = im.resize((divx, divy), Image.ANTIALIAS)\n ix, iy = im.size\n if not im.mode == \"P\":\n im = im.convert('P', palette=Image.ADAPTIVE)\n\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n self.pixels = im.load()\n self.width = width\n self.depth = depth\n self.height = height\n self.ix = ix\n self.iy = iy\n self.ttype = GL_TRIANGLE_STRIP\n\n if VERBOSE:\n print(\"Creating Elevation Map ...\", ix, iy)\n\n wh = width * 0.5\n hh = depth * 0.5\n ws = width / ix\n hs = depth / iy\n ht = height / 255.0\n tx = 1.0*ntiles / ix\n ty = 1.0*ntiles / iy\n\n verts = []\n norms = []\n tex_coords = []\n idx = []\n\n for y in xrange(0, iy):\n for x in xrange(0, ix):\n hgt = (self.pixels[x, y])*ht\n this_x = -wh + x*ws\n this_z = -hh + y*hs\n if cubic:\n \"\"\" this is a bit experimental. It tries to make the map either zero\n or height high. Vertices are moved 'under' adjacent ones if there is\n a step to make vertical walls. Goes wrong in places - mainly because\n it doesn't check diagonals\n \"\"\"\n if hgt > height / 2:\n hgt = height\n else:\n hgt = 0.0\n if hgt == 0 and y > 0 and y < iy-1 and x > 0 and x < ix-1:\n if self.pixels[x-1, y] > 127:\n this_x = -wh + (x-1)*ws\n elif self.pixels[x+1, y] > 127:\n this_x = -wh + (x+1)*ws\n elif self.pixels[x, y-1] > 127:\n this_z = -hh + (y-1)*hs\n elif self.pixels[x, y+1] > 127:\n this_z = -hh + (y+1)*hs\n elif self.pixels[x-1, y-1] > 127:\n this_x = -wh + (x-1)*ws\n this_z = -hh + (y-1)*hs\n elif self.pixels[x-1, y+1] > 127:\n this_x = -wh + (x-1)*ws\n this_z = -hh + (y+1)*hs\n elif self.pixels[x+1, y-1] > 127:\n this_x = -wh + (x+1)*ws\n this_z = -hh + (y-1)*hs\n elif self.pixels[x+1, y+1] > 127:\n this_x = -wh + (x+1)*ws\n this_z = -hh + (y+1)*hs\n verts.append((this_x, hgt, this_z))\n tex_coords.append(((ix-x) * tx,(iy-y) * ty))\n\n s = 0\n #create one long triangle_strip by alternating X directions\n for y in range(0, iy-1):\n for x in range(0, ix-1):\n i = (y * ix)+x\n idx.append((i, i+ix, i+ix+1))\n idx.append((i+ix+1, i+1, i))\n s += 2\n\n self.buf = []\n self.buf.append(Buffer(self, verts, tex_coords, idx, None, smooth))",
"def generate_slope_raster(in_path, out_path):\n cmd = \"gdaldem slope -alg ZevenbergenThorne {} {}\".format(in_path, out_path)\n os.system(cmd)",
"def elevation(x, y):\n file = os.path.abspath(\"..\") + \"\\Shape\\Shape.vrt\"\n layer = gdal.Open(file)\n gt = layer.GetGeoTransform()\n rasterx = int((x - gt[0]) / gt[1])\n rastery = int((y - gt[3]) / gt[5])\n print('elevation =', layer.GetRasterBand(1).ReadAsArray(rasterx, rastery, 1, 1)[0][0], 'm above sea level')",
"def add_elevation_bands(img,\n dem_img):\n elevation = ee.Image(dem_img)\n slope = ee.Terrain.slope(elevation)\n aspect = ee.Terrain.aspect(elevation)\n topo = elevation.addBands(slope).addBands(aspect)\\\n .select([0, 1, 2], ['elevation', 'slope', 'aspect'])\n return ee.Image(img).addBands(topo)",
"def shade_hills_onelight(slope, aspect, azimuth, altitude):\n deg2rad = pi/180\n\n shaded = sin(altitude * deg2rad) * numpy.sin(slope) \\\n + cos(altitude * deg2rad) * numpy.cos(slope) \\\n * numpy.cos((azimuth - 90.0) * deg2rad - aspect)\n \n return shaded",
"def read_slope_aspect(filename):\n if not exists(filename):\n raise IOError('Missing file \"%s\"' % filename)\n \n ds = gdal.Open(str(filename))\n \n if ds is None:\n raise IOError('Unopenable file \"%s\"' % filename)\n \n slope = bytes2slope(ds.GetRasterBand(1).ReadAsArray())\n aspect = bytes2aspect(ds.GetRasterBand(2).ReadAsArray())\n \n return slope, aspect",
"def __init__(self, mapfile, camera=None, light=None,\r\n width=100.0, depth=100.0, height=10.0,\r\n divx=0, divy=0, ntiles=1.0, name=\"\",\r\n x=0.0, y=0.0, z=0.0, rx=0.0, ry=0.0, rz=0.0,\r\n sx=1.0, sy=1.0, sz=1.0, cx=0.0, cy=0.0, cz=0.0, smooth=True, cubic=False):\r\n super(ElevationMap, self).__init__(camera, light, name, x, y, z, rx, ry, rz,\r\n sx, sy, sz, cx, cy, cz)\r\n if divx > 200 or divy > 200:\r\n print(\"... Map size can't be bigger than 200x200 divisions\")\r\n divx = 200\r\n divy = 200\r\n if issubclass(type(mapfile), type(\"\")): #HORRIBLE. Only way to cope with python2v3\r\n if mapfile[0] != '/':\r\n mapfile = sys.path[0] + '/' + mapfile\r\n if VERBOSE:\r\n print(\"Loading height map ...\", mapfile)\r\n\r\n im = Image.open(mapfile)\r\n im = ImageOps.invert(im)\r\n else:\r\n im = mapfile #allow image files to be passed as mapfile\r\n ix, iy = im.size\r\n if (ix > 200 and divx == 0) or (divx > 0):\r\n if divx == 0:\r\n divx = 200\r\n divy = 200\r\n im = im.resize((divx, divy), Image.ANTIALIAS)\r\n ix, iy = im.size\r\n if not im.mode == \"P\":\r\n im = im.convert('P', palette=Image.ADAPTIVE)\r\n\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\r\n self.pixels = im.load()\r\n self.width = width\r\n self.depth = depth\r\n self.height = height\r\n self.ix = ix\r\n self.iy = iy\r\n self.ttype = GL_TRIANGLE_STRIP\r\n\r\n if VERBOSE:\r\n print(\"Creating Elevation Map ...\", ix, iy)\r\n\r\n wh = width * 0.5\r\n hh = depth * 0.5\r\n ws = width / ix\r\n hs = depth / iy\r\n ht = height / 255.0\r\n tx = 1.0*ntiles / ix\r\n ty = 1.0*ntiles / iy\r\n\r\n verts = []\r\n norms = []\r\n tex_coords = []\r\n idx = []\r\n\r\n for y in xrange(0, iy):\r\n for x in xrange(0, ix):\r\n hgt = (self.pixels[x, y])*ht\r\n this_x = -wh + x*ws\r\n this_z = -hh + y*hs\r\n if cubic:\r\n \"\"\" this is a bit experimental. It tries to make the map either zero\r\n or height high. Vertices are moved 'under' adjacent ones if there is\r\n a step to make vertical walls. Goes wrong in places - mainly because\r\n it doesn't check diagonals\r\n \"\"\"\r\n if hgt > height / 2:\r\n hgt = height\r\n else:\r\n hgt = 0.0\r\n if hgt == 0 and y > 0 and y < iy-1 and x > 0 and x < ix-1:\r\n if self.pixels[x-1, y] > 127:\r\n this_x = -wh + (x-1)*ws\r\n elif self.pixels[x+1, y] > 127:\r\n this_x = -wh + (x+1)*ws\r\n elif self.pixels[x, y-1] > 127:\r\n this_z = -hh + (y-1)*hs\r\n elif self.pixels[x, y+1] > 127:\r\n this_z = -hh + (y+1)*hs\r\n elif self.pixels[x-1, y-1] > 127:\r\n this_x = -wh + (x-1)*ws\r\n this_z = -hh + (y-1)*hs\r\n elif self.pixels[x-1, y+1] > 127:\r\n this_x = -wh + (x-1)*ws\r\n this_z = -hh + (y+1)*hs\r\n elif self.pixels[x+1, y-1] > 127:\r\n this_x = -wh + (x+1)*ws\r\n this_z = -hh + (y-1)*hs\r\n elif self.pixels[x+1, y+1] > 127:\r\n this_x = -wh + (x+1)*ws\r\n this_z = -hh + (y+1)*hs\r\n verts.append((this_x, hgt, this_z))\r\n tex_coords.append(((ix-x) * tx,(iy-y) * ty))\r\n\r\n s = 0\r\n #create one long triangle_strip by alternating X directions\r\n for y in range(0, iy-1):\r\n for x in range(0, ix-1):\r\n i = (y * ix)+x\r\n idx.append((i, i+ix, i+ix+1))\r\n idx.append((i+ix+1, i+1, i))\r\n s += 2\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, verts, tex_coords, idx, None, smooth))",
"def slope_alphas(dm):\n\n # Calculate the alphas for the displacement map. We could have just as easily read this in from the image.\n for x in range(image_size - 1):\n for y in range(image_size - 1):\n h = dm[x, y]\n # Start with the assumption that this is straight sand\n a = 0\n # If we're above the water, then the alpha value will equal the height above the water (* 2), meaning that\n # areas 128 units above water-level will be pure grass\n if h > water_height:\n a = (h - water_height) * 2\n if a > 255:\n a = 255\n\n # Get the angle of the slope here\n slope_angle = math.degrees(dm.get_slope((x, y)))\n\n # Add a sharp decrease in alpha around 45 degrees, so that unclimbable areas are visible\n if slope_angle > 30 and slope_angle < 60:\n a -= (slope_angle - 30) * 7\n elif slope_angle >= 60:\n a -= (slope_angle - 60) * 2 + 210\n\n if a < 0:\n a = 0\n # Finally, we set the actual value to what we've determined\n dm.source_alphas[x, y] = a",
"def snow_depth(lon, lat, month):\n\n im = month - 1\n\n h0 = np.array( [28.01, 30.28, 33.89, 36.80, 36.93, 36.59,\n 11.02, 4.64, 15.81, 22.66, 25.57, 26.67] )\n a = np.array( [ 0.1270, 0.1056, 0.5486, 0.4046, 0.0214, 0.7021,\n 0.3008, 0.3100, 0.2119, 0.3594, 0.1496, -0.1876] )\n b = np.array( [-1.1833, -0.5908, -0.1996, -0.4005, -1.1795, -1.4819,\n -1.2591, -0.6350, -1.0292, -1.3483, -1.4643, -1.4229] )\n c = np.array( [-0.1164, -0.0263, 0.0280, 0.0256, -0.1076, -0.1195,\n -0.0811, -0.0655, -0.0868, -0.1063, -0.1409, -0.1413] )\n d = np.array( [-0.0051, -0.0049, 0.0216, 0.0024, -0.0244, -0.0009,\n -0.0043, 0.0059, -0.0177, 0.0051, -0.0079, -0.0316] )\n e = np.array( [ 0.0243, 0.0044, -0.0176, -0.0641, -0.0142, -0.0603,\n -0.0959, -0.0005, -0.0723, -0.0577, -0.0258, -0.0029] )\n\n x = (90. - lat) * np.cos( np.radians(lon) )\n y = (90. - lat) * np.sin( np.radians(lon) )\n\n h = ( h0[im] + ( a[im] * x ) + ( b[im] * y ) + ( c[im] * x * y ) +\n ( d[im] * x * x ) + ( e[im] * y * y ) )\n\n return h",
"def sim_image_ramp(det, im_slope, verbose=False, **kwargs):\n if verbose:\n _log.info('Generating image acquisition ramp...')\n\n return sim_dark_ramp(det, im_slope, ramp_avg_ch=None, verbose=False, **kwargs)",
"def set_elevation(tiff_file, api_key, turn=0,\n api_endpoint=(\"https://engine.tygron.com/api/session/\"\n \"event/editorgeotiff/add/?\")):\n with open(tiff_file, 'rb') as f:\n heightmap = f.read()\n # the \"True\" value in below's if statement should be \"start\"\n json = elevation_json(turn, heightmap)\n r = requests.post(url=api_endpoint+api_key, json=json)\n try:\n heightmap_id = r.json()\n except ValueError:\n print(\"UPLOAD FAILED: Received no heightmap id from Tygron.\")\n api_endpoint = (\"https://engine.tygron.com/api/session/event/\"\n \"editormap/set_height_geotiff/?\")\n r = requests.post(url=api_endpoint+api_key, json=[heightmap_id])\n return heightmap_id",
"def addNightLights(self,img,y):\n\t\t\n\t\tstartDate = ee.Date.fromYMD(y, 1, 1)\n\t\tendDate = ee.Date.fromYMD(y, 12, 31)\n\t\t\n\t\tif y < 2012:\n\t\t\n\t\t\tnightLights = ee.Image(ee.ImageCollection(\"NOAA/DMSP-OLS/NIGHTTIME_LIGHTS\").filterDate(startDate,endDate).mean())\t\n\t\t\timg = img.addBands(nightLights.select([\"stable_lights\"]).rename([\"stable_lights\"]))\n\t\t\n\t\tif y >= 2012:\n\t\t\tnightLights = ee.Image(ee.ImageCollection(\"NOAA/VIIRS/DNB/MONTHLY_V1/VCMCFG\").filterDate(startDate,endDate).mean())\t\n\t\t\timg = img.addBands(nightLights.select([\"avg_rad\"]).rename([\"stable_lights\"]))\n\t\t\n\t\treturn img",
"def draw_avg_ext_lines(img, lines, color=[255, 0, 0], thickness=10):\n debug=False\n lSlopeAvg=0\n rSlopeAvg=0\n slope_tolerance=.1\n slope_tolerance_from_zero=.5\n bottom_y = img.shape[0]\n top_y = int(bottom_y /1.6)\n \n lLines = []\n rLines = []\n l = 1\n r = 1\n \n for line in lines:\n for x1,y1,x2,y2 in line:\n slope = (y2-y1)/(x2-x1)\n if np.absolute(slope) == np.inf or np.absolute(slope) < slope_tolerance_from_zero:\n continue\n if debug:\n cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n if slope < 0:\n lSlopeAvg = lSlopeAvg + (slope - lSlopeAvg) / l\n l += 1\n if np.absolute(lSlopeAvg - slope) < slope_tolerance :\n lLines.append((x1,y1))\n lLines.append((x2,y2))\n else:\n rSlopeAvg = rSlopeAvg + (slope - rSlopeAvg) / r\n r += 1\n if np.absolute(rSlopeAvg - slope) < slope_tolerance :\n rLines.append((x1,y1))\n rLines.append((x2,y2)) \n \n \"\"\"\n After having split the lines, I use cv2.fitline to fit the sets of points to a single line.\n cv2.fitline gives back a unit vector and a point in the line, both of which we can use\n to calculate the slope and intercept of the line function.\n \"\"\"\n if len(lLines) > 0 and len(rLines) > 0 :\n [left_vx,left_vy,left_x,left_y] = cv2.fitLine(np.array(lLines, dtype=np.int32), cv2.DIST_L2,0,0.01,0.01) \n left_slope = left_vy / left_vx\n left_b = left_y - (left_slope*left_x)\n\n [right_vx,right_vy,right_x,right_y] = cv2.fitLine(np.array(rLines, dtype=np.int32), cv2.DIST_L2,0,0.01,0.01) \n right_slope = right_vy / right_vx\n right_b = right_y - (right_slope*right_x)\n\n # Average this line with previous frames \n prev_lines.append((left_b, left_slope, right_b, right_slope))\n \n if len(prev_lines) > 0: \n avg = np.sum(prev_lines, -3) /len(prev_lines)\n left_b = avg[0]\n left_slope = avg[1]\n right_b = avg[2]\n right_slope = avg[3]\n\n \"\"\"\n Having the slope and intercept enables us to calculate the x coordinates of our desired\n start and end points at the top and the bottom of the road.\n \"\"\"\n ltop_x = (top_y - left_b) / left_slope\n lbottom_x = (bottom_y - left_b) / left_slope\n\n rtop_x = (top_y - right_b) / right_slope\n rbottom_x = (bottom_y - right_b) / right_slope\n\n cv2.line(img, (lbottom_x, bottom_y), (ltop_x, top_y), color, thickness)\n cv2.line(img, (rbottom_x, bottom_y), (rtop_x, top_y), color, thickness)",
"def avg_slope_intercept(image, lines):\n left_fit = []\n right_fit = []\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n # Fit first order function\n #params = np.polyfit((x1, y1), (x2, y2), 1)\n slope = (y2 - y1) / (x2 - x1)\n intercept = y1 - slope * x1\n if(slope < 0):\n left_fit.append((slope, intercept))\n cv2.line(image, (x1, y1), (x2, y2), (255, 0, 0), 2)\n else:\n right_fit.append((slope, intercept))\n cv2.line(image, (x1, y1), (x2, y2), (0, 255, 0), 2)\n\n if(DEBUG_IMAGES):\n cv2.imshow(\"Hough Lines\", image) \n\n left_fit_avg, right_fit_avg= [], []\n left_line, right_line = [], []\n if(len(left_fit) > 0):\n left_fit_avg = np.average(left_fit, axis=0)\n left_line = make_coordinates(image, left_fit_avg)\n if(len(right_fit) > 0):\n right_fit_avg = np.average(right_fit, axis=0)\n right_line = make_coordinates(image, right_fit_avg)\n\n \"\"\"\n left_line, right_line = [], []\n if(len(left_fit_avg) > 0):\n left_line = make_coordinates(image, left_fit_avg)\n if(len(right_fit_avg) > 0):\n right_line = make_coordinates(image, right_fit_avg)\n \"\"\"\n\n return np.array([left_line, right_line])",
"def draw_final_image(self, image, warped, undist, ploty, left_fitx, right_fitx, Minv, left_rad, right_rad):\n gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)\n # Create an image to draw the lines on\n warp_zero = np.zeros_like(gray).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n\n # Warp the blank back to original image space using inverse perspective matrix (Minv)\n newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0]))\n # Combine the result with the original image\n result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n off_center = calculate_center(left_fitx, right_fitx, image.shape)\n direction_str = 'left' if off_center < 0 else 'right'\n center_str = '{:.2f} m of center {}'.format(abs(off_center), direction_str)\n cv2.putText(result, center_str, (430, 630), font, 1, (0, 0, 255), 2, cv2.LINE_AA)\n if left_rad and right_rad:\n curvature = 0.5 * (round(right_rad / 1000, 1) + round(left_rad / 1000, 1))\n else:\n curvature = 0\n str2 = 'Radius of curvature: {} km'.format(curvature)\n cv2.putText(result, str2, (430, 670), font, 1, (0, 0, 255), 2, cv2.LINE_AA)\n\n if self.args.is_test:\n plt.imshow(result)\n plt.show()\n\n return result",
"def draw_lines(img, lines, color=[0, 0, 255], thickness=10):\n \n yFinal = 540 # tweak these values as per the frame size\n yIni = 350\n xPlus = []\n yPlus = []\n xMinus = []\n yMinus= []\n slope_range = 0.2\n\n if lines is not None:\n for line in lines:\n if line is not None:\n for x1,y1,x2,y2 in line:\n # check slope \n slope = (y2-y1)/(x2-x1)\n\t\t \n \t\t # Collect all points with + ve slope (right lane)\n if (slope > slope_range):\n xPlus.append(x1)\n xPlus.append(x2)\n yPlus.append(y1)\n yPlus.append(y2)\n\n # Collect all points with - ve slope (left lane)\n elif ((slope) < (-slope_range)):\n xMinus.append(x1)\n xMinus.append(x2)\n yMinus.append(y1)\n yMinus.append(y2)\n # If out of range, lists defined in beginning of this function will be empty \n else:\n continue\n \n # draw right lane\n x1,y1,x2,y2 = fit_line(xPlus, yPlus, yIni, yFinal)\n cv2.line(img,(x1,y1),(x2,y2),color, thickness) \n\n # draw left lane\n x1,y1,x2,y2 = fit_line(xMinus, yMinus, yIni, yFinal)\n cv2.line(img,(x1,y1),(x2,y2),color,thickness)",
"def drawLatentClass(regions, lclass, \\\n sensorLocations=\"../../data/locations/bb_floor2_locations_old.txt\", \\\n writeLocation = \"../../output/latent.png\", \\\n sensorDirections = \"../../data/locations/bb_floor2_draw_directions.txt\", \\\n bgImage = \"../../images/bb_floor2.png\", \\\n sensorSize = 12, \\\n baseSize = 10, \\\n scale = 5, \\\n lengths = 8):\n \n im = Image.open(bgImage)\n d = ImageDraw.Draw(im)\n locations = []\n directions = []\n classSpot = 0\n \n #Open and parse the locations file\n f = open(sensorLocations, 'r')\n \n for line in f.readlines():\n split = line.split(' ')\n locations.append((split[1], split[2], split[0]))\n \n f = open(sensorDirections, 'r')\n \n for line in f.readlines():\n try:\n split = line.split(' ')\n directions.append(int(split[1]))\n except:\n pass\n \n _drawSensors(d, locations, sensorSize)\n \n for c in regions:\n \n c.matrixToModel(c.modelList)\n \n #Get first sensor\n sens = c.sensors[0]\n data = numpy.zeros((lengths, len(c.sensors)), float)\n \n sindex = bbdata.allSensors.index(sens)\n \n foo = locations[sindex]\n foodir = directions[sindex]\n \n x = int(foo[0])\n y = int(foo[1])\n \n if foodir == 0:\n y -= lengths * scale + sensorSize + 5\n x -= sensorSize - 5\n if foodir == 1:\n x += sensorSize + 5\n y -= sensorSize - 5\n if foodir == 2:\n y += lengths * scale + 10\n x -= sensorSize - 5\n if foodir == 3:\n x -= len(c.sensors) * scale + sensorSize + 5\n y -= sensorSize - 5 \n \n total = None\n localSum = 0\n for m in c.models:\n #Calculate array\n bar = hmmextra.generateAvgModel(m, lengths)\n \n if total == None:\n total = lclass[classSpot] * bar\n else:\n total += lclass[classSpot] * bar\n\n localSum += lclass[classSpot]\n classSpot += 1\n \n #Normalize\n total /= localSum\n _drawArray(d, total, x, y, scale = scale)\n \n im.save(writeLocation, \"PNG\")",
"def add_altitude():\n\n doc = Metashape.app.document\n if not len(doc.chunks):\n raise Exception(\"No chunks!\")\n\n # alt = Metashape.app.getFloat(\"Please specify the height to be added:\", 100)\n alt = float(sys.argv[1])\n\n\n chunk = doc.chunk\n\n for camera in chunk.cameras:\n if camera.reference.location:\n coord = camera.reference.location\n camera.reference.location = Metashape.Vector([coord.x, coord.y, coord.z + alt])\n print(\"Add : \"+str(sys.argv[1]))",
"def get_atmospheric_light(self,img, *, size, percent):\n #Get the atmospheric light factor from the image\n m, n, _ = img.shape\n\n flat_img = img.reshape(m * n, 3)\n flat_dark = self.get_dark_channel(img, size=size).ravel()\n count = math.ceil(m * n * percent / 100)\n indices = np.argpartition(flat_dark, -count)[:-count]\n\n return np.amax(np.take(flat_img, indices, axis=0), axis=0)",
"def calcTotalInsolation(latitude, slope, azimuth):\n\tdata = readSolarData('Inputs/LocationSolarData.csv')\n\tdf = addStandardColumns(data)\n\tdf = addCalcSolarVars(df, latitude)\n\tdf = addCalcMethodVars(df, latitude, azimuth, slope)\n\tdf['insolation_tilted'] = df['r_bar'] * df['insolation_horizontal']\n\treturn df",
"def solarelevation_function_overcast(latitude_deg, longitude_deg, utc_datetime,\n elevation = elevation_default, temperature_celsius = 25,\n pressure_millibars = 1013.25):\n altitude = solar.GetAltitude(latitude_deg, longitude_deg,utc_datetime, elevation, temperature_celsius,pressure_millibars)\n return ((-0.0067133) + (0.78600 * (math.sin(altitude)))) + (0.22401 * (0.5 * (1 - math.cos(2 * altitude))))",
"def addCalcMethodVars(df, latitude, azimuth, slope):\n\tdf['a'] = 0.409 + (0.5016 * np.sin(np.deg2rad(df['sunset_hour_angle'] - 60)))\n\tdf['a_prime'] = df['a'] - df['diffuse_fraction']\n\tdf['b'] = 0.6609 - (0.4767 * np.sin(np.deg2rad(df['sunset_hour_angle'] - 60)))\n\tdf['d'] = np.sin(np.deg2rad(df['sunset_hour_angle'])) - np.deg2rad(df['sunset_hour_angle'] * np.cos(np.deg2rad(df['sunset_hour_angle'])))\n\tdf['A'] = np.cos(np.deg2rad(slope)) + (np.tan(np.deg2rad(latitude)) * np.cos(np.deg2rad(azimuth)) * np.sin(np.deg2rad(slope)))\n\tdf['B'] = (np.cos(np.deg2rad(df['sunset_hour_angle'])) * np.cos(np.deg2rad(slope))) + (np.tan(np.deg2rad(df['declination'])) * np.sin(np.deg2rad(slope)) * np.cos(np.deg2rad(azimuth)))\n\tdf['C'] = np.sin(np.deg2rad(slope)) * np.sin(np.deg2rad(azimuth)) / np.cos(np.deg2rad(latitude))\n\tdf['omega_sr_abs'] = np.absolute(\n\t\tnp.minimum(\n\t\t\tdf['sunset_hour_angle'], \n\t\t\tnp.rad2deg(np.arccos(((df['A'] * df['B']) + (df['C'] * np.sqrt((df['A'] ** 2) - (df['B'] ** 2) + (df['C'] ** 2)))) / ((df['A'] ** 2) + (df['C'] ** 2))))\n\t\t\t)\n\t\t)\n\tdf['omega_sr'] = np.where(\n\t\t((df['A'] > 0.0) & (df['B'] > 0)) | (df['A'] >= df['B']), \n\t\t-df['omega_sr_abs'], \n\t\tdf['omega_sr_abs']\n\t\t)\n\tdf['omega_ss_abs'] = np.absolute(\n\t\tnp.minimum(\n\t\t\tdf['sunset_hour_angle'], \n\t\t\tnp.rad2deg(np.arccos(((df['A'] * df['B']) - (df['C'] * np.sqrt((df['A'] ** 2) - (df['B'] ** 2) + (df['C'] ** 2)))) / ((df['A'] ** 2) + (df['C'] ** 2))))\n\t\t\t)\n\t\t)\n\tdf['omega_ss'] = np.where(\n\t\t((df['A'] > 0.0) & (df['B'] > 0)) | (df['A'] >= df['B']), \n\t\tdf['omega_ss_abs'], \n\t\t-df['omega_ss_abs']\n\t\t)\n\tdf['D'] = np.where(\n\t\tdf['omega_ss'] >= df['omega_sr'],\n\t\tnp.maximum(0.0,\n\t\t\t((1 / (2 * df['d'])) * \\\n\t\t (np.deg2rad(((df['b'] * df['A'] / 2) - (df['a_prime'] * df['B'])) * (df['omega_ss'] - df['omega_sr'])) + \\\n\t\t \t\t\t (((df['a_prime'] * df['A']) - (df['b'] * df['B'])) * (np.sin(np.deg2rad(df['omega_ss'])) - np.sin(np.deg2rad(df['omega_sr'])))) - \\\n\t\t\t\t\t\t (df['a_prime'] * df['C'] * (np.cos(np.deg2rad(df['omega_ss'])) - np.cos(np.deg2rad(df['omega_sr'])))) + \\\n\t\t\t\t\t\t ((df['b'] * df['A'] / 2) * ((np.sin(np.deg2rad(df['omega_ss'])) * np.cos(np.deg2rad(df['omega_ss']))) - \\\n\t\t\t\t\t\t \t\t \t\t\t\t\t (np.sin(np.deg2rad(df['omega_sr'])) * np.cos(np.deg2rad(df['omega_sr']))))) + \\\n\t\t\t\t\t\t ((df['b'] * df['C'] / 2) * (((np.sin(np.deg2rad(df['omega_ss']))) ** 2) - ((np.sin(np.deg2rad(df['omega_sr']))) ** 2)))\n\t\t\t\t\t\t )\n\t\t )\n\t\t\t),\n\t\tnp.maximum(0.0,\n\t\t\t((1 / (2 * df['d'])) * \\\n\t\t\t (np.deg2rad(((df['b'] * df['A'] / 2) - (df['a_prime'] * df['B'])) * \\\n\t\t\t\t\t\t (df['omega_ss'] - (-df['sunset_hour_angle']))) + \\\n\t\t\t (((df['a_prime'] * df['A']) - (df['b'] * df['B'])) * \\\n\t\t\t (np.sin(np.deg2rad(df['omega_ss'])) - np.sin(np.deg2rad(-df['sunset_hour_angle'])))) - \\\n\t\t\t (df['a_prime'] * df['C'] * (np.cos(np.deg2rad(df['omega_ss'])) - np.cos(np.deg2rad(-df['sunset_hour_angle'])))) + \\\n\t\t\t ((df['b'] * df['A'] / 2) * ((np.sin(np.deg2rad(df['omega_ss'])) * \\\n\t\t\t\t\t\t \t\t \t\t np.cos(np.deg2rad(df['omega_ss']))) - \\\n\t\t\t\t\t\t \t\t \t\t (np.sin(np.deg2rad(-df['sunset_hour_angle'])) * np.cos(np.deg2rad(-df['sunset_hour_angle']))))) + \\\n\t\t\t ((df['b'] * df['C'] / 2) * (((np.sin(np.deg2rad(df['omega_ss']))) ** 2) - ((np.sin(np.deg2rad(-df['sunset_hour_angle']))) ** 2)))\n\t\t\t )\n\t\t\t ) + \\\n\t\t\t((1 / (2 * df['d'])) * (np.deg2rad(((df['b'] * df['A'] / 2) - (df['a_prime'] 
* df['B'])) * (df['sunset_hour_angle'] - df['omega_sr'])) + \\\n\t\t\t\t\t\t \t\t \t(((df['a_prime'] * df['A']) - (df['b'] * df['B'])) * (np.sin(np.deg2rad(df['sunset_hour_angle'])) - np.sin(np.deg2rad(df['omega_sr'])))) - \\\n\t\t\t\t\t\t \t\t \t(df['a_prime'] * df['C'] * (np.cos(np.deg2rad(df['sunset_hour_angle'])) - np.cos(np.deg2rad(df['omega_sr'])))) + \\\n\t\t\t\t\t\t \t\t \t((df['b'] * df['A'] / 2) * ((np.sin(np.deg2rad(df['sunset_hour_angle'])) * np.cos(np.deg2rad(df['sunset_hour_angle']))) - \\\n\t\t\t\t\t\t \t\t \t (np.sin(np.deg2rad(df['omega_sr'])) * np.cos(np.deg2rad(df['omega_sr']))))) + \\\n\t\t\t\t\t\t \t\t \t((df['b'] * df['C'] / 2) * (((np.sin(np.deg2rad(df['sunset_hour_angle']))) ** 2) - ((np.sin(np.deg2rad(df['omega_sr']))) ** 2)))\n\t\t\t\t\t\t \t\t \t)\n\t\t\t)\n\t\t\t)\n\t\t)\n\tdf['r_bar'] = df['D'] + \\\n\t\t\t\t (df['diffuse_fraction'] * \\\n\t\t\t\t (1 + np.cos(np.deg2rad(slope))) / 2) + \\\n\t\t\t\t (df['albedo'] * \\\n\t\t\t\t (1 - np.cos(np.deg2rad(slope))) / 2)\n\treturn df",
"def add_output_light_settings(self, image_id = 0,image_variation = 0, light_name=\"No name given\", light_location=[0,0,0], light_rotation=[0,0,0], light_intensity=0, light_pol_azi_angle= [0,0]):\n temp_array=[image_id, image_variation, light_name, light_location[0], light_location[1], light_location[2], light_rotation[0], light_rotation[1], light_rotation[2], light_intensity, float(light_pol_azi_angle[0]), float(light_pol_azi_angle[1])]\n self.light_information.append(temp_array)",
"def get_sea_surface_height_trend_image():\n r = request.get_json()\n\n image = ee.Image('users/fbaart/ssh-trend-map')\n\n image = image.visualize(**{'bands': ['time'], 'min': -0.03, 'max': 0.03,\n 'palette': [\"151d44\", \"156c72\", \"7eb390\",\n \"fdf5f4\", \"db8d77\", \"9c3060\",\n \"340d35\"]})\n\n m = image.getMapId()\n\n mapid = m.get('mapid')\n token = m.get('token')\n\n url = 'https://earthengine.googleapis.com/map/{0}/{{z}}/{{x}}/{{y}}?token={1}'.format(\n mapid, token)\n\n response = Response(json.dumps({'url': url}), status=200,\n mimetype='application/json')\n\n return response",
"def average_slope_intercept(self,image):\n left_fit = []\n right_fit = []\n if self.lines is None:\n return None\n for line in self.lines:\n for x1, y1, x2, y2 in line:\n # Polyfit computes the 1st order fitting of the lane points\n fit = np.polyfit((x1,x2), (y1,y2), 1)\n slope = fit[0]\n intercept = fit[1]\n if slope < 0: # y is reversed in image\n left_fit.append((slope, intercept))\n else:\n right_fit.append((slope, intercept))\n # add more weight to longer lines\n left_fit_average = np.average(left_fit, axis=0)\n right_fit_average = np.average(right_fit, axis=0)\n self.left_line = self.make_points(image,left_fit_average)\n self.right_line = self.make_points(image,right_fit_average)\n self.averaged_lines = [self.left_line, self.right_line]\n return self.averaged_lines"
] | [
"0.56199247",
"0.5305509",
"0.52611077",
"0.5192918",
"0.51714647",
"0.5160872",
"0.51578385",
"0.51510555",
"0.5125877",
"0.5098165",
"0.50949097",
"0.506125",
"0.50448906",
"0.50039434",
"0.49909866",
"0.49797404",
"0.49714604",
"0.49694428",
"0.48181123",
"0.48124117",
"0.47804737",
"0.47802678",
"0.4776116",
"0.4769344",
"0.47429517",
"0.47411552",
"0.4717208",
"0.4709317",
"0.47005615",
"0.46993262"
] | 0.5537641 | 1 |
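The aspect-to-eastness/northness step in the addTopography document above amounts to taking the sine and cosine of the aspect angle in radians. A minimal standalone sketch of that same math (a hypothetical helper using NumPy rather than Earth Engine, for reference only):

import numpy as np

def eastness_northness(aspect_deg):
    # Aspect is in degrees (0-359); eastness and northness are unitless in [-1, 1].
    aspect_rad = np.deg2rad(aspect_deg)            # same as multiplying by pi/180
    return np.sin(aspect_rad), np.cos(aspect_rad)  # (eastness, northness)

# A due-east facing slope (aspect = 90 deg) gives eastness 1.0 and northness ~0.0.
print(eastness_northness(90.0))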
Evaluate Delta R between two objects | def delta_r(o1, o2):
d_phi = o1.phi - o2.phi
if d_phi < -pi: d_phi += pix2
if d_phi > pi: d_phi -= pix2
return hypot(o1.eta - o2.eta, d_phi) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __rtruediv__(self, other):\n value = -1 / (self.val * self.val)\n total = {self.var: other * value}\n return AutoDiffReverse(other / self.val, None, total)",
"def epsilon_delta(self):",
"def __truediv__(self, other):\n try:\n value = -1 / (other.val * other.val)\n total = {self.var: 1 / other.val, other.var: value * self.val}\n return AutoDiffReverse(self.val / other.val, None, total)\n except AttributeError:\n total = {self.var: 1 / other}\n return AutoDiffReverse(self.val / other, None, total)",
"def delta(self) -> None:",
"def KroDelta(a,b):\n \n if (a==b):\n return 1\n else:\n return 0",
"def delta(self):\r\n return self.nd1()",
"def compute_egocentric_delta(p1, r1, p2, r2):\n x1, y1, z1 = p1\n x2, y2, z2 = p2\n theta_1 = compute_heading_from_quaternion(r1)\n theta_2 = compute_heading_from_quaternion(r2)\n\n D_rho = math.sqrt((x1 - x2) ** 2 + (z1 - z2) ** 2)\n D_phi = (\n math.atan2(x2 - x1, -z2 + z1) - theta_1\n ) # counter-clockwise rotation about Y from -Z to X\n D_theta = theta_2 - theta_1\n\n return (D_rho, D_phi, D_theta)",
"def deltaR2( e1, p1, e2=None, p2=None):\n\tif (e2 == None and p2 == None):\n\t\treturn deltaR2(e1.eta,e1.phi, p1.eta, p1.phi)\n\tde = e1 - e2\n\tdp = deltaPhi(p1, p2)\n\treturn de*de + dp*dp",
"def m_delta(r, m_x, **kwargs):\n return m_x",
"def difference_state(self, a: Vector, b: Vector, u: float, dt: float) -> Vector:\n return vectorops.mul(vectorops.sub(a,b),1.0/dt)",
"def test_sub():\n # Test for subtraction with Rnode object\n x = Rnode(0.11)\n y = Rnode(0.5)\n z = x - y\n z.grad_value = 1.0\n\n try:\n assert z.value == x.value - y.value\n # assert x.grad() == sum(weight * var.grad()\n # for weight, var in x.children)\n except AssertionError as e:\n print(e)\n # Test for subtraction with scalar Rnode object and float value\n x = Rnode(0.5)\n z = x - 0.1\n try:\n assert z.value == x.value - 0.1\n # assert x.grad() == sum(weight * var.grad()\n # for weight, var in x.children)\n except AssertionError as e:\n print(e)",
"def __rmul__(self, other):\n\t\t# Maintain state of self and create new trace variable new_var\n\t\tnew_var = Var(self.val, self.der)\n\t\treturn new_var.__mul__(other)",
"def vd(v2,v1):\n return v2-v1",
"def __rsub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: -1})",
"def test_delta_in_diff(self):\n xk = 1 * self.ureg.kelvin\n yk = 2 * self.ureg.kelvin\n yf = yk.to('degF')\n yc = yk.to('degC')\n self.assertEqual(yk - xk, 1 * self.ureg.kelvin)\n self.assertEqual(yf - xk, 1 * self.ureg.kelvin)\n self.assertEqual(yc - xk, 1 * self.ureg.kelvin)",
"def diff(self, x1, x2):\n return x2 - x1",
"def diff(self, other):\n return mldivide(self, other)",
"def diff(self, other):\n return mldivide(self, other)",
"def calcDelta(self, energy1, energy2):\n \n return math.fabs(energy2-energy1)",
"def __sub__(self,other):\n return np.linalg.norm(self.ngdv-other.ngdv)",
"def diff(self, wrt, otherframe):\n\n wrt = sympify(wrt)\n self._check_frame(otherframe)\n outvec = 0\n for i,v in enumerate(self.args):\n if v[1] == otherframe:\n outvec += Vector([(v[0].diff(wrt), otherframe)])\n else:\n if otherframe.dcm(v[1]).diff(wrt) == zeros(3, 3):\n d = v[0].diff(wrt)\n outvec += Vector([(d, v[1])])\n else:\n d = (Vector([v]).express(otherframe)).args[0][0].diff(wrt)\n outvec += Vector([(d, otherframe)]).express(v[1])\n return outvec",
"def __rtruediv__(self, other):\r\n return other * self.reciprocal()",
"def test_truediv():\n # Test for division with scalar Rnode object and float value\n x = Rnode(0.11)\n z = x / 4\n z.grad_value = 1.0\n\n try:\n assert z.value == x.value / 4\n except AssertionError as e:\n print(e)\n\n# Test for division with scalar Rnode objects\n x = Rnode(0.11)\n y = Rnode(0.5)\n z = x / y\n z.grad_value = 1.0\n\n try:\n assert z.value == x.value / y.value\n except AssertionError as e:\n print(e)",
"def test_delta_val2(self):\n d = Delta(\"+2.5-1.5\")\n self.assertEqual(d.cmp(0, 1), False)\n self.assertEqual(d.cmp(1, 3), False)\n self.assertEqual(d.cmp(3, 1), True)",
"def dif(q_1: Q, q_2: Q) -> Q:\n\n q_1.check_representations(q_2)\n\n end_dif_q_type = f\"{q_1.q_type}-{q_2.q_type}\"\n\n t_2, x_2, y_2, z_2 = q_2.t, q_2.x, q_2.y, q_2.z\n t_1, x_1, y_1, z_1 = q_1.t, q_1.x, q_1.y, q_1.z\n\n dif_q = Q(q_type=end_dif_q_type, representation=q_1.representation)\n dif_q.t = t_1 - t_2\n dif_q.x = x_1 - x_2\n dif_q.y = y_1 - y_2\n dif_q.z = z_1 - z_2\n\n return dif_q",
"def rel_diff(d1, d2):\n\n if d1 is None or d2 is None:\n return np.NaN\n if pd.isnull(d1) or pd.isnull(d2):\n return np.NaN\n try:\n d1 = float(d1)\n d2 = float(d2)\n except ValueError:\n return np.NaN\n if d1 == 0.0 and d2 == 0.0:\n return 0\n else:\n # Compute the relative difference between two numbers\n # ref: https://en.wikipedia.org/wiki/Relative_change_and_difference\n x = (2*abs(d1 - d2)) / (d1 + d2)\n return x",
"def test_rsub():\n # Test for reverse subtraction with scalar Rnode object and float value\n x = Rnode(0.5)\n z = 0.1 - x\n try:\n assert z.value == x.value - 0.1\n except AssertionError as e:\n print(e)\n raise AssertionError",
"def diff_v_x1(x1, x2, t=0.):\n return (omega) ** 2 * x1",
"def getR(self):\n # Reynolds number uses the absolute value of the velocity\n V = abs(self.V)\n return (V * self.D) / self.v # formula for Reynolds number",
"def delta(self) -> float:\n return self._delta"
] | [
"0.65177035",
"0.6383682",
"0.6307158",
"0.62640095",
"0.6199991",
"0.6117177",
"0.6008128",
"0.5971522",
"0.59231496",
"0.58871377",
"0.5877812",
"0.5806873",
"0.57781816",
"0.5731674",
"0.57224244",
"0.5720133",
"0.5700213",
"0.5700213",
"0.5693242",
"0.5667665",
"0.56521773",
"0.56431985",
"0.5640473",
"0.5628163",
"0.5611641",
"0.56070095",
"0.5604314",
"0.56008285",
"0.559463",
"0.5592555"
] | 0.6505122 | 1 |
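A minimal self-contained sketch of the delta_r record above, assuming pix2 stands for 2*pi and that pi and hypot come from the math module (the record omits its imports, so both are assumptions); o1 and o2 are any objects with eta and phi attributes, e.g. jets or leptons:

from math import pi, hypot

def delta_r(o1, o2):
    # Delta R = sqrt(d_eta**2 + d_phi**2), with d_phi wrapped into [-pi, pi]
    d_phi = o1.phi - o2.phi
    if d_phi < -pi:
        d_phi += 2 * pi  # pix2 in the record is read as 2 * pi
    if d_phi > pi:
        d_phi -= 2 * pi
    return hypot(o1.eta - o2.eta, d_phi)

Hypothetical usage with stand-in objects:

from types import SimpleNamespace
j1 = SimpleNamespace(eta=0.5, phi=3.0)
j2 = SimpleNamespace(eta=-0.2, phi=-3.0)
print(delta_r(j1, j2))  # d_phi wraps from 6.0 to about -0.28, so Delta R is about 0.76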
Processes a gpx route file to assign a scariness score to each waypoint | def get_route_with_scariness_from_file(route_file_path):
route = read_gpx.read_gpx(route_file_path)
route = read_gpx.pad_gpx_dataframe(route)
route_bounds = read_gpx.get_route_bounds(route)
if not csp.check_route_bounds_fit_location_data(route_bounds):
abort(400)
altitudes_df = csp.get_complete_route_altitude_df(route_bounds)
route = csp.calculate_route_scariness(route, altitudes_df)
administer_route_database.insert_route_into_db_table(
administer_route_database.prepare_route_for_insertion(route, route_file_path),
administer_route_database.get_route_db_connection(), 'waypoints'
)
return route | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gtfs_routes(gtfs, output_f):\n\n\t# Load up the stop times so we can find which are the best routes.\n\t#TODO\n\tstop_times_file = [x for x in gtfs.namelist() if 'stop_times' in x][0]\n\n\tstoptimes_c = csv.reader((gtfs.open(stop_times_file, 'r')))\n\theader = stoptimes_c.next()\n\ttrip_id_col = header.index('trip_id')\n\tarrtime_col = header.index('arrival_time')\n\tdeptime_col = header.index('departure_time')\n\tstopseq_col = header.index('stop_sequence')\n\ttrip_times = {}\n\tfor row in stoptimes_c:\n\t\tif row[trip_id_col] not in trip_times:\n\t\t\t# earliest seq, latest seq, earliest seq dep time, latest seq dep time\n\t\t\ttrip_times[row[trip_id_col]] = [None, None, None, None]\n\n\t\tarrtime = time_as_timedelta(row[arrtime_col])\n\t\tdeptime = time_as_timedelta(row[deptime_col])\n\t\tif arrtime is None or deptime is None:\n\t\t\t# bad data, skip!\n\t\t\tcontinue\n\t\tseq = int(row[stopseq_col])\n\n\t\t# Find if this is an earlier item in the sequence\n\t\tif trip_times[row[trip_id_col]][0] is None or trip_times[row[trip_id_col]][0] > seq:\n\t\t\ttrip_times[row[trip_id_col]][0] = seq\n\t\t\ttrip_times[row[trip_id_col]][2] = deptime\n\n\t\t# Find if this is an later item in the sequence\n\t\tif trip_times[row[trip_id_col]][1] is None or trip_times[row[trip_id_col]][1] < seq:\n\t\t\ttrip_times[row[trip_id_col]][1] = seq\n\t\t\ttrip_times[row[trip_id_col]][3] = arrtime\n\n\t# Load the shapes into a map that we can lookup.\n\t# We should do all the geometry processing here so that we only have to do\n\t# this once-off.\n\t#TODO\n\tshapes_file = [x for x in gtfs.namelist() if 'shapes' in x][0]\n\tshapes_c = csv.reader(swallow_windows_unicode(gtfs.open(shapes_file, 'r')))\n\n\theader = shapes_c.next()\n\tshape_id_col = header.index('shape_id')\n\tshape_lat_col = header.index('shape_pt_lat')\n\tshape_lng_col = header.index('shape_pt_lon')\n\tshape_seq_col = header.index('shape_pt_sequence')\n\tshape_dist_col = header.index('shape_dist_traveled') if 'shape_dist_traveled' in header else None\n\n\tshapes = {}\n\tshape_lengths = {}\n\tfor row in shapes_c:\n\t\tif row[shape_id_col] not in shapes:\n\t\t\tshapes[row[shape_id_col]] = {}\n\n\t\tshapes[row[shape_id_col]][int(row[shape_seq_col])] = (Decimal(row[shape_lng_col]), Decimal(row[shape_lat_col]))\n\n\t\t# Calculate length according to GTFS\n\t\t# This could also be calculated by the geometry, but we trust GTFS, right...\n\t\tif shape_dist_col is not None and row[shape_dist_col]:\n\t\t\tlength = Decimal(row[shape_dist_col])\n\t\t\tif row[shape_id_col] not in shape_lengths or shape_lengths[row[shape_id_col]] < length:\n\t\t\t\tshape_lengths[row[shape_id_col]] = length\n\n\t# translate the shapes into a LineString for use by the GeoJSON module\n\tfor shape_id in shapes.iterkeys():\n\t\tshape_keys = shapes[shape_id].keys()\n\t\tshape_keys.sort()\n\t\tshape = []\n\t\tfor ordinal in shape_keys:\n\t\t\tshape.append(shapes[shape_id][ordinal])\n\n\t\tshapes[shape_id] = shape\n\n\t# Make a matching dict between routes and shapes\n\ttrips = {}\n\ttrips_ref = {}\n\troute_time = {}\n\n\t#TODO\n\ttrips_file = [x for x in gtfs.namelist() if 'trips' in x][0]\n\n\ttrips_c = csv.reader(swallow_windows_unicode(gtfs.open(trips_file, 'r')))\n\theader = trips_c.next()\n\troute_id_col = header.index('route_id')\n\tshape_id_col = header.index('shape_id')\n\ttrip_id_col = header.index('trip_id')\n\tfor row in trips_c:\n\t\t# reference count the shapes\n\t\tif row[route_id_col] not in trips_ref:\n\t\t\t# route is unknown, create 
dict\n\t\t\ttrips_ref[row[route_id_col]] = {}\n\t\t\troute_time[row[route_id_col]] = trip_times[row[trip_id_col]]\n\n\t\tif row[shape_id_col] not in trips_ref[row[route_id_col]]:\n\t\t\t# shape is unknown, create counter\n\t\t\ttrips_ref[row[route_id_col]][row[shape_id_col]] = 0\n\n\t\t# increment counter\n\t\ttrips_ref[row[route_id_col]][row[shape_id_col]] += 1\n\n\t# now we're done, iterate through the reference-counters and find the best\n\t# shape\n\tfor route_id, candidate_shapes in trips_ref.iteritems():\n\t\tpopular_shape, popular_shape_refs = None, 0\n\t\tfor shape_id, refs in candidate_shapes.iteritems():\n\t\t\tif refs > popular_shape_refs:\n\t\t\t\tpopular_shape, popular_shape_refs = shape_id, refs\n\n\t\t# now we should have the route's shape\n\t\tassert popular_shape is not None, 'Couldn\\'t find a shape for route %r' % route_id\n\t\ttrips[route_id] = popular_shape\n\n\t# Cleanup unused variables\n\tdel trip_times\n\n\t# lets setup our output file\n\toutput_layer = geojson.FeatureCollection([])\n\t# assume WGS84 CRS\n\toutput_layer.crs = geojson.crs.Named('urn:ogc:def:crs:OGC:1.3:CRS84')\n\n\t# now we have all the shapes available, translate the routes\n\t#TODO\n\troutes_file = [x for x in gtfs.namelist() if 'routes' in x][0]\n\n\troutes_c = csv.reader(swallow_windows_unicode(gtfs.open(routes_file, 'r')))\n\theader = routes_c.next()\n\troute_id_col = header.index('route_id')\n\n\tfor row in routes_c:\n\t\t# make dict of other properties\n\t\tprops = dict()\n\t\tfor i, h in enumerate(header):\n\t\t\tif row[i] != '':\n\t\t\t\tprops[h] = row[i]\n\n\t\tif row[route_id_col] not in trips:\n\t\t\t# Route has no trips!\n\t\t\tprint \"Warning: route has no trips, skipping: %r\" % (row,)\n\t\t\tcontinue\n\n\t\tprops['shape_id'] = trips[row[route_id_col]]\n\t\tprops['shape_refs'] = trips_ref[row[route_id_col]][props['shape_id']]\n\t\tif shape_dist_col is not None and len(shape_lengths) > 0:\n\t\t\tprops['shape_length'] = shape_lengths[props['shape_id']]\n\t\tprops['duration_sec'] = (route_time[row[route_id_col]][3] - route_time[row[route_id_col]][2]).total_seconds()\n\n\t\toutput_layer.features.append(geojson.Feature(\n\t\t\tgeometry=geojson.LineString(\n\t\t\t\tcoordinates=shapes[trips[row[route_id_col]]]\n\t\t\t),\n\t\t\tproperties=props,\n\t\t\tid=row[route_id_col]\n\t\t))\n\n\t# now flush the GeoJSON layer to a file.\n\tgeojson.dump(output_layer, output_f, cls=DecimalEncoder)",
"def execute_waypoint_sequence(detail_of_trip):\n\n # rets (route_line, line_points)\n sliced_route_and_line_points = chunk_user_route(detail_of_trip)\n\n sliced_route = sliced_route_and_line_points[0]\n line_points = sliced_route_and_line_points[1]\n\n # Interpolate/Break into 1/10 segments\n segmented_points = interpolate_points(sliced_route, line_points)\n waypoints = find_crime_areas(segmented_points)\n\n # print \"segmented_points\", json.dumps(segmented_points, indent=2)\n print \"\\n\\n\\n\\n\" # compensating for the giant GET request\n return waypoints",
"def routes_gen(num) -> Generator[Route, None, None]:\n with open(f'data/route-costs-{num}.txt', 'rb') as routes:\n for route in routes:\n prefix, cost = route[:-1].split(b',')\n yield (prefix, float(cost))",
"def one_time_route_cost_check(routes_file, numbers_file):\n routes = open(routes_file, \"r\")\n read_file = routes.read().split()\n read_file = list(map(lambda x: x.split(\",\"), read_file))\n\n numbers = open(numbers_file, \"r\")\n numbers_read_file = numbers.read()\n\n return read_file",
"def _parse_routepart(self, data):\n points = [self._parse_trip_point(point) for point in data.findall('./itdPoint')]\n\n path = []\n for coords in data.findall('./itdPathCoordinates/itdCoordinateBaseElemList/itdCoordinateBaseElem'):\n path.append(Coordinates(int(coords.find('y').text) / 1000000, int(coords.find('x').text) / 1000000))\n\n motdata = self._parse_mot(data.find('./itdMeansOfTransport'))\n\n if motdata is None or data.attrib['type'] == 'IT':\n waytype = {\n '98': 'walk',\n '99': 'walk',\n '100': 'walk',\n '101': 'bike',\n '104': 'car',\n '105': 'taxi'\n }[data.find('./itdMeansOfTransport').attrib['type']]\n # 98 = gesichter anschluss\n\n way = Way(WayType(waytype), points[0].stop, points[1].stop)\n way.distance = data.attrib.get('distance')\n if way.distance is not None:\n way.distance = float(way.distance)\n duration = data.attrib.get('timeMinute', None)\n if duration is not None:\n way.duration = timedelta(minutes=int(duration))\n if path:\n way.path = path\n return way\n\n else:\n origin, destination, line, ridenum, ridedir, canceled = motdata\n\n if data.find('./genAttrList/genAttrElem[value=\"HIGHSPEEDTRAIN\"]') is not None:\n line.linetype = LineType('train.longdistance.highspeed')\n elif data.find('./genAttrList/genAttrElem[value=\"LONG_DISTANCE_TRAIN\"]') is not None:\n line.linetype = LineType('train.longdistance')\n\n train_line = line.linetype in self.train_station_lines\n\n # Build Ride Objekt with known stops\n ride = Ride(line, ridenum)\n ride.canceled = canceled\n ride.direction = ridedir\n for infotext in data.findall('./infoTextList/infoTextListElem'):\n ride.infotexts.append(infotext)\n\n first = None\n last = None\n waypoints = False\n if data.find('./itdStopSeq'):\n new_points = [self._parse_trip_point(point, train_line=train_line) for point in data.findall('./itdStopSeq/itdPoint')]\n if not new_points or new_points[0].stop != new_points[0].stop:\n new_points.insert(0, points[0])\n if new_points[-1].stop != points[1].stop:\n new_points.append(points[1])\n points = new_points\n waypoints = True\n\n for p in points:\n if not waypoints and first is None:\n ride.append(None)\n pointer = ride.append(p)\n if first is None:\n first = pointer\n last = pointer\n\n if origin is not None:\n if origin != ride[0].stop:\n ride.prepend(None)\n ride.prepend(TimeAndPlace(Platform(origin)))\n else:\n ride.prepend(None)\n\n if destination is not None:\n if destination != ride[-1].stop:\n ride.append(None)\n ride.append(TimeAndPlace(Platform(destination)))\n else:\n ride.append(None)\n\n segment = ride[first:last]\n paths = self._split_path(path, [p.platform.coords for p in segment])[:-1]\n for i, point in segment.items():\n if not paths:\n break\n segment.ride._paths[i] = paths.pop(0)\n return segment",
"def parse_routes_file(route_filename):\n\n list_route_descriptions = []\n tree = ET.parse(route_filename)\n for route in tree.iter(\"route\"):\n route_town = route.attrib['map']\n route_id = route.attrib['id']\n waypoint_list = [] # the list of waypoints that can be found on this route\n for waypoint in route.iter('waypoint'):\n waypoint_list.append(waypoint) # Waypoints is basically a list of XML nodes\n\n list_route_descriptions.append({\n 'id': route_id,\n 'town_name': route_town,\n 'trajectory': waypoint_list\n })\n\n return list_route_descriptions",
"def roadSegments(locations, API_key=\"Avah46_M-gfFeQ3P1w09Qq1ElAV9ZEHFDm9b8JRCRa8qPP5uVn21hDqAPVJgV4i_\"): \n \n # Base URL\n uri = 'http://dev.virtualearth.net/' # Resource URL \n path = 'REST/v1/Routes?'\n \n \n # URL Parameters\n params = { 'wayPoint.0' : locations[0]+',Singapore',\n 'wayPoint.1' : locations[1]+',Singapore',\n 'routeAttributes':'routePath',\n 'key' : API_Key} # by default 'optimize' : 'time'} # this is by default\n \n url = uri+path\n\n results = requests.get(\n url,\n params = params\n ).json()# ['resourceSets']\n\n # Retrieving values\n statusCode = results['statusCode']\n if statusCode == 200:\n # print(statusCode)\n\n # TODO review the exceptions and modify these basic exception handlings\n try:\n travelDistance = results['resourceSets'][0]['resources'][0]['travelDistance']\n except:\n travelDistance = 0\n try:\n travelDuration = results['resourceSets'][0]['resources'][0]['travelDuration']\n except:\n travelDuration = 0\n try:\n travelDurationTraffic = results['resourceSets'][0]['resources'][0]['travelDurationTraffic']\n except:\n travelDurationTraffic = 0\n\n try:\n numberSegments = len(results['resourceSets'][0]['resources'][0]['routeLegs'][0] \\\n ['itineraryItems'])\n except:\n numberSegments = 0\n try:\n itineraryItems = results['resourceSets'][0]['resources'][0]['routeLegs'][0] \\\n ['itineraryItems']\n except:\n itineraryItems = 'No items'\n\n pathCoord = results['resourceSets'][0]['resources'][0]['routePath']['line']['coordinates']\n\n roadName = []\n travelDistances = []\n travelDurations = []\n maneuverType = []\n\n for seg in itineraryItems:\n for i in range(len(seg['details'])):\n # print(i)\n try:\n roadName.append(seg['details'][i]['names'])\n except:\n roadName.append(0)\n try:\n travelDistances.append(seg['travelDistance'])\n except:\n travelDistances.append(0)\n\n try:\n travelDurations.append(seg['travelDuration'])\n except:\n travelDurations.append(0)\n try:\n maneuverType.append(seg['details'][i]['maneuverType'])\n except:\n maneuverType.append(0)\n\n\n return statusCode,travelDistance,travelDuration,travelDurationTraffic,numberSegments,roadName, \\\n travelDistances, travelDurations, maneuverType, pathCoord\n\n else:\n print(\"Unsuccessful route calculation.\")",
"def Read_MapGen(filename,stats = False):\n from numpy import array\n with open(filename,'rt') as file_:\n data = [s.strip() for s in file_.readlines()]\n\n Shorelines = []\n segment = []\n for line in data:\n if line == \"# -b\": #New segment beginning\n if segment: Shorelines.append(array(segment))\n segment = []\n else:\n segment.append(map(float,string.split(line)))\n if segment: Shorelines.append(array(segment))\n\n if stats:\n NumSegments = len(Shorelines)\n NumPoints = False\n for segment in Shorelines:\n NumPoints = NumPoints + len(segment)\n AvgPoints = NumPoints / NumSegments\n print(\"Number of Segments: \", NumSegments)\n print(\"Average Number of Points per segment: \", AvgPoints)\n\n return Shorelines",
"def interpolate_points(route_line, line_points):\n\n segment_size = 0.1 # value to break the entire route into 1/10 segments\n distance_along_line = 0.1 # start distance along line at the segment size\n\n # break up the line into 1/10 segments, iterate. We are ignoring the 0th\n # element as that's the start position and that's already stored\n segmented_points = [] # creating an empty list to store these points\n\n # hold all the waypoints and other data\n segmented_points.append({'data': {'waypoints': []}})\n\n # for our start points that the user defines, geocoded\n segmented_points[0]['data']['start'] = {}\n segmented_points[0]['data']['end'] = {}\n\n for i in range(1, 10): # excluding the start and the end points\n # Note: the output of interpolate is a Point data type\n # Return a point at the specified distance along a linear geometric object.\n point = route_line.interpolate(distance_along_line, normalized=True)\n print \"Point \", i, point\n\n # call the function that checks to see what geohash the line falls under\n # and if it is a high crime area\n # geohash_data is a dict: crime_index, total_crimes, lng, lat, geohash\n geohash_data = get_position_geohash([(point.x, point.y)])[0] # dict\n\n # set the is_high_crime variable value to false, for testing\n geohash_data['is_high_crime'] = False\n\n # extract the datapoints from the point datatype\n geohash_data['lat'] = point.x\n geohash_data['lng'] = point.y\n\n segmented_points.append(geohash_data) # append data on location\n distance_along_line += segment_size\n\n # also add the point A, point B latitude and longitude that the user gives\n # to the data that will be sent back to JS\n segmented_points[0]['data']['start'] = {\n 'lat': line_points[0][0],\n 'lng': line_points[0][1]\n }\n\n segmented_points[0]['data']['end'] = {\n 'lat': line_points[-1][0],\n 'lng': line_points[-1][1]\n }\n\n return segmented_points",
"def process_route(self, route_data: str):\r\n route_dict = {}\r\n\r\n route_data = re.sub(r';+[^;]+;+\\n', '', route_data)\r\n route_data = route_data.replace('\\n', '')\r\n if 'LINE NAME' in route_data:\r\n route_data = route_data.split('LINE NAME=\\\"')\r\n route_data = [rd for rd in route_data if len(rd) > 0 if 'N=' in rd]\r\n for rd in route_data:\r\n line_name = rd[:rd.index('\\\"')]\r\n node_seq = rd[rd.index('N='):] # start from the first N\r\n node_seq = node_seq.replace('N=', '')\r\n node_seq = re.sub(r'TIME\\s*=\\s*[0-9\\.]+\\s*,', '', node_seq)\r\n if len(node_seq) > 0:\r\n route_dict[line_name] = node_seq\r\n else:\r\n route_dict['current_line'] = route_data\r\n\r\n return route_dict",
"def node_route_data(th_object, topology_info, file_name):\n route_information(th_object, topology_info, file_name, \"20\", \"91\", \"extracted_data/Route_data/\")\n route_information(th_object, topology_info, file_name, \"22\", \"91\", \"extracted_data/Route_data/\")\n route_information(th_object, topology_info, file_name, \"22\", \"71\", \"extracted_data/Route_data/\")",
"def read_waypoints():\n\tfilename = \"waypoints.txt\"\n\tfile = open(filename, \"r\")\n\twp_list = []\n\n\tfor line in file:\n\t\t# Get the individual elements, splitting by whitespace\n\t\tdata_list = line.split()\n\t\tcoordinate = {'x': data_list[0], 'y': data_list[1], 'z': data_list[2]}\n\t\twaypoint = {'radius': data_list[3], 'point': coordinate}\n\n\t\twp_list.append (waypoint)\n\n\treturn wp_list",
"def process(path):\n # get parameter value:\n with open('config.cym', 'r') as f:\n line = f.readline()\n #print(line)\n pam = float(line[1:])\n f.close()\n # get position of aster:\n with open('aster.txt', 'r') as f:\n for line in f:\n if len(line)>3 and not line[0]=='%':\n #print(line)\n val = line.split()\n x = float(val[2])\n y = float(val[3])\n #z = float(val[4])\n #pos = math.sqrt(x*x+y*y+z*z)\n pos = math.sqrt(x*x+y*y)\n\n f.close()\n return (pam, pos)",
"def route_validity_checker(): #below route list was rerturned from bus_routes function above, copy and pasted to eliminate need to re-run\n route_list=['15', '46A', '14', '41B', '39A', '65', '40D', '11', '31', '27', '67', '79', '42', '66A', '33B', '140', '44', '83A', '27B', '38', '16C', '747', '41C', '39', '25', '239', '43', '70', '13', '150', '145', '77A', '184', '84', '61', '83', '40', '66', '15A', '123', '17A', '16', '14C', '9', '4', '37', '32', '33', '49', '56A', '151', '25A', '45A', '54A', '47', '18', '7', '17', '102', '120', '65B', '41', '122', '29A', '76', '68', '59', '25B', '69', '27A', '66B', '38B', '7D', '75', '15B', '84A', '63', '84X', '33X', '68A', '1', '76A', '7B', '270', '236', '130', '238', '220', '44B', '40B', '26', '32B', '8', '41A', '53', '67X', '104', '32A', '79A', '114', '185', '66X', '31B', '32X', '51X', '51D', '41X', '142', '111', '69X', '27X', '116', '46E', '161', '118', '25X', '38A', '33A', 'PP07', '53B', '31A', 'OL84']\n count_dict={}\n for route in route_list: #dictionary with key for every route in the list\n count_dict[route]=0 #used to count number of occurrences in files\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"): #for every file\n print(file)\n reader=csv.reader(open(file))\n for line in reader:\n route=extract_bus_route(line[3])\n if route!=\"\":\n count_dict[extract_bus_route(line[3])]+=1 #incremenent the counter of the route with the associated journey id code\n return count_dict #result is that 3 routes are likely due to strange circumstances or errors in data",
"def _post_process_route_fcs(self):\r\n # Create the final output feature class\r\n desc = arcpy.Describe(self.route_fcs[0])\r\n helpers.run_gp_tool(\r\n LOGGER,\r\n arcpy.management.CreateFeatureclass, [\r\n os.path.dirname(self.out_routes),\r\n os.path.basename(self.out_routes),\r\n \"POLYLINE\",\r\n self.route_fcs[0], # template feature class to transfer full schema\r\n \"SAME_AS_TEMPLATE\",\r\n \"SAME_AS_TEMPLATE\",\r\n desc.spatialReference\r\n ]\r\n )\r\n\r\n # Insert the rows from all the individual output feature classes into the final output\r\n fields = [\"SHAPE@\"] + [f.name for f in desc.fields]\r\n with arcpy.da.InsertCursor(self.out_routes, fields) as cur: # pylint: disable=no-member\r\n for fc in self.route_fcs:\r\n for row in arcpy.da.SearchCursor(fc, fields): # pylint: disable=no-member\r\n cur.insertRow(row)",
"def post_process():\n for route in os.listdir(GFR_ROUTES_LOCATION):\n if os.path.isfile(MISSING_LOCATION + route):\n # If the route is missing, output the reference data with correct OSM tags.\n\n copyfile(MISSING_LOCATION + route, OUTPUT_LOCATION + route)\n add_property(OUTPUT_LOCATION + route, 'error_type', 'missing')\n elif os.path.isfile(DIFF_MISSING_LOCATION + route) and os.path.isfile(DIFF_WRONG_LOCATION + route) \\\n and merge_differences(route, DIFF_MISSING_LOCATION + route, DIFF_WRONG_LOCATION + route,\n OUTPUT_LOCATION + route):\n # If there's a geometrical difference, combine the two difference files and output it.\n\n add_property(OUTPUT_LOCATION + route, 'error_type', 'difference')\n elif os.path.isfile(TAGS_LOCATION + route):\n # When there's no geometrical difference, output the OSM data possibly containing missing tags.\n\n copyfile(TAGS_LOCATION + route, OUTPUT_LOCATION + route)\n else:\n raise Exception(\"No output file could be generated for route: \" + route)\n\n copy_to_site()\n\n # Export a last updated timestamp\n with open('last_updated', 'w') as fp:\n fp.write(str(int(time.time() * 1000)))",
"def Read_MapGen(self, filename, stats = 0,AllLines=0):\n with open(filename,'rt') as file_:\n data = [s.strip() for s in file_]\n\n Shorelines = []\n segment = []\n for line in data:\n if line:\n if line == \"# -b\": #New segment beginning\n if segment: Shorelines.append(N.array(segment))\n segment = []\n else:\n segment.append([float(e) for e in line.split()])\n if segment: Shorelines.append(N.array(segment))\n\n if stats:\n NumSegments = len(Shorelines)\n NumPoints = 0\n for segment in Shorelines:\n NumPoints = NumPoints + len(segment)\n AvgPoints = NumPoints / NumSegments\n print(\"Number of Segments: \", NumSegments)\n print(\"Average Number of Points per segment: \", AvgPoints)\n if AllLines:\n Lines = []\n for segment in Shorelines:\n Lines.append(segment[0])\n for point in segment[1:-1]:\n Lines.append(point)\n Lines.append(point)\n Lines.append(segment[-1])\n return Lines\n else:\n return Shorelines",
"def chunk_user_route(detail_of_trip):\n\n # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n # since I can't get javascript to load, here's a hacky way of loading json\n # that details the route based on the user's point A and point B\n # detail_of_trip = api.directions(\n # (40.760350, -73.976209),\n # (40.754009, -73.981097),\n # mode=\"walking\"\n # )[0]\n # $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n\n # now that I have javascript sending over the json, load json that details\n # the route based on the user's point A and point B\n\n # -------------- This section is for interpolation/splitting using shapely\n first = True # to see if this is the start position for the entire route\n line_points = [] # stores all the points to the route based on dict passed\n\n for leg in detail_of_trip['legs']:\n for step in leg['steps']:\n # Create a list of two element lists that represent points along the\n # route. via google. line_points = [ [lat1, lng1], [lat2, lng2],...]\n # Only add the starting point the first time. Every other iteration\n # we will just tack on the end points to our line.\n if first:\n line_points.append([step['start_location']['lat'], step['start_location']['lng']])\n first = False\n line_points.append([step['end_location']['lat'], step['end_location']['lng']])\n\n # Now load those points into a geometry, here shapely's LineString type.\n route_line = LineString(line_points)\n return (route_line, line_points)",
"def rout(pour_point, uh_box, fdr_data, fdr_atts, rout_dict):\n log.info(\"Starting routing program for point: %s\", pour_point)\n # ---------------------------------------------------------------- #\n # Unpack a few structures\n uh_t = uh_box['time']\n uh_box = uh_box['func']\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Find Basin Dims and ID\n basin_id = fdr_data[rout_dict['BASIN_ID_VAR']][pour_point.routy, pour_point.routx]\n\n log.info('Input Latitude: %f' % pour_point.lat)\n log.info('Input Longitude: %f' % pour_point.lon)\n log.info('Global Basid ID: %i' % basin_id)\n\n y_inds, x_inds = np.nonzero(fdr_data[rout_dict['BASIN_ID_VAR']] == basin_id)\n y = np.arange(len(fdr_data[rout_dict['LATITUDE_VAR']]))\n x = np.arange(len(fdr_data[rout_dict['LONGITUDE_VAR']]))\n\n x_min = min(x[x_inds])\n x_max = max(x[x_inds])+1\n y_min = min(y[y_inds])\n y_max = max(y[y_inds])+1\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Create the Basin Dictionary, a subset of the fdr_data\n basin = {}\n basin['lat'] = fdr_data[rout_dict['LATITUDE_VAR']][y_min:y_max]\n basin['lon'] = fdr_data[rout_dict['LONGITUDE_VAR']][x_min:x_max]\n basin['basin_id'] = fdr_data[rout_dict['BASIN_ID_VAR']][y_min:y_max, x_min:x_max]\n basin['flow_direction'] = fdr_data[rout_dict['FLOW_DIRECTION_VAR']][y_min:y_max, x_min:x_max]\n basin['flow_distance'] = fdr_data[rout_dict['FLOW_DISTANCE_VAR']][y_min:y_max, x_min:x_max]\n basin['velocity'] = fdr_data['velocity'][y_min:y_max, x_min:x_max]\n basin['diffusion'] = fdr_data['diffusion'][y_min:y_max, x_min:x_max]\n\n log.debug('Grid cells in subset: %i' % basin['velocity'].size)\n\n pour_point.basiny, pour_point.basinx = latlon2yx(plats=pour_point.lat,\n plons=pour_point.lon,\n glats=basin['lat'],\n glons=basin['lon'])\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Create the rout_data Dictionary\n rout_data = {'lat': basin['lat'], 'lon': basin['lon']}\n\n # ---------------------------------------------------------------- #\n # Determine low direction syntax\n if 'VIC' in fdr_atts[rout_dict['FLOW_DIRECTION_VAR']]:\n # VIC Directions: http://www.hydro.washington.edu/Lettenmaier/Models/VIC/Documentation/Routing/FlowDirection.shtml\n dy = {1: -1, 2: -1, 3: 0, 4: 1, 5: 1, 6: 1, 7: 0, 8: -1}\n dx = {1: 0, 2: 1, 3: 1, 4: 1, 5: 0, 6: -1, 7: -1, 8: - 1}\n log.debug('Using VIC flow directions (1-8).')\n else:\n # ARCMAP Directions: http://webhelp.esri.com/arcgisdesktop/9.2/index.cfm?TopicName=flow_direction\n dy = {1: 0, 2: 1, 4: 1, 8: 1, 16: 0, 32: -1, 64: -1, 128: -1}\n dx = {1: 1, 2: 1, 4: 0, 8: -1, 16: -1, 32: -1, 64: 0, 128: 1}\n log.debug('Using ARCMAP flow directions (1-128).')\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Find timestep (timestep is determined from uh_BOX input file)\n input_interval = find_ts(uh_t)\n rout_data['unit_hydrograph_dt'] = input_interval\n t_cell = int(rout_dict['CELL_FLOWDAYS']*SECSPERDAY/input_interval)\n t_uh = int(rout_dict['BASIN_FLOWDAYS']*SECSPERDAY/input_interval)\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Read direction grid and find to_col (to_x) 
and to_row (to_y)\n to_y, to_x = read_direction(basin['flow_direction'], dy, dx)\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Find all grid cells upstream of pour point\n catchment, rout_data['fraction'] = search_catchment(to_y, to_x, pour_point,\n basin['basin_id'],\n basin_id)\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Make uh for each grid cell upstream of basin pour point\n # (linear routing model - Saint-Venant equation)\n uh = make_uh(input_interval, t_cell, catchment['y_inds'],\n catchment['x_inds'], basin['velocity'], basin['diffusion'],\n basin['flow_distance'])\n\n # ---------------------------------------------------------------- #\n # Make uh_river by incrementally moving upstream comining uh functions\n uh_river = make_grid_uh_river(t_uh, t_cell, uh, to_y, to_x, pour_point,\n catchment['y_inds'], catchment['x_inds'],\n catchment['count_ds'])\n\n # ---------------------------------------------------------------- #\n # Make uh_s for each grid cell upstream of basin pour point\n # (combine IRFs for all grid cells in flow path)\n uh_s = make_grid_uh(t_uh, t_cell, uh_river, uh_box, to_y, to_x,\n catchment['y_inds'], catchment['x_inds'],\n catchment['count_ds'])\n # ---------------------------------------------------------------- #\n\n # ---------------------------------------------------------------- #\n # Agregate to output timestep\n rout_data['unit_hydrograph'], rout_data['timesteps'] = adjust_uh_timestep(uh_s, t_uh,\n input_interval,\n rout_dict['OUTPUT_INTERVAL'],\n catchment['x_inds'],\n catchment['y_inds'])\n # ---------------------------------------------------------------- #\n return rout_data",
"def stops_on_routes_with_direction():\n routes_and_stops = {}\n routes = ['102y', '102z', '104y', '104z', '111y', '111z', '114y', '114z', '116y', '116z', '118y', '11y', '11z', '120y', '120z', '122y', '122z', '123y', '123z', '130y', '130z', '13y', '13z', '140y', '140z', '142y', '142z', '145y', '145z', '14Cy', '14Cz', '14y', '14z', '150y', '150z', '151y', '151z', '15Ay', '15Az', '15By', '15Bz', '15y', '15z', '161y', '161z', '16Cy', '16Cz', '16y', '16z', '17Ay', '17Az', '17y', '17z', '184y', '184z', '185y', '185z', '18y', '18z', '1y', '1z', '220y', '220z', '236y', '236z', '238y', '238z', '239y', '239z', '25Ay', '25Az', '25By', '25Bz', '25Xy', '25Xz', '25y', '25z', '26y', '26z', '270y', '270z', '27Ay', '27Az', '27By', '27Bz', '27Xy', '27Xz', '27y', '27z', '29Ay', '29Az', '31Ay', '31Az', '31By', '31Bz', '31y', '31z', '32Ay', '32Az', '32By', '32Bz', '32Xy', '32Xz', '32y', '32z', '33Ay', '33Az', '33By', '33Bz', '33Xy', '33Xz', '33y', '33z', '37y', '37z', '38Ay', '38Az', '38By', '38Bz', '38y', '38z', '39Ay', '39Az', '39y', '39z', '40By', '40Bz', '40Dy', '40Dz', '40y', '40z', '41Ay', '41By', '41Bz', '41Cy', '41Cz', '41Xy', '41Xz', '41y', '41z', '42y', '42z', '43y', '43z', '44By', '44Bz', '44y', '44z', '45Ay', '45Az', '46Ay', '46Az', '46Ey', '47y', '47z', '49y', '49z', '4y', '4z', '51Dy', '51Dz', '51Xy', '53By', '53Bz', '53y', '53z', '54Ay', '54Az', '56Ay', '56Az', '59y', '59z', '61y', '61z', '63y', '63z', '65By', '65Bz', '65y', '65z', '66Ay', '66Az', '66By', '66Bz', '66Xy', '66Xz', '66y', '66z', '67Xy', '67Xz', '67y', '67z', '68Ay', '68Az', '68y', '68z', '69Xy', '69Xz', '69y', '69z', '70y', '70z', '747y', '747z', '75y', '75z', '76Ay', '76Az', '76y', '76z', '77Ay', '77Az', '79Ay', '79Az', '79y', '79z', '7By', '7Bz', '7Dy', '7Dz', '7y', '7z', '83Ay', '83Az', '83y', '83z', '84Ay', '84Az', '84Xy', '84Xz', '84y', '84z', '8y', '8z', '9y', '9z']\n for route in routes:\n routes_and_stops[route] = [] # new array value for each route key\n reader = csv.reader(open(\"../Data/Sorted Data/stopped_bus_data.csv\"))\n for line in reader:\n try:\n current_route = extract_route_and_direction(line[3])\n if int(line[13]) not in routes_and_stops[current_route]:\n routes_and_stops[current_route].append(int(line[13]))\n except:\n continue\n return routes_and_stops",
"def main() -> None:\n\n data = Ground(sys.argv[1])\n DeliveryMan.show_route(data.coordinates)",
"def load_gps_route(segment_id):\n \n \n\n sql = \"\"\"select lat, lon, reported_update_time\n from tracked_routes\n where gps_segment_id=%(segID)s\n order by reported_update_time\"\"\"\n\n cur = get_cursor() \n SQLExec(cur,sql,{'segID':segment_id});\n res = [r for r in cur];\n cur.close();\n\n trip_id,trip_date,veh_id,sched_err,sched_off= load_gps_segment_header(segment_id);\n\n rows = [[r['lat'],r['lon'],r['reported_update_time']] for r in res];\n\n return trip_id,trip_date,veh_id,sched_err,sched_off,rows",
"def main():\n #short GPS Test\n filename = 'KML_short_test.kml'\n gps_filename = 'gps_short_test.txt'\n gpsfile = open(gps_filename, 'r')\n file = open(filename, 'w')\n addHeader(file)\n coordinate_lst = convert(gpsfile)\n cleaned = GPS_to_CostMap.clean_gps_data(coordinate_lst)\n write_coordinates(cleaned, file)\n addTrailer(file)\n file.close()\n\n #Repeat test\n filename = 'KML_repeat_test1.kml'\n gps_filename = 'gps_1.txt'\n gpsfile = open(gps_filename, 'r')\n file = open(filename, 'w')\n addHeader(file)\n coordinate_lst = convert(gpsfile)\n cleaned = GPS_to_CostMap.clean_gps_data(coordinate_lst)\n write_coordinates(cleaned, file)\n addTrailer(file)\n file.close()\n\n filename = 'KML_repeat_test2.kml'\n gps_filename = 'gps_1.txt'\n gpsfile = open(gps_filename, 'r')\n file = open(filename, 'w')\n addHeader(file)\n coordinate_lst = convert(gpsfile)\n cleaned = GPS_to_CostMap.clean_gps_data(coordinate_lst)\n write_coordinates(cleaned, file)\n addTrailer(file)\n file.close()",
"def main(input_file):\n # Read the map and split it into lines.\n # Make sure to access it like this `mapData[y][x]`\n mapData = input_file.read().strip().splitlines()\n mapWidth = len(mapData[0])\n mapHeight = len(mapData)\n\n # Iterate through each point on the map\n slopeCounts = []\n for y in range(mapHeight):\n for x in range(mapWidth):\n obj = mapData[y][x]\n\n # If the current position is empty, skip it\n if obj == \".\":\n continue\n\n # Iterate through each OTHER point on the map and calculate its\n # slope from the station point\n slopes = set()\n for y2 in range(mapHeight):\n for x2 in range(mapWidth):\n # Make sure to skip the station point and the empty points\n if x2 == x and y2 == y or mapData[y2][x2] == \".\":\n continue\n slopes.add(\n (\n (y2 - y) / (x2 - x) if x2 != x else math.inf,\n math.copysign(1.0, x2 - x),\n math.copysign(1.0, y2 - y),\n )\n )\n\n slopeCounts.append((len(slopes), x, y))\n\n # We need the results of part 1 to solve this\n resultP1 = max(slopeCounts, key=lambda s: s[0])\n\n # Condense the asteroids down to a list and sort it based on distance\n # to the station\n station = (resultP1[1], resultP1[2])\n print(\"STATION AT\", station)\n asteroids = []\n for y in range(mapHeight):\n for x in range(mapWidth):\n obj = mapData[y][x]\n if obj == \"#\" and (x, y) != station:\n asteroids.append((x, y))\n asteroids.sort(\n key=lambda coord: math.hypot(\n coord[0] - station[0], coord[1] - station[1]\n )\n )\n\n # Group the asteroids by cardinality and slope\n groups = dict()\n for target in asteroids:\n slope = (\n (target[1] - station[1]) / (target[0] - station[0])\n if target[0] != station[0]\n else math.inf\n )\n cardinalX = math.copysign(1.0, target[0] - station[0])\n cardinalY = math.copysign(1.0, target[1] - station[1])\n\n if (cardinalX, cardinalY) not in groups:\n groups[(cardinalX, cardinalY)] = collections.defaultdict(list)\n\n # Vertical or horizontal points are subgrouped together\n if slope == math.inf or slope == 0:\n subgroup = slope\n else:\n subgroup = abs(slope) ** (cardinalX * cardinalY)\n # subgroup = math.copysign(slope, cardinalX * cardinalY)\n\n # Add the asteroid to its group and subgroup\n groups[(cardinalX, cardinalY)][subgroup].append(target)\n\n # Iterate through the groups in clockwise order, popping off\n # asteroids to find the 200th\n count = 0\n found = None\n while found is None:\n for direction in [\n (0, -1),\n (1, -1),\n (1, 0),\n (1, 1),\n (0, 1),\n (-1, 1),\n (-1, 0),\n (-1, -1),\n ]:\n if direction in groups:\n for slope, targets in sorted(\n groups[direction].items(), key=lambda item: item[0]\n ):\n if len(targets):\n target = targets.pop(0)\n count += 1\n if count == 200:\n found = target\n\n print(\"RESULT:\", found[0] * 100 + found[1])",
"def process_file():\n global distances_between_cities\n global number_of_cities\n global unvisited_cities\n\n text_file = open(sys.argv[1].strip('\\r'))\n distances_between_cities = [[int(i) for i in line.strip(\"\\r\\n\").split()[1:]] for line in text_file.readlines()[1:]]\n number_of_cities = len(distances_between_cities)\n\n # set the initial conditions of the problem (you have already visited madrid)\n unvisited_cities = range(number_of_cities)\n visit_city(0)",
"def route(self, ori, dest, pois):\n #find one route from ori to dest\n departure_time = int(time.time())\n routes = util.query_routes(origin=ori, \n destination=dest,\n departure_time=departure_time)\n if routes is None or routes['status'] != \"OK\":\n print ',=====',routes\n return None\n\n route = routes[\"routes\"][0] #get the first route\n\n #get the points in the route to search the potential poi\n points = util.extract_points(route)\n\n if points is None or len(points) ==0:\n print \"Error in extracting points\"\n return None\n #get the candiates in the route\n candidates = []\n way_points = pois.split(\"|\")\n for point in points:\n information = {}\n information[\"location\"] = point\n for way_p in way_points:\n response = util.get_nearby_points(location=point, keyword=way_p)\n if response is None or response[\"status\"] != \"OK\":\n information[way_p] = []\n continue\n ps = []\n for result in response[\"results\"]:\n poi = {\"geometry\": result[\"geometry\"],\n \"name\": result[\"name\"],\n \"price_level\": result.get(\"price_level\", None),\n \"rating\": result.get(\"rating\", None),\n \"vicinity\": result[\"vicinity\"]}\n ps.append(poi)\n information[way_p] = ps\n candidates.append(information)\n \n cost_matrix = waypoint.find_waypoints([candidates], way_points)\n cost_matrix.sort(key=lambda x:x[1])\n\n top_candidate = cost_matrix[0]\n json.dump(top_candidate, open('./top_candidate.json','w'))\n final_route = self.get_direction(ori, dest, top_candidate)\n json.dump(final_route, open(\"./real_route.json\", \"w\"))\n\n return final_route, top_candidate",
"def main():\n # IMPORTANT: Specify a path to the new shapefile!\n data_dir = os.path.join(\"C:\\\\\",\"Users\",\"janni\",\"OneDrive\",\"Desktop\",\"data\")\n\n #Store route identification codes in to a list\n L_tracks=['\"tag_ident\"=72413','\"tag_ident\"=72417','\"tag_ident\"=73053','\"tag_ident\"=72364',\\\n '\"tag_ident\"=73054','\"tag_ident\"=79694','\"tag_ident\"=79698']\n\n if(os.path.isdir(data_dir)):\n print(\"Very good! You have chosen a valid directory!\")\n # load the point shapefile of the white-fronted goose manually!\n # access the active layer\n point_layer = iface.activeLayer()\n if not point_layer:\n print(\"Shape file failed to load!\")\n else:\n # 1\n addTimeAndDateObs(point_layer)\n print(\"-----------Created Date and Time objects-------------\")\n # 2\n addDistance(point_layer, L_tracks)\n print(\"-----------Distances calculation finished-------------\")\n # 3\n extractPoints(point_layer,Statistics(point_layer),data_dir)\n print(\"-----------Low distance points extracted and save to a new shapefile-------------\")\n print('Done')\n\n raster_fn = os.path.join(data_dir,\"Eurasia_Landcover.tif\")\n landuse_legend_fn = os.path.join(data_dir,'Eurasia_Landcover_Legend.csv')\n in_shape_fn = os.path.join(data_dir,\"lowDistance.shp\")\n out_shape_fn = os.path.join(data_dir,\"lowDistanceLanduseID.shp\")\n\n\n if(QgsProject.instance().mapLayersByName('lowDistanceLanduseID')==[]):\n processing.run(\"qgis:rastersampling\",\n {'COLUMN_PREFIX' : 'LanduseNr_',\n 'INPUT' : in_shape_fn,\n 'OUTPUT' : out_shape_fn,\n 'RASTERCOPY' : raster_fn})\n updated_shapefile = iface.addVectorLayer(out_shape_fn, '', 'ogr')\n else:\n updated_shapefile = QgsProject.instance().mapLayersByName('lowDistanceLanduseID')[0]\n #2\n convertIdFloatToInt(updated_shapefile)\n #3\n legend = preProcessLegend(landuse_legend_fn)\n #4\n convertIdToName(legend,updated_shapefile)\n #5\n plotLandUse(updated_shapefile,\"Pie\")\n print(\"-----------finished!-------------\")\n print(\"DONE! :)\")\n else:\n iface.messageBar().pushMessage(\"Error\", \"The directory does not exist. Please change data_dir in the code\",level = 1)\n print(\"Please specify a valid directory in the main function of Code_Distance.py!\")",
"def addSNMPRoutes(self, routingtable):\n\n ipCidrRouteDest = \"\"\n ipCidrRouteNextHopAS = \"\"\n ipCidrRouteMetric1 = 0\n ipCidrRouteMetric2 = 0\n ipCidrRouteMetric3 = 0\n ipCidrRouteMetric4 = 0\n ipCidrRouteMetric5 = 0\n ipCidrRouteStatus = 0\n ipCidrRouteMask = \"\"\n ipCidrRouteTos = 0\n ipCidrRouteNextHop = \"\"\n ipCidrRouteIfIndex = 0\n ipCidrRouteType = 0\n ipCidrRouteProto = 0\n ipCidrRouteAge = 0\n ipCidrRouteInfo = 0\n\n for loop_rtIndex in routingtable:\n for ifAttr in routingtable[loop_rtIndex]:\n if ifAttr == 1:\n ipCidrRouteDest = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 10:\n ipCidrRouteNextHopAS = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 11:\n ipCidrRouteMetric1 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 12:\n ipCidrRouteMetric2 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 13:\n ipCidrRouteMetric3 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 14:\n ipCidrRouteMetric4 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 15:\n ipCidrRouteMetric5 = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 16:\n ipCidrRouteStatus = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 2:\n ipCidrRouteMask = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 3:\n ipCidrRouteTos = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 4:\n ipCidrRouteNextHop = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 5:\n ipCidrRouteIfIndex = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 6:\n ipCidrRouteType = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 7:\n ipCidrRouteProto = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 8:\n ipCidrRouteAge = routingtable[loop_rtIndex][ifAttr]\n elif ifAttr == 9:\n ipCidrRouteInfo = routingtable[loop_rtIndex][ifAttr]\n\n self.routingtable[loop_rtIndex] = device_routingtable( \\\n ipCidrRouteDest, ipCidrRouteNextHopAS, ipCidrRouteMetric1, \\\n ipCidrRouteMetric2, ipCidrRouteMetric3, ipCidrRouteMetric4, \\\n ipCidrRouteMetric5, ipCidrRouteStatus, ipCidrRouteMask, \\\n ipCidrRouteTos, ipCidrRouteNextHop, ipCidrRouteIfIndex, \\\n ipCidrRouteType, ipCidrRouteProto, ipCidrRouteAge, \\\n ipCidrRouteInfo)",
"def _parse_routes(self, data):\n trips = []\n routes = data.findall('./itdRoute')\n for route in routes:\n trip = Trip()\n interchange = None\n for routepart in route.findall('./itdPartialRouteList/itdPartialRoute'):\n part = self._parse_routepart(routepart)\n if interchange is not None:\n if isinstance(part, RideSegment):\n interchange.destination = part[0].platform\n else:\n interchange.destination = part[0].origin\n trip._parts.append(part)\n\n interchange = self._parse_interchange(routepart)\n if isinstance(part, RideSegment):\n if interchange is not None:\n interchange.origin = part[-1].platform\n trip._parts.append(interchange)\n else:\n if interchange is not None:\n part.events = interchange.events\n interchange = None\n\n ticketlist = TicketList()\n tickets = route.find('./itdFare/itdSingleTicket')\n if tickets:\n authority = tickets.attrib['net']\n ticketlist.single = TicketData(authority, tickets.attrib['unitsAdult'], float(tickets.attrib['fareAdult']), float(tickets.attrib['fareChild']))\n ticketlist.bike = TicketData(authority, tickets.attrib['unitsBikeAdult'], float(tickets.attrib['fareBikeAdult']), float(tickets.attrib['fareBikeChild']))\n ticketlist.currency = tickets.attrib['currency']\n ticketlist.level_name = tickets.attrib['unitName']\n for ticket in tickets.findall('./itdGenericTicketList/itdGenericTicketGroup'):\n t = TicketData()\n name = ticket.find('./itdGenericTicket[ticket=\"TICKETTYPE\"]/value')\n if name is None or not name.text:\n continue\n\n authority = ticket.find('./itdGenericTicket[ticket=\"TARIFF_AUTHORITY\"]/value')\n if authority is not None and authority.text:\n t.authority = authority.text\n\n level = ticket.find('./itdGenericTicket[ticket=\"FARE_CATEGORY\"]/value')\n if level is not None and level.text:\n t.level = level.text\n\n prices = []\n adult = ticket.find('./itdGenericTicket[ticket=\"TICKET_ID_ADULT\"]/value')\n if adult is not None and adult.text:\n price = ticket.find('./itdGenericTicket[ticket=\"FARE_ADULT\"]/value')\n if price is not None and price.text:\n prices.append(float(price.text))\n\n child = ticket.find('./itdGenericTicket[ticket=\"TICKET_ID_CHILD\"]/value')\n if child is not None and child.text:\n price = ticket.find('./itdGenericTicket[ticket=\"FARE_CHILD\"]/value')\n if price is not None and price.text:\n prices.append(float(price.text))\n\n if not prices:\n continue\n\n t.price = prices[0]\n if len(prices) == 2:\n t.price_child = prices[1]\n ticketlist.other[name.text] = t\n trip.tickets = ticketlist\n\n trips.append(trip)\n\n return trips",
"def main():\n #refreshes mbta route list, isn't ideal, but simplest implementation at the moment\n fetch_mbta_routes()\n subway_route_list = mbta_route_list()\n #delays for 10 seconds to prevent exceeding api request limit\n time.sleep(10)\n\n for x in subway_route_list:\n #gets system time, system time is assumed to be in EST/EDT\n year = datetime.now().strftime('%Y')\n month = datetime.now().strftime('%m')\n day = datetime.now().strftime('%d')\n hour = datetime.now().strftime('%H')\n minute = datetime.now().strftime('%M')\n sec = datetime.now().strftime('%S')\n\n save_path = year + '/' + month + '/' + day + '/'\n file_name = hour + '.' + minute + '.' + sec + '.txt'\n complete_path = save_path + file_name\n\n train_route = fetch_trains_per_route(x)\n\n if train_route == None:\n pass\n else:\n #checks if save path exists, creates save path if it doesn't exist\n if os.path.exists(save_path):\n f = open(complete_path, 'a')\n f.write(str(train_route))\n f.close()\n #delays loop for 10 seconds to prevent exceeding api request limit\n time.sleep(10)\n else:\n os.makedirs(save_path)\n f = open(complete_path, 'a')\n f.write(str(train_route))\n f.close()\n #delays loop for 10 seconds to prevent exceeding api request limit\n time.sleep(10)"
] | [
"0.5804729",
"0.57079357",
"0.56287354",
"0.5589738",
"0.5480822",
"0.54700845",
"0.54322046",
"0.5416901",
"0.53724426",
"0.5369428",
"0.5328261",
"0.5310065",
"0.5301427",
"0.52883816",
"0.52178824",
"0.5206266",
"0.5193167",
"0.5183766",
"0.5160514",
"0.5158721",
"0.5149442",
"0.5141179",
"0.51383364",
"0.51272815",
"0.5118636",
"0.5117852",
"0.51103455",
"0.5109899",
"0.5058319",
"0.50459313"
] | 0.6581875 | 0 |
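A hedged usage sketch for the gpx record above, assuming the function lives in a Flask app (the abort(400) call suggests one) and that the scored route comes back as a pandas DataFrame (pad_gpx_dataframe suggests one); the module path, endpoint name and routes/ directory below are illustrative only:

from pathlib import Path

from flask import Flask, jsonify

from routes_api import get_route_with_scariness_from_file  # hypothetical module name

app = Flask(__name__)

@app.route('/score/<route_name>', methods=['POST'])
def score_route(route_name):
    gpx_path = Path('routes') / f'{route_name}.gpx'  # assumed upload directory
    scored = get_route_with_scariness_from_file(str(gpx_path))  # parse, score, persist
    return jsonify(scored.to_dict(orient='records'))  # one dict per scored waypoint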
Checks that a route matching the route file name is not already in the Routes database | def check_route_not_loaded(route_file):
loaded_routes = administer_route_database.get_loaded_routes(
administer_route_database.get_route_db_connection()
)
route_file_route_name = Path(route_file).name.replace('.gpx', '')
for group in loaded_routes:
if route_file_route_name in group:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_routes(routes, serverdir):\n for route in routes:\n file_ = os.path.join(serverdir,\n routes[route].lstrip('/'))\n if not os.path.isfile(file_):\n print('no file for route rule:', route+','+file_)\n sys.exit(1)",
"def validate_route(self, route):\n\n for router in ROUTER:\n if router.value == route:\n return True\n return False",
"def selectroute(self, route_key):\n with open(self.filedb, 'r') as f:\n try:\n lines = list()\n routeexists = False\n code = 200\n message = 'Route does not exists'\n for line in f:\n line = line.rstrip('\\n\\r').upper()\n if line[0:7] == route_key:\n routeexists = True\n message = 'Route exists.'\n code = 400\n else:\n if lines != \"\":\n lines.append(line)\n except Exception as e:\n logging.error('Database is not accessible.' + str(e))\n return True, False, 'Database is not accessible.', 503, []\n\n return False, routeexists, message, code, lines",
"def test_abort_route_when_id_route_not_exist(self):\n\n pass",
"def exists(self, destination: Route) -> bool:\n i = hash(destination.addr)\n return i in self.keys()",
"def schedule_existing_route_fails(self):\n self.schedule_route_successfully()\n response = self.client.post(\n self.schedule_route_url, self.valid_route_details, format='json',\n HTTP_AUTHORIZATION='token {}'.format(self.token_two))\n\n return response",
"def parse_route_files():\n a_copy = PY_FILES[::]\n for f in a_copy:\n if 'routes' in f:\n URL_FILES.append(f)\n PY_FILES.remove(f)",
"def validate_routes(route):\n if ROUTE_PATTERN.match(route):\n if route[0] == route[1]:\n raise argparse.ArgumentTypeError('Invalid route format, cannot have same city: %s' % route)\n return route\n else:\n raise argparse.ArgumentTypeError('Invalid route format for: %s. Should be {A-Z}{A-Z}{0-9}+' % route)",
"def route_validity_checker(): #below route list was rerturned from bus_routes function above, copy and pasted to eliminate need to re-run\n route_list=['15', '46A', '14', '41B', '39A', '65', '40D', '11', '31', '27', '67', '79', '42', '66A', '33B', '140', '44', '83A', '27B', '38', '16C', '747', '41C', '39', '25', '239', '43', '70', '13', '150', '145', '77A', '184', '84', '61', '83', '40', '66', '15A', '123', '17A', '16', '14C', '9', '4', '37', '32', '33', '49', '56A', '151', '25A', '45A', '54A', '47', '18', '7', '17', '102', '120', '65B', '41', '122', '29A', '76', '68', '59', '25B', '69', '27A', '66B', '38B', '7D', '75', '15B', '84A', '63', '84X', '33X', '68A', '1', '76A', '7B', '270', '236', '130', '238', '220', '44B', '40B', '26', '32B', '8', '41A', '53', '67X', '104', '32A', '79A', '114', '185', '66X', '31B', '32X', '51X', '51D', '41X', '142', '111', '69X', '27X', '116', '46E', '161', '118', '25X', '38A', '33A', 'PP07', '53B', '31A', 'OL84']\n count_dict={}\n for route in route_list: #dictionary with key for every route in the list\n count_dict[route]=0 #used to count number of occurrences in files\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"): #for every file\n print(file)\n reader=csv.reader(open(file))\n for line in reader:\n route=extract_bus_route(line[3])\n if route!=\"\":\n count_dict[extract_bus_route(line[3])]+=1 #incremenent the counter of the route with the associated journey id code\n return count_dict #result is that 3 routes are likely due to strange circumstances or errors in data",
"def _find_route(self, route_path_or_name):\n for route in self.router.routes:\n if route.path == route_path_or_name or route.name == route_path_or_name:\n return route",
"def _matches_route(path, request, route_name):\n\n introspector = request.registry.introspector\n\n # `route` is a pyramid.interfaces.IRoute\n route = introspector.get(\"routes\", route_name)[\"object\"]\n return route.match(path) is not None",
"def is_route_used(_, hash_arg):\n for hash_object in Hash.objects.all():\n if hash_object.hash == hash_arg:\n return HttpResponse(\n json.dumps({\"Used\": True}), mimetype=\"application/json\")\n\n Hash(hash=hash_arg).save()\n return HttpResponse(\n json.dumps({\"Used\": False}), mimetype=\"application/json\")",
"def load_routes():\n\n print (\"routes\")\n\n Route.query.delete()\n\n with open(\"seed_data/routes_seed.psv\") as routes:\n for row in routes:\n route, route_acronym = row.strip().split(\"|\")\n\n # Checks if seed is empty, if so, inserts a Null cell into the db\n acronym = None if route_acronym == 'None' else route_acronym\n\n route = Route(route=route,\n route_acronym=acronym)\n\n\n db.session.add(route)\n\n db.session.commit()",
"def test_add_url_rule_duplicate():\n\n with pytest.raises(DuplicateRouteURLError):\n application_services.add_url_rule('/tests/application/duplicate/rule',\n view_func=mock_view_function,\n methods=HTTPMethodEnum.GET)\n\n application_services.add_url_rule('/tests/application/duplicate/rule',\n view_func=mock_view_function,\n methods=HTTPMethodEnum.GET)",
"def bus_routes():\n route_list = []\n os.chdir(\"../Data\")\n for file in glob.glob(\"*.csv\"):\n print(file)\n reader = csv.reader(open(file))\n for line in reader:\n route=extract_bus_route(line[3]) #Journey ID field\n if route not in route_list and route!=\"\": #error handling for extract_bus_routes function\n route_list.append(route)\n return route_list",
"def _handle_existing_agent_key_path(restored_key_path,\n db_key_path):\n with open(db_key_path) as key_file:\n content_1 = key_file.read()\n with open(restored_key_path) as key_file:\n content_2 = key_file.read()\n if content_1 != content_2:\n raise NonRecoverableError(\n 'Agent key path already taken: {0}'.format(db_key_path)\n )\n ctx.logger.debug('Agent key path already exist: '\n '{0}'.format(db_key_path))",
"def is_existing_name(self, name):\n\n original_name = self.view.settings().get('regreplace.name', None)\n rules = sublime.load_settings('reg_replace_rules.sublime-settings').get('replacements', {})\n msg = \"The name '%s' already exists in the replacment list. Do you want to replace existing rule?\" % name\n return not (name == original_name or name not in rules or sublime.ok_cancel_dialog(msg))",
"def validate_no_duplicate_paths(self, resources):\r\n paths = set()\r\n for item in resources:\r\n file_name = item.get('path')\r\n if file_name in paths:\r\n raise ValueError(\r\n '%s path was specified more than once in the metadata' %\r\n file_name)\r\n paths.add(file_name)",
"def assure_exists(self, name: str):\n result = self.l2.exists(name)\n if result:\n logging.debug(f'{name} l2 hit')\n return self.l2.get_path(name)\n\n self.l3.download(name, self.l2.get_path(name))\n result = self.l2.exists(name)\n if not result:\n raise Exception('file not found anywhere')\n else:\n logging.debug(f'{name} l3 hit')\n return self.l2.get_path(name)",
"def test_no_routes(self):\n response = self.client.get(reverse('routes_app:index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"No routes are available.\")",
"def is_correct_route(network, route):\n id_actual = 0\n id_next = 1\n while id_next < len(route):\n road_id_actual = route[id_actual]\n road_id_next = route[id_next]\n if get_end(network, road_id_actual) != get_start(network, road_id_next):\n return False\n id_actual += 1\n id_next += 1\n return True",
"def node_no_route_found_for_packet(self, node, packet):\n for subscriber in self.subscribers:\n subscriber.node_no_route_found_for_packet(node, packet)",
"def _check_file(self, name):\n self.assertTrue(os.path.exists(name), \"Could not find table %s.\" % name)",
"def require_notfound(cls,path):\n if os.path.exists(path):\n raise exceptions.PathFoundError(path)",
"def exists(self, path):",
"def test_add_route(self):\n\n post = {\n 'ip': 'test_ip',\n 'next_hop': 'test_nexthop',\n 'communities': 'test_commu'\n }\n route_id = self.database.add_route(post)\n post2 = self.database.route.find_one({'ip': post['ip']})\n self.database.delete_route({'_id': route_id})\n self.assertEqual(post2['ip'], post['ip'], 'insertion failed')\n self.assertEqual(post2['next_hop'], post['next_hop'],\n 'insertion failed')\n self.assertEqual(post2['communities'], post['communities'],\n 'insertion failed')",
"def exists_withdraws(self, route: Route) -> bool:\n return hash(route.addr) in self.withdraws.keys()",
"def test_get():\n\n start_ln = len(routes.routes['GET'])\n\n @get('/s/foo')\n def foo_route(request):\n return 200, ''\n\n for path, fn in routes.routes['GET']:\n if fn == foo_route:\n found = (path, fn)\n assert found\n routes.routes['GET'].remove(found)\n assert len(routes.routes['GET']) == start_ln",
"def check_is_event_valid(self, event):\n if event.src_path == template_file_path:\n self.__init__()\n if not hasattr(event, 'dest_path'):\n event.dest_path = None\n for path in [event.src_path, event.dest_path]:\n if path is not None:\n dir, name = self.__parse_full_path(path)\n if dir.find(controlled_path\n ) >= 0 and not self._check_is_name_valid(name):\n if path == event.dest_path:\n os.system('cp {dest} {src}'.format(\n dest=event.dest_path, src=event.src_path))\n os.system('rm -rf {dir}{name}'.format(dir=dir, name=name))",
"def __checkDestination(self):\n return os.path.exists(self.__targetPath)"
] | [
"0.6775998",
"0.64381325",
"0.61428326",
"0.5804251",
"0.5692228",
"0.5555167",
"0.5551878",
"0.5467528",
"0.54438454",
"0.537621",
"0.5356725",
"0.53260255",
"0.53109175",
"0.5305829",
"0.5243538",
"0.52422684",
"0.52327466",
"0.5220092",
"0.5192756",
"0.5159098",
"0.515553",
"0.513806",
"0.513773",
"0.50804806",
"0.5075635",
"0.5047549",
"0.5045314",
"0.5042774",
"0.5038039",
"0.50175154"
] | 0.73544973 | 0 |
Translates the fear level given by the application to a minimum scariness rating at which to flag Waypoints | def translate_fear_level(fear_level):
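    # Map the requested fear level to the minimum scariness rating that flags
    # a Waypoint: level 1 -> 6, level 2 -> 5, any other level -> 4.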
if fear_level == 1:
scariness_threshold = 6
elif fear_level == 2:
scariness_threshold = 5
else:
scariness_threshold = 4
return scariness_threshold | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_overall_rating(player_dict):\r\n if player_dict[\"position\"].upper() == \"QB\":\r\n throw_power = int(max(min(int(player_dict[\"throw_power\"]), 99), 70))\r\n throw_accuracy = int(max(min(math.ceil(\r\n ((2 * (\r\n int(player_dict[\"throw_accuracy_short\"]) + \r\n int(player_dict[\"throw_accuracy_mid\"]) + \r\n int(player_dict[\"throw_accuracy_deep\"]) + \r\n int(player_dict[\"throw_on_the_run\"]) + \r\n int(player_dict[\"playaction\"])\r\n )) - (2 * min(\r\n int(player_dict[\"throw_accuracy_short\"]), \r\n int(player_dict[\"throw_accuracy_mid\"]), \r\n int(player_dict[\"throw_accuracy_deep\"]), \r\n int(player_dict[\"throw_on_the_run\"]), \r\n int(player_dict[\"playaction\"])\r\n ))\r\n ) / 8\r\n ), 99), 60))\r\n break_tackles = int(max(min(\r\n math.ceil(((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 7), \r\n 90), 20))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 98), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((throw_power - 50.0) / 10.0) * 4.9\r\n overall_rating += ((throw_accuracy - 50.0) / 10.0) * 5.8\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.0\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"HB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 70), 25))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 50))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 50))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 0.33\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 2.0\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.8\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.6\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FB\":\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 75), 40))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 85), 45))\r\n break_tackles = 
int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 99), 55))\r\n carrying = int(max(min(int(player_dict[\"carrying\"]), 99), 60))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 95), 60))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.0\r\n overall_rating += ((run_block - 50.0) / 10.0) * 7.2\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.8\r\n overall_rating += ((carrying - 50.0) / 10.0) * 1.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.0\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 39), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"WR\":\r\n break_tackles = int(max(min(\r\n math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2), \r\n 80), 35))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 75))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 35))\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 65))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.3\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.8\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 4.75\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.4\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"TE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 55))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 55))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 60))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 99), 45))\r\n break_tackles = int(max(min(\r\n (math.ceil((int(player_dict[\"elusiveness\"]) + int(player_dict[\"trucking\"])) / 2) + 5), \r\n 95), 20))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 80), 35))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n 
), 85), 35))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 2.65\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.65\r\n overall_rating += ((awareness - 50.0) / 10.0) * 2.65\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.25\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.25\r\n overall_rating += ((catching - 50.0) / 10.0) * 5.4\r\n overall_rating += ((break_tackles - 50.0) / 10.0) * 1.2\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 1.2\r\n overall_rating += ((run_block - 50.0) / 10.0) * 5.4\r\n overall_rating = int(max(min((round(overall_rating) + 35), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LT\" or player_dict[\"position\"].upper() == \"RT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.3\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.3\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 0.8\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 4.75\r\n overall_rating += ((run_block - 50.0) / 10.0) * 3.75\r\n overall_rating = int(max(min((round(overall_rating) + 26), 99), 40))\r\n return overall_rating\r\n \r\n if (player_dict[\"position\"].upper() == \"LG\" or player_dict[\"position\"].upper() == \"RG\" or \r\n player_dict[\"position\"].upper() == \"C\"):\r\n speed = int(max(min(int(player_dict[\"speed\"]), 85), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 85), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 90), 60))\r\n pass_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"pass_block\"]) + \r\n int(player_dict[\"pass_block_power\"]) + \r\n int(player_dict[\"pass_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n run_block = int(max(min(math.ceil(\r\n (\r\n int(player_dict[\"run_block\"]) + \r\n int(player_dict[\"run_block_power\"]) + \r\n int(player_dict[\"run_block_finesse\"])\r\n ) / 3\r\n ), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.7\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.25\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.25\r\n overall_rating += ((agility - 50.0) / 10.0) * 0.8\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n overall_rating += ((pass_block - 50.0) / 10.0) * 3.25\r\n overall_rating += ((run_block - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LE\" or player_dict[\"position\"].upper() == 
\"RE\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 55))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 45))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.75\r\n overall_rating += ((awareness - 50.0) / 10.0) * 1.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.75\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 3.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"DT\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 90), 45))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 70))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 90), 40))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 1.8\r\n overall_rating += ((strength - 50.0) / 10.0) * 5.5\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.8\r\n overall_rating += ((agility - 50.0) / 10.0) * 1\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.8\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.55\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"LOLB\" or player_dict[\"position\"].upper() == \"ROLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 70))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 90), 20))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 60))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 2.4\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.6\r\n overall_rating += ((agility - 50.0) / 10.0) * 2.4\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.3\r\n overall_rating += ((catching - 50.0) / 10.0) * 1.3\r\n overall_rating += ((tackle - 50.0) / 10.0) * 4.8\r\n overall_rating = int(max(min((round(overall_rating) + 29), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"MLB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 95), 65))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 99), 60))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 95), 65))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 95), 75))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 99), 65))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 0.75\r\n overall_rating += ((strength - 50.0) / 10.0) * 3.4\r\n overall_rating += ((awareness - 
50.0) / 10.0) * 5.2\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.65\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.75\r\n overall_rating += ((tackle - 50.0) / 10.0) * 5.2\r\n overall_rating = int(max(min((round(overall_rating) + 27), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"CB\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 80))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 40))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 35))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 75))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 40))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 85), 30))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.85\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 3.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.55\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.35\r\n overall_rating += ((catching - 50.0) / 10.0) * 3\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.55\r\n overall_rating += ((tackle - 50.0) / 10.0) * 1.55\r\n overall_rating = int(max(min((round(overall_rating) + 28), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"FS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 85), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.0\r\n overall_rating += ((strength - 50.0) / 10.0) * 0.9\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.85\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.5\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 2.5\r\n overall_rating += ((catching - 50.0) / 10.0) * 3.0\r\n overall_rating += ((jumping - 50.0) / 10.0) * 1.5\r\n overall_rating += ((tackle - 50.0) / 10.0) * 2.5\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"SS\":\r\n speed = int(max(min(int(player_dict[\"speed\"]), 99), 75))\r\n strength = int(max(min(int(player_dict[\"strength\"]), 90), 45))\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 99), 40))\r\n agility = int(max(min(int(player_dict[\"agility\"]), 99), 70))\r\n acceleration = int(max(min(int(player_dict[\"acceleration\"]), 99), 80))\r\n catching = int(max(min(int(player_dict[\"catching\"]), 95), 35))\r\n jumping = int(max(min(int(player_dict[\"jumping\"]), 99), 65))\r\n tackle = int(max(min(int(player_dict[\"tackle\"]), 90), 45))\r\n \r\n overall_rating = 0.0\r\n overall_rating += ((speed - 50.0) / 10.0) * 3.2\r\n overall_rating += ((strength - 50.0) / 10.0) * 1.7\r\n overall_rating += ((awareness - 50.0) / 10.0) * 4.75\r\n overall_rating += ((agility - 50.0) / 10.0) * 1.7\r\n overall_rating += ((acceleration - 50.0) / 10.0) * 1.7\r\n 
overall_rating += ((catching - 50.0) / 10.0) * 3.2\r\n overall_rating += ((jumping - 50.0) / 10.0) * 0.9\r\n overall_rating += ((tackle - 50.0) / 10.0) * 3.2\r\n overall_rating = int(max(min((round(overall_rating) + 30), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"K\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 35))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-177 + (0.218 * awareness) + (1.28 * kick_power) + (1.47 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating\r\n \r\n if player_dict[\"position\"].upper() == \"P\":\r\n awareness = int(max(min(int(player_dict[\"awareness\"]), 85), 40))\r\n kick_power = int(max(min(int(player_dict[\"kick_power\"]), 99), 80))\r\n kick_accuracy = int(max(min(int(player_dict[\"kick_accuracy\"]), 99), 70))\r\n \r\n overall_rating = (-183 + (0.218 * awareness) + (1.5 * kick_power) + (1.33 * kick_accuracy))\r\n overall_rating = int(max(min(round(overall_rating), 99), 40))\r\n return overall_rating",
"def custom_score_7(game, player):\n \"\"\"custom_score_7 heuristic function also aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(1.5*length_my_player_moves*length_my_player_moves - length_opp_payer_moves*length_opp_payer_moves)",
"def custom_score_6(game, player):\n \"\"\"custom_score_6 heuristic function aims towards weighted chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - 1.5*length_opp_payer_moves*length_opp_payer_moves)",
"def compute_rating(positive_count, neutral_count, negative_count):\n total = positive_count + neutral_count + negative_count\n if total < 5:\n return 'NEUTRAL'\n\n pos = positive_count/total\n neg = negative_count/total\n\n if pos > 0.3 and neg > 0.3:\n return 'CONTROVERSIAL'\n if pos > 0.7 or (pos > 0.5 and pos >= neg * 2):\n return 'POSITIVE'\n if neg > 0.7 or (neg > 0.5 and neg >= pos * 2):\n return 'NEGATIVE'\n return 'NEUTRAL'",
"def sharpness_penalty(self):\n # This polynomial function gives the gain for peaking filter which achieves 18 dB / octave max derivative\n # The polynomial estimate is accurate in the vicinity of 18 dB / octave\n gain_limit = -0.09503189270199464 + 20.575128011847003 * (1 / self.q)\n # Scaled sigmoid function as penalty coefficient\n x = self.gain / gain_limit - 1\n sharpness_penalty_coefficient = 1 / (1 + np.e ** (-x * 100))\n return np.mean(np.square(self.fr * sharpness_penalty_coefficient))",
"def custom_score_5(game, player):\n \"\"\"custom_score_5 heuristic function defines chances heuristics\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n return float(length_my_player_moves*length_my_player_moves - length_opp_payer_moves*length_opp_payer_moves)",
"def custom_score_4(game, player):\n \"\"\"custom_score_4 heuristic function aims at minimizing loosing chances of myPlayer\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = len(game.get_legal_moves(player)) #Calculate length of available moves for myPlayer\n length_opp_payer_moves = 1.0 * len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves for the oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(-length_opp_payer_moves/length_my_player_moves)",
"def custom_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n heuristics_options = {\n \"heuristic_1_center\": heuristic_1_center,\n \"heuristic_2_reflection\": heuristic_2_reflection,\n \"heuristic_3_partition\": heuristic_3_partition,\n \"heuristic_combined_1_2\": heuristic_combined_1_2,\n \"heuristic_combined_1_3\": heuristic_combined_1_3,\n \"heuristic_combined_2_3\": heuristic_combined_2_3,\n \"heuristic_combined_1_2_3\": heuristic_combined_1_2_3,\n \"heuristic_combined_1_2_3_with_improve_score\": heuristic_combined_1_2_3_with_improve_score\n }\n\n return heuristics_options[\"heuristic_combined_1_2_3_with_improve_score\"](game, player)",
"def custom_score(game, player):\n # return penalize_corners_heuristic(game, player)\n # return favor_run_away_heuristic(game, player)\n return look_ahead_heuristic(game, player)",
"def distance_meaning(score):\r\n score = float(score)\r\n desc = \"unknown about\";\r\n if 0.6 < score <= 1.0:\r\n desc = \"against\"\r\n elif 0.4 < score <= 0.6:\r\n desc = \"a mixture of for and against\"\r\n elif score <= 0.4:\r\n desc = \"for\"\r\n\r\n return desc",
"def custom_score_general(game, player, constants=[]):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n v = []\n\n if constants[0] != 0 or constants[2] != 0:\n own_moves = number_moves(game, player) / 8\n\n if own_moves == 0:\n return float(\"-inf\")\n\n v.append(own_moves)\n\n if constants[1] != 0 or constants[2] != 0:\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n\n if opp_moves == 0:\n return float(\"inf\")\n\n v.append(opp_moves)\n\n if constants[2] != 0:\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n v.append(move_ratio)\n\n if constants[3] != 0 or constants[5] != 0:\n own_openness = nearby_openness(game, player) / 80\n v.append(own_openness)\n\n if constants[4] != 0 or constants[5] != 0:\n opp_openness = nearby_openness(game, game.get_opponent(player)) / 80\n v.append(opp_openness)\n\n if constants[5] != 0:\n openness_ratio = (own_openness * 80) / (opp_openness + 0.0001 * 80) /80\n v.append(openness_ratio)\n\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n if constants[6] != 0 or constants[8] != 0:\n own_centerness = centerness(game, player) / centerness_max\n v.append(own_centerness)\n\n if constants[7] != 0 or constants[8] != 0:\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n v.append(opp_centerness)\n\n if constants[8] != 0:\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n return sum([x * y for x, y in zip(constants, v)])",
"def set_score(self):\n if self.PotTax_intervention is None:\n if self.PotTax_reference is not None:\n self.score = (((self.PotTax_reference.sum().TFI - 29.33) /\n 1.4349) / 100)\n else:\n print(\"There is no Biosafe output to score\")\n return\n else:\n self.score = (((self.PotTax_intervention.sum().TFI - 29.33) /\n 1.4349) / 100)\n return",
"def get_fare(self):\r\n return super().get_fare()*self.fanciness_rating+self.flagfall",
"def showBestGainWon(self) :\n bestGainWon = 0\n for level in self.level_history :\n bestGainWon = level.profit if bestGainWon < level.profit else bestGainWon\n Scenario.messageGetBestGainWon(bestGainWon)",
"def custom_score_3(game, player):\n \n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n player_legal_move_count, opponent_legal_move_count = \\\n len(player_legal_moves), len(opponent_legal_moves)\n move_count_difference = player_legal_move_count - opponent_legal_move_count\n # Find coordinates of center box\n h, w = get_center_coordinates(game)\n # Retrieve player's coordinates\n y, x = game.get_player_location(player)\n # Obtain coordinate further, closest to origin\n furthest_coord, closest_coord = max(h - y, w -x), min(h - y, w - x)\n # Return weighted, vector-valued length from origin / sum of weights\n weighted_distance_from_center = \\\n math.sqrt((closest_coord**2 + 2*(furthest_coord**2)))/3\n feature_vector = (move_count_difference, weighted_distance_from_center)\n \n weight_vector = (1.0,0.1)\n \n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(weight_vector, feature_vector)) \n \n return float(weighted_difference_dot_product)",
"def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player) / 8\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n\n # Calculate centerness_score\n completeness = completeness_of_game(game)\n centerness_score = 0\n if completeness < 0.5:\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n own_centerness = centerness(game, player) / centerness_max\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n centerness_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score",
"def custom_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n return __heuristic3__(game, player)",
"def getWeightsAttack(self, gameState, action):\r\n return {'minDistToFood': -1,'getFood': 100}",
"def improved_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)",
"def score(self):\n # loop over aminoacids in protein and calculate how often H and C are surrounded by H and C\n for aminoacid in self.aminoacids:\n if aminoacid.aminoacid_type == \"H\":\n self.stability = self.stability + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number))\n elif aminoacid.aminoacid_type == \"C\":\n self.stability = self.stability + (-5 * self.surrounded_by(aminoacid.location, \"C\", aminoacid.aminoacid_number)) + (-1 * self.surrounded_by(aminoacid.location, \"H\", aminoacid.aminoacid_number))\n self.stability = self.stability/2\n return int(self.stability)",
"def score_to_rating_string(score):\n if score < 1:\n return 'Terrible'\n elif score < 2:\n return 'Bad'\n elif score < 3:\n return 'OK'\n elif score < 4:\n return 'Good'\n elif score < 5:\n return 'Excellent'\n else:\n return 'Super-excellent'",
"def get_prediction_from_score(score):\n if(score >= 0.03):\n return 'Positive'\n elif(score <= -0.03):\n return 'Negative'\n else:\n return 'Neutral'",
"def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the improved score\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n improved = len(player_legal_moves) - len(opponent_legal_moves)\n if improved != 0:\n return float(improved)\n \n # Second get differences from center\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n player_coordinates = game.get_player_location(player)\n opponent_coordinates = game.get_player_location(opponent)\n player_center_dist = get_distances_from_center(center_coordinates, player_coordinates)\n opponent_center_dist = get_distances_from_center(center_coordinates, opponent_coordinates)\n center_dist_diff = player_center_dist - opponent_center_dist\n \n # Third obtain next_moves\n player_next_moves = [get_next_moves(game, move, list(move)) for move in player_legal_moves]\n opponent_next_moves = [get_next_moves(game, move, list(move)) for move in opponent_legal_moves] \n improved_next = len(player_next_moves) - len(opponent_next_moves)\n \n # Put player and opponent feature differences in a tuple/vector surrogoate\n feature_diff_vector = (improved, center_dist_diff, improved_next)\n \n # Provide a weighting vector for the features of each player-participant\n weight_vector = (1.5,0.1,1.0)\n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(feature_diff_vector, weight_vector))\n \n return float(weighted_difference_dot_product)",
"def heuristics(course, suggestedPlan, user):\n score = course.score\n bonus = 0\n return score + bonus",
"def testMatchSwarpNearestExposure(self):\n self.compareToSwarp(\"nearest\", useWarpExposure=True, atol=60)",
"def lookup_relevant(score):\n category = \"\"\n if score > 2.0:\n category = \"RELEVANT\"\n elif score > 0.0:\n category = \"PARTIALLY RELEVANT\"\n else:\n category = \"NOT RELEVANT\"\n return category",
"def custom_score_3(game, player):\n \"\"\"custom_score_3 heuristic function aims at maximizing win chances of my agent\n \"\"\"\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n length_my_player_moves = 1.0 * len(game.get_legal_moves(player))#Calculate length of available moves for myPlayer\n length_opp_payer_moves = len(game.get_legal_moves(game.get_opponent(player)))#Calculate length of available moves with oppositePlayer\n\n if length_my_player_moves == 0:\n return float(\"-inf\")\n\n if length_opp_payer_moves == 0:\n return float(\"inf\")\n\n return float(length_my_player_moves/length_opp_payer_moves)",
"def reward_threshold(self) -> Optional[float]:",
"def min_value(self, game, depth):\n if self.time_left() < self.TIMER_THRESHOLD: # Timeout check\n raise SearchTimeout()\n\n if game.is_loser(self) or game.is_winner(self) or depth == 0: # Terminal test, checks base cases\n return self.score(game,self) # returns the score, UTILITY of the current state\n legal_moves = game.get_legal_moves() # obtain all legal moves for game, ACTIONs that can be taken\n best_score = math.inf # abstraction assignment of infinity(highest possible value for MIN score)\n for m in legal_moves: # iterate through all available actions\n new_state = game.forecast_move(m) # for each available move, forecast the resulting state from that ACTION\n # RESULT of ACTION\n score = self.max_value(new_state, depth - 1) # recursively uses the new state\n best_score = min(best_score,score) # calculates the minimizing score between the states\n return best_score # propagates minimizing score for given state",
"def get_importance(self, key, value, depth):\n multiplier = 0.8 ** depth if depth > 1 else 1.0\n base = 0.0\n if key in ['condition', 'symptom', 'disease', 'treatment']:\n base += 5\n elif key in ['gender', 'age'] or 'location' in key:\n base += 4\n elif 'condition' in key or 'symptom' in key or 'disease' in key or 'treatment' in key:\n base += 3\n else:\n base += 2\n return multiplier * base"
] | [
"0.5829933",
"0.58176476",
"0.5638452",
"0.56209517",
"0.5589053",
"0.5571006",
"0.55488455",
"0.5503826",
"0.547983",
"0.5463169",
"0.5461306",
"0.5446944",
"0.538736",
"0.5354595",
"0.5354105",
"0.5351673",
"0.5351555",
"0.53312486",
"0.53265476",
"0.52974695",
"0.5292194",
"0.5290691",
"0.52859026",
"0.5285247",
"0.52783936",
"0.5275131",
"0.5265488",
"0.52554953",
"0.5245101",
"0.52433926"
] | 0.683665 | 0 |
Detect change points in a time series using the CUSUM algorithm | def detect_via_cusum_lg(ts, istart=30, threshold_times=5):
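    """Mark change points in a series with a two-sided log-CUSUM test.

    Returns an array the same length as ts: 1 flags an upward change point,
    -1 a downward one, and 0 no change. The first `istart` entries are zero
    because the rolling statistics need that many samples to warm up.
    """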
S_h = 0
S_l = 0
    S_list = np.zeros(istart) # the first istart entries are zero-padded (no detection possible yet)
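    # Rolling baseline: istart-period SMA of the series, and the rolling std
    # of the log-ratio of each value to that SMA (both computed with TA-Lib).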
meanArray = talib.SMA(ts,timeperiod = istart)
stdArray = talib.STDDEV(np.log(ts/meanArray),timeperiod = istart)
    for i in range(istart, len(ts)): # TODO: should the head and tail be trimmed here?
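        # Two-sided CUSUM: S_h accumulates positive drift and S_l negative drift
        # of the log-ratio, each slackened by one rolling std; a change point is
        # flagged (and the sum reset) once threshold_times * std is exceeded.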
tslog = np.log(ts[i] / meanArray[i - 1])
S_h_ = max(0, S_h + tslog - stdArray[i-1])
S_l_ = min(0, S_l + tslog + stdArray[i-1])
if S_h_> threshold_times * stdArray[i-1]:
            S_list = np.append(S_list,1) # upward change point at this index
S_h_ = 0
elif abs(S_l_)> threshold_times * stdArray[i-1]:
            S_list = np.append(S_list, -1) # downward change point at this index
S_l_ = 0
else:
            S_list = np.append(S_list, 0) # no change point at this index
S_h = S_h_
S_l = S_l_
return S_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def CEPSTRUM(y, t):\n dt = t[2] - t[1]\n #Fs = 1.0 / dt\n L = len(y)\n #Y = fft(y, L)\n #amp = np.abs(Y)/(L/2) # FFT single sided spectrum\n #T = L * dt #1/T=Fs/L\n #freq = np.arange(0, Fs / 2, 1 / T) # list frequencies up to Nyquist frequency\n #C=real(ifft(log(abs(fft(y)))))\n C = np.abs(ifft(np.log(np.abs(fft(y))**2)))**2\n NumUniquePts = int(np.ceil((L + 1) / 2))\n C = C[0:NumUniquePts]\n q = np.arange(0, NumUniquePts, 1) * dt\n return q, C",
"def detect(\n t: np.ndarray,\n temp: np.ndarray,\n climatologyPeriod=[None, None],\n pctile=90,\n windowHalfWidth=5,\n smoothPercentile=True,\n smoothPercentileWidth=31,\n minDuration=5,\n joinAcrossGaps=True,\n maxGap=2,\n maxPadLength=False,\n coldSpells=False,\n alternateClimatology: Optional[List] = False,\n):\n mhw = initialise_output()\n T, year, month, day, doy, month_leapYear, day_leapYear, doy_leapYear, feb28, feb29 = generate_time_vectors(\n t\n )\n\n # Calculate threshold and seasonal climatology (varying with day-of-year)\n climatologyPeriod = set_climatology_period(climatologyPeriod)\n\n tempClim, TClim, yearClim, monthClim, dayClim, doyClim = initialise_climatology_arrays(\n alternateClimatology,\n temp,\n T,\n year,\n month,\n day,\n doy,\n month_leapYear,\n day_leapYear,\n doy_leapYear,\n )\n\n # Flip temp time series if detecting cold spells\n if coldSpells:\n temp = -1.0 * temp\n tempClim = -1.0 * tempClim\n\n # Pad missing values for all consecutive missing blocks of length <= maxPadLength\n if maxPadLength:\n temp = pad(temp, maxPadLength=maxPadLength)\n tempClim = pad(tempClim, maxPadLength=maxPadLength)\n\n clim_start, clim_end, thresh_climYear, seas_climYear, clim = initialise_climatology_dict(\n yearClim, climatologyPeriod, lenClimYear, TClim\n )\n\n clim = calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear,\n seas_climYear,\n clim,\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n )\n\n temp = fill_missing_temp_vals_with_climatology(temp, clim)\n\n events, n_events = find_exceedences(temp, clim)\n mhw = find_consecutive_exceedences_above_threshold(\n events, n_events, mhw, joinAcrossGaps, maxGap\n )\n\n mhw = calculate_event_characteristics(mhw, t, clim, temp)\n if coldSpells:\n clim, mhw = flip_for_cold_spells(clim, mhw)\n\n return mhw, clim",
"def test_single_ended_matching_sections_synthetic():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 50\n nx = 200\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, nx)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n ts_ambient = np.ones(nt) * 12\n ts_valid = np.ones(nt) * 16\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask1 = np.logical_and(x > 0.125 * cable_len, x < 0.25 * cable_len)\n cold_mask2 = np.logical_and(x > 0.625 * cable_len, x < 0.75 * cable_len)\n warm_mask1 = np.logical_and(x > 0.75 * cable_len, x < 0.875 * cable_len)\n warm_mask2 = np.logical_and(x > 0.25 * cable_len, x < 0.375 * cable_len)\n valid_mask = np.logical_and(x > 0.40 * cable_len, x < 0.50 * cable_len)\n temp_real = np.ones((len(x), nt)) * 12 + 273.15\n temp_real[cold_mask1 + cold_mask2] = ts_cold + 273.15\n temp_real[warm_mask1 + warm_mask2] = ts_warm + 273.15\n temp_real[valid_mask] = ts_valid + 273.15\n\n st = (\n C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n\n # Add attenuation\n tr_att = np.random.rand(nt) * 0.2 + 0.8\n st[int(x.size * 0.4) :] *= tr_att\n tr_att2 = np.random.rand(nt) * 0.2 + 0.8\n st[int(x.size * 0.6) :] *= tr_att2\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n \"ambient\": ([\"time\"], ts_ambient),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"0\"},\n )\n\n sections = {\n \"cold\": [slice(0.13 * cable_len, 0.24 * cable_len)],\n \"warm\": [slice(0.26 * cable_len, 0.365 * cable_len)],\n }\n\n matching_sections = [\n (\n slice(0.01 * cable_len, 0.09 * cable_len),\n slice(0.51 * cable_len, 0.59 * cable_len),\n True,\n ),\n (\n slice(0.01 * cable_len, 0.09 * cable_len),\n slice(0.91 * cable_len, 0.99 * cable_len),\n True,\n ),\n ]\n\n ds_test = ds.copy(deep=True)\n\n # WLS\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n matching_sections=matching_sections,\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=8)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing gamma + transient att.\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_gamma=(482.6, 0),\n matching_sections=matching_sections,\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing dalpha + transient att.\n ds_test.calibration_single_ended(\n 
sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_dalpha=(6.46e-05, 0),\n matching_sections=matching_sections,\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n ds_test = ds.copy(deep=True)\n\n # Test fixing gamma & dalpha + transient att.\n ds_test.calibration_single_ended(\n sections=sections,\n st_var=1.0,\n ast_var=1.0,\n method=\"wls\",\n fix_gamma=(482.6, 0),\n fix_dalpha=(6.46e-05, 0),\n matching_sections=matching_sections,\n trans_att=[40, 60],\n solver=\"sparse\",\n )\n\n assert_almost_equal_verbose(ds_test.gamma.values, gamma, decimal=10)\n assert_almost_equal_verbose(ds_test.tmpf.values, temp_real - 273.15, decimal=8)\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=0).talpha_fw, -np.log(tr_att), decimal=8\n )\n assert_almost_equal_verbose(\n ds_test.isel(trans_att=1).talpha_fw, -np.log(tr_att2), decimal=8\n )\n\n # Test conf. ints. for the combination of everything\n ds_test.conf_int_single_ended(\n p_val=\"p_val\",\n p_cov=\"p_cov\",\n st_var=1.0,\n ast_var=1.0,\n conf_ints=[2.5, 50.0, 97.5],\n mc_sample_size=50,\n )\n\n ds_test_1 = ds_test.isel(time=-1)\n # ds_test_1.tmpf\n # ds_test_1.tmpf_mc.isel(CI=0).values\n # ds_test_1.tmpf_mc.isel(CI=2).values\n\n assert np.all(\n np.less(ds_test_1.tmpf_mc.isel(CI=0).values, ds_test_1.tmpf)\n ), \"Single-ended, trans. att.; 2.5% confidence interval is incorrect\"\n\n assert np.all(\n np.greater(ds_test_1.tmpf_mc.isel(CI=2).values, ds_test_1.tmpf)\n ), \"Single-ended, trans. att.; 97.5% confidence interval is incorrect\"",
"def periodCheck(data):",
"def costSDT(graph, a):\n hit=0; miss=0; fa=0; cr=0\n check=(graph==a)\n for rnum, r in enumerate(a):\n for cnum, c in enumerate(r[:rnum]):\n if check[rnum,cnum]==True:\n if a[rnum,cnum]==1:\n hit += 1\n else:\n cr += 1\n else:\n if a[rnum,cnum]==1:\n miss += 1\n else:\n fa += 1\n return [hit, miss, fa, cr]",
"def test_continuity():\n dc = cs[:,0:Nr-1]-cs[:,1:Nr]\n assert dc.any < cmax",
"def test_MTCoherenceAnalyzer():\r\n\r\n Fs = np.pi\r\n t = np.arange(256)\r\n x = np.sin(10 * t) + np.random.rand(t.shape[-1])\r\n y = np.sin(10 * t) + np.random.rand(t.shape[-1])\r\n T = ts.TimeSeries(np.vstack([x, y]), sampling_rate=Fs)\r\n n_series = T.shape[0]\r\n NFFT = t.shape[0] // 2 + 1\r\n for adaptive in [True, False]:\r\n C = nta.MTCoherenceAnalyzer(T, adaptive=adaptive)\r\n npt.assert_equal(C.frequencies.shape[0], NFFT)\r\n npt.assert_equal(C.coherence.shape, (n_series, n_series, NFFT))\r\n npt.assert_equal(C.confidence_interval.shape, (n_series, n_series,\r\n NFFT))",
"def spike_count(spikeTime, start, stop, dt):\n\n\n #Spike time turned into a numpy array\n spikeTime = np.array(spikeTime)\n # print('Spike Times: ', spikeTime)\n\n #Creat interval array - intervals in which to break up the time array - sub time interval array\n duration = stop-start #Total run time\n n = duration/dt #How many subintervals from time horizon results from user defined interval\n splitInterval = np.linspace(0, duration, n+1) #create numpy array of subinterval over which to count spikes\n # print ('split interval: ', splitInterval)\n\n ##Find length over which to iterate in for loop\n length_splitInt = len(splitInterval)\n # print('length splitInterval: ', length_splitInt)\n length_time = len(spikeTime)\n # print('length time: ', length_time)\n length = length_splitInt + ((length_time) - 2)\n # print('length :', length)\n\n i=0 #inex for time array\n j=0 #index for splitInterval array.\n k=0 #index for new matrix that will store the grouped values from the split time array\n counter = 0 #counter variable to keep track of spike count for each subinterval through loop\n SpikeCount = [] #Initialize array to collect the number of spikes occuring wihtin each subinterval\n\n for i in range(length):\n if (i == 0) and (spikeTime[0] == splitInterval[0]):\n counter += 1\n i += 1\n\n # Spot check\n # print('if counter: ', counter)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('i: ', i)\n # print('if k: ', k)\n\n if k < (len(spikeTime) - 1):\n k += 1\n\n # Spot check\n # print('iff k: ', k)\n # print('iff counter: ', counter)\n\n else:\n j += 1\n\n # Spot check\n # print('iff counter: ', counter)\n # print(SpikeCount)\n # print('iff j: ', j)\n\n elif (spikeTime[k] > splitInterval[j]) and (spikeTime[k] <= splitInterval[j + 1]):\n counter += 1\n i += 1\n\n # Spot check\n # print('if counter: ', counter)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('i: ', i)\n # print('if k: ', k)\n\n if k < (len(spikeTime) - 1):\n k += 1\n\n # Spot check\n # print('iff k: ', k)\n # print('iff counter: ', counter)\n\n else:\n j += 1\n # Spot check\n SpikeCount.append(counter)\n # print('iff counter: ', counter)\n # print(SpikeCount)\n # print('iff j: ', j)\n\n\n\n else:\n SpikeCount.append(counter)\n counter = 0\n j += 1\n i += 1\n\n # Spot Check\n # print('else counter: ', counter)\n # print(SpikeCount)\n # print('time element: ', spikeTime[k])\n # print('splitInt: ', splitInterval[j], splitInterval[j + 1])\n # print('else j: ', j)\n # print('else i: ', i)\n # print('else k: ', k)\n\n return (SpikeCount, splitInterval)",
"def detect_saccades(x, y, time, missing=0.0, minlen=5, maxvel=40, maxacc=340):\n\n # CONTAINERS\n Ssac = []\n Esac = []\n\n # INTER-SAMPLE MEASURES\n # the distance between samples is the square root of the sum\n # of the squared horizontal and vertical interdistances\n intdist = (numpy.diff(x)**2 + numpy.diff(y)**2)**0.5\n # get inter-sample times\n inttime = numpy.diff(time)\n # recalculate inter-sample times to seconds\n inttime = inttime / 1000.0\n\n # VELOCITY AND ACCELERATION\n # the velocity between samples is the inter-sample distance\n # divided by the inter-sample time\n vel = intdist / inttime\n # the acceleration is the sample-to-sample difference in\n # eye movement velocity\n acc = numpy.diff(vel)\n\n # SACCADE START AND END\n t0i = 0\n stop = False\n while not stop:\n # saccade start (t1) is when the velocity or acceleration\n # surpass threshold, saccade end (t2) is when both return\n # under threshold\n\n # detect saccade starts\n sacstarts = numpy.where(\n (vel[1+t0i:] > maxvel).astype(int) + (acc[t0i:] > maxacc).astype(int) >= 1)[0]\n if len(sacstarts) > 0:\n # timestamp for starting position\n t1i = t0i + sacstarts[0] + 1\n if t1i >= len(time)-1:\n t1i = len(time)-2\n t1 = time[t1i]\n\n # add to saccade starts\n Ssac.append([t1])\n\n # detect saccade endings\n sacends = numpy.where(\n (vel[1+t1i:] < maxvel).astype(int) + (acc[t1i:] < maxacc).astype(int) == 2)[0]\n if len(sacends) > 0:\n # timestamp for ending position\n t2i = sacends[0] + 1 + t1i + 2\n if t2i >= len(time):\n t2i = len(time)-1\n t2 = time[t2i]\n dur = t2 - t1\n\n # ignore saccades that did not last long enough\n if dur >= minlen:\n # add to saccade ends\n Esac.append([t1, t2, dur, x[t1i], y[t1i], x[t2i], y[t2i]])\n else:\n # remove last saccade start on too low duration\n Ssac.pop(-1)\n\n # update t0i\n t0i = 0 + t2i\n else:\n stop = True\n else:\n stop = True\n\n return Ssac, Esac",
"def test_analyze_time_series():\n\n np.random.seed(4)\n random_array = np.random.rand(10)\n\n statistics = analyze_time_series(random_array, minimum_samples=3)\n expected_index, expected_value, _ = detect_equilibration(random_array, fast=False)\n\n assert expected_index == statistics.equilibration_index\n assert np.isclose(statistics.statistical_inefficiency, expected_value)\n assert statistics.n_total_points == 10\n assert 0 < statistics.n_uncorrelated_points <= 10\n assert 0 <= statistics.equilibration_index < 10",
"def _excitonic_coft_all(self,SS,AG):\n \n # SystemBathInteraction\n sbi = AG.get_SystemBathInteraction()\n # CorrelationFunctionMatrix\n cfm = sbi.CC\n \n c0 = AG.monomers[0].get_egcf((0,1))\n Nt = len(c0)\n \n Nst = AG.HamOp.dim\n ct = numpy.zeros((Nst,Nt),dtype=numpy.complex128)\n\n # electronic states corresponding to single excited states\n import time\n timecount = 0\n elst = numpy.where(AG.which_band == 1)[0]\n start = time.time()\n for el1 in elst:\n for el2 in elst:\n coft = cfm.get_coft(el1-1,el2-1)\n start2 = time.time()\n for kk in AG.vibindices[el1]:\n for ll in AG.vibindices[el2]:\n ct[:,:] += numpy.dot(\n numpy.expand_dims((SS[kk,:]**2)*(SS[ll,:]**2),axis=1),\n numpy.expand_dims(coft,axis=0))\n stop2 = time.time()\n timecount += stop2 - start2\n stop = time.time()\n print(stop-start,stop-start - timecount)\n return ct",
"def acc2vel(timeseries, dt):\n return np.cumsum(timeseries) * dt",
"def scanent_time(Utime, state, Udis):\n \n if state.shape[0]!=state.shape[1]:\n print('Error: must input a square density matrix')\n \n nqubit = int(np.log2(state.shape[0] ) )\n scanout = np.zeros((len(Utime), nqubit))\n \n for itp in range(len(Utime) ):\n \n tempstate = csc_matrix((2, 2), dtype=complex)\n tempstate = reduce(np.dot,[Udis, Utime[itp], \n state, \n Utime[itp].getH(), Udis.getH()])\n \n for ix in reversed(range(int(nqubit) ) ):\n #print(ix, np.around(tempstate.todense(),4))\n tempstate = pTraceM(tempstate, [ix+1])\n \n entval = -np.dot(tempstate.todense(), \n sp.linalg.logm(tempstate.todense() ) / np.log(2) ).diagonal().sum()\n scanout[itp, ix] = entval.astype(float)\n \n return scanout",
"def ftcs(_x,_y,_cs,_dx,_dt):\n #todo: in a loop ... think about a more pythonic way to do this\n\n # s = _cs * _dt / (2. * _dx)\n # next_y = np.zeros(np.shape(_y)) #next time step\n # nLen = len(_y)\n # for n in range(nLen):\n # n_next = (n + 1) if n < (nLen-1) else 0\n # n_prev = (n - 1) if n > 0 else nLen-1\n #\n # next_y[n] = _y[n] - s*(_y[n_next] - _y[n_prev])\n #\n # print(n, s, next_y[n], _y[n], _y[n_next], _y[n_prev])\n #\n # next_y = _y[:] - s * (np.append(_y[1:], _y[0]) - np.append(_y[-1], _y[:-1]))\n #\n #\n # return next_y\n\n #this can get out of hand fast (overflow), so will limit the max value\n if np.max(_y) > 1e30:\n _y /= 1e30 #rescale, but keep shape (it is a mess anyway, so there is no real harm)\n\n s = _cs * _dt / (2. * _dx)\n next_y = _y[:] - s * (np.append(_y[1:], _y[0]) - np.append(_y[-1], _y[:-1]))\n\n\n return next_y",
"def signal(date, cs, trange=(0, None), trace_type='dff',\n cutoff_before_lick_ms=-1, error_trials=-1,\n randomizations=500):\n\n # ncells x frames x nstimuli/onsets\n trs = stimulus.trials(date, cs, start_s=trange[0], end_s=trange[1],\n trace_type=trace_type,\n cutoff_before_lick_ms=cutoff_before_lick_ms,\n error_trials=error_trials)\n\n trs = np.nanmean(trs, axis=1)\n ncells = np.shape(trs)[0]\n corrs = np.zeros((ncells, ncells))\n\n # Catch cases when there aren't enough trials\n if np.shape(trs)[1] < 10:\n return corrs\n\n stimorder = np.arange(np.shape(trs)[1])\n if np.sum(np.invert(np.isfinite(trs))) == 0:\n for i in range(randomizations):\n for c in range(ncells):\n np.random.shuffle(stimorder)\n trs[c, :] = trs[c, stimorder]\n\n corrs += np.corrcoef(trs)/float(randomizations)\n else:\n for i in range(randomizations):\n for c in range(ncells):\n np.random.shuffle(stimorder)\n trs[c, :] = trs[c, stimorder]\n\n dftrs = pd.DataFrame(trs.T)\n corrs += dftrs.corr().as_matrix()/float(randomizations)\n\n return corrs",
"def cusum():\n try:\n content = request.get_json()\n data = content[\"data\"]\n for elm in data:\n data[elm] = [data[elm]]\n data = pd.DataFrame.from_dict(content[\"data\"])\n print(\"------- data -------\")\n print(data)\n print(\"------- data -------\")\n fname = inspect.stack()[0][3]\n param = content[\"param\"][fname]\n print(\"------- param -------\")\n print(param)\n print(\"------- param -------\")\n result = pd.DataFrame()\n remove_outliers = param[\"remove_outliers\"]\n training_length = param[\"training_length\"]\n param_db = dict()\n param_db[\"IdMachine\"] = data[\"IdMachine\"].unique()[0]\n print(\"------- param_db -------\")\n print(param_db)\n print(\"------- param_db -------\")\n # get cusum threshold if alredy computed\n df_param = db_manager.get_param(param_db)\n print(\"------- df_param -------\")\n print(df_param)\n print(\"------- df_param -------\")\n # get data stored in database\n df_data = db_manager.get_data(param_db)\n print(\"------- df_data -------\")\n print(df_data)\n print(\"------- df_data -------\")\n # merge old and new data\n df_data = pd.concat([df_data, data], ignore_index=True, sort=False)\n # check if cusum threshold already computed\n if df_param.shape[0] > 0:\n # sot values by timestamp\n df_data.sort_values(by=[\"TimeStamp\"], inplace=True, ascending=True)\n df_data.reset_index(drop=True, inplace=True)\n result = ts.run_cusum(df_data, df_param)\n # delete param in order to delete old values for sp and sn\n db_manager.delete_param(param_db)\n db_manager.store_param(result, param_db)\n db_manager.delete_data(param_db)\n # we don't want to publish these values out from sibyl\n else:\n # check if there is enough data for initialization\n if df_data.shape[0] >= training_length:\n # sort values by timestamp\n df_data.sort_values(by=[\"TimeStamp\"], inplace=True, ascending=True)\n df_data.reset_index(drop=True, inplace=True)\n result = ts.define_cusum(t_s, df_data, remove_outliers)\n # we want to publish these values out from sibyl# we want to publish these values out from sibyl\n result[\"content\"] = \"cusum\"\n # define sp and sn initialization in order to store in database\n cusum = pd.DataFrame(\n [[\"SP\", 0], [\"SN\", 0]], columns=[\"Kind\", \"Value\"]\n )\n cusum[\"IdMachine\"] = param_db[\"IdMachine\"]\n cusum[\"Source\"] = data[\"Source\"].unique()[0]\n cusum[\"IdSig\"] = data[\"IdSig\"].unique()[0]\n cusum = pd.concat(\n [cusum, result], axis=0, ignore_index=True, sort=False\n )\n # store sp and sn values\n db_manager.store_param(cusum, param_db)\n db_manager.delete_data(param_db)\n else:\n db_manager.store_data(data, param_db)\n if result.shape[0] > 0:\n result[\"IdMachine\"] = param_db[\"IdMachine\"]\n result[\"IdSig\"] = data[\"IdSig\"].unique()[0]\n result[\"TimeStamp\"] = df_data.tail(1)[\"TimeStamp\"].values[0]\n result[\"Source\"] = data[\"Source\"].unique()[0]\n return jsonify(result.to_json())\n except:\n raise",
"def test_coherence():\r\n\r\n for method in methods:\r\n f, c = tsa.coherence(tseries, csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0])\r\n npt.assert_array_almost_equal(c[0, 0], np.ones(f.shape))",
"def dataIdentify(self, in_nc):\r\n data_nc = NET.Dataset(in_nc)\r\n time = data_nc.variables['time'][:]\r\n diff = NUM.unique(NUM.diff(time))\r\n data_nc.close()\r\n #time_interval_highres = NUM.array([1.0,3.0,6.0],dtype=float)\r\n #time_interval_lowres_full = NUM.array([3.0, 6.0],dtype=float)\r\n #time_interval_lowres = NUM.array([6.0],dtype=float)\r\n #time_interval_lowres_3Hr = NUM.array([3.0],dtype=float)\r\n\t\t\r\n time_interval_HRES1 = NUM.array([1.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_HRES13 = NUM.array([1.0,3.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_HRES136 = NUM.array([1.0,3.0,6.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_ENS3 = NUM.array([3.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_ENS36 = NUM.array([3.0,6.0],dtype=float) # Line Added/Modified CJB 20190108\r\n time_interval_ENS6 = NUM.array([6.0],dtype=float) # Line Added/Modified CJB 20190108\r\n\r\n\r\n #print \"SDR - diff:\", diff, time_interval_highres, time_interval_lowres_full, time_interval_lowres\r\n #if NUM.array_equal(diff, time_interval_highres):\r\n # return \"HighRes\"\r\n #elif NUM.array_equal(diff, time_interval_lowres_full):\r\n # return \"LowResFull\"\r\n #elif NUM.array_equal(diff, time_interval_lowres):\r\n # return \"LowRes\"\r\n #elif NUM.array_equal(diff, time_interval_lowres_3Hr):\r\n # return \"Low3HrRes\"\r\n #else:\r\n # return None\r\n\t\t\t\r\n if NUM.array_equal(diff, time_interval_HRES1): # Line Added/Modified CJB 20190108\r\n return \"HRES1\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_HRES13): # Line Added/Modified CJB 20190108\r\n return \"HRES13\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_HRES136): # Line Added/Modified CJB 20190108\r\n return \"HRES136\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_ENS3): # Line Added/Modified CJB 20190108\r\n return \"ENS3\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_ENS36): # Line Added/Modified CJB 20190108\r\n return \"ENS36\" # Line Added/Modified CJB 20190108\r\n elif NUM.array_equal(diff, time_interval_ENS6): # Line Added/Modified MJS, CJB 20190108\r\n return \"ENS6\" # Line Added/Modified CJB 20190108\r\n else: # Line Added/Modified CJB 20190108\r\n return None # Line Added/Modified CJB 20190108\r",
"def test_coherence_regularized():\r\n for method in methods:\r\n f, c = tsa.coherence_regularized(tseries, 0.05, 1000,\r\n csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0])",
"def get_continous_time_periods(binary_array):\n binary_array = np.copy(binary_array).astype(\"int8\")\n n_times = len(binary_array)\n d_times = np.diff(binary_array)\n # show the +1 and -1 edges\n pos = np.where(d_times == 1)[0] + 1\n neg = np.where(d_times == -1)[0] + 1\n\n if (pos.size == 0) and (neg.size == 0):\n if len(np.nonzero(binary_array)[0]) > 0:\n return [(0, n_times-1)]\n else:\n return []\n elif pos.size == 0:\n # i.e., starts on an spike, then stops\n return [(0, neg[0])]\n elif neg.size == 0:\n # starts, then ends on a spike.\n return [(pos[0], n_times-1)]\n else:\n if pos[0] > neg[0]:\n # we start with a spike\n pos = np.insert(pos, 0, 0)\n if neg[-1] < pos[-1]:\n # we end with aspike\n neg = np.append(neg, n_times - 1)\n # NOTE: by this time, length(pos)==length(neg), necessarily\n h = np.matrix([pos, neg])\n # print(f\"len(h[1][0]) {len(h[1][0])} h[1][0] {h[1][0]} h.size {h.size}\")\n if np.any(h):\n result = []\n for i in np.arange(h.shape[1]):\n if h[1, i] == n_times-1:\n result.append((h[0, i], h[1, i]))\n else:\n result.append((h[0, i], h[1, i]-1))\n return result\n return []",
"def integrate_continuous_time_stochastic(self, y):\n\n self.compartments = self.convert_list_to_compartments(y)\n for label in self.compartments:\n self.compartments[label] = int(self.compartments[label])\n\n n_compartment = len(y)\n n_time = len(self.target_times)\n self.soln_array = numpy.zeros((n_time, n_compartment))\n\n time = self.target_times[0]\n self.soln_array[0, :] = y\n n_sample = 0\n for i_time, new_time in enumerate(self.target_times):\n\n if i_time == 0:\n continue\n\n while time < new_time:\n self.time = time\n self.calculate_vars()\n self.calculate_events()\n\n if len(self.events) == 0:\n # equilibrium reached, no more changes, so go\n # to end of interval\n dt = new_time - time\n else:\n event_rates = [event[2] for event in self.events]\n i_event = pick_event(event_rates)\n\n total_rate = sum(event_rates)\n dt = old_div(-math.log(random.random()), total_rate)\n\n from_label, to_label, rate = self.events[i_event]\n if from_label and to_label:\n self.compartments[from_label] -= 1\n self.compartments[to_label] += 1\n elif to_label is None:\n # death\n self.compartments[from_label] -= 1\n elif from_label is None:\n # birth\n self.compartments[to_label] += 1\n\n self.checks()\n time += dt\n n_sample += 1\n\n if i_time < n_time:\n y = self.convert_compartments_to_list(self.compartments)\n self.soln_array[i_time, :] = y",
"def test_cfu_cycles(self):\n # Input: (function, in0, in1, cmd_valid, rsp_ready)\n # Output: (result, rsp_valid, cmd_ready)\n X = None\n DATA = [\n # Nothing\n ((0, 0, 0, 0, 0), (X, 0, 1)),\n # Same cycle instruction, CPU not ready\n ((0, 1, 2, 1, 0), (3, 1, 1)),\n ((0, 0, 0, 0, 1), (3, 1, 0)),\n ((0, 0, 0, 0, 0), (X, 0, 1)),\n # Multi-cycle instruction, CPU ready\n ((3, 3, 0, 1, 1), (X, 0, 1)),\n ((0, 0, 0, 0, 1), (X, 0, 0)),\n ((0, 0, 0, 0, 1), (X, 0, 0)),\n ((0, 0, 0, 0, 1), (6, 1, 0)),\n # Same cycle instruction, CPU ready\n ((0, 5, 3, 1, 1), (8, 1, 1)),\n # Multi-cycle instruction, CPU not ready\n ((3, 2, 0, 1, 0), (X, 0, 1)),\n ((0, 0, 0, 0, 0), (X, 0, 0)),\n ((0, 0, 0, 0, 0), (2, 1, 0)),\n ((0, 0, 0, 0, 1), (2, 1, 0)),\n # Multi-cycle instruction, but always ready next cycle\n ((4, 3, 5, 1, 1), (X, 0, 1)),\n ((0, 0, 0, 0, 1), (8, 1, 0)),\n # CPU not ready\n ((4, 3, 4, 1, 0), (X, 0, 1)),\n ((0, 0, 0, 0, 0), (X, 1, 0)),\n ((0, 0, 0, 0, 0), (X, 1, 0)),\n ((0, 0, 0, 0, 1), (7, 1, 0)),\n # Fallback instruction - same cycle, CPU ready\n ((7, 0, 0, 1, 1), (X, 1, 1)),\n ]\n\n def process():\n for n, (inputs, expected_outputs) in enumerate(DATA):\n func, i0, i1, cmd_valid, rsp_ready = inputs\n exp_result, exp_rsp_valid, exp_cmd_ready = expected_outputs\n yield self.dut.cmd_function_id.eq(func)\n yield self.dut.cmd_in0.eq(i0)\n yield self.dut.cmd_in1.eq(i1)\n yield self.dut.cmd_valid.eq(cmd_valid)\n yield self.dut.rsp_ready.eq(rsp_ready)\n yield Delay(0.1)\n if exp_result is not None:\n self.assertEqual((yield self.dut.rsp_out), exp_result)\n if exp_rsp_valid is not None:\n self.assertEqual((yield self.dut.rsp_valid), exp_rsp_valid)\n # We don't currently support returning non-OK responses, so\n # if our response is valid, it must be OK.\n if exp_rsp_valid:\n self.assertTrue((yield self.dut.rsp_ok))\n if exp_cmd_ready is not None:\n self.assertEqual((yield self.dut.cmd_ready), exp_cmd_ready)\n yield\n self.run_sim(process, False)",
"def get_viscous_cfl(discr, eos, dt, cv):\n return dt / get_viscous_timestep(discr, eos=eos, cv=cv)",
"def _analyze_series(self, series):\n # bin series by analysis time\n # only analyze the last bin\n ts = array([si['timestamp'] for si in series])\n ds = diff(ts)\n\n # tolerance_seconds = 60 * 60 * self._bin_hours\n # ds = diff(ts) > tolerance_seconds\n # bounds = where(ds)[0]\n # itemidx = bounds[-1] if bounds else 0\n # series = series[itemidx:]\n\n for ci in self._conditionals:\n ret = self._execute_conditional(ci, series, ds)\n if ret:\n return ret",
"def test_coherency_regularized():\r\n\r\n for method in methods:\r\n f, c = tsa.coherency_regularized(tseries, 0.05, 1000,\r\n csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0].conjugate())",
"def cpgram(ts):\n spectrum = np.fft.fft(ts)\n n = len(ts)\n y = (np.sqrt(spectrum.real**2 + spectrum.imag**2)) ** 2 / n\n if n % 2 == 0:\n n -= 1\n y = y[:n]\n\n freq = np.linspace(0, 0.5, n, endpoint=True)\n crit = 1.358 / (np.sqrt(n) + 0.12 + 0.11 / np.sqrt(n))\n\n return y, freq, crit",
"def check_temporal_coherency(self, tf):\n issues = []\n\n for tf, candles in self._candles.items():\n candles = self._candles.get(tf)\n number = len(candles)\n if candles:\n for i in range(len(candles)-1, max(-1, len(candles)-number-1), -1):\n if candles[i].timestamp - candles[i-1].timestamp != tf:\n logger.error(\"Timestamp inconsistency from %s and %s candles at %s delta=(%s)\" % (i, i-1, candles[i-1].timestamp, candles[i].timestamp - candles[i-1].timestamp))\n issues.append(('ohlc', tf, i, i-1, candles[i-1].timestamp, candles[i].timestamp - candles[i-1].timestamp))\n\n for tf, buy_sells in self._buy_sells.items():\n if buy_sells:\n number = len(buy_sells)\n for i in range(len(buy_sells)-1, max(-1, len(buy_sells)-number-1), -1):\n if buy_sells[i].timestamp - buy_sells[i-1].timestamp != tf:\n logger.error(\"Timestamp inconsistency from %s and %s buy/sell signals at %s delta=(%s)\" % (i, i-1, buy_sells[i-1].timestamp, buy_sells[i].timestamp - buy_sells[i-1].timestamp))\n issues.append(('buysell', tf, i, i-1, candles[i-1].timestamp, buy_sells[i].timestamp - buy_sells[i-1].timestamp))\n\n ticks = self._ticks\n if ticks:\n number = len(ticks)\n for i in range(len(ticks)-1, max(-1, len(ticks)-number-1), -1):\n if ticks[i][0] - ticks[i-1][0] != tf: \n logger.error(\"Timestamp inconsistency from %s and %s ticks at %s delta=(%s)\" % (i, i-1, ticks[i-1][0], ticks[i][0] - ticks[i-1][0]))\n issues.append(('tick', 0, i, i-1, ticks[i-1][0], ticks[i][0] - ticks[i-1][0]))\n \n return issues",
"def _calculate_c_change(self, s, ts):\n cc = self.c_change\n cs = self.c_stock\n criterium = (cs[:,0]==s) & (cs[:,1]==ts)\n nowtarget = numpy.where(criterium)[0]\n criterium = (cs[:,0]==s) & (cs[:,1]==ts-1)\n prevtarget = numpy.where(criterium)[0]\n if len(nowtarget) > 0 and len(prevtarget)>0:\n stepinf = numpy.array([[s, ts, 0., 0., 0., 0., 0., 0., 0., 0.]],\n dtype=numpy.float32)\n self.c_change = numpy.append(cc, stepinf, axis=0)\n self.c_change[-1, 2:] = cs[nowtarget, 2:] - cs[prevtarget, 2:]",
"def ChoppinessIndex(self, timeperiod = 14):\r\n return ta.C",
"def colocationEpochFeats(cur,uid,timestamp):\n\tj=0\n\thour=3600\n\tnearby_dev = []\n\tcloser_dev = []\n\tfor i in range(1,8):\n\t\ths_timestamp = timestamp-86400+(i-1)*hour3\n\t\the_timestamp = timestamp-86400+i*hour3\n\t\t# Determining if start/end time of given hour is in the night\n\t\t# If yes, proceed with feature calculation, if not skip\n\t\ts_epoch = epochCalc(hs_timestamp)\n\t\te_epoch = epochCalc(he_timestamp)\n\n\t\tif s_epoch[0][0]=='night' or e_epoch[0][0]=='night':\n\n\t\t\tcur.execute(\"SELECT time_stamp,mac,level FROM {0} WHERE time_stamp>= {1} AND time_stamp<={2}\"\n\t\t\t\t.format(uid+'bt',hs_timestamp,he_timestamp))\n\n\t\t\trecords = cur.fetchall()\n\t\t\t# In every BT scan all MACs share the same timestamp, thus the number of MACs\n\t\t\t# at each given time reveals the number of nearby devices which we assume \n\t\t\t# is positively correlated with the number of humans around the user.\n\t\t\t# A distinction between nearby and closer-to-user devices is being made\n\t\t\t# based on signal strength threshold\n\t\t\ttimes_near = [item[1] for item in records if item[2]<-80]\n\t\t\tnearby_dev.append( len(set(times_near)))\n\n\t\t\ttimes_closer = [item[1] for item in records if item[2]>=-80]\n\t\t\tcloser_dev.append(len(set(times_closer)))\n\n\tbt_feats = np.hstack((closer_dev,nearby_dev))\n\treturn(bt_feats)"
] | [
"0.60948336",
"0.5735917",
"0.54944384",
"0.54803497",
"0.5476483",
"0.54686457",
"0.5446705",
"0.54417604",
"0.539834",
"0.5380892",
"0.5366723",
"0.5334913",
"0.53329235",
"0.53267103",
"0.52999914",
"0.52974385",
"0.52880305",
"0.5285251",
"0.52793515",
"0.5261482",
"0.52575517",
"0.5253155",
"0.52464837",
"0.52350444",
"0.52302885",
"0.5224713",
"0.52207875",
"0.52059",
"0.52022713",
"0.5198251"
] | 0.6326485 | 0 |
If a Twitter request fails, sleep for 15 minutes. Do this at most max_tries times before quitting. | def robust_request(twitter, resource, params, max_tries=5):
for i in range(max_tries):
request = twitter.request(resource, params)
if request.status_code == 200:
return request
else:
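            # Request failed (most likely rate limited); report it and wait before retrying.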
print('Got error %s \nsleeping for 15 minutes.' % request.text)
sys.stderr.flush()
time.sleep(61 * 15) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def robust_request(twitter, resource, params, max_tries=5):\n \n for i in range(max_tries):\n request = twitter.request(resource, params)\n if request.status_code == 200:\n return request\n else:\n print('Got error %s \\nsleeping for 15 minutes.' % request.text)\n sys.stderr.flush()\n time.sleep(61 * 15)",
"def test_run_max_retries():\n responses = [httpretty.Response(body=\"Internal Server Error\", status=500),\n httpretty.Response(body=\"Internal Server Error\", status=500),\n httpretty.Response(body=\"Internal Server Error\", status=500)]\n httpretty.register_uri(httpretty.GET, URL, responses=responses)\n with mock.patch('httsleep.main.sleep'):\n httsleep = HttSleep(URL, {'status_code': 200}, max_retries=2)\n with pytest.raises(StopIteration):\n httsleep.run()",
"def set_retry_timeout(self, retry_timeout):",
"def test_sleep_request(self):\n date = datetime.now() - timedelta(minutes=14)\n RequestAPI.objects.create(total_request=450, date=date)\n start = time.time()\n ManagerRequestApiTwitter().handle_rate_limit()\n stop = time.time()\n total_time = stop - start\n self.assertGreater(total_time, 60)",
"def make_twitter_request(twitter_api_func, max_errors=10, *args, **kw):\n\n def handle_twitter_http_error(e, wait_period=2, sleep_when_rate_limited=True):\n if wait_period > 3600: # Seconds\n print >> sys.stderr, 'Too many retries. Quitting.'\n raise e\n # See https://dev.twitter.com/docs/error-codes-responses for common codes\n if e.e.code == 401:\n print >> sys.stderr, 'Encountered 401 Error (Not Authorized)'\n return None\n elif e.e.code == 404:\n print >> sys.stderr, 'Encountered 404 Error (Not Found)'\n return None\n elif e.e.code == 429:\n print >> sys.stderr, 'Encountered 429 Error (Rate Limit Exceeded)'\n if sleep_when_rate_limited:\n print >> sys.stderr, \"Retrying in 15 minutes...ZzZ...\"\n sys.stderr.flush()\n time.sleep(60 * 15 + 5)\n print >> sys.stderr, '...ZzZ...Awake now and trying again.'\n return 2\n else:\n raise e # Caller must handle the rate limiting issue\n elif e.e.code in (500, 502, 503, 504):\n print >> sys.stderr, 'Encountered %i Error. Retrying in %i seconds' % \\\n (e.e.code, wait_period)\n time.sleep(wait_period)\n wait_period *= 1.5\n return wait_period\n else:\n raise e\n # End of nested helper function\n\n wait_period = 2\n error_count = 0\n while True:\n try:\n return twitter_api_func(*args, **kw)\n except twitter.api.TwitterHTTPError, e:\n error_count = 0\n wait_period = handle_twitter_http_error(e, wait_period)\n if wait_period is None:\n return\n except URLError, e:\n error_count += 1\n print >> sys.stderr, \"URLError encountered. Continuing.\"\n if error_count > max_errors:\n print >> sys.stderr, \"Too many consecutive errors...bailing out.\"\n raise\n except BadStatusLine, e:\n error_count += 1\n print >> sys.stderr, \"BadStatusLine encountered. Continuing.\"\n if error_count > max_errors:\n print >> sys.stderr, \"Too many consecutive errors...bailing out.\"\n raise",
"def retry(self, times):\n return Retry((requests.ConnectionError, requests.Timeout), times)",
"def functionThatWillTimeOut():\n time.sleep(5)",
"def test_exp_backoff():\n stream = ReconnectingTweetStream('user', 'pass', initial_wait=1, max_wait=5,\n error_cb=error_callback)\n # A connection failure should happen automatically because of patch\n assert_raises(ConnectionError, stream.next)\n # By now, callback should have been invoked 3 times (1s, 2s, 4s)\n assert callback_invoked == 3",
"async def _retry_get(url: str, retries: int, **kwargs):\r\n retries -= 1\r\n if retries >= 0:\r\n logger.warning(\r\n f\"Retrying request to {url}. Retries remaining: {retries}\")\r\n return await asyncio.create_task(\r\n self.get(url, retries, **kwargs))\r\n logger.error(\r\n f\"Max retries exceeded: {url}. URL can not be navigated.\")",
"def test429Error300request(self):\n \"\"\" In this case the search() method send more than one rewuest per second, so twitter get 429 error. \"\"\"\n \"\"\" In this case we wait for 2 second before resend the request \"\"\"\n \"\"\" WARNING: TIME EXPENSIVE TEST: 20-25min needed \"\"\"\n wk = os.path.dirname(os.path.abspath(__file__))\n f = os.path.join(wk, \"search_tweets.config\")\n twitter_research = SearchTweets(self.db, f)\n with patch.object(twitter_research, '_SearchTweets__twitter_n_results',\n new_callable=PropertyMock(return_value=10)):\n with patch.object(twitter_research, '_SearchTweets__multi_user',\n new_callable=PropertyMock(return_value=False)):\n with patch.object(twitter_research, '_SearchTweets__twitter_users',\n new_callable=PropertyMock(return_value=[])):\n with patch.object(twitter_research, '_SearchTweets__twitter_keyword',\n new_callable=PropertyMock(return_value=\"Eurovision\")):\n with patch.object(twitter_research, '_SearchTweets__save'):\n logging.getLogger('SEARCH').propagate = False\n with self.assertLogs('SEARCH', level='INFO') as cm:\n for i in (tqdm(range(0, 301), desc=\"NUMBER OF REQUEST\", leave=True)):\n twitter_research.search()\n time.sleep(0.3)\n self.assertTrue('INFO:SEARCH:RATE LIMITS REACHED: WAITING' in cm.output)\n self.assertEqual(twitter_research.total_result, 3010)",
"def retry(func, *args, **kwargs):\n\n # config\n backoff = 1. + random.random() * 0.1\n max_backoff = 32\n max_retries = 5\n\n # try to make the request\n for i in range(max_retries):\n try:\n # return on success\n return func(*args, **kwargs)\n except Exception:\n # sleep on failure\n time.sleep(backoff)\n backoff = 2 * backoff if backoff < max_backoff else backoff\n \n # max retries exceeded\n raise RuntimeError('The connection to the server timed out.')",
"def retry(num=5):\n s = requests.Session()\n retries = Retry(total=num, backoff_factor=0.1,\n status_forcelist=[500, 502, 503, 504])\n s.mount('http://', HTTPAdapter(max_retries=retries))\n\n return s",
"def process_request(t):\n time.sleep(t)",
"def process_request(t):\n time.sleep(t)",
"def process_request(t):\n time.sleep(t)",
"def ping(self, times=10):\n logging.debug(\"checking for rate limit info\")\n url = \"https://api.twitter.com/1.1/application/rate_limit_status.json?resources=search\"\n response = self.client.get(url)\n result = response.json()\n\n # look for limits in the json or the http headers, which can\n # happen when we are rate limited from checking the rate limits :)\n\n if \"resources\" in result:\n self.reset = int(result[\"resources\"][\"search\"][\"/search/tweets\"][\"reset\"])\n self.remaining = int(result[\"resources\"][\"search\"][\"/search/tweets\"][\"remaining\"])\n elif 'x-rate-limit-reset' in response.headers:\n self.reset = int(response.headers[\"x-rate-limit-reset\"])\n self.remaining = int(response.headers[\"x-rate-limit-remaining\"])\n else:\n logging.error(\"missing x-rate-limit-reset in headers: %s\", response.headers)\n if times == 0:\n logging.error(\"ping isn't working :(\")\n raise Exception(\"unable to ping\")\n else:\n times -= 1\n time.sleep(1)\n logging.info(\"trying to ping again: %s\", times)\n return self.ping(times)\n\n logging.info(\"new rate limit remaining=%s and reset=%s\",\n self.remaining, self.reset)",
"def default_backoff(retries, max_retries):\n\n time.sleep(random.random() * (max_retries - retries) / max_retries * 2)",
"def sleep_for(self):\n return max(0, (self._retry_after - datetime.now()).total_seconds())",
"async def _sleep_backoff(\n self, settings: Dict[str, Any], transport: AsyncHttpTransport[HTTPRequestType, AsyncHTTPResponseType]\n ) -> None:\n backoff = self.get_backoff_time(settings)\n if backoff <= 0:\n return\n await transport.sleep(backoff)",
"def timeout(n):\n time.sleep(int(n))\n return 'ok', 200",
"def request_until_success(url, max_attempts=5, wait=5,\r\n fail_max=True):\r\n req = urllib.request.Request(url)\r\n success = False\r\n num_tries = 0\r\n while not success:\r\n try:\r\n num_tries += 1\r\n response = urllib.request.urlopen(req)\r\n success = response.getcode() == 200\r\n except urllib.request.HTTPError as e:\r\n logger.error('e')\r\n logger.error('Error url: {}'.format(e))\r\n if e.code == 500 and num_tries < max_attempts:\r\n logger.info('trying again soon')\r\n time.sleep(wait)\r\n else:\r\n logger.error(e.reason)\r\n raise e\r\n\r\n if not success:\r\n raise Error('Failed to make request')\r\n return json.loads(response.read().decode('utf-8'))",
"def test429Error1Second(self):\n \"\"\" In this case the search() method send more than one rewuest per second, so twitter get 429 error. \"\"\"\n \"\"\" In this case we wait for 2 second before resend the request \"\"\"\n wk = os.path.dirname(os.path.abspath(__file__))\n f = os.path.join(wk, \"search_tweets.config\")\n twitter_research = SearchTweets(self.db, f)\n with patch.object(twitter_research, '_SearchTweets__twitter_n_results',\n new_callable=PropertyMock(return_value=20)):\n with patch.object(twitter_research, '_SearchTweets__multi_user',\n new_callable=PropertyMock(return_value=False)):\n with patch.object(twitter_research, '_SearchTweets__twitter_users',\n new_callable=PropertyMock(return_value=[])):\n with patch.object(twitter_research, '_SearchTweets__twitter_keyword',\n new_callable=PropertyMock(return_value=\"Eurovision\")):\n with patch.object(twitter_research, '_SearchTweets__save'):\n twitter_research.__save = MagicMock()\n for i in range(0, 3):\n twitter_research.search()\n\n self.assertEqual(twitter_research.total_result, 60)",
"def _retry_request(self, request, timeout=2, attempts=3):\n import googleapiclient\n\n try:\n return request.execute()\n except BrokenPipeError as ex:\n if attempts > 0:\n time.sleep(timeout)\n return self._retry_request(request, timeout * 2, attempts - 1)\n raise ex\n except googleapiclient.errors.HttpError as ex:\n log_verbose_traceback(ex)\n raise ex\n except Exception as ex:\n log_verbose_traceback(ex)\n raise ex",
"def sleeper5():\n print \"Threaded sleeper of 5 seconds\"\n sleep(5)",
"def try_get(url, ntries=3, delay=10, verbose = False):\r\n attempts = ntries\r\n\r\n while attempts>0:\r\n attempts_try = attempts\r\n if verbose:\r\n print(\"%d attemps left, trying %s\" % (attempts, url))\r\n try:\r\n result = requests.get(url)\r\n code = \"Ok\"\r\n attempts = 0\r\n result.raise_for_status()\r\n except requests.exceptions.HTTPError as errh:\r\n attempts = 0\r\n code = \"Error\"\r\n result = \"Http Error: %s\" % errh\r\n if verbose:\r\n print (result)\r\n except requests.exceptions.ConnectionError as errc:\r\n attempts = attempts_try - 1\r\n code = \"Error\"\r\n result = \"Error Connecting: %s\" % errc\r\n if verbose:\r\n print (result)\r\n sleep(delay)\r\n except requests.exceptions.Timeout as errt:\r\n attempts = attempts_try - 1\r\n code = \"Error\"\r\n result = \"Timeout Error: %s\" % errt\r\n if verbose:\r\n print (result)\r\n sleep(delay)\r\n except requests.exceptions.RequestException as err:\r\n attempts = 0\r\n code = \"Error\"\r\n if verbose:\r\n print (result)\r\n result = \"Oops: Something Else %s\" % err\r\n if verbose:\r\n print(\" \",code, result if code==\"Error\" else \"\")\r\n return {'code': code, 'result': result}",
"def retry_multi(max_retries=5):\n\n def retry(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n num_retries = 0\n ret = None\n while num_retries <= max_retries:\n try:\n ret = func(*args, **kwargs)\n break\n except Exception as e:\n logger.exception(e)\n if num_retries == max_retries:\n raise\n num_retries += 1\n time.sleep(5)\n return ret\n\n return wrapper\n\n return retry",
"def retry(times):\n return repeat_with_success_at_least(times, 1)",
"def retry(callback, retries, sleep=0.5, catch=Exception, *args, **kwargs):\n r = 0\n while r < retries:\n r += 1\n try:\n return callback(*args, **kwargs)\n except catch as c:\n if r == retries:\n raise c\n else:\n time.sleep(r * sleep)",
"def url_socket_retry(func, *args, **kw):\n min_delay = 1\n max_delay = 32\n max_attempts = 4\n\n for idx, delay in enumerate(\n backoff_delays(min_delay, max_delay, jitter=True)):\n try:\n return func(*args, **kw)\n except HTTPError as err:\n if not (err.status == 503 and 'Slow Down' in err.reason):\n raise\n if idx == max_attempts - 1:\n raise\n except URLError as err:\n if not isinstance(err.reason, socket.error):\n raise\n if err.reason.errno not in (104, 110):\n raise\n if idx == max_attempts - 1:\n raise\n\n time.sleep(delay)",
"def wait_for_url(url: str, timeout=120) -> None:\n for _ in range(timeout):\n try:\n requests.get(url)\n return\n except requests.exceptions.ConnectionError:\n sleep(1)\n raise TimeoutError(f\"No response from APP in {timeout} seconds\")"
] | [
"0.76129365",
"0.64839786",
"0.63807046",
"0.6314336",
"0.62921405",
"0.62880915",
"0.61898947",
"0.6158489",
"0.6138314",
"0.6099748",
"0.6068707",
"0.6052159",
"0.59786904",
"0.59786904",
"0.59786904",
"0.597821",
"0.59454453",
"0.5937674",
"0.58483416",
"0.58388543",
"0.5835946",
"0.5832751",
"0.5825266",
"0.5813316",
"0.5795857",
"0.57916635",
"0.5790303",
"0.57896334",
"0.5786414",
"0.5776292"
] | 0.76959914 | 0 |
Retrieve the Twitter user objects for each screen_name. | def get_users(twitter, screen_names):
    # Look up each screen name via the users/lookup endpoint (count capped at 200),
    # using robust_request so failed calls are retried; each response is a
    # one-element list, so .json()[0] picks out the single user object.
requests = [robust_request(twitter,'users/lookup',{'screen_name':screen_name, 'count':200}).json()[0] for screen_name in screen_names]
#for request in requests:
# print(request)
return requests | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_users(twitter, screen_names):\n request = robust_request(twitter, 'users/lookup', {'screen_name': screen_names}, max_tries=5)\n user_info = []\n for user in request:\n \tuser_info.append(user)\n return user_info",
"def get_data_user(twitter, screen_names):\n\n data_user = []\n for name in screen_names:\n request = robust_request(twitter, 'users/lookup', {'screen_name': name}, max_tries=5)\n user = [val for val in request]\n friends = []\n request = robust_request(twitter, 'friends/ids', {'screen_name': name, 'count': 5000}, max_tries=5)\n friends = sorted([str(val) for val in request])\n fr = {'screen_name': user[0]['screen_name'],\n 'id': str(user[0]['id']),\n 'friends_id': friends}\n data_user.append(fr)\n \n return data_user",
"def getTwitterUsers(users,credentials=False):\n userList = ','.join(users)\n chain(twitterCall.s('lookup_user',{'screen_name':userList},credentials), pushTwitterUsers.s())()",
"def get_tweets_from_username(api, screen_name):\n\n # initialize a list to hold all the Tweets\n alltweets = []\n output = []\n\n # make initial request for most recent tweets\n # (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # save the id of the oldest tweet less one to avoid duplication\n oldest = alltweets[-1].id - 1\n\n # keep grabbing tweets until there are no tweets left\n while len(new_tweets) > 0:\n print(\"Getting tweets before %s\" % (oldest))\n\n # all subsequent requests use the max_id param to prevent\n # duplicates\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n print(\"... %s tweets downloaded so far\" % (len(alltweets)))\n\n # transform the tweepy tweets into a 2D array that will\n for tweet in alltweets:\n output.append([tweet.id_str,\n tweet.created_at,\n tweet.full_text,\n tweet.in_reply_to_screen_name,\n tweet.user.name,\n tweet.user.location,\n tweet.user.followers_count,\n tweet.user.friends_count,\n tweet.geo,\n tweet.coordinates,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.lang,\n tweet.retweeted])\n\n # Convert to dataframe\n df = pd.DataFrame.from_records(output, columns=[\"id_str\",\n \"created_at\",\n \"full_text\",\n \"in_reply_to_screen_name\",\n \"user_name\",\n \"user_location\",\n \"user_followers_count\",\n \"user_friends_count\",\n \"geo\",\n \"coordinates\",\n \"retweet_count\",\n \"favorite_count\",\n \"lang\",\n \"retweeted\"])\n return df",
"def getUsersLookup(self, **kwargs):\n screen_name = handleShouldBeList(kwargs.get('screen_name', None))\n user_id = handleShouldBeList(kwargs.get('user_id', None))\n\n params = {\n 'include_entities': kwargs.get('include_entities', None),\n 'tweet_mode': kwargs.get('tweet_mode', None)\n }\n\n if screen_name:\n params['screen_name'] = ','.join(screen_name)\n\n if user_id:\n params['user_id'] = ','.join(str(uid) for uid in user_id)\n \n query = createQuery(params)\n uri = self.api_url + '/users/lookup.json'\n\n response = self.session.post(uri + query).json()\n return response",
"def get_user(screen_name):\r\n # Get all tweets, sorted from newest to oldest\r\n user_tweets = list(mongo_coll_tweets.find({'user.screen_name':\r\n screen_name[1:]}).sort([('id', -1)]))\r\n user = {}\r\n\r\n try:\r\n download_media_file('user-profile-img',\r\n user_tweets[0]['user']['profile_image_url_https'],\r\n 'user-{}-current.jpg'.format(screen_name[1:]),\r\n 'jpg',\r\n 'dashboard/static/img/users')\r\n user[\"profile_picture\"] = True\r\n except:\r\n # The user probably does not have any profile picture\r\n user[\"profile_picture\"] = False\r\n\r\n user[\"screen_name\"] = screen_name[1:]\r\n user[\"number_of_tweets\"] = len(user_tweets)\r\n\r\n if user[\"number_of_tweets\"] > 0:\r\n try:\r\n user[\"last_tweet\"] = user_tweets[0]['text']\r\n except KeyError:\r\n user[\"last_tweet\"] = user_tweets[0]['full_text']\r\n user[\"last_tweet_date\"] = user_tweets[0]['created_at']\r\n else:\r\n user[\"last_tweet\"] = 'N/A'\r\n user[\"last_tweet_date\"] = 'N/A'\r\n\r\n return(user)",
"def get_user_tweets(api, screen_name, output_path):\n logger = logging.getLogger(__name__)\n logger.info('Pulling tweets')\n\n # Create empty list for tweet objects\n tweets = []\n # Pulls users must recent 200 tweets\n new_tweets = api.user_timeline(screen_name=screen_name, count=200)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n # Continues to pull tweets 200 at a time until limit is hit\n while len(new_tweets) > 0:\n new_tweets = api.user_timeline(screen_name=screen_name,\n count=200, max_id=oldest)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n logger.info(\"...%s tweets downloaded and cleaned\" % (len(tweets)))\n\n # Write all text of tweets to a file\n filename = screen_name + '.csv'\n file = open(join(output_path, filename), 'w')\n\n # Iterates through all tweets and cleans them before outputting\n for tweet in tweets:\n clean_tweet = clean_string(tweet.text)\n line = screen_name + ', ' + clean_tweet + '\\n'\n file.write(line)\n logger.info(\"Done pulling tweets for %s\" % screen_name)\n file.close()",
"def get_friends(twitter, screen_name):\n request = robust_request(twitter, 'friends/ids', {'screen_name': screen_name}, max_tries=5)\n friend_list = []\n for r in request:\n friend_list.append(r)\n return sorted(friend_list)",
"def get_tweets(twitter, screen_name, num_tweets):\n\n request = robust_request(twitter, 'search/tweets', {'q': screen_name, 'count': num_tweets})\n tweets = [a['text'] for a in request]\n\n return tweets",
"def userTweets(username):\n api = twitter.Api()\n user_tweets = api.GetUserTimeline(username)\n for tweet in user_tweets:\n util.safe_print(tweet.GetText())",
"def get_all_tweets(screen_name: object):\r\n temptweets = []\r\n alltweets = []\r\n new_tweets = api.user_timeline(screen_name=screen_name, count=199)\r\n alltweets.extend(new_tweets)\r\n print(alltweets[1].id)\r\n oldest = alltweets[-1].id - 1\r\n while 0 < len(new_tweets) < 200:\r\n new_tweets = tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=199, max_id=oldest).items(1500)\r\n alltweets.extend(new_tweets)\r\n for tweet in alltweets:\r\n if (not tweet.retweeted) and ('RT @' not in tweet.text):\r\n temptweets.append(tweet)\r\n oldest = alltweets[-1].id - 1\r\n print(\"Total tweets downloaded from %s are %s\" % (screen_name, len(temptweets)))\r\n return temptweets",
"def get_friends(twitter, screen_name):\n ###TODO-- Completed\n\n #Requesting twitter with query to get friends of all the screen_name(s) passed as a parameter\n friends_ids = robust_request(twitter,'friends/ids',{'screen_name':screen_name}).json()\n\n #Returns a dictionary having several values, selecting the one which has KEY: ids,\n # i.e. get ids of all the friends in a sorted manner\n return sorted(friends_ids['ids'])",
"def fetch_tweets(self, screen_name, count):\n return {}",
"def get_tweets(username, amount):\n tweets = []\n twitter = Twython()\n\n finished = False\n page = 1\n while not finished:\n\n if amount <= 200:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count=str(amount))\n finished = True\n\n else:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count='200')\n amount -= 200\n page += 1\n\n if isinstance(search_results, dict) and search_results['error']:\n raise TwitterAPIException(str(search_results['error']))\n elif not search_results:\n raise TwitterAPIException('User has no tweets.')\n\n for result in search_results:\n tweets.append(result['text']) \n\n return tweets",
"def get_all_users():",
"def get_user_timeline(self, username,\n count=settings.TWITTER_DEFAULT_LIMIT):\n url = urljoin(self.base_url, \"/statuses/user_timeline.json\")\n response = self.session.get(\n url,\n params={\n \"screen_name\": username,\n \"count\": count,\n # \"include_entities\": True\n },\n auth=self.__auth,\n )\n data = response.json()\n if response.ok:\n data = [Tweet(tweet_data) for tweet_data in data]\n else:\n if 'error' in data:\n raise TwitterException(data['error'], code=response.status_code)\n elif 'errors' in data:\n error = data['errors'][0]\n raise TwitterException(error['message'], code=response.status_code)\n return data",
"def get_users(self):\n users = []\n page = 1\n while not len(users) % 100:\n users += self._get('/users?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not users:\n break\n page += 1\n return users",
"def getTweetsByUser(username, maxTweets=1000):\n if 'api' not in globals():\n startTwitterApi(getApiKeys(fileName=\"apiConf2.txt\"))\n myTweets=[]\n if words:\n apiRes = tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items()\n for tweet in apiRes:\n if any(containsWord(tweet._json['full_text'],word) for word in words):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n if sortBy=='newest':\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items(maxTweets):\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n else:\n for tweet in tweepy.Cursor(api.user_timeline,screen_name=username, count=100, tweet_mode='extended', include_rts=not removeRetweets).items():\n tweet._json['text']=tweet._json['full_text']\n del (tweet._json['full_text'])\n myTweets.append(tweet._json)\n\n return getTopNTweets(myTweets, maxTweets)",
"def list_user_tweets(username):\n userdata = query_db('select * from user where username = ?',\n [username], one=True)\n if userdata is None:\n abort(404)\n else:\n user_details = {\"username\": userdata['username'],\"user_id\":userdata['user_id']}\n\n followed = False\n if request.json.get('user_id') is not None:\n followed = query_db('''select 1 from follower where\n follower.who_id = ? and follower.whom_id = ?''',\n [request.json.get('user_id'), user_details.get('user_id')],\n one=True) is not None\n\n user_tweets = []\n if user_details is None:\n return jsonify({'message': 'User not found'}), 404\n tuples = query_db('''\n select message.*, user.* from message, user where\n user.user_id = message.author_id and user.user_id = ?\n order by message.pub_date desc limit ?''',\n [user_details['user_id'], PER_PAGE])\n\n for tuple in tuples:\n user_tweet = {}\n user_tweet[\"username\"] = tuple['username']\n user_tweet[\"email\"] = tuple['email']\n user_tweet[\"text\"] = tuple['text']\n user_tweet[\"pub_date\"] = tuple['pub_date']\n user_tweets.append(user_tweet)\n\n return jsonify({'user_tweets':user_tweets, 'followed' : followed, 'user_details':user_details}),200",
"def get_users(self):\n fields = ['name', ]\n return self.get_data(\"myUsers\", fields)",
"def search_users(self):\n for name in self.userlist:\n dict_json = {}\n x = 0\n users = api.GetUsersSearch(term=name)\n for user in users:\n id_number = \"ID=\" + str(user.id)\n screen_name = \"ScreenName=\" + user.screen_name\n json_str = json.JSONEncoder().encode({\"User\": [id_number, screen_name]})\n dict_json[x] = json_str\n x += 1\n with open(\"Different\" + name + \".json\", \"w\") as outfile:\n json.dump(dict_json, outfile)\n outfile.close()",
"def extract_mentioned_screen_names(self, transform = lambda x: x):\r\n return [transform(mention['screen_name']) for mention in self.extract_mentioned_screen_names_with_indices()]",
"def fetch_usernames(self, users):\n user_list = []\n for user in users:\n user_list.append(user.username)\n return user_list",
"def users(self, site = None):\r\n uids = self.user_ids()\r\n if uids:\r\n users = Account._byID(uids, True, return_dict = False)\r\n return [self.ajax_user(u) for u in users]\r\n else:\r\n return ()",
"def get_all_tweets(user, alltweets):\n\n #TODO check that user is a valid screen name??\n\n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(user, count=200)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n #print alltweets[0].text\n\n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n\n #print \"starting loop\"\n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n\n #all subsiquent requests starting with oldest\n new_tweets = api.user_timeline(user, count=200, max_id=oldest)\n\n #save most recent tweets\n alltweets.extend(new_tweets)\n\n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1",
"def user_checkins(screen_name):\n cks = db.checkin\\\n .find({'user.screen_name': screen_name})\\\n .sort('created_at', -1)\\\n .limit(1200)\n return [strip_checkin(c) for c in cks]",
"def twitter(self):\n\n q = \" OR \".join(self.search_terms) + \" -filter:retweets\"\n results = self.__api.search(q=q, lang='en', count=100)\n\n tweets = []\n\n for res in results:\n\n publishedAt = datetime.strptime(res._json['created_at'], '%a %b %d %H:%M:%S +0000 %Y').strftime(\"%Y-%m-%d\")\n\n if (res._json['in_reply_to_screen_name'] == None and publishedAt == datetime.now().strftime(\"%Y-%m-%d\")):\n tweets.append([res._json['id'],\n res._json['text'],\n res._json['user']['screen_name'],\n publishedAt,\n res._json['user']['followers_count']])\n\n self.list = pd.DataFrame(tweets, columns=['id', 'title', 'user', 'publishedAt', 'followers_count']).nlargest(10,\n 'followers_count')\n\n return",
"def load_users(self):\n for user_type in self.user_types:\n url_string = \"%s_url\" % user_type\n try:\n url = self.lookup(url_string)\n users = self._fetcher.get_entities(url)\n except AttributeError as ate:\n logger.err(str(ate))\n continue\n user_list = []\n for user in users:\n if 'username' in user:\n user_list.append({'name': user['username']})\n if len(user_list) > 0:\n setattr(self, user_type, user_list)",
"def __list_all_users(self):\n\n usernames_dict = get_data.get_usernames_dict()\n if len(usernames_dict) > 0:\n first_str = 'user'\n second_str = 'posts scraped'\n descriptor = '{:<40} {}'\n print('')\n print(descriptor.format(first_str, second_str))\n print(descriptor.format(len(first_str) * '-', len(second_str) * '-'))\n for number, username in usernames_dict.items():\n space_str = ' ' if len(str(number)) > 1 else ' '\n first = '[' + space_str + str(number) + '] ' + username\n second = str(get_data.get_user_post_count(username))\n print(descriptor.format(first, second))\n else:\n print('no users found in the database')",
"def fetch_all_users():\n url = \"{}/workspace/{}/users\".format(V1_API_URL, WORKSPACE_ID)\n responses = requests.get(url, headers=HEADERS)\n return [\n {\n \"acronym\": user[\"name\"].lower(),\n \"clockify_id\": user[\"id\"],\n \"email\": user[\"email\"].lower(),\n }\n for user in responses.json()\n ]"
] | [
"0.8145805",
"0.7709179",
"0.7126518",
"0.6822808",
"0.67804223",
"0.6779172",
"0.66131747",
"0.65748686",
"0.65124315",
"0.6483101",
"0.64769137",
"0.63471603",
"0.6294934",
"0.6165395",
"0.61579555",
"0.6129354",
"0.6095629",
"0.60620123",
"0.60600054",
"0.6058718",
"0.6029069",
"0.60215765",
"0.59856325",
"0.5970846",
"0.59445906",
"0.59401387",
"0.59105647",
"0.5901742",
"0.5884459",
"0.58808184"
] | 0.78942174 | 1 |
Print the number of friends per candidate, sorted by candidate name. See Log.txt for an example. | def print_num_friends(users):
    # Map each candidate's screen_name to the number of friends they follow.
all_friends_dict = {}
for user in users:
all_friends_dict[user['screen_name']] = len(user['friends'])
for candidate in sorted(all_friends_dict):
print(candidate,all_friends_dict[candidate]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_num_friends(users):\n for u_dict in users:\n print (\"%s %d\" %(u_dict['screen_name'], len(u_dict['friends'])))",
"def count_friends(users):\n all_friends=[]\n for u_dict in users:\n for items in u_dict['friends']:\n all_friends.append(items)\n count = Counter()\n for frnd in all_friends:\n count[frnd]+=1\n return count",
"def total_candidate_mentions(self):\n total_candidate_mentions = self.sentence_data().loc[:, self.candidates].sum(axis = 0, skipna = True)\n total_candidate_mentions = total_candidate_mentions.to_frame(name = 'count').rename_axis('candidate').reset_index()\n \n self._total_candidate_mentions = total_candidate_mentions\n \n return self._total_candidate_mentions",
"def count_friends(users):\n ###TODO-- Completed\n\n #Creating a Counter object, to count the mapping\n c = Counter()\n c.update(friend_id for user in users for friend_id in user['friends'])\n return c",
"def get_friend_with_most_friends(friendships=friendships):\n friend_list = defaultdict(list)\n for i, j in friendships:\n friend1 = users[i]\n friend2 = users[j]\n friend_list[friend1].append(friend2)\n friend_list[friend2].append(friend1)\n user_name = sorted(friend_list, key=lambda friend: len(friend_list[friend]), reverse=True)[0]\n user_friends = friend_list[user_name]\n return user_name, user_friends",
"def print_leader(self):\r\n return \"Best particle found:\\n{0}\".format(\r\n repr(self.population[self.leader]))",
"def summary(origin, neighbors):\n\n for neighbor in neighbors:\n logging.info(\"%s, %s\", origin.distance(neighbor), neighbor)\n\n return",
"def suggest_friend(graph, node_name):\n suggested_friends = [] # This is where all the potential suggested friends (from second degree) are stored\n checked_node = set() # Here are already checked second neighbors, not to repeat the check\n for first_neighbor_name in graph.nodes[node_name].neighbors.keys():\n for second_neighbor_name in graph.nodes[first_neighbor_name].neighbors.keys():\n # I am not checking automatically every neighbor of second degree, it need to meet the folloewing\n # conditions: It cannot be already checked node, it cannot be node_name, and not any of its neighbors\n if all([second_neighbor_name not in checked_node, second_neighbor_name != node_name,\\\n second_neighbor_name not in graph.nodes[node_name].neighbors.keys()]):\n common_friends = 0\n # The following for loop check the common friend of second_neighbor and node_name. It actually\n # chacks if the neighbors of third degree are also the first neighbors (node_name connections)\n for third_neighbors_name in graph.nodes[second_neighbor_name].neighbors.keys():\n if third_neighbors_name in graph.nodes[node_name].neighbors.keys():\n common_friends +=1\n friend_potential = (second_neighbor_name, common_friends) # A summary of the potential of second\n suggested_friends.append(friend_potential)\n checked_node.add(second_neighbor_name)\n sorted_suggested_friends = sorted(suggested_friends, key=lambda tup: tup[1], reverse = True)\n suggest_friend_with_most_common = sorted_suggested_friends[0][0]\n n_common_friends_for_suggested = sorted_suggested_friends[0][1]\n return \"The suggested friend for {0} is {1}, with {2} commmon friends.\".format(node_name, suggest_friend_with_most_common, n_common_friends_for_suggested)",
"def TopCheckins(results, search_term):\n top_five = Counter([row[1] for row in results]).most_common()[:5]\n if search_term == 'total':\n search_term = 'checkin'\n (commander, count) = top_five.pop(0)\n text = 'The %s %s is %s with %s checkins.\\n' % (\n search_term.title(), FoodRank(search_term), commander, count)\n text += 'Other %s fans include: ' % search_term\n if len(top_five) > 1:\n for eater in top_five[:-1]:\n text += '%s with %s, ' % eater\n text += 'and %s with %s.' % top_five[-1]\n elif len(top_five) == 1:\n text += '%s with %s.' % top_five[0]\n else:\n text += 'No one.'\n return text",
"def count_friends(self):\n query = read_query('content exploration/count_friends')\n response = self._submit_query(query)\n return response[0]['count']['value']",
"def number_of_friends(user):\n user_id = user[\"id\"]\n friend_ids = friendships[user_id]\n return len(friend_ids)",
"def get_top_friends(common_friends_list):\n n = 4\n top_n_users = []\n top_scores = sorted(common_friends_list, reverse=True)\n for score in top_scores:\n top_n_users.append(sorted(common_friends_list[score]))\n top_n_users = sum(top_n_users, [])\n return top_n_users[:n]",
"def rank_candidates(table):\n ranking = []\n\n # get list of all candidates who received a vote\n full_list = elim_dupe([name for vote in table for name in vote])\n # print full_list\n \n while len(ranking) < len(full_list):\n \n # All unranked candidates are considered eligible\n eligible = [name for name in full_list if name not in ranking]\n \n while True:\n \n # Remove ineligible and eliminated candidates from votes\n temp_ballots = [[name for name in vote if name in eligible] for vote in table]\n \n # If no candidates on the ballot are eligible and the ballot does not have\n # \"no confidence\" written on it, the ballot is discarded and not considered a vote.\n temp_ballots = [vote for vote in temp_ballots if len(vote) > 0]\n\n total_votes = len(temp_ballots)\n\n if total_votes == 0:\n return ranking\n\n top_choices = [vote[0] for vote in temp_ballots]\n \n # All ballots are considered to be a vote for the\n # highest-ranked eligible candidate on the ballot.\n vote_count = {name: top_choices.count(name) for name in eligible}\n print vote_count\n winner = [k for k in vote_count if (vote_count[k]*2) > total_votes]\n\n if len(winner) > 0:\n # If a single candidate has a majority of the\n # votes, they receive the next highest ranking\n if winner[0] == NO_CONFIDENCE:\n return ranking\n \n ranking += winner\n \n break;\n\n vote_count.pop(NO_CONFIDENCE, None)\n\n # If no single candidate has a majority of the votes,\n # then one will be deemed ineligible.\n\n min_votes = vote_count[min(vote_count, key=vote_count.get)]\n \n least_voted = {k:vote_count[k] for k in vote_count if vote_count[k] == min_votes}\n \n # If a single candidate has the least amount of votes, they become ineligible,\n while len(least_voted) > 1:\n temp_ballots = [vote[1:] for vote in temp_ballots if len(vote[1:]) > 0]\n if len(temp_ballots) == 0:\n return ranking\n next_choices = [vote[0] for vote in temp_ballots if vote[0] in least_voted]\n least_voted = {name: (next_choices.count(name) + least_voted[name]) for name in least_voted}\n min_votes = least_voted[min(least_voted, key=least_voted.get)]\n least_voted = {k: least_voted[k] for k in least_voted if least_voted[k] == min_votes}\n \n remove = least_voted.keys()[0]\n eligible = [name for name in eligible if name != remove]\n\n\n return ranking",
"def printCounterByCount(counter):\n for k, v in counter.most_common():\n print(k, v)",
"def popular_authors() :\n query = \"\"\"SELECT authors.name,count(*) AS total_views FROM authors,articles,log WHERE log.path like concat ('/article/',articles.slug)\n AND articles.author=authors.id group by authors.name order by total_views desc\"\"\"\n result = get_data(query)\n print(\" 2. The most popular articles authors of all time:\")\n print(\"\")\n for record in result :\n print(' ' +' ' + str(record[0]) + ' -' + ' ' + str(record[1]) + ' ' +'views')\n print(\" \")",
"def write_friends_to_file(self):\r\n if self.friends is not None:\r\n friends_list = []\r\n\r\n for friend in self.friends:\r\n if friend.text.strip() != \"\":\r\n friends_list.append(friend.text)\r\n\r\n print(\"\\nFound \" + str(len(friends_list)) + \" Facebook friends.\")\r\n print(\"Writing Facebook friends to output file: \" + self.output_filepath)\r\n\r\n friends_list.sort()\r\n\r\n with open(self.output_filepath, \"w\", encoding=\"utf-8\") as file_handle:\r\n for friend in friends_list:\r\n print(friend, file=file_handle)\r\n\r\n file_handle.close()\r\n print(\"*** Output file written successfully. Safe to exit program. ***\")\r\n \r\n else:\r\n print(\"Unable to write friends to file. 'self.friends' is empty. Exit program and try again.\")",
"async def cgames(self, ctx):\r\n server = ctx.message.server\r\n members = server.members\r\n\r\n freq_list = {}\r\n for member in members:\r\n if member != None and member.game != None and member.game.name != None and not member.bot:\r\n if member.game.name not in freq_list:\r\n freq_list[member.game.name] = 0\r\n freq_list[member.game.name]+=1\r\n\r\n sorted_list = sorted(freq_list.items(), key=operator.itemgetter(1), reverse = True) \r\n\r\n if not freq_list:\r\n await self.bot.say(\"Surprisingly, no one is playing anything.\")\r\n else: \r\n # create display\r\n msg = \"```These are the server's most played games at the moment: \\n\\n\"\r\n msg += \"{:<25s}{:>25s}\\n\".format(\"Game:\", \"# Playing:\")\r\n max_games = min(len(sorted_list), 10)\r\n for i in range(max_games):\r\n game, freq = sorted_list[i]\r\n if len(game) > 25:\r\n trunc_game = game [0:21] + \"...\"\r\n msg+= \"{:<25s}{:>25d}\\n\".format(trunc_game, freq_list[game])\r\n else:\r\n msg+= \"{:<25s}{:>25d}\\n\".format(game, freq_list[game])\r\n msg += \"```\" \r\n await self.bot.say(msg)",
"def print_sorted(self):\n\n for person in sorted(self.people, key=lambda x: x.last):\n print('{:25s}. {:4d}. Aliases: {}'.format(str(person), person.count, person.aliases))",
"def candidate_count(self):\n return self.candidate_set.count()",
"def candidate_count(self):\n return self.candidate_set.count()",
"def view_candidates(self):\n items = ['id', self.filter, 'half_light', 'separation', 'P_c']\n for add_on in ['P_O', 'P_Ox']:\n if add_on in self.candidates.keys():\n items += [add_on]\n print(self.candidates[items])",
"def main():\n twitter = get_twitter()\n screen_names = read_screen_names('candidates.txt')\n print('Established Twitter connection.')\n print('Read screen names: %s' % screen_names)\n users = sorted(get_users(twitter, screen_names), key=lambda x: x['screen_name'])\n print('found %d users with screen_names %s' %\n (len(users), str([u['screen_name'] for u in users])))\n add_all_friends(twitter, users)\n print('Friends per candidate:')\n print_num_friends(users)\n friend_counts = count_friends(users)\n print('Most common friends:\\n%s' % str(friend_counts.most_common(5)))\n print('Friend Overlap:\\n%s' % str(friend_overlap(users)))\n print('User followed by Hillary and Donald: %s' % followed_by_hillary_and_donald(users, twitter))\n\n graph = create_graph(users, friend_counts)\n print('graph has %s nodes and %s edges' % (len(graph.nodes()), len(graph.edges())))\n draw_network(graph, users, 'network.png')\n print('network drawn to network.png')",
"def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count",
"def main():\n twitter = get_twitter()\n screen_names = read_screen_names('candidates.txt')\n print('Established Twitter connection.')\n print('Read screen names: %s' % screen_names)\n users = sorted(get_users(twitter, screen_names), key=lambda x: x['screen_name'])\n print('found %d users with screen_names %s' %\n (len(users), str([u['screen_name'] for u in users])))\n add_all_friends(twitter, users)\n print('Friends per candidate:')\n print_num_friends(users)\n friend_counts = count_friends(users)\n print('Most common friends:\\n%s' % str(friend_counts.most_common(5)))\n print('Friend Overlap:\\n%s' % str(friend_overlap(users)))\n print('User followed by Hillary and Donald: %s' % followed_by_hillary_and_donald(users, twitter))\n graph = create_graph(users, friend_counts)\n print('graph has %s nodes and %s edges' % (len(graph.nodes()), len(graph.edges())))\n draw_network(graph, users, 'network.png')\n print('network drawn to network.png')",
"def main():\n twitter = get_twitter()\n screen_names = read_screen_names('candidates.txt')\n print('Established Twitter connection.')\n print('Read screen names: %s' % screen_names)\n users = sorted(get_users(twitter, screen_names), key=lambda x: x['screen_name'])\n print('found %d users with screen_names %s' %\n (len(users), str([u['screen_name'] for u in users])))\n add_all_friends(twitter, users)\n print('Friends per candidate:')\n print_num_friends(users)\n friend_counts = count_friends(users)\n print('Most common friends:\\n%s' % str(friend_counts.most_common(5)))\n print('Friend Overlap:\\n%s' % str(copy_overlap(users)))\n print('User followed by Hillary and Donald: %s' % str(followed_by_hillary_and_donald(users, twitter)))\n\n graph = create_graph(users, friend_counts)\n print('graph has %s nodes and %s edges' % (len(graph.nodes()), len(graph.edges())))\n draw_network(graph, users, 'network.png')\n print('network drawn to network.png')",
"def printResults(contact_map):\n print(\"----\")\n for participant in contact_map.values():\n print participant.getName()\n print \"Messages: \", participant.getMessageCount()\n print \"Words: \", participant.getWordCount()\n print \"Avg Words: \", participant.avgWords()\n print \"Messages initiaited: \", participant.getFirstMessageCount()\n print \"Hourly count: \", participant.getHourlyMessageCount()\n print \"Daily count: \", participant.getDailyMessageCount()\n print \"Monthly count: \", participant.getMonthlyMessageCount()\n print \"Most common word: \", participant.getMostCommonWord()\n print \"----\"",
"async def people(self, context):\n collection = db['people']\n person_count = []\n count_dict = {}\n for person in collection.find({}, {'_id': 0, 'person': 1}):\n person_count.append(person['person'])\n for person in list(set(person_count)):\n count_dict[person] = person_count.count(person)\n person_print = [f'`{k.capitalize()}: {v}`\\t' for k, v in sorted(count_dict.items())]\n\n await context.send('Current Image Totals:\\n')\n await context.send(''.join(person_print))",
"def count_party_votes(votes):\r\n vote_count = {'Pineapple Pizza Party': 0, 'Pronounced Jiff Union': 0, 'Socks and Crocs Reform League': 0}\r\n for person in votes:\r\n vote_count[votes[person]] += 1\r\n return vote_count",
"def show_card_counts(self, faction_type):\n\n faction_list = mtg.Faction.get_factions(faction_type)\n print(\"{}Total cards in:{}\".format(Style.BRIGHT, Style.RESET_ALL))\n for f in sorted(faction_list):\n print(\"{:12}{}\".format(f, self.card_count(f)))",
"def author_rank():\n db, c = connect(DBNAME)\n c.execute(\"select name, count(name) as views from \\\"authorpath\\\" \"\n \"group by name order by views desc\")\n author_table = c.fetchall()\n db.close()\n print \"\\nMost Popular Article Authors of All Time:\"\n for author in author_table:\n print str(author[0]) + \" - \" + str(author[1]) + \" views\""
] | [
"0.6788219",
"0.59221166",
"0.56460106",
"0.56306136",
"0.5574623",
"0.5539996",
"0.5467627",
"0.54092085",
"0.5366763",
"0.53536737",
"0.53502715",
"0.53304505",
"0.52850896",
"0.5251744",
"0.5247304",
"0.52443355",
"0.52171576",
"0.520803",
"0.5205573",
"0.5205573",
"0.5190928",
"0.5176434",
"0.5161674",
"0.5150945",
"0.51429677",
"0.51391506",
"0.5133959",
"0.51194733",
"0.5117306",
"0.51056814"
] | 0.7238758 | 0 |
Count how often each friend is followed. | def count_friends(users):
###TODO-- Completed
#Creating a Counter object, to count the mapping
c = Counter()
c.update(friend_id for user in users for friend_id in user['friends'])
return c | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count_friends(users):\n all_friends=[]\n for u_dict in users:\n for items in u_dict['friends']:\n all_friends.append(items)\n count = Counter()\n for frnd in all_friends:\n count[frnd]+=1\n return count",
"def count_friends(self):\n query = read_query('content exploration/count_friends')\n response = self._submit_query(query)\n return response[0]['count']['value']",
"def number_of_friends(user):\n user_id = user[\"id\"]\n friend_ids = friendships[user_id]\n return len(friend_ids)",
"def print_num_friends(users):\n for u_dict in users:\n print (\"%s %d\" %(u_dict['screen_name'], len(u_dict['friends'])))",
"async def friend_request_count(self) -> int:\n e = await self.request.request(url=f'https://friends.roblox.com/v1/user/friend-requests/count', method='get',\n )\n return e['count']",
"def count_counts(self):\n count_counts = defaultdict(Counter)\n for token, followers in self._dict.items():\n for f, count in followers.items():\n count_counts[token][count] += 1\n count_counts[token][0] = len(self._dict) - sum(count_counts[token].values())\n return count_counts",
"def follower_followee_count(user_id):\n profile = instaloader.Profile.from_username(context(), user_id)\n return {'follower_cnt': profile.followers,\n 'followee_cnt': profile.followees}",
"def update_insta_follower_count(self):\n\n df = self.Instagram.get_followers_df()\n n_followers = df.shape[0]\n self.GSpread.write_raw_log('INSTAGRAM',\n '',\n 'FOLLOWER_COUNT',\n n_followers)",
"def print_num_friends(users):\n ###TODO-- Completed\n\n #Creating a new dictionary to store the KEY, VALUE pair for friends of every screen_name i.e. user\n # and their counts i.e. number of friends per user\n all_friends_dict = {}\n\n for user in users:\n all_friends_dict[user['screen_name']] = len(user['friends'])\n\n for candidate in sorted(all_friends_dict):\n print(candidate,all_friends_dict[candidate])",
"def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count",
"def get_following_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'following')",
"def people_count(self):\n return len(self.__users)",
"def count_anyone_answered(group: list) -> int:\n return len(functools.reduce(lambda a, b : a + b, [collections.Counter(answers) for answers in group]))",
"def get_num_followers(self):\n a = self.soup.find('div', class_ = 'zm-topic-side-followers-info').a\n if a:\n return a.get_text(strip = True).encode('utf-8')\n return ''",
"def getFollowers():\n followers = []\n next_max_id = True\n while next_max_id:\n # first iteration hack\n if next_max_id is True:\n next_max_id = ''\n\n _ = GetInstagramAnswer.igApi.getUserFollowers(GetInstagramAnswer.igApi.username_id, maxid=next_max_id)\n followers.extend(GetInstagramAnswer.igApi.LastJson.get('users',[]))\n next_max_id = GetInstagramAnswer.igApi.LastJson.get('next_max_id','')\n return \"You have currently \"+str(len(followers))+\" Followers on Instagram.\"",
"def count_everyone_answered(group: list) -> int:\n return len(functools.reduce(lambda a, b : a & b, [collections.Counter(answers) for answers in group]))",
"async def membercount(ctx, *args):\n if ctx.message.channel.is_private:\n await bot.delete_message(ctx.message)\n return\n\n g = ctx.message.server\n\n gid = g.id\n membs = str(len(g.members))\n membs_on = str(len([m for m in g.members if not m.status == Status.offline]))\n users = str(len([m for m in g.members if not m.bot]))\n users_on = str(len([m for m in g.members if not m.bot and not m.status == Status.offline]))\n bots = str(len([m for m in g.members if m.bot]))\n bots_on = str(len([m for m in g.members if m.bot and not m.status == Status.offline]))\n created = str(g.created_at)\n \n em = Embed(title=\"Membercount\")\n em.description = \"```\\n\" \\\n \"Members: %s (%s)\\n\" \\\n \" Users: %s (%s)\\n\" \\\n \" Bots: %s (%s)\\n\" \\\n \"Created: %s\\n\" \\\n \"```\" % (membs, membs_on, users, users_on, bots, bots_on, created)\n\n await client.send_message(ctx.message.channel, embed=em)\n await client.delete_message(ctx.message)",
"def member_count(self) -> int:\n return sum([g.member_count for g in self.guilds])",
"def count():",
"def __numHeads(self):\n count = 1\n\n while (self.__coinFlip() == 1):\n count += 1\n return count",
"def amount_of_receivers(self) -> int:\n return sum([1 for _ in self.receivers])",
"def follows(self, other):\n\t\treturn self.followed.filter(followers.c.followed_by == other.id).count() > 0",
"def test_how_many_friends(self):\n expected = [\n (1, 3), (2, 3), (3, 3), (5, 3), (8, 3),\n (0, 2), (4, 2), (6, 2), (7, 2), (9, 1),\n ]\n self.assertEqual(expected, self.users.how_many_friends())",
"def get_followers_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'followers')",
"async def get_follow_counts(db, account: str):\n account_id = await _get_account_id(db, account)\n sql = \"\"\"SELECT following, followers\n FROM hive_accounts\n WHERE id = :account_id\"\"\"\n return dict(await db.query_row(sql, account_id=account_id))",
"def count_fingers(self):\n return self._finger.count(True)",
"def is_following(self, you, them):\n if self.filter(from_user=you, to_user=them).count() > 0:\n return True\n return False",
"def process_friend(friend):\n friends_lang[friend.lang] += 1 # Getting friend language & timezone\n if friend.time_zone:\n friends_timezone[friend.time_zone] += 1",
"def follows(self):\r\n return relationships.Follows(self)",
"def friend_stats(request):\n \n r = {}\n fb_ids = FacebookProfile.objects.all().values(\"facebook_id\") \n for u in FacebookProfile.objects.all():\n friends = Friends.objects.filter(facebook_id__in=fb_ids)\n num_friends = Friends.objects.filter(facebook_id=u.facebook_id, friends__in=friends).count()\n participant = OTNUser.objects.get(facebook_profile__facebook_id=u.facebook_id)\n r[u.facebook_id]=\"%s (%d): %d\"%(participant.name, participant.id, num_friends)\n\n return JSONHttpResponse(r)"
] | [
"0.7489338",
"0.70336175",
"0.70297045",
"0.69893944",
"0.6874314",
"0.67994153",
"0.67738897",
"0.66341037",
"0.6608121",
"0.62928677",
"0.62310994",
"0.6174809",
"0.603691",
"0.6025284",
"0.60199904",
"0.59938794",
"0.59653294",
"0.5948239",
"0.59337586",
"0.5919943",
"0.59168315",
"0.5914305",
"0.59068924",
"0.585086",
"0.58406836",
"0.58006227",
"0.5795091",
"0.5782663",
"0.5766093",
"0.5764752"
] | 0.72688586 | 1 |
Compute the number of shared accounts followed by each pair of users. | def friend_overlap(users):
###TODO-- Completed
#Creating a list of tuples to store the values for number of shared accounts by each of the user
overlap_tuples = []
#Trying for all the combination if user's without repetition
for outer_idx,_ in enumerate(users):
for inner_idx,_ in enumerate(users):
if (inner_idx != len(users)-1) and (outer_idx < inner_idx+1):
#Creating a SET of friends for 2 users and finding the INTERSECTION i.e. Common friends between these users
overlap_tuples.append(tuple((users[outer_idx]['screen_name'],users[inner_idx+1]['screen_name'],
len(list(set(users[outer_idx]['friends']) & set(users[inner_idx+1]['friends']))))))
#Sort based on first KEY as N i.e. number of shared account in descending order,
# for ties break using screen_name of user one, further on screen_name of user two
return sorted(overlap_tuples, key=lambda x:[-x[2], x[0], x[1]])
#for perm in combinations(screen_names,2):
# overlap_tuples.append(tuple(perm[0],perm[1],len(list(set(user[perm[0]]['friends']) & set(perm[1]['friends'])))))
#print(len(list(set(users[0]['friends']) & set(users[1]['friends'])))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_pairwise_user_similarity(self, user1_preferences, user2_preferences):\r\n\r\n shared_items = set(user1_preferences.indices) & set(user2_preferences.indices)\r\n\r\n all_items = set(user1_preferences.indices) | set(user2_preferences.indices)\r\n\r\n num_agreements = sum(1 for x in shared_items if abs(user1_preferences[0, x] - user2_preferences[0, x]) <= 2)\r\n\r\n return (num_agreements / len(all_items) if len(all_items) > 0 else 0)",
"def count_common_connections(network, user_A, user_B):\n count = 0\n if user_A not in network or user_B not in network:\n return False\n for person in network[user_A][0]:\n if person in network[user_B][0]:\n count += 1\n return count",
"def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count",
"def count_common_connections(network, user_A, user_B):\n if user_A not in network or user_B not in network:\n return False\n common_connections = 0\n for conn in network[user_A]['connections']:\n if conn in network[user_B]['connections']:\n common_connections += 1\n return common_connections",
"def count_total_each_user():\r\n trans = transaction.begin()\r\n user_list = UserMgr.get_list(active=True)\r\n for user in user_list:\r\n StatBookmarkMgr.count_user_bookmarks(user.username)\r\n trans.commit()",
"async def _sync_total(self, ctx: Context):\n\n guild = ctx.guild\n\n player_id = await self.config.guild(guild).player_id()\n player_role = discord.utils.get(guild.roles, id=player_id)\n\n signed = len([\n user.id for user in guild.members\n if player_role in user.roles\n ])\n\n await self.config.guild(guild).signed.set(signed)\n\n await ctx.send(_(\"Synced total number of signed-up players.\"))",
"def count_friends(users):\n all_friends=[]\n for u_dict in users:\n for items in u_dict['friends']:\n all_friends.append(items)\n count = Counter()\n for frnd in all_friends:\n count[frnd]+=1\n return count",
"def compute_user_user_sim_base_on_common_items(self):\n self.sim_matrix = {}\n for item in self.items.values():\n # convert to list of tuples for indexing\n users = list(item.covered_users.items())\n item_popularity = len(users)\n # iter through all user pairs\n for i in range(len(users)-1):\n for j in range(i+1, len(users)):\n user_A_info, user_B_info = users[i], users[j]\n # remember to update pair wise!\n self.update_user_user_sim(user_A_info, user_B_info,\n item_popularity)\n self.update_user_user_sim(user_B_info, user_A_info,\n item_popularity)",
"def n_users(self):\n if self._n_users is None:\n self._n_users = len(self.user_unique_vals)\n return self._n_users",
"def count_friends(users):\n ###TODO-- Completed\n\n #Creating a Counter object, to count the mapping\n c = Counter()\n c.update(friend_id for user in users for friend_id in user['friends'])\n return c",
"def count_same(pairs):\n same_count = 0\n for x, y in pairs:\n if x == y:\n same_count = same_count + 1\n return same_count",
"def shared_nb(self):\n return self.bbsitting_set.count() + self.booked.count()",
"def common_count(self, node_1, node_2):\n return int(len(set(nx.neighbors(self.graph, node_1)).intersection(set(nx.neighbors(self.graph, node_2)))))",
"def share_combining(self, shares):\n mod_shares = [share[1] * (calc_lambda(shares,\n share[0], self.precomputed_fac)) for i, share in enumerate(shares)]\n return sum(mod_shares)",
"def people_count(self):\n return len(self.__users)",
"def getConnectedUsersCount(self):\n\n\t\treturn len(self.connectedUsers)",
"def numIdenticalPairs(self, nums: List[int]) -> int:\n #brute force\n # res = 0\n # for i in range(len(nums)):\n # for j in range(len(nums)):\n # if i >= j:\n # continue\n\n # if nums[i] == nums[j]:\n # res += 1\n # return res\n #with memoization\n res = 0\n memo = {}\n \n for n in nums:\n \n if n not in memo:\n memo[n] = 1\n \n else:\n # count number of pairs based on duplicate values\n if memo[n] == 1:\n res += 1\n else:\n res += memo[n]\n \n memo[n] += 1\n \n return res",
"def count_pairs(assignments, v1, v2, M):\n assert v1 != v2\n pairs = assignments[:, v1].astype(np.int32) * M + assignments[:, v2]\n return np.bincount(pairs, minlength=M * M).reshape((M, M))",
"def print_num_friends(users):\n ###TODO-- Completed\n\n #Creating a new dictionary to store the KEY, VALUE pair for friends of every screen_name i.e. user\n # and their counts i.e. number of friends per user\n all_friends_dict = {}\n\n for user in users:\n all_friends_dict[user['screen_name']] = len(user['friends'])\n\n for candidate in sorted(all_friends_dict):\n print(candidate,all_friends_dict[candidate])",
"def get_beneficiary_grant_app_pair_counts():\n\n # Init counts dict\n counts_returned = {}\n\n # Grant real/synth bene counts (includes granted to multiple apps)\n start_time = datetime.utcnow().timestamp()\n\n # Setup base queryset\n grant_queryset = DataAccessGrant.objects.values(\"beneficiary\", \"application\")\n\n real_grant_queryset = grant_queryset.filter(\n ~Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\", \"application\")\n\n synthetic_grant_queryset = grant_queryset.filter(\n Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\", \"application\")\n\n counts_returned[\"grant_total\"] = grant_queryset.count()\n counts_returned[\"real_grant\"] = real_grant_queryset.count()\n counts_returned[\"synthetic_grant\"] = synthetic_grant_queryset.count()\n\n # Setup base queryset\n grant_archived_queryset = ArchivedDataAccessGrant.objects.values(\n \"beneficiary\", \"application\"\n )\n\n real_grant_archived_queryset = grant_archived_queryset.filter(\n ~Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\", \"application\")\n\n synthetic_grant_archived_queryset = grant_archived_queryset.filter(\n Q(beneficiary__crosswalk___fhir_id__startswith=\"-\")\n & ~Q(beneficiary__crosswalk___fhir_id=\"\")\n & Q(beneficiary__crosswalk___fhir_id__isnull=False)\n ).values(\"beneficiary\", \"application\")\n\n # Get total table count\n counts_returned[\"grant_archived_total\"] = grant_archived_queryset.count()\n counts_returned[\"real_grant_archived\"] = real_grant_archived_queryset.count()\n counts_returned[\n \"synthetic_grant_archived\"\n ] = synthetic_grant_archived_queryset.count()\n\n \"\"\"\n Bene<->App pair differences\n \"\"\"\n # Pairs in Grant but not in ArchivedGrant.\n counts_returned[\"grant_vs_archived_difference_total\"] = grant_queryset.difference(\n grant_archived_queryset\n ).count()\n counts_returned[\n \"real_grant_vs_archived_difference_total\"\n ] = real_grant_queryset.difference(real_grant_archived_queryset).count()\n counts_returned[\n \"synthetic_grant_vs_archived_difference_total\"\n ] = synthetic_grant_queryset.difference(synthetic_grant_archived_queryset).count()\n\n # Pairs in ArchivedGrant but not in Grant.\n counts_returned[\n \"archived_vs_grant_difference_total\"\n ] = grant_archived_queryset.difference(grant_queryset).count()\n counts_returned[\n \"real_archived_vs_grant_difference_total\"\n ] = real_grant_archived_queryset.difference(real_grant_queryset).count()\n counts_returned[\n \"synthetic_archived_vs_grant_difference_total\"\n ] = synthetic_grant_archived_queryset.difference(synthetic_grant_queryset).count()\n\n counts_returned[\"elapsed\"] = round(datetime.utcnow().timestamp() - start_time, 3)\n\n return counts_returned",
"def number_users_active(self) -> int:\r\n unique_users = {\r\n row['user']\r\n for row in self.rows\r\n }\r\n\r\n return len(unique_users)",
"def number_of_friends(user):\n user_id = user[\"id\"]\n friend_ids = friendships[user_id]\n return len(friend_ids)",
"def number_of_connections(self, asn):\n customer_count = 0\n provider_count = 0\n peer_count = 0\n\n for neighbor in nx.all_neighbors(self, asn):\n edge_data = self.get_edge_data(asn, neighbor)\n if edge_data[\"relationship\"] == -1 and edge_data[\"as1\"] == asn:\n customer_count += 1\n elif edge_data[\"relationship\"] == -1 and edge_data[\"as2\"] == asn:\n provider_count += 1\n elif edge_data[\"relationship\"] == 0:\n peer_count += 1\n return customer_count, provider_count, peer_count",
"def count_amino_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_amino_acids()\n return n",
"def size(self) -> Tuple[int, int]:\n count_keys = 0 # store the number of different 'key'.\n count_values = 0 # store the the number of different 'value'.\n for node in self.hashTable:\n count_values = count_values + node.count\n count_keys = count_keys + len(node.keys)\n return count_keys, count_values",
"def member_count(self) -> int:\n return sum([g.member_count for g in self.guilds])",
"def get_amount_users() -> User:\n return User.objects.all().count()",
"def membership_count(self, S_go, S_gene):\n return self.go_count(S_gene, S_go)\n #c=self.go_count(S_gene)\n #if type(S_go)!=set:\n # S_go=set(S_go)\n #c={ k:v for k,v in c.items() if k in S_go }\n #return c",
"def get_workspace_shares(workspaces_dict, kb_staff):\n client = MongoClient(mongoDB_metrics_connection + to_workspace)\n db = client.workspace\n max_shared_count = 0\n perms_dict = {10: \"view\", 20: \"edit\", 30: \"admin\"}\n\n shares_query = db.workspaceACLs.find(\n {\"perm\": {\"$in\": [10, 20, 30]}}, {\"id\": 1, \"user\": 1, \"perm\": 1, \"_id\": 0}\n )\n for record in shares_query:\n if record[\"id\"] in workspaces_dict:\n # do stuff as it is a users narrative and has at least 1 share.\n is_kb_staff = 0\n if record[\"user\"] in kb_staff:\n is_kb_staff = 1\n share_entry = [record[\"user\"], perms_dict[record[\"perm\"]], str(is_kb_staff)]\n workspaces_dict[record[\"id\"]][\"shares_list\"].extend(share_entry)\n\n max_shared_count = 0\n for ws in workspaces_dict:\n share_number = len(workspaces_dict[ws][\"shares_list\"])\n if share_number > max_shared_count:\n max_shared_count = share_number\n return (workspaces_dict, int(max_shared_count / 3))",
"def print_num_friends(users):\n for u_dict in users:\n print (\"%s %d\" %(u_dict['screen_name'], len(u_dict['friends'])))"
] | [
"0.6410819",
"0.63967514",
"0.6126403",
"0.6060102",
"0.60091025",
"0.5765657",
"0.5672716",
"0.56712496",
"0.5659659",
"0.5650826",
"0.5642087",
"0.5629103",
"0.5578294",
"0.5570148",
"0.54584384",
"0.54282904",
"0.5406067",
"0.5401401",
"0.5397055",
"0.5396023",
"0.5381516",
"0.5364576",
"0.53590304",
"0.5304504",
"0.5260001",
"0.5254087",
"0.5212402",
"0.520769",
"0.52065516",
"0.5205124"
] | 0.64838386 | 0 |
Find and return the screen_name of the one Twitter user followed by both Hillary Clinton and Donald Trump. Using the TwitterAPI to convert the Twitter ID to a screen_name. | def followed_by_hillary_and_donald(users, twitter):
###TODO-- Completed
for user in users:
if user['screen_name'] == 'HillaryClinton':
friends_Hillary = user['friends']
#print(len(friends_Hillary))
elif user['screen_name'] == 'realDonaldTrump':
friends_donald = user['friends']
#print(len(friends_donald))
common_followed_id = list(set(friends_Hillary) & set(friends_donald))
commn_followed_user = robust_request(twitter,'users/lookup',{'user_id':common_followed_id}).json()
#print(commn_followed_user[0]['screen_name'])#['screen_name'])
return commn_followed_user[0]['screen_name']
#pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def followed_by_hillary_and_donald(users, twitter):\n\n str = ''\n set1 = set()\n set2 = set()\n for u_dict in users:\n \tif u_dict['screen_name'] == 'HillaryClinton':\n \t\tset1 = set(u_dict['friends'])\n \telif u_dict['screen_name'] == 'realDonaldTrump':\n \t\tset2 = set(u_dict['friends'])\n \t\t\n common = set.intersection(set1, set2)\n request = robust_request(twitter, 'users/lookup', {'user_id': common}, max_tries=5)\n for user in request:\n \tstr = user['screen_name']\t\n return str",
"def find_screen_name(user_id: UserID) -> Optional[str]:\n screen_name = db.session \\\n .query(DbUser.screen_name) \\\n .filter_by(id=user_id) \\\n .scalar()\n\n if screen_name is None:\n return None\n\n return screen_name",
"def compute_user_mentions_screen_name(row):\n entity_series = pd.read_json(json.dumps(row['entities']), typ='series')\n user_mentions_screen_name = list(map(lambda entry: entry['screen_name'], entity_series['user_mentions']))\n return ','.join(user_mentions_screen_name)",
"def lookup_handle(aHandle):\n\n theHandle = aHandle.replace(\"@\", \"\")\n\n twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n\n try:\n result = twitter.show_user(screen_name=theHandle)\n\n except:\n return \"\"\n\n return ''.join(ch for ch in result['name'].encode('utf-8').strip() if ch.isalnum() or ch.isspace())",
"def extract_mentioned_screen_names(self, transform = lambda x: x):\r\n return [transform(mention['screen_name']) for mention in self.extract_mentioned_screen_names_with_indices()]",
"def get_tweets_from_username(api, screen_name):\n\n # initialize a list to hold all the Tweets\n alltweets = []\n output = []\n\n # make initial request for most recent tweets\n # (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # save the id of the oldest tweet less one to avoid duplication\n oldest = alltweets[-1].id - 1\n\n # keep grabbing tweets until there are no tweets left\n while len(new_tweets) > 0:\n print(\"Getting tweets before %s\" % (oldest))\n\n # all subsequent requests use the max_id param to prevent\n # duplicates\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n print(\"... %s tweets downloaded so far\" % (len(alltweets)))\n\n # transform the tweepy tweets into a 2D array that will\n for tweet in alltweets:\n output.append([tweet.id_str,\n tweet.created_at,\n tweet.full_text,\n tweet.in_reply_to_screen_name,\n tweet.user.name,\n tweet.user.location,\n tweet.user.followers_count,\n tweet.user.friends_count,\n tweet.geo,\n tweet.coordinates,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.lang,\n tweet.retweeted])\n\n # Convert to dataframe\n df = pd.DataFrame.from_records(output, columns=[\"id_str\",\n \"created_at\",\n \"full_text\",\n \"in_reply_to_screen_name\",\n \"user_name\",\n \"user_location\",\n \"user_followers_count\",\n \"user_friends_count\",\n \"geo\",\n \"coordinates\",\n \"retweet_count\",\n \"favorite_count\",\n \"lang\",\n \"retweeted\"])\n return df",
"def follow_someone(screen_name):\n twitter.create_friendship(screen_name=screen_name)",
"def get_actor_display_name(actor, truncate=250):\n name = ' '.join(actor.type_id.replace('_', '.').title().split('.')[1:])\n return (name[:truncate - 1] + u'\\u2026') if len(name) > truncate else name",
"def extract_reply_screen_name(self, transform = lambda x: x):\r\n if not self.text:\r\n return None\r\n\r\n possible_screen_name = REGEXEN['valid_reply'].match(self.text)\r\n if possible_screen_name is not None:\r\n if possible_screen_name.group(1).find('http') > -1:\r\n possible_screen_name = None\r\n else:\r\n possible_screen_name = transform(possible_screen_name.group(1))\r\n return possible_screen_name",
"def get_name(user_id):\n try:\n student = _UserProfile.objects.get(user_id=user_id)\n except _UserProfile.DoesNotExist:\n log.exception(f'Could not find UserProfile for id {user_id}')\n return None\n return student.name or None",
"def get_data_user(twitter, screen_names):\n\n data_user = []\n for name in screen_names:\n request = robust_request(twitter, 'users/lookup', {'screen_name': name}, max_tries=5)\n user = [val for val in request]\n friends = []\n request = robust_request(twitter, 'friends/ids', {'screen_name': name, 'count': 5000}, max_tries=5)\n friends = sorted([str(val) for val in request])\n fr = {'screen_name': user[0]['screen_name'],\n 'id': str(user[0]['id']),\n 'friends_id': friends}\n data_user.append(fr)\n \n return data_user",
"def second_name(self, instance):\r\n return instance.user.profile.second_name",
"async def get_user_name(self, user_target: str) -> str:\n user = await self.get_user(user_target=user_target)\n if user is None:\n return user_target\n return user.display_name",
"def detect_author(user_to_tweets: Dict[str, List[tuple]], tweet_text: str) -> \\\n str:\n acc = []\n \n for keys in user_to_tweets:\n author_hashes = hashtag_seperator(user_to_tweets[keys])\n text_hashes = extract_hashtags(tweet_text)\n if set(text_hashes).issubset(author_hashes):\n acc.append(keys)\n if len(acc) == 1:\n return acc[0]\n return 'unknown'",
"def _get_user_name_from_user_id(self, user_id):\n if user_id in self._user_id_to_user_name.keys():\n return self._user_id_to_user_name[user_id]\n return self._get_user_name_from_user_id_by_slack_client(user_id)",
"def removing_screen_names_repetition(urls):\n\n screen_name = set()\n for url in urls:\n nom = extract_screen_name_from_twitter_url(url)\n if nom:\n screen_name.add(nom)\n return screen_name",
"def ldap_get_lastname(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n lastname = result.get(\"last-name\")[0]\n return lastname\n\n return None",
"def get_one_profile(api, user_id=None, screen_name=None):\n url = u'https://api.twitter.com/1.1/users/show.json'\n if user_id:\n params = {'user_id': user_id}\n elif screen_name:\n params = {'screen_name': screen_name}\n else:\n return {}\n params['include_entities'] = True\n rate_status = check_rate_limit(api, url)\n if not rate_status['remaining']:\n delay = rate_status['reset'] - time.time()\n if delay > 0:\n print \"Sleeping {0}...\".format(delay)\n time.sleep(delay) \n response = api.get(url, params=params)\n return response.json()",
"def get_user_name(user: User) -> str:\n user_name = user.get(\"display_name\")\n if not user_name:\n user_name = user[\"fullname\"]\n if not user_name:\n user_name = user[\"name\"]\n return user_name",
"def change_screen_name(user_id):\n user = _get_user_or_404(user_id)\n\n form = ChangeScreenNameForm(request.form)\n if not form.validate():\n return change_screen_name_form(user.id, form)\n\n old_screen_name = user.screen_name\n new_screen_name = form.screen_name.data.strip()\n initiator_id = g.user.id\n reason = form.reason.data.strip()\n\n event = user_command_service.change_screen_name(\n user.id, new_screen_name, initiator_id, reason=reason\n )\n\n user_signals.screen_name_changed.send(None, event=event)\n\n flash_success(\n gettext(\n \"User '%(old_screen_name)s' has been renamed to '%(new_screen_name)s'.\",\n old_screen_name=old_screen_name,\n new_screen_name=new_screen_name,\n )\n )\n return redirect_to('.view', user_id=user.id)",
"def getFollowerIDs(self, screen_name):\n follower_ids = []\n for follower_id in tweepy.Cursor(self.api.followers_ids,id=screen_name).items():\n print follower_id\n follower_ids.append(follower_id)\n return follower_ids",
"def find_user_by_screen_name(\n screen_name: str, *, case_insensitive=False\n) -> Optional[DbUser]:\n query = DbUser.query\n\n if case_insensitive:\n query = query.filter(\n db.func.lower(DbUser.screen_name) == screen_name.lower()\n )\n else:\n query = query.filter_by(screen_name=screen_name)\n\n return query.one_or_none()",
"def get_friends(twitter, screen_name):\n request = robust_request(twitter, 'friends/ids', {'screen_name': screen_name}, max_tries=5)\n friend_list = []\n for r in request:\n friend_list.append(r)\n return sorted(friend_list)",
"def get_username_and_id(self, obj):\n return \"%s - %s\" % (obj.user.username, obj.user.id)",
"def RecipientScreenName(self):\n return self._recipient_screen_name",
"def get_user_display_name():\n user_display_name = session.get(\"user_display_name\")\n return user_display_name if user_display_name else None",
"def get_full_name(cls, user_id):\n\n u = cls.query.get_or_404(user_id)\n u_first = u.first_name\n u_last = u.last_name \n\n u_full = f\"{u_first} {u_last}\"\n\n return u_full",
"def get_friends(twitter, screen_name):\n ###TODO-- Completed\n\n #Requesting twitter with query to get friends of all the screen_name(s) passed as a parameter\n friends_ids = robust_request(twitter,'friends/ids',{'screen_name':screen_name}).json()\n\n #Returns a dictionary having several values, selecting the one which has KEY: ids,\n # i.e. get ids of all the friends in a sorted manner\n return sorted(friends_ids['ids'])",
"def name(self):\n name = self.__telegram_info.message.from_user.name\n return name[0].upper() + name[1::]",
"def get_displayname(self):\n return self.full_name or self.user.username"
] | [
"0.7442753",
"0.675429",
"0.62897015",
"0.6174623",
"0.5731249",
"0.57199556",
"0.57191813",
"0.5523849",
"0.5438702",
"0.54316574",
"0.53900385",
"0.5376651",
"0.5340155",
"0.5266645",
"0.5266013",
"0.525878",
"0.52508754",
"0.52349985",
"0.5231683",
"0.5230022",
"0.52211875",
"0.52099454",
"0.52093875",
"0.5199093",
"0.51944757",
"0.51926386",
"0.5173391",
"0.51663166",
"0.51586676",
"0.5137602"
] | 0.7037834 | 1 |
Create a networkx undirected Graph, adding each candidate and friend | def create_graph(users, friend_counts):
###TODO-- Completed
G = nx.Graph()
#For Filtering the Nodes
#print(friend_counts)
friend_nodes = [friend for friend in friend_counts if friend_counts[friend] > 1]
candidate_nodes = [user['screen_name'] for user in users]
#print("Nodes: ",len(friend_nodes), len(candidate_nodes))
#Adding Nodes to graph
G.add_nodes_from(friend_nodes + candidate_nodes)
#Connecting the Nodes with Edges
for candidate in users:
for friend in friend_nodes:
if friend in candidate['friends']:
G.add_edge(candidate['screen_name'], friend)
return G | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_graph(friends: list, mutuals: dict) -> nx.classes.graph.Graph:\n friends_ids = [friend['id'] for friend in friends]\n G = nx.Graph()\n G.add_nodes_from(range(len(friends_ids)))\n\n for idx in tqdm(friends_ids):\n node_id = friends_ids.index(idx)\n G.nodes[node_id]['vk_id'] = idx\n G.nodes[node_id]['first_name'] = friends[node_id]['first_name']\n G.nodes[node_id]['last_name'] = friends[node_id]['last_name']\n G.nodes[node_id]['gender'] = friends[node_id]['sex']\n G.nodes[node_id]['relation'] = friends[node_id].get('relation')\n G.nodes[node_id]['city'] = friends[node_id].get('city', {}).get('title')\n G.nodes[node_id]['country'] = friends[node_id].get('country', {}).get('title')\n G.nodes[node_id]['schools'] = friends[node_id].get('schools')\n G.nodes[node_id]['universities'] = friends[node_id].get('universities')\n G.nodes[node_id]['career'] = friends[node_id].get('career')\n idx_mutuals = mutuals.get(idx)\n if idx_mutuals != None:\n edges = [(node_id, friends_ids.index(friend_id)) for friend_id in idx_mutuals]\n G.add_edges_from(edges)\n\n return G",
"def new_friends(self, G):\r\n H = G.to_undirected() #creates an undirected copy of the original graph\r\n n = nx.preferential_attachment(H) #uses the preferential_attachment method from networkx to create friends\r\n for u, v, p in n:\r\n chance = random.randint(0, 100) #chance is a randomly generated number between 0 and 100\r\n if p >= len(G.edges) and chance >= 90: #creates a new relationship (edge) between two nodes if their preferential\r\n G.add_edge(u, v, weight=random.uniform(-1, 1)) #attachment number is higher than the total number of edges and\r\n else: #chance is greater than 90.\r\n continue\r\n return G",
"def __toNetworkX(self):\n G = nx.Graph()\n G.add_nodes_from(range(self.n))\n for u in range(self.n):\n for v in range(self.n):\n if self.adjacent(u, v):\n G.add_edge(u, v)\n\n return G",
"def _CreateAdjacencyListGraph(self):\n graph = dict()\n for nodes in self.nodes:\n graph[nodes[0]] = set()\n for edges in self.edges:\n graph[edges[0]].add(edges[1])\n return graph",
"def generate_graph(self):\n temp_graph = [[] for i in xrange(Parameters.num_peers)]\n unconnected = set([i for i in xrange(Parameters.num_peers)])\n while len(unconnected) > 1:\n node1 = random.sample(unconnected, 1)[0]\n unconnected.remove(node1)\n node2 = random.sample(unconnected, 1)[0]\n temp_graph[node2].append(self.nodes[node1])\n temp_graph[node1].append(self.nodes[node2])\n unconnected = set([i for i in xrange(Parameters.num_peers)])\n i = 0\n for i in xrange(Parameters.num_peers*Parameters.num_neighbours/2-Parameters.num_peers):\n a = random.sample(unconnected, 1)[0]\n b = random.sample(unconnected, 1)[0]\n while b == a:\n b = random.sample(unconnected, 1)[0]\n temp_graph[a].append(self.nodes[b])\n temp_graph[b].append(self.nodes[a])\n graph = {}\n for i in xrange(len(self.nodes)):\n graph[\"P_\" + str(i)] = list(set(temp_graph[i]))\n return graph",
"def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to_dict()))\n # print('node ex: {}'.format(self.nodes[0]))\n # print('edge ex: {}'.format(self.edges[0]))\n\n self.graph = self._CreateAdjacencyListGraph()",
"def build_node_graph(self):\n G = pgv.AGraph(strict=False, directed=True)\n temp_dict = defaultdict(int) #key - from_to_ip, val - counter\n\n for i, ip in enumerate(self.node_graph_dict.keys()):\n G.add_node(ip, shape='rect', label='%d' % i)\n logger.info(\"All nodes added\")\n\n for ip, droplist in self.node_graph_dict.iteritems():\n for gnid, dropids in droplist:\n for did in dropids:\n tip = self.gnid_ip_dict[self.oid_gnid_dict[did]]\n k = '{0}_{1}'.format(ip, tip)\n temp_dict[k] += 1\n\n for k, v in temp_dict.iteritems():\n ks = k.split('_')\n G.add_edge(ks[0], ks[1], weight=v)\n\n return G",
"def _create_connections(self):\n self.predecessors = {}\n self.successors = {}\n for nd in self.nodes:\n self.predecessors[nd.name] = []\n self.successors[nd.name] = []\n\n for (nd_out, nd_in) in self.edges:\n self.predecessors[nd_in.name].append(nd_out)\n self.successors[nd_out.name].append(nd_in)",
"def construct_network_from_neighbours_list(related_characters: list):\n graph = nx.Graph()\n for edge in related_characters:\n sentiment = edge[1]\n color = ''\n if sentiment == 'Positive':\n color = 'g'\n elif sentiment == 'Negative':\n color = 'r'\n elif sentiment == 'Neutral':\n color = 'k'\n # graph.add_node(edge[0][0], popularity=\n graph.add_edge(edge[0][0], edge[0][1], color=color, weight=edge[2])\n\n return graph",
"def read_friendship_graph(self):\n graph = nx.read_gpickle(self.graph_path)\n return graph",
"def to_networkx(self):\n g = nx.Graph()\n for v in self.vs.values():\n g.add_node(v)\n for v in self.fs:\n g.add_node(v)\n for u in v.neighbors:\n g.add_edge(v, u)\n return g",
"def make_nodes_and_paths(friends_lst):\n\n # nodes = {}\n\n # for item in friends_lst:\n # friend1, friend2, group = item\n # for person in pair:\n # if not nodes.get(person):\n # nodes[person] = pair[1]\n\n # nodes = [{'name': person, 'friend': nodes[person]} for person in nodes.keys()]\n\n nodes = {}\n for item in friends_lst:\n friend1, friend2, group = item\n if not nodes.get(friend1):\n nodes[friend1] = group\n elif nodes.get(friend1) > group:\n nodes[friend1] = group\n\n nodes = [{'name': person, 'group': nodes[person]} for person in nodes.keys()]\n\n index_nodes = {}\n for idx, n in enumerate(nodes):\n index_nodes[n['name']] = (idx, n['group'])\n\n paths = []\n\n # paths.append({'source': item[1], 'target': item[0]})\n\n for item in friends_lst:\n # one = User.query.get(item.user_id)\n # two = User.query.get(item.friend_id)\n source, target, group = item\n paths.append({'source': index_nodes[source][0], 'target': index_nodes[target][0]})\n\n # print nodes\n # print index_nodes\n # print paths\n\n return nodes, paths",
"def _create_nx_graph(self):\n #_graph = nx.Graph()\n graph = nx.DiGraph()\n for name, lemma in self._lemmas_info.get_parent_lemmas():\n added_children = []\n for child_n in lemma.evidence_lemmas:\n child_node = str(child_n)\n if not self._should_be_filtered( added_children, child_node ):\n added_children.append( child_node )\n \n graph.add_node( name ) # it's OK if it exists from the previous iteration\n graph.add_node( child_node )\n # lemma1 because lemma2, means that lemma2 -> lemma1\n graph.add_edge( child_node, name )\n \n self._append_source_and_target( graph )\n return graph",
"def example_graph():\n g = nx.Graph()\n g.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D'), ('D', 'E'), ('D', 'F'), ('D', 'G'), ('E', 'F'), ('G', 'F')])\n return g",
"def _generate_graph(self) -> None:\n self.g_ = nx.random_partition_graph(list(self._community_sizes),\n p_in=self.community_p_in,\n p_out=self.community_p_out,\n seed=self.seed)\n\n for _, nv in self.g_.nodes.data():\n nv[\"infected\"] = 0\n nv[\"immune\"] = False\n nv[\"alive\"] = True\n nv[\"_edges\"] = []\n nv[\"isolated\"] = False\n nv[\"mask\"] = 0.0",
"def build_graph(self):\n for node in self.nodes:\n self.graph.add_node(node.id, node_obj=node)\n edges = []\n for i in range(0, len(self.nodes)):\n for j in range(i+1, len(self.nodes)):\n if (self.nodes[i].distance(self.nodes[j]) < self.radio_range):\n edges.append((self.nodes[i].id, self.nodes[j].id,1))\n self.graph.add_weighted_edges_from(edges)",
"def make_graph(self):\n # update the neighbors in the graph\n self.update_neighbors()\n\n # Go through each node and get their neighbors\n self.edges = []\n for node_name in self.nodes:\n\n # get the neighbors\n node_neighbors = self.nodes[node_name].neighbors\n\n # go through neighbors\n for neighbor_name in node_neighbors:\n\n # Make the edge key\n edge_key = \"-\".join(sorted([node_name, neighbor_name]))\n\n # Add it to the edge list if it is not already present\n if edge_key not in self.edges:\n\n self.edges.append(edge_key)\n\n return self.edges",
"def network(self):\n G = nx.MultiDiGraph()\n reaction_hash = []\n product_count = 0\n mapping = {}\n reaction_count = 0\n\n for r in self.reactions:\n reaction_count += 1\n\n reaction_dict = r.__dict__\n G.add_edge(reaction_dict.get('left'), hash(r))\n G.add_edge(reaction_dict.get('right'), hash(r))\n G.add_edge(hash(r), reaction_dict.get('left2'))\n G.add_edge(hash(r), reaction_dict.get('right2'))\n\n product_count += 1\n mapping[reaction_dict.get('left')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('right')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('left2')] = \"x{}\".format(product_count)\n product_count += 1\n mapping[reaction_dict.get('right2')] = \"x{}\".format(product_count)\n\n mapping[hash(r)] = \"r{}\".format(reaction_dict.get(\"reaction_n\"))\n reaction_hash.append(hash(r))\n\n return G, mapping",
"def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph",
"def prepare_graph(label, nodes, edges, graphID):\n features = {'label': label}\n\n G = nx.DiGraph()\n nodes[\"id\"] = nodes[\"id\"].apply(lambda x : str(x))\n features['num_nodes'] = nodes.shape[0]\n op_node = None\n times = []\n friends = []\n followers = []\n for index, row in nodes.iterrows():\n G.add_node(row['id'], time=row['time'], friends=row['friends'], followers = row['followers'])\n times.append(row['time'])\n friends.append(2**row['friends'])\n followers.append(2**row['followers'])\n if row['time'] == 0:\n features['poster_friend_cnt'] = 2**row['friends']\n features['poster_follower_cnt'] = 2**row['followers']\n tweeter_id = row['id']\n op_node = row['id']\n features['avg_time'] = np.mean(times)\n features['avg_friends'] = np.mean(friends)\n features['avg_followers'] = np.mean(followers)\n features['max_followers'] = max(followers)\n features['max_friends'] = max(friends)\n features['friends_25th_percentile'] = np.percentile(friends, 25)\n features['friends_75th_percentile'] = np.percentile(friends, 75)\n features['followers_25th_percentile'] = np.percentile(followers, 25)\n features['followers_75th_percentile'] = np.percentile(followers, 75)\n node_list = []\n edge_count = 0\n for pair in edges:\n node1, node2 = pair.split()[0], pair.split()[1]\n node_list.append(node1)\n node_list.append(node2)\n G.add_edge(node1, node2)\n edge_count += 1\n features['num_edges'] = edge_count\n sum_users_without_followers = sum([1 for (node, val) in G.in_degree() if val==0])\n features['ratio_users_w/out_followers'] = sum_users_without_followers / len(G.nodes)\n features['num_connected_components'] = nx.number_strongly_connected_components(G)\n features['number_of_OPs_followers_who_retweeted'] = G.in_degree(tweeter_id)\n features['percentage_of_OPs_followers_who_retweeted'] = G.in_degree(tweeter_id) / features['poster_follower_cnt']\n features['avg_clustering'] = nx.average_clustering(G)\n features['op_clustering'] = nx.clustering(G,op_node)\n features['transitivity'] = nx.transitivity(G)\n node_list = list(set(node_list))\n features['nodeID_list'] = np.array(node_list)\n features['graph_id'] = graphID\n return features, G",
"def build_graph(self):\n for each_list in self.lab.look():\n vertice = self._add_vertice(each_list)\n if vertice:\n self.unvisited.add(vertice)\n self.graph.addEdge((self.current, vertice))\n \n self.unvisited -= self.visited\n self._connect_neighbours()",
"def parse_graph(self):\n\t\tnx_graph = nx.Graph()\n\t\tfor node in self.vertices:\n\t\t\tnx_graph.add_node(node)\n\n\t\tfor edge in self.edges:\n\t\t\tnode1, node2, weight = edge\n\t\t\tnx_graph.add_edge(node1, node2, weight=weight)\n\n\t\treturn nx_graph",
"def create_social_graph(file):\n social_graph = NonDirectionalGraph(\"SocialGraph\")\n with open(file, \"rt\") as f:\n data = f.readlines()\n n_friendship = 0 # Represents the number of friendships in the graph in each iteration\n highest_n_friendship = 0 # Captures the highest record of n_friendship in the graph\n highest_n_neighbors_per_node_dict = {} # Captures the highest record of friendship per node\n for line in data:\n split_line = line.split()\n if \"became\" in split_line: # \"became\" is in lines where persons become connected\n for name in [split_line[0], split_line[2]]:\n # The following if statement makes sure to instantiate the node and adds it to the graph\n if name not in social_graph:\n node = Node(name)\n social_graph.add_node(node)\n highest_n_neighbors_per_node_dict[name] = 0 ##\n social_graph.add_edge(split_line[0],split_line[2]) # Adds a connection between the nodes\n n_friendship += 1 # Updates the number of friendships\n # The following for loop updates the highest number of friends (neighbors) if it changes\n for name in [split_line[0], split_line[2]]:\n if len(social_graph.nodes[name].neighbors) > highest_n_neighbors_per_node_dict[name]:\n highest_n_neighbors_per_node_dict[name] = len(social_graph.nodes[name].neighbors)\n elif \"cancelled\" in split_line: # \"became\" is in lines where persons become disconnected\n social_graph.remove_edge(split_line[0], split_line[2])\n n_friendship -= 1 # Updates the number of friendships\n # In case any of the words \"cancelled\" or \"became\" is in the line\n else:\n print(\"Unrecognized line\")\n # The following for loop updates the highest number of friendship if it changes\n if n_friendship > highest_n_friendship:\n highest_n_friendship = n_friendship\n return social_graph, highest_n_friendship, highest_n_neighbors_per_node_dict",
"def all_pairs_node_connectivity(G, nbunch=None, cutoff=None):\n if nbunch is None:\n nbunch = G\n else:\n nbunch = set(nbunch)\n\n directed = G.is_directed()\n if directed:\n iter_func = itertools.permutations\n else:\n iter_func = itertools.combinations\n\n all_pairs = {n: {} for n in nbunch}\n\n for u, v in iter_func(nbunch, 2):\n k = local_node_connectivity(G, u, v, cutoff=cutoff)\n all_pairs[u][v] = k\n if not directed:\n all_pairs[v][u] = k\n\n return all_pairs",
"def draw_graph(self):\r\n G=nx.Graph()\r\n \r\n list_location1 = []\r\n list_location2 = []\r\n list_location3 = []\r\n list_location4 = []\r\n \r\n for citizen in self.citizens:\r\n G.add_node(citizen.id)\r\n if citizen.location == 1:\r\n list_location1.append(citizen.id)\r\n elif citizen.location == 2:\r\n list_location2.append(citizen.id)\r\n elif citizen.location == 3:\r\n list_location3.append(citizen.id)\r\n else: \r\n list_location4.append(citizen.id)\r\n\r\n for citizen in self.citizens:\r\n for friend in citizen.friends:\r\n G.add_edge(citizen.id,friend.id)\r\n\r\n pos = nx.random_layout(G)\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location1, node_color='r')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location2, node_color='g')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location3, node_color='b')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location4, node_color='y')\r\n nx.draw_networkx_edges(G,pos, width=1)\r\n\r\n plt.show()",
"def buildGraph(M: List[List[int]]) -> List:\n l = len(M)\n G = [Node(i) for i in range(l)]\n for i in range(len(M)):\n for j in range(len(M)):\n if M[i][j]:\n G[i].add_adjacent(G[j])\n return G",
"def _connect_neighbours(self):\n for prev in self.unvisited:\n for next in self.unvisited:\n if (next[0] == prev[0] and next[1] == prev[1] + 1) or (next[0] == prev[0] + 1 and next[1] == prev[1]):\n self.graph.addEdge((prev, next))\n self.visited.add(prev)\n self.visited.add(next)\n if self._find_intersection():\n self.intersection.append(prev)\n self.intersection.append(next)",
"def make_complete_graph(num_nodes):\n #initialize empty graph\n complete_graph = {}\n #consider each vertex\n for vertex in range(num_nodes):\n #add vertex with list of neighbours\n complete_graph[vertex] = list(set([j for j in range(num_nodes) if j != vertex]))\n return complete_graph",
"def build_graph(edges):\n \n G = nx.MultiGraph()\n G.add_edges_from(edges)\n return G",
"def _create_reference_connectivity_graph(self):\n #take the self._residue_graph and create a replicate (without the misc attributes) with the atoms_with_positions\n _reference_connectivity_graph = nx.Graph()\n atoms_with_positions = set(self._atoms_with_positions)\n\n #iterate over all the bonds\n for bond in self._residue_graph.edges():\n if set(bond).issubset(atoms_with_positions):\n #if both of the atoms in the bond are in atoms_with_positions, we can add the atoms/bonds to the reference\n _reference_connectivity_graph.add_edge(*bond)\n\n return _reference_connectivity_graph"
] | [
"0.7127473",
"0.6754733",
"0.6723426",
"0.66156125",
"0.6598361",
"0.65796894",
"0.6538674",
"0.6529904",
"0.6496961",
"0.64637756",
"0.64514536",
"0.64246947",
"0.64232814",
"0.6319336",
"0.63178855",
"0.6282292",
"0.6275593",
"0.6268891",
"0.6247102",
"0.6227013",
"0.62208754",
"0.620973",
"0.6200654",
"0.61973715",
"0.6161072",
"0.6159909",
"0.61542857",
"0.6154203",
"0.6151688",
"0.6140231"
] | 0.76689845 | 0 |