Dataset fields:
  query             string   (length 9 to 9.05k)
  document          string   (length 10 to 222k)
  metadata          dict
  negatives         list     (length 30)
  negative_scores   list     (length 30)
  document_score    string   (length 4 to 10)
  document_rank     string   (2 classes)
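The column summary above follows the Hugging Face dataset-viewer rendering of the schema, so a record can presumably be read with the `datasets` library. A minimal sketch, assuming a placeholder repository id and split name since the card does not state them:

    from datasets import load_dataset

    # "org-name/code-retrieval-triplets" is a placeholder id; the actual
    # dataset repository is not named in this card.
    ds = load_dataset("org-name/code-retrieval-triplets", split="train")

    row = ds[0]
    print(row["query"])             # natural-language query
    print(row["document"])          # positive (gold) code document
    print(len(row["negatives"]))    # 30 hard-negative code snippets
    print(row["negative_scores"])   # similarity scores for the negatives
    print(row["document_score"], row["document_rank"])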
Given the normalized RNN outputs (post-softmax) at each time step and a target labeling, compute the backward variables beta_t(s) as defined in Equation 9 of the paper.
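For reference, a LaTeX sketch of the standard CTC backward recursion over the blank-expanded labeling l' (following Graves et al., 2006; the specific equation number in the referenced paper is not verified here, but this is the recursion the code below implements):

\[
\beta_T(|l'|) = y^{T}_{b}, \qquad
\beta_T(|l'|-1) = y^{T}_{l'_{|l'|-1}}, \qquad
\beta_T(s) = 0 \;\; \text{for } s < |l'| - 1,
\]
\[
\beta_t(s) =
\begin{cases}
\bigl(\beta_{t+1}(s) + \beta_{t+1}(s+1)\bigr)\, y^{t}_{l'_s} & \text{if } l'_s = b \text{ or } l'_{s+2} = l'_s, \\[4pt]
\bigl(\beta_{t+1}(s) + \beta_{t+1}(s+1) + \beta_{t+1}(s+2)\bigr)\, y^{t}_{l'_s} & \text{otherwise,}
\end{cases}
\]

where y^t_k is the post-softmax output for label k at time t and b denotes the blank label.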
def compute_backward_variables(self, normalized_logits, target):
    # assumes numpy (np) and the add_blanks helper are available at module level
    target_length = target.shape[0]
    num_time_steps = normalized_logits.shape[0]

    ######################
    ### YOUR CODE HERE ###
    ######################

    blank_label = normalized_logits.shape[1] - 1
    l = add_blanks(target, blank_label)  # blank-expanded label sequence l'
    target_length = l.shape[0]
    beta = np.zeros((target_length, num_time_steps))

    # initialization at the last time step
    beta[target_length - 1, num_time_steps - 1] = normalized_logits[num_time_steps - 1, l[-1]]
    beta[target_length - 2, num_time_steps - 1] = normalized_logits[num_time_steps - 1, l[-2]]
    for s in xrange(target_length - 2):
        beta[s, num_time_steps - 1] = 0.0

    # recursive case, moving backwards in time
    for t in xrange(num_time_steps - 2, -1, -1):
        for s in xrange(target_length - 3, -1, -1):
            b_bar = beta[s, t + 1] + beta[s + 1, t + 1]
            if l[s] == blank_label or l[s + 2] == l[s]:
                beta[s, t] = b_bar * normalized_logits[t, l[s]]
            else:
                beta[s, t] = (b_bar + beta[s + 2, t + 1]) * normalized_logits[t, l[s]]
    return beta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def backwardVariableGeneration(self):\n self.beta = zeros((self.noOfEmmittingStates+2, self.T + 1))\n\n # initialisation\n for j in range(self.noOfEmmittingStates+1):\n self.beta[j,-1] = self.transitionMatrix[j,-1]\n self.beta[-1,-1] = 1.0\n\n # main recursion\n for t in range(self.T, 1, -1):\n for j in range(self.noOfEmmittingStates, 0, -1):\n partialSum = 0\n for k in range(1, self.noOfEmmittingStates+1):\n partialSum += (self.transitionMatrix[j,k-1] * self.b[k-1,t-1] * self.beta[k,t])\n self.beta[j,t-1] = partialSum\n\n # first column\n partialSum = 0\n for k in range(1, self.noOfEmmittingStates+1):\n partialSum += (self.transitionMatrix[0,k-1] * self.b[k-1,0] * self.beta[k,1])\n self.beta[0,0] = partialSum\n\n # likelihood of observed sequence, p(O|lambda)\n self.observationLikelihood = self.alpha[-1,-1]", "def _backward_probability(self, unlabeled_sequence):\n T = len(unlabeled_sequence)\n N = len(self._states)\n beta = _ninf_array((T, N))\n\n transitions_logprob = self._transitions_matrix().T\n\n # initialise the backward values;\n # \"1\" is an arbitrarily chosen value from Rabiner tutorial\n beta[T - 1, :] = np.log2(1)\n\n # inductively calculate remaining backward values\n for t in range(T - 2, -1, -1):\n symbol = unlabeled_sequence[t + 1][_TEXT]\n outputs = self._outputs_vector(symbol)\n\n for i in range(N):\n summand = transitions_logprob[i] + beta[t + 1] + outputs\n beta[t, i] = logsumexp2(summand)\n\n return beta", "def backward_pass(self, delta):\n\n a = config['learning_rate']\n y = config['momentum_gamma']\n m = config['momentum']\n l = config['L2_penalty']\n\n # print(\"shape of delta incoming: \", delta.shape, \"shape of x: \", self.x.shape)\n self.d_x = delta.T @ self.x\n # print(\"SHAPE OF GRADIENT: \", self.d_x.shape)\n\n # gradient momentum\n self.w_inc = (a * self.d_x.T) + (y * self.d_v) - l * self.w\n \n # saving \n if m:\n self.d_v = self.w_inc\n else:\n self.d_v = np.zeros(self.w.shape)\n\n # backprop for bias weights\n x_0 = np.ones([len(delta), 1])\n\n self.d_b = delta.T @ x_0\n\n # print(\"shape of BIAS GRAD: \", self.d_b.shape)\n\n self.d_w = delta @ self.w.T\n # print(\"shape of w.T: \", self.w.T.shape, \"shape of RETURN delta: \", self.d_w.shape)\n #print(self.w.shape)\n return self.d_w", "def step(self):\n loss = None\n for group in self.param_groups:\n for p in group['params']:\n grad = p.grad.data\n state = self.state[p]\n\n if len(state) == 0:\n t = 0\n m = torch.zeros_like(p.data)\n v = torch.zeros_like(p.data)\n # v_hat = torch.zeros_like(p.data)\n else:\n t = state['t']\n m = state['m']\n v = state['v']\n\n b1 = group['beta1']\n b2 = group['beta2']\n t += 1\n\n m = torch.mul(m, b1) + (1-b1) * grad\n v = torch.mul(v, b2) + (1-b2) * grad**2\n\n m_unbias = 1 / (1 - b1**t)\n v_unbias = 1 / (1 - b2**t)\n\n p.data -= (group['lr'] * m_unbias / math.sqrt(v_unbias)) * \\\n m / (math.sqrt(v_unbias) + group['eps'])\n\n # v_hat = torch.max(v_hat, v)\n # p.data -= group['lr'] / m_unbias * m * v_hat / (v_unbias.sqrt() + group['eps'])\n state['t'] = t\n state['m'] = m\n state['v'] = v\n\n return loss", "def predict(self,inputs,keep_prob, _):\n #Non-Dynamic Unidirectional RNN\n hidden_size = self.config.mRNN._hidden_size\n batch_size = self.config.batch_size\n embed_size = self.config.mRNN._embed_size\n\n if keep_prob == None:\n keep_prob = 1\n\n with tf.variable_scope('InputDropout'):\n inputs = [tf.nn.dropout(x,keep_prob) for x in inputs]\n \n with tf.variable_scope('RNN') as scope:\n state = self.initial_state\n RNN_H = 
tf.get_variable('HMatrix',[hidden_size,hidden_size])\n RNN_I = tf.get_variable('IMatrix', [embed_size,hidden_size])\n RNN_b = tf.get_variable('B',[hidden_size])\n\n self.variable_summaries(RNN_H, 'HMatrix')\n self.variable_summaries(RNN_I, 'IMatrix')\n self.variable_summaries(RNN_b, 'Bias')\n \n with tf.variable_scope('RNN',reuse=True):\n rnn_outputs = []\n for tstep, current_input in enumerate(inputs):\n RNN_H = tf.get_variable('HMatrix',[hidden_size,hidden_size])\n RNN_I = tf.get_variable('IMatrix', [embed_size,hidden_size])\n RNN_b = tf.get_variable('B',[hidden_size])\n #state = tf.nn.tanh(tf.matmul(state,RNN_H) + tf.matmul(current_input,RNN_I) + RNN_b)\n\n state = tf.matmul(state,RNN_H) + current_input\n rnn_outputs.append(state)\n\t\t#How to pass state info for subsequent sentences\n self.final_state = rnn_outputs[-1]\n \n with tf.variable_scope('RNNDropout'):\n rnn_outputs = [tf.nn.dropout(x,keep_prob) for x in rnn_outputs]\n\n return rnn_outputs", "def backward_pass(self, delta, zs, activations, nabla_b, nabla_w):\n for l in range(2, self.num_layers):\n delta = np.dot(self.weights[-l + 1].transpose(), delta) * sigmoid_prime(zs[-l])\n nabla_b[-l] = delta\n nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())\n return nabla_b, nabla_w", "def backward_step(activations, targets, layers):\n param_grads = collections.deque() # List of parameter gradients for each layer\n output_grad = None # The error gradient at the output of the current layer\n # Propagate the error backwards through all the layers.\n # Use reversed to iterate backwards over the list of layers.\n for i, layer in enumerate(reversed(layers)):\n cur_layer_idx = len(layers) - i - 1\n if cur_layer_idx <= NUM_LAYERS_SKIP:\n # implement short circuit here\n if layer.is_fc_layer:\n grads = [0.0 for _ in range(layer.W.shape[0]*layer.W.shape[1]+layer.W.shape[1])]\n else:\n # normal gradient computation \n Y = activations.pop() # Get the activations of the last layer on the stack\n # Compute the error at the output layer.\n # The output layer error is calculated different then hidden layer error.\n if output_grad is None:\n input_grad = layer.get_input_grad(Y, targets)\n else: # output_grad is not None (layer is not output layer)\n input_grad = layer.get_input_grad(Y, output_grad)\n # Get the input of this layer (activations of the previous layer)\n X = activations[-1]\n # Compute the layer parameter gradients used to update the parameters\n grads = layer.get_params_grad(X, output_grad)\n param_grads.appendleft(grads)\n # Compute gradient at output of previous layer (input of current layer):\n output_grad = input_grad\n return list(param_grads) # Return the parameter gradients", "def backward_pass(self):\r\n # the gradient of cross-entropy on top of softmax is (t-y)\r\n back_output = (self.targets - self.y) / self.y.shape[0]\r\n\r\n for layer in reversed(self.layers):\r\n back_output = layer.backward_pass(back_output)", "def forward_backward_prop(data, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n activation = []\n\n W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))\n ofs += Dx * H\n b1 = np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n ### Forward propagation\n activation.append(data)\n\n # Hidden layer inputs: (N, Dx) * (Dx, H) -> N x H\n z = np.dot(activation[-1], W1) + b1 \n # Activations, 
inputs to the final layer. \n activation.append(sigmoid(z)) # output of the hidden layer, activation\n # Final layer outputs: ( N x H ) * ( H, Dy) -> (N, Dy)\n z = np.dot(activation[-1], W2) + b2\n activation.append( softmax(z) )\n\n # Cross-entropy cost\n\n y_p = activation[-1]\n activation = activation[:-1] # remove activation data (output)\n\n cost = -np.sum(labels * np.log(y_p))\n \n error = []\n \n ### backward propagation\n sigma = (y_p - labels)\n error.append(sigma)\n\n gradb2 = np.sum(error[-1], axis=0)\n gradW2 = np.dot(activation[-1].T, error[-1])\n\n #\n sigma = np.dot(W2, error[-1].T)\n sigma = sigma.T * sigmoid_grad(activation[-1])\n activation = activation[:-1] # remove activation data ( hidden layer )\n\n error.append(sigma)\n\n gradb1 = np.sum(error[-1], axis=0)\n gradW1 = np.dot(activation[-1].T, error[-1])\n\n\n ### Stack gradients (do not modify)\n grad = np.concatenate((gradW1.flatten(), gradb1.flatten(), \n gradW2.flatten(), gradb2.flatten()))\n \n return cost, grad", "def backward_step(activations, targets, layers):\n param_grads = collections.deque() # List of parameter gradients for each layer\n output_grad = None # The error gradient at the output of the current layer\n # Propagate the error backwards through all the layers.\n # Use reversed to iterate backwards over the list of layers.\n for layer in reversed(layers): \n Y = activations.pop() # Get the activations of the last layer on the stack\n # Compute the error at the output layer.\n # The output layer error is calculated different then hidden layer error.\n if output_grad is None:\n input_grad = layer.get_input_grad(Y, targets)\n else: # output_grad is not None (layer is not output layer)\n input_grad = layer.get_input_grad(Y, output_grad)\n # Get the input of this layer (activations of the previous layer)\n X = activations[-1]\n # Compute the layer parameter gradients used to update the parameters\n grads = layer.get_params_grad(X, output_grad)\n param_grads.appendleft(grads)\n # Compute gradient at output of previous layer (input of current layer):\n output_grad = input_grad\n return list(param_grads) # Return the parameter gradients", "def backward_G(self):\n mask = self.mask*0.5 + 0.5\n\n self.loss_G_SH = self.criterionS(self.pr_SH*mask, self.gt_SH*mask) * self.opt.lambda_S\n self.loss_G = self.loss_G_SH\n\n if not self.opt.no_brightness:\n self.loss_G_BA = self.criterionBA(self.pr_BA*mask, self.gt_BA*mask) * self.opt.lambda_BA\n self.loss_G_BC = 0\n for i in range(25):\n gt_BC = self.gt_BC[i][:, :2]\n bc_num = int(self.gt_BC[i][0, 3].item())\n pr_BC = self.pr_BC[i]\n loss_G_BC = util.min_loss_BC_NoBatch(pr_BC, gt_BC, bc_num, self.criterionBC)\n loss_G_BC = loss_G_BC * self.opt.lambda_BC / 25.0\n self.loss_G_BC += loss_G_BC\n\n loss_B = self.loss_G_BA + self.loss_G_BC\n self.loss_G += loss_B\n\n # Third, LTM Regularization\n if self.opt.reg_LTM:\n ltm_mean = torch.mean(self.ltm, dim=0, keepdim=True) # [1, 75, 256, 256]\n ltm_mean = ltm_mean.expand(self.ltm.size(0), ltm_mean.size(1), ltm_mean.size(2), ltm_mean.size(3)) # [25, 75, 256, 256]\n self.loss_LTMReg = self.criterionReg(self.ltm, ltm_mean) * self.opt.lambda_regLTM\n self.loss_G += self.loss_LTMReg\n\n\n self.loss_G.backward()", "def backward(self, b):\n\n self.b = [b]\n\n # calculate the estimated errors on each layer ($\\delta$)\n for k,w in reversed(list(enumerate(self.weights[1:]))):\n if self.has_bias:\n delta = numpy.dot(self.b[0], w[1:].T)\n act = self.a[k+1][:,1:]\n else:\n delta = numpy.dot(self.b[0], w.T)\n act = self.a[k+1]\n 
self.b.insert(0, delta*self.hidden_activation.f_prime_from_f(act))\n\n self.d = []\n for a,b in zip(self.a[:-1], self.b):\n self.d.append(numpy.dot(a.T, b) / len(b))\n\n return self.d", "def backward(self, outputs, labels):\n # Layers & shape\n depth = len(self.layer_dimensions) - 1\n # num_classes, batch_size = outputs.shape\n batch_size, num_classes = outputs.shape\n coefficient = 1 / batch_size\n # 1/ First case : last layer -> output\n layer_a = \"a_\" + str(depth - 1)\n a = self._cache[layer_a]\n Jz = outputs - labels\n # Weights gradients\n dw = coefficient * np.dot(a.T, Jz)\n db = coefficient * np.sum(Jz, axis=0)\n self._grad[\"dw_\" + str(depth)] = dw\n self._grad[\"db_\" + str(depth)] = db\n # 2/ Second case : inside the layers\n for i in range(depth - 1, 0, -1):\n # Get the weights and biases\n layer_w = \"w_\" + str(i + 1)\n layer_a = \"a_\" + str(i - 1)\n layer_z = \"z_\" + str(i)\n w = self._parameters[layer_w]\n a = self._cache[layer_a]\n z = self._cache[layer_z]\n # Gradients\n Jz = self.activation_hidden.backward(z) * np.dot(Jz, w.T)\n db = coefficient * np.sum(Jz, axis=0)\n dw = coefficient * np.dot(a.T, Jz)\n self._grad[\"dw_\" + str(i)] = dw\n self._grad[\"db_\" + str(i)] = db", "def back_prop(self, dout):\n W, b, X, Z = self.W, self.b, self.X, self.Z\n num_input, num_output = self.num_input, self.num_output\n dZ_dW = X.T\n dZ_dX = W.T\n\n dout = dout * (Z != 0.)\n dout_dW = dout @ dZ_dW # (out, train) x (train, in) = (out, in)\n dout_db = dout.sum(axis=1) # (out, )\n dout_dX = dZ_dX @ dout # (in, out) x (out, train) = (in, train)\n return dout_dX, dout_dW, dout_db.reshape(num_output, 1)", "def backward(Observation, Emission, Transition, Initial):\n\n T = Observation.shape[0]\n N, M = Emission.shape\n beta = np.zeros((N, T))\n beta[:, T - 1] = np.ones(N)\n\n for t in range(T - 2, -1, -1):\n for n in range(N):\n Transitions = Transition[n, :]\n Emissions = Emission[:, Observation[t + 1]]\n beta[n, t] = np.sum((Transitions * beta[:, t + 1]) * Emissions)\n\n # P = np.sum(Initial[:, 0] * Emission[:, Observation[0]] * beta[:, 0])\n return beta", "def train(self, inputs, targets, eta, niterations):\n ndata = np.shape(inputs)[0] # number of data samples\n # adding the bias\n inputs = np.concatenate((inputs, -np.ones((ndata, 1))), axis=1)\n\n # numpy array to store the update weights\n updatew1 = np.zeros((np.shape(self.weights1)))\n updatew2 = np.zeros((np.shape(self.weights2)))\n updatew3 = np.zeros((np.shape(self.weights3)))\n\n self.Errors = []\n for n in range(niterations):\n\n #############################################################################\n # TODO: implement the training phase of one iteration which consists of two phases:\n # the forward phase and the backward phase. you will implement the forward phase in \n # the self.forwardPass method and return the outputs to self.outputs. Then compute \n # the error (hints: similar to what we did in the lab). Next is to implement the \n # backward phase where you will compute the derivative of the layers and update \n # their weights. \n #############################################################################\n\n # forward phase \n self.outputs = self.forwardPass(inputs)\n\n # Error using the sum-of-squares error function\n error = 0.5 * np.sum((self.outputs - targets) ** 2)\n\n if np.mod(n, 100) == 0:\n self.Errors.append(error)\n print(\"Iteration: \", n, \" Error: \", error)\n\n # backward phase \n # Compute the derivative of the output layer. 
NOTE: you will need to compute the derivative of \n # the softmax function. Hints: equation 4.55 in the book. \n # deltao = (self.outputs - targets) * (self.outputs - self.outputs ** 2)\n deltao = (self.outputs - targets) * self.outputs * (1 - self.outputs)\n\n # compute the derivative of the second hidden layer\n\n deltah2 = self.beta * self.hidden2 * (1.0 - self.hidden2) * (np.dot(deltao, np.transpose(self.weights3)))\n\n\n # compute the derivative of the first hidden layer\n deltah1 = self.beta * self.hidden1 * (1.0 - self.hidden1) * (np.dot(deltah2[:, :-1], np.transpose(self.weights2)))\n\n # update the weights of the three layers: self.weights1, self.weights2 and self.weights3\n # here you can update the weights as we did in the week 4 lab (using gradient descent) \n # but you can also add the momentum\n\n updatew1 = eta * np.dot(np.transpose(inputs), deltah1[:, :-1]) + self.momentum * updatew1\n updatew2 = eta * np.dot(np.transpose(self.hidden1), deltah2[:, :-1]) + self.momentum * updatew2\n updatew3 = eta * np.dot(np.transpose(self.hidden2), deltao) + self.momentum * updatew3\n\n self.weights1 -= updatew1\n self.weights2 -= updatew2\n self.weights3 -= updatew3\n\n #############################################################################\n # END of YOUR CODE \n #############################################################################", "def backward(self, O, B, final_probs = None, terminal_nodes = None):\n\n beta = numpy.zeros([len(O), len(self)])\n\n if final_probs is not None:\n beta[-1, :] = numpy.log(final_probs + Constants.k_zero_prob)\n elif terminal_nodes is not None:\n beta[-1, :] = Constants.k_log_zero\n for s in terminal_nodes:\n beta[-1, s] = 0.0\n else:\n beta[-1, :] = 0.0 # log(1.0)\n\n t = beta.shape[0] - 2\n while t >= 0:\n for i in range(beta.shape[1]):\n #\n # beta[t, i] = sum_j(A[i, j] * B[j, O[t + 1]) * beta[t + 1, j])\n #\n beta[t, i] = HMM.log_add(self.A.log_transitions[i, :] + B[:, t + 1] + beta[t + 1, :])\n t -= 1\n\n return beta", "def forward_backward_prop(data, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))\n ofs += Dx * H\n b1 = np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n #print W2.shape, b2.shape\n\n #print len(W1), data[0], b1, len(data)\n\n ### YOUR CODE HERE: forward propagation\n #Eg, find the cost function. Save some intermediate stuff though, seems like it'd be useful\n #h = sigmoid(x * w1 + b1)\n # y = (softmax( h * w2 + b2)\n # hence the cost function will be labels * log(y) and then sum it all up\n\n z_1 = np.matrix(data) * W1 + b1\n h = sigmoid(z_1)\n y_prime = softmax(h * W2 + b2)\n logs = np.log(y_prime)\n\n #print y_prime.shape\n\n #print np.array(logs) * labels\n\n cost = - np.sum(np.array(logs) * labels, axis = 1)\n cost = np.sum(cost) # lets add up each instance fo the cost for now and see what happens\n\n # My question is then do we just sum up the costs of each function\n #print cost #somethign is printing so I'm gonan say i'm a genius right here duh\n\n #Cost(y, y') = -sum of (y * log Y')\n ### END YOUR CODE\n\n ### YOUR CODE HERE: backward propagation\n\n # you'll need gradients for each parameter except for the input vectors. 
Right now this isn't even a word2vec\n delta_1 = y_prime - labels\n delta_2 = delta_1 * W2.T\n #print sigmoid_grad(h).shape\n delta_3 = np.array(delta_2) * sigmoid_grad(h)\n\n gradW2 = np.array(h.T * delta_1) # i dunno or its reverse OMG I HASTE EVERYONE why is it that np.array fixes everything. Sigh\n gradb2 = np.array(np.sum(delta_1, axis=0)) # main issue is that this is a 20 x 5 vector when it should be a 1 x 5\n gradW1 = data.T.dot(delta_3)\n gradb1 = np.sum(delta_3, axis=0) # this should be 1 x10 not 20 x 5\n\n\n\n ### END YOUR CODE\n\n #print gradW1, gradW1.flatten()\n # print 'jee'\n\n ### Stack gradients (do not modify)\n grad = np.concatenate((\n gradW1.flatten(),\n gradb1.flatten(),\n gradW2.flatten(),\n gradb2.flatten())\n )\n #print grad\n #print cost\n return cost, grad", "def backward_prop(self, xs, scores, y):\n deltas = []\n # output layer\n # print xs[-1].shape\n #assert(xs[-1].shape == ())\n #assert(scores[-1].shape == ())\n\n deltas = []\n delta_L = -2 * (y - xs[-1]) * d_tanh(xs[-1])\n # use reverse order first. reverse in the end\n deltas.append(delta_L)\n\n for layer_idx in range(self.n_layer - 1, 0, -1):\n prev_layer_idx = layer_idx - 1\n w = self.ws[prev_layer_idx]\n x = xs[prev_layer_idx]\n no_bias_w = w[1:, ]\n delta = deltas[-1]\n #print\n #print no_bias_w.shape\n #print delta.shape\n #print x.shape\n #print \n prev_delta = np.dot(no_bias_w, delta) * d_tanh(x)\n deltas.append(prev_delta)\n\n deltas.reverse()\n return deltas", "def backward(self, inputs): \n self.error = self.error * sigmoid(self.output, der=True) # because the activation function of last layer must be sigmoid\n delta3_weights = np.dot(self.z2.T, self.error)\n\n self.error = np.dot(self.error, self.output3_weights.T) * self.af(self.z2, der=True) \n delta2_weights = np.dot(self.z1.T, self.error)\n\n self.error = np.dot(self.error, self.hidden2_weights.T) * self.af(self.z1, der=True)\n delta1_weights = np.dot(inputs.T, self.error)\n\n self.hidden1_weights -= self.lr * delta1_weights\n self.hidden2_weights -= self.lr * delta2_weights\n self.output3_weights -= self.lr * delta3_weights", "def backward_pass(architecture,gradient_layerwise,grad_weights,grad_bias):\n \n for layer in range(len(architecture)-1,-1,-1):\n X_input,X_output,weightsi,biasi,X_input_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imxi = architecture['layer{}'.format(layer+1)]\n# print(\"Operation is:{} and Layer is: {}\".format(operationi,layer+1))\n if operationi == 'softmax': # Last layer -> Dont apply softmax in any layer other than the last layer!\n # not taking gradients here because we need dz_dX(secondlastlayer) which is y_pred - y\n continue\n \n if operationi == 'conv_bn_relu' or operationi == 'conv_relu' or operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if operationi__1 == 'softmax':\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # .\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n 
gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # .\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input_im2col)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input_im2col)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi #\n elif operationi__1 == 'maxpool': # need to do something here to fix the problem\n None\n\n elif 'flatten' in operationi__1:\n # we currently have dz_doutput of flatten -> we want dz_doutput of the conv_bn_relu before flatten\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2] # weights2\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput of flatten\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5] # i\n try:\n dz_dXi = torch.t(weightsi__1).mm(dz_dXi__1)\n except:\n dz_dXi = weightsi__1.mm(dz_dXi__1)\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n\n dz_dXi = torch.reshape(dz_dXi,(output_shapei[1]*output_shapei[2],-1))\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n dz_dweightsi = X_input_im2col.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n dz_dbi = dz_dXi\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)# Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi) # Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi) # Can also set this to layer like in line ~800\n \n else:\n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dX2 -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n \n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n if 'sigmoid' in operationi__1: # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi__1: # ...\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dXi = torch.reshape(dz_dXi,(output_shape_current_layer[1]*output_shape_current_layer[2],-1))\n dz_dbi = 
torch.reshape(dz_dXi,bias_current_layer.shape)\n dz_dweightsi = X_im2col_current_layer.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n \n if operationi == 'maxpool':\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n \n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n try:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n except:\n Y = torch.t(weightsi__1).mm(dz_dXi__1) # Ensuring valid matrix multiplication here\n \n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n \n if operationi__1 == 'conv_sigmoid' or operationi__1 == 'conv_bn_sigmoid': # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n else:\n dz_dXi[X_output <= 0] = 0\n\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n \n dz_dXinput = torch.zeros((X_input.shape))\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+1)][0] # output = output of maxpool\n\n dz_dXoutput = torch.reshape(dz_dXoutput,(output_shapei[0],X_input_im2col.shape[2]))\n \n for i in range(output_shapei[0]):\n for j in range(X_input_im2col.shape[2]):\n Xi2ci = X_im2col_current_layer[i,:,:]\n idx = torch.argmax(Xi2ci[:,j]).item()\n value = imxi[i][(idx,j)]\n dz_dXinput[value[0],value[1],value[2]] += float(dz_dXoutput[i,j])\n\n# dz_dXinput = torch.reshape(dz_dXinput,output_shapei)\n \n X_prev_im2col = architecture['layer{}'.format(layer)][4]\n X_output_prev = architecture['layer{}'.format(layer)][1]\n X_output_prev = torch.reshape(X_output_prev,dz_dXinput.shape)\n X_input_prev = architecture['layer{}'.format(layer)][0]\n prev_bias = architecture['layer{}'.format(layer)][3]\n output_shape_prev = architecture['layer{}'.format(layer)][6]\n prev_operation = architecture['layer{}'.format(layer)][9]\n \n if prev_operation == 'conv_sigmoid' or prev_operation == 'conv_bn_sigmoid':\n dz_dXinput *= sigmoid(X_output_prev)*(1-sigmoid(X_output_prev)) # Taking the derivative of the sigmoid function\n else:\n dz_dXinput[X_output_prev <= 0] = 0\n \n if len(dz_dXinput.shape) == 3:\n dz_dXinput = torch.reshape(dz_dXinput,(-1,output_shape_prev[0]))\n \n dz_dbi = torch.reshape(dz_dXinput,prev_bias.shape)\n dz_dweightsi = X_prev_im2col.mm(dz_dXinput)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n 
\n gradient_layerwise['layer{}'.format(layer)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer)][0] = torch.Tensor(dz_dXinput) # ...\n \n if 'flatten_dense' in operationi:\n \n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n \n if operationi__1 == 'softmax':\n \n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n X_output = torch.reshape(X_output,(-1,1))\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if 'sigmoid' in operationi:\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # Can also set this to layer like in line ~800\n \n else:\n # Have to modify and test this before implementation -> Specifically\n # the backprop implementation is not consistent with the ones above\n #\n X_output = torch.reshape(X_output,(-1,1))\n weights__i = architecture['layer{}'.format(layer+2)][2]\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+2)][0]\n dz_dXoutput = torch.reshape(torch.Tensor(dz_dXoutput),X_output.shape)\n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n\n if 'relu' in operationi:\n dz_dXoutput[X_output<0] = 0\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n if 'sigmoid' in operationi:\n dz_dXoutput*= sigmoid(X_output)*(1-sigmoid(X_output))\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n else:\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n \n unflattened_Xinput = architecture['layer{}'.format(layer+1)][0]\n dz_dXinput = torch.reshape(dz_dXinput,unflattened_Xinput.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXinput)\n \n if gradient_layerwise['layer{}'.format(layer+1)][1] is not None:\n try:\n grad_weights['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][1]\n except:\n grad_weights['layer{}'.format(layer+1)] += 
torch.t(gradient_layerwise['layer{}'.format(layer+1)][1])\n if gradient_layerwise['layer{}'.format(layer+1)][2] is not None:\n try:\n grad_bias['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][2]\n except:\n grad_bias['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][2])\n \n gc.collect()\n return", "def backward_step_matrix_version(activations, targets, layers):\n param_grads = collections.deque() # List of parameter gradients for each layer\n \n output_grad = None # The error gradient at the output of the current layer\n # Propagate the error backwards through all the layers.\n # Use reversed to iterate backwards over the list of layers.\n for i, layer in enumerate(reversed(layers)):\n cur_layer_idx = len(layers) - i - 1\n if cur_layer_idx <= NUM_LAYERS_SKIP:\n # implement short circuit here\n if layer.is_fc_layer:\n grads = [0.0 for _ in range(layer.W.shape[0]*layer.W.shape[1]+layer.W.shape[1])]\n else:\n # normal gradient computation \n Y = activations.pop() # Get the activations of the last layer on the stack\n # Compute the error at the output layer.\n # The output layer error is calculated different then hidden layer error.\n if output_grad is None:\n input_grad = layer.get_input_grad(Y, targets)\n else: # output_grad is not None (layer is not output layer)\n input_grad = layer.get_input_grad(Y, output_grad)\n # Get the input of this layer (activations of the previous layer)\n X = activations[-1]\n # Compute the layer parameter gradients used to update the parameters\n grads = layer.get_params_grad(X, output_grad)\n # we do one extra gradient reshape step here:\n if layer.is_fc_layer:\n grads_new = np.array(grads).reshape((layer.get_shape[0]+1, layer.get_shape[1]))\n param_grads.appendleft(grads_new)\n else:\n param_grads.appendleft(grads)\n # Compute gradient at output of previous layer (input of current layer):\n output_grad = input_grad\n return list(param_grads) # in this way, `param_grads` will be a list which contains grads of numpy ndarray", "def compute_forward_variables(self, normalized_logits, target):\n\n target_length = target.shape[0]\n num_time_steps = normalized_logits.shape[0]\n\n\t\t######################\n\t\t### YOUR CODE HERE ###\n\t\t######################\n \n blank_label = normalized_logits.shape[1] - 1\n l = add_blanks(target, blank_label)\n target_length = l.shape[0]\n\n # init\n alpha = np.zeros((target_length, num_time_steps))\n alpha[0, 0] = normalized_logits[0, blank_label] # where s = 0, t = 0\n alpha[1, 0] = normalized_logits[0, target[0]] # where s = 1, t = 0\n for i in xrange(2, num_time_steps): # for all s >= 2, t = 0\n alpha[i, 0] = 0\n\n # recursive case\n for t in xrange(1, num_time_steps):\n for s in xrange(2, target_length):\n \n a_bar = alpha[s, t-1] + alpha[s-1, t-1] \n\n if l[s] == blank_label or l[s-2] == l[s]:\n alpha[s, t] = normalized_logits[t, l[s]] * a_bar\n else:\n alpha[s, t] = normalized_logits[t, l[s]] * (a_bar + alpha[s-2, t-1])\n return alpha", "def backward(ctx, grad_output_var):\n xmin = 0\n xmax = 1\n grad_output = grad_output_var.data\n gamma_mu,kappa,uTx,x = ctx.saved_tensors\n n = kappa.size()[0]\n nx = grad_output.size()[2]\n u = 1/nx**2*torch.linspace(1,nx,nx)\n norm_u = torch.norm(u)**2\n torch_u = u.view(1,1,-1)+torch.zeros(n,1,nx)#broadcast\n denom = (xmin-kappa)*(xmax-kappa)\\\n -(kappa-uTx)*(xmin+xmax-2*kappa)\\\n -2*gamma_mu*norm_u \n #\n idx = (denom.abs()>1e-7)\n ind = (denom.abs()>1e-7)+ torch.zeros(n,1,nx)#broadcasting\n ind = ind>0\n denom[~idx] = 
denom[~idx]+1\n grad_input_gamma_mu = (2*kappa-(xmin+xmax))/denom*torch_u\n coeff = (xmax-kappa)*(xmin-kappa)/denom - 1\n grad_input_u = torch.eye(nx) \\\n +coeff*torch.matmul(torch_u.view(1,1,-1,1),u.view(1,-1))/norm_u\n # if denom is very small, it means that gamma_mu is very small and u is very close to one of the bounds,\n # there is a discontinuity when gamma_mu tends to zero, if 0<u<1 the derivative wrt x is approximately equal to \n # 1 and the derivative wrt gamma_mu is approximated by 10^3 times the error 2kappa-xmin-xmax\n grad_input_gamma_mu[~ind] = 0*grad_input_gamma_mu[~ind]+1e3*(2*kappa[~idx]-(xmin+xmax))\n grad_input_u[~ind] = 0*grad_input_u[~ind]+1\n \n grad_input_gamma_mu = grad_input_gamma_mu*grad_output#.sum(1).sum(1).unsqueeze(1).unsqueeze(2)\n grad_input_u = grad_input_u*grad_output\n \n # safety check for numerical instabilities\n if (grad_input_gamma_mu!=grad_input_gamma_mu).any():\n print('there is a nan in grad_input_gamma_mu')\n if (x!=x).any():\n print('there is a nan in x')\n sys.exit()\n if (grad_input_u!=grad_input_u).any():\n print('there is a nan in grad_input_u')\n sys.exit()\n \n grad_input_gamma_mu = Variable(grad_input_gamma_mu,requires_grad=True)\n grad_input_u = Variable(grad_input_u,requires_grad=True)\n \n return grad_input_gamma_mu, grad_input_u, None", "def back_prop(net, input_values, desired_output, r=1, minimum_accuracy=-0.001):\n raise NotImplementedError", "def forward_backward(obs, trans_probs, init_probs, trees):\n n, m = len(init_probs), obs\n F = np.zeros((n, m))\n B = np.zeros((n, m))\n R = np.zeros((n, m))\n\n # forwards\n for i, p in enumerate(init_probs):\n F[i, 0] = p + trees[i][0]\n\n for i in range(obs)[1:]:\n for j in range(n):\n e = trees[j][i]\n probs = [F[k, i - 1] + trans_probs[k][j] for k in range(n)]\n F[j, i] = e + logsumexp(probs)\n\n likelihood_f = logsumexp(F[:, -1])\n\n # backwards (note that log(1) = 0, so last col is already set)\n for i in reversed(list(range(obs))[1:]):\n for j in range(n):\n probs = [trans_probs[j][k] + trees[k][i] + B[k, i]\n for k in range(n)]\n B[j, i - 1] = logsumexp(probs)\n\n likelihood_b = logsumexp([p + i + e[0] for p, i, e in zip(B[:, 0], init_probs, trees)])\n\n # Calculate posterior probabilities\n pzi_x = np.array([logsumexp([F[j, i] + B[j, i] for j in range(n)]) for i in range(m)])\n for j in range(m):\n R[:, j] = (F[:, j] + B[:, j]) - pzi_x[j]\n\n return F, likelihood_f, B, likelihood_b, np.exp(R)", "def svm_loss_bias_forloop(W, b, X, y, reg, delta = 1): \n # initialize the returned results\n loss = 0.0\n d_W = np.zeros(W.shape) \n d_b = np.zeros(b.shape)\n\n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n \n for i in xrange(num_train):\n # compute the classification scores for a single image\n scores = X[i].dot(W) + b\n correct_class_score = scores[y[i]]\n for j in xrange(num_classes):\n # compute the loss for this image\n if j == y[i]:\n continue\n margin = scores[j] - correct_class_score + delta \n if margin > 0:\n loss += margin\n # compute the gradient for this image\n d_W[:, j] += X[i, :].T\n d_b[j] += 1\n d_W[:, y[i]] -= X[i, :].T\n d_b[y[i]] -= 1\n \n # Right now the loss is a sum over all training examples\n # We need it to be an average instead so we divide by num_train.\n loss /= num_train \n # Add regularization to the loss.\n loss += 0.5 * reg * np.sum(W * W)\n \n # Do the same for d_W and d_b\n d_W /= num_train\n d_W += reg*W\n\n d_b /= num_train\n \n return loss, d_W, d_b", "def backprop(nn, y):\n LAST = len(nn) - 1\n\n # last 
layer\n nn[LAST].dCdz = np.multiply(2.0 * (nn[LAST].a - y), AF_PRIME(nn[LAST].z))\n nn[LAST].dCdw = (np.dot(nn[LAST].dCdz, nn[LAST].input_value.T))\n nn[LAST].dCdw_sum = \\\n np.add(nn[LAST].dCdw, nn[LAST].dCdw_sum)\n nn[LAST].w -= nn[LAST].dCdw * LEARNING_RATE\n\n # other layer\n for n in range(1, len(nn)):\n dz1dz2 = \\\n np.dot(nn[LAST - n + 1].w.T, nn[LAST - n + 1].dCdz)\n nn[LAST - n].dCdz = \\\n np.multiply(AF_PRIME(nn[LAST - n].z), dz1dz2)\n nn[LAST - n].dCdw = \\\n (np.dot(nn[LAST - n].dCdz, nn[LAST - n].input_value.T))\n nn[LAST - n].dCdw_sum = \\\n np.add(nn[LAST - n].dCdw, nn[LAST - n].dCdw_sum)\n nn[LAST - n].w -= nn[LAST - n].dCdw * LEARNING_RATE", "def _back_prop_hidden(self, layer, alpha, next_layer, outputs, inputs, output_delta):\n res = []\n for i, neuron in enumerate(layer._neurons):\n err_sig = sum([neuron._weights[i] * output_delta[j] for j, neuron in enumerate(next_layer._neurons)])\n res.append(neuron._update_weights(alpha, err_sig))\n return res", "def _backprop(self, y_train, X_train, A_hidden, Z_hidden, A_out, Z_out, batch_idx):\n\n # This is the derivative assuming our costfunction is 0.5*two_norm(A_out - y)**2\n # This results in different backpropagation \n \n error_out = A_out - y_train[batch_idx].reshape(len(y_train[batch_idx]), 1)\n \n # Since we are in the regression case with a linear ouput funct.\n act_derivative_out = 1\n\n delta_out = error_out*act_derivative_out\n\n grad_w_out = np.dot(A_hidden[-1].T, delta_out)\n grad_b_out = np.sum(delta_out, axis=0)\n\n # Updating the output weights \n self.W_out = self.W_out - self.eta * grad_w_out\n self.b_out = self.b_out - self.eta * grad_b_out\n \n \n # Looping over all the hidden layers except one\n # If the layer only have one layer it doesn't go into this while loop \n \n i = 0\n while (i < self.n_hidden_layers-1):\n # Index moving backward in the layers.\n #print(\"this should only be one loop\")\n layer_ind = self.n_hidden_layers - 1 - i\n #print(\"layer_ind: : \", layer_ind)\n act_derivative_h = self.activate(Z_hidden[layer_ind], self.activation, deriv=True)\n \n if (i == 0):\n error_prev = np.dot(delta_out, self.W_out.T) * act_derivative_h\n else:\n #print(\"np.shape(error_prev)\", np.shape(error_prev))\n error_prev = np.dot(error_prev, self.W_h[layer_ind+1].T) * act_derivative_h\n \n grad_w_h = np.dot(A_hidden[layer_ind - 1].T, error_prev)\n grad_b_h = np.sum(error_prev, axis=0)\n \n self.W_h[layer_ind] = self.W_h[layer_ind] - self.eta * grad_w_h\n self.b_h[layer_ind] = self.b_h[layer_ind] - self.eta * grad_b_h\n i += 1\n \n \n act_derivative_h = self.activate(Z_hidden[0], self.activation, deriv=True) \n \n # Case with one hidden layer doesn't enter the while loop.\n if( self.n_hidden_layers == 1):\n error_last = np.dot(delta_out, self.W_out.T) * act_derivative_h\n else:\n error_last = np.dot(error_prev, self.W_h[layer_ind].T) * act_derivative_h\n\n grad_w_h = np.dot(X_train[batch_idx].T, error_last)\n grad_b_h = np.sum(error_last, axis = 0)\n\n self.W_h[0] = self.W_h[0] - self.eta * grad_w_h\n self.b_h[0] = self.b_h[0] - self.eta * grad_b_h\n\n return None" ]
[ "0.69956106", "0.64794415", "0.64426166", "0.6405609", "0.6382755", "0.62425697", "0.6233701", "0.62206537", "0.6206972", "0.6171835", "0.6160794", "0.6156567", "0.6151788", "0.614928", "0.6116877", "0.61009604", "0.61001897", "0.60951155", "0.609498", "0.6073558", "0.60669565", "0.606441", "0.60599", "0.60444075", "0.6034624", "0.60328346", "0.6031396", "0.6013562", "0.6011351", "0.6007622" ]
0.7704807
0
Given the RNN outputs (pre-softmax) at each time step and a target labeling, compute the gradients of the CTC loss with respect to the unnormalized logits.
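For reference, a LaTeX sketch of the CTC gradient with respect to the unnormalized logit u^t_k, written with the column-rescaled forward/backward variables \(\hat{\alpha}, \hat{\beta}\) used in the code below (the rescaling convention is taken from that code):

\[
\frac{\partial \mathcal{L}}{\partial u^{t}_{k}}
= y^{t}_{k} - \frac{1}{y^{t}_{k}\, Z_t} \sum_{s \in \mathrm{lab}(l',\, k)} \hat{\alpha}_t(s)\, \hat{\beta}_t(s),
\qquad
Z_t = \sum_{s=1}^{|l'|} \frac{\hat{\alpha}_t(s)\, \hat{\beta}_t(s)}{y^{t}_{l'_s}},
\]

where y^t = softmax(u^t), l' is the blank-expanded labeling, and lab(l', k) is the set of positions in l' where label k occurs.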
def compute_gradients(self, logits, target):
    # assumes numpy (np), softmax, add_blanks, and collections.Counter are available at module level
    target_length = target.shape[0]
    num_time_steps = logits.shape[0]

    ######################
    ### YOUR CODE HERE ###
    ######################

    # expand the labeling by inserting a blank between each pair of labels
    normalized_logits = softmax(logits)
    blank_label = normalized_logits.shape[1] - 1
    l = add_blanks(target, blank_label)
    target_length = l.shape[0]

    alpha = self.compute_forward_variables(normalized_logits, target)
    beta = self.compute_backward_variables(normalized_logits, target)

    # rescale the forward/backward variables column-wise
    alpha = alpha / np.sum(alpha, axis=0)
    beta = beta / np.sum(beta, axis=0)
    alphabeta = alpha * beta

    print "alpha"
    print alpha

    # compute Z_t = sum_s alpha_t(s) * beta_t(s) / y^t_{l'_s}
    z = Counter()
    for t in xrange(num_time_steps):
        for s, k in enumerate(l):
            z[t] += alphabeta[s, t] / normalized_logits[t, k]

    # normalized_logits is (time steps t) x (labels k);
    # alpha and beta are (expanded target length) x (time steps)
    lab_zk = np.zeros_like(normalized_logits)
    for s, k in enumerate(l):
        for t in xrange(num_time_steps):
            lab_zk[t, k] += alphabeta[s, t]

    grad = normalized_logits.copy()
    for k in xrange(normalized_logits.shape[1]):
        for t in xrange(num_time_steps):
            ytk = normalized_logits[t, k]
            constant = 1.0 / (ytk * z[t])
            grad[t, k] = ytk - constant * lab_zk[t, k]
    return grad
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_ctc_loss(self, logits, target):\n\n num_time_steps = logits.shape[0]\n num_labels = logits.shape[1] - 1\n num_labels_with_blank = num_labels + 1\n\n # sanity check to ensure targets are all right\n assert (target < num_labels).all()\n\n\t\t######################\n\t\t### YOUR CODE HERE ###\n\t\t######################\n target_length = 2 * target.shape[0] + 1\n\n normalized_logits = softmax(logits)\n alpha = self.compute_forward_variables(normalized_logits, target) \n \n return -np.log(np.sum(alpha[target_length-1, :] \\\n + alpha[target_length - 2, :], axis=0))", "def compute_gradients(self, inputs, targets, hprev):\n n = len(inputs)\n loss = 0\n\n # Dictionaries for storing values during the forward pass\n aa, xx, hh, oo, pp = {}, {}, {}, {}, {}\n hh[-1] = np.copy(hprev)\n\n # Forward pass\n for t in range(n):\n xx[t] = np.zeros((self.vocab_len, 1))\n xx[t][inputs[t]] = 1 # 1-hot-encoding\n\n aa[t], hh[t], oo[t], pp[t] = self.evaluate_classifier(hh[t-1], xx[t])\n\n loss += -np.log(pp[t][targets[t]][0]) # update the loss\n\n # Dictionary for storing the gradients\n grads = {\"W\": np.zeros_like(self.W), \"U\": np.zeros_like(self.U),\n \"V\": np.zeros_like(self.V), \"b\": np.zeros_like(self.b),\n \"c\": np.zeros_like(self.c), \"o\": np.zeros_like(pp[0]),\n \"h\": np.zeros_like(hh[0]), \"h_next\": np.zeros_like(hh[0]),\n \"a\": np.zeros_like(aa[0])}\n\n # Backward pass\n for t in reversed(range(n)):\n grads[\"o\"] = np.copy(pp[t])\n grads[\"o\"][targets[t]] -= 1\n\n grads[\"V\"] += grads[\"o\"]@hh[t].T\n grads[\"c\"] += grads[\"o\"]\n\n grads[\"h\"] = np.matmul(self.V.T , grads[\"o\"] )+ grads[\"h_next\"]\n grads[\"a\"] = np.multiply(grads[\"h\"], (1 - np.square(hh[t])))\n\n grads[\"U\"] += np.matmul(grads[\"a\"], xx[t].T)\n grads[\"W\"] += np.matmul(grads[\"a\"], hh[t-1].T)\n grads[\"b\"] += grads[\"a\"]\n\n grads[\"h_next\"] = np.matmul(self.W.T, grads[\"a\"])\n\n # Drop redundant gradients\n grads = {k: grads[k] for k in grads if k not in [\"o\", \"h\", \"h_next\", \"a\"]}\n\n # Clip the gradients\n for grad in grads:\n grads[grad] = np.clip(grads[grad], -5, 5)\n\n # Update the hidden state sequence\n h = hh[n-1]\n\n return grads, loss, h", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n ### YOUR CODE HERE\n y_hat = softmax(np.dot(outputVectors,predicted))\n y = np.zeros(outputVectors.shape[0])\n y[target] = 1.0\n\n cost = -np.log(y_hat[target])\n gradPred = np.dot(outputVectors.T,y_hat - y)\n grad = np.outer(y_hat - y,predicted)\n ### END YOUR CODE\n\n return cost, gradPred, grad", "def backward_pass(self):\r\n # the gradient of cross-entropy on top of softmax is (t-y)\r\n back_output = (self.targets - self.y) / self.y.shape[0]\r\n\r\n for layer in reversed(self.layers):\r\n back_output = layer.backward_pass(back_output)", "def lossFunc(inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {} # input, hidden, output, out_prob states for each time t\n hs[-1] = np.copy(hprev)\n loss = 0\n \n # forward pass\n for t in xrange(len(inputs)):\n xs[t] = np.zeros((vocab_size,1)) \n xs[t][inputs[t]] = 1. 
# convert input to one-hot\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh)\n ys[t] = np.dot(Why, hs[t]) + by\n ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))\n loss += -np.log(ps[t][targets[t],0])\n \n # backward pass\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n for t in reversed(xrange(len(inputs))):\n # backprop into y\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1\n # backprop into Why, hs, and by\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext\n # backprop through tanh activition\n dhraw = (1 - hs[t] * hs[t]) * dh\n # backprop into Wxh, Whh, hs, and bh\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(Whh.T, dhraw)\n # clip gradient preventing exploding\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam)\n\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]", "def softmaxCostAndGradient(vc_predicted, target, uw_output, dataset):\n\n N = uw_output.shape[0] # n_words: vocab size\n y = np.zeros(N)\n y[target] = 1 # y is a 1-hot encoded vector with the actual word's index being 1 and rest of elements being 0\n\n score = np.dot(vc_predicted, uw_output.T) # vc dot uo_transpose which gives a vector of dimension (1, n_words)\n y_hat = softmax(score)\n\n # cross-entropy cost is given by formula in assignment 1.2b\n cost = np.sum(-y * np.log(y_hat))\n\n dout = y_hat - y # (1, n_words)\n\n grad_pred_dJ_vc = np.dot(dout, uw_output) # (1, dim_embed)\n\n grad_dJ_uw = np.dot(dout.T, vc_predicted) # (n_words, dim_embed)\n\n return cost, grad_pred_dJ_vc, grad_dJ_uw", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n ### YOUR CODE HERE\n scores = outputVectors.dot(predicted.T) # shape = (V, 1)\n y_hat = softmax(scores)\n cost = -scores[target] + np.log(np.sum(np.exp(scores)))\n one_hot_target = np.zeros_like(y_hat)\n one_hot_target[target] = 1\n grad = np.outer((y_hat - one_hot_target), predicted)\n gradPred = outputVectors.T.dot(y_hat - one_hot_target)\n \n '''\n final_predicted = predicted.dot(outputVectors.T)\n probability = softmax(final_predicted)\n cost = -np.log(probability[target])\n \n one_hot_target = np.zeros_like(probability)\n one_hot_target[target] += 1\n dlogits = probability - one_hot_target\n grad = np.outer(predicted, dlogits).T\n gradPred = outputVectors.T.dot(dlogits)\n '''\n ### END YOUR CODE\n\n return cost, gradPred, grad", "def lossFun(self, inputs, targets, hprev):\n xs, hs, ys, ps = {}, {}, {}, {}\n hs[-1] = np.copy(hprev)\n loss = 0\n # forward pass\n for t in range(len(inputs)):\n xs[t] = np.zeros((self._txt_reader.vocab_size,1)) # One-hot, encode in 1-of-k representation\n xs[t][inputs[t]] = 1\n hs[t] = np.tanh(np.dot(self._Wxh, xs[t]) + np.dot(self._Whh, hs[t-1]) + self._bh) # compute chidden state\n ys[t] = np.dot(self._Why, hs[t]) + self._by # logits \n ys[t] -= ys[t].max() # for numerical stability\n ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars\n\n loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)\n\n # backward pass: compute gradients going backwards\n dWxh, dWhh, dWhy = np.zeros_like(self._Wxh), np.zeros_like(self._Whh), np.zeros_like(self._Why)\n dbh, dby = np.zeros_like(self._bh), np.zeros_like(self._by)\n\n dhnext = np.zeros_like(hs[0])\n for t in reversed(range(len(inputs))):\n dy = np.copy(ps[t])\n dy[targets[t]] -= 1 # backprop into y. 
see http://cs231n.github.io/neural-networks-case-study/#grad if confused here\n dWhy += np.dot(dy, hs[t].T)\n dby += dy\n dh = np.dot(self._Why.T, dy) + dhnext # backprop into h\n dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(self._Whh.T, dhraw)\n\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]", "def ComputeGradients(self, input_data: list, target_output_data: list):\n delta = 1e-6\n normal_cost = self.Cost(input_data, target_output_data)\n\n # Evaluate Gradient for Hidden Layer Biases\n for i in range(self.hidden_layer_biases.shape[0]):\n original_bias_value = self.hidden_layer_biases[i]\n self.hidden_layer_biases[i] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.hidden_layer_biases[i] = original_bias_value\n self.hidden_biases_gradient[i] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Output Layer Biases\n for i in range(self.output_layer_biases.shape[0]):\n original_bias_value = self.output_layer_biases[i]\n self.output_layer_biases[i] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.output_layer_biases[i] = original_bias_value\n self.output_biases_gradient[i] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Input Layer to Hidden Layer Weights\n for i in range(self.input_to_hidden_weights.shape[0]):\n for h in range(self.input_to_hidden_weights.shape[1]):\n original_bias_value = self.input_to_hidden_weights[i, h]\n self.input_to_hidden_weights[i, h] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.input_to_hidden_weights[i, h] = original_bias_value\n self.input_to_hidden_weights_gradient[i, h] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Input Layer to Hidden Layer Weights\n for h in range(self.hidden_to_output_weights.shape[0]):\n for o in range(self.hidden_to_output_weights.shape[1]):\n original_bias_value = self.hidden_to_output_weights[h, o]\n self.hidden_to_output_weights[h, o] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.hidden_to_output_weights[h, o] = original_bias_value\n self.hidden_to_output_weights_gradient[h, o] = (plusdelta_cost - normal_cost) / delta", "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. 
Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def fast_loss_and_grad(self, X, y):\n loss = 0.0\n grad = np.zeros(self.W.shape) # initialize the gradient as zero\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and gradient WITHOUT any for loops.\n # ================================================================ #\n \n num_train = X.shape[0]\n num_classes = self.W.shape[0]\n \n# # vectorized loss calculation #\n class_scores_matrix = np.dot(self.W,X.T) # calculating class scores matrix (C x m): rows are class scores transposes\n class_scores_matrix -= np.max(class_scores_matrix) # considering the possible issue for numerical instability and account for it\n exp_a = np.exp(class_scores_matrix) # calculating the exponents\n \n# y_exp = np.array(exp_a[y, np.arange(0, class_scores_matrix.shape[1])])\n# #print(exp_a[:,:3])\n# #print(y[:3])\n# #print(y_exp[:3])\n \n# tt = np.sum(exp_a,axis=0)\n# tt2 = np.divide(tt,y_exp)\n# print(num_train)\n# tt3 = np.power(tt2,1/num_train)\n# loss = np.log(np.prod(tt3))\n \n \n \n \n (C, D) = self.W.shape\n N = X.shape[0]\n\n scores = np.dot(self.W, X.T)\n scores -= np.max(scores) # shift by log C to avoid numerical instability\n\n y_mat = np.zeros(shape = (C, N))\n y_mat[y, range(N)] = 1\n\n # matrix of all zeros except for a single wx + log C value in each column that corresponds to the\n # quantity we need to subtract from each row of scores\n correct_wx = np.multiply(y_mat, scores)\n\n # create a single row of the correct wx_y + log C values for each data point\n sums_wy = np.sum(correct_wx, axis=0) # sum over each column\n\n exp_scores = np.exp(scores)\n sums_exp = np.sum(exp_scores, axis=0) # sum over each column\n result = np.log(sums_exp)\n\n result -= sums_wy\n\n loss = np.sum(result)\n loss /= num_train\n \n \n # vectorized gradient calculation #\n exp_a_sum = np.sum(exp_a,axis=0)\n\n y_mat_corres = np.zeros(shape = (num_classes, num_train))\n y_mat_corres[y, range(num_train)] = 1\n sum_exp_scores = np.sum(exp_a, axis=0) \n sum_exp_scores = 1.0 / exp_a_sum # division by sum over columns\n exp_a *= sum_exp_scores\n grad = np.dot(exp_a, X)\n grad -= np.dot(y_mat_corres, X)\n grad /= num_train\n \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def softmaxCostAndGradient(predicted, target, 
outputVectors, dataset):\n\n\t#calculate the predictions\n\t#outputVectors: U: (V,d) ->!!diff from 3(a) dimension\n\tvhat = predicted\t#center word or BOW: (d,1)\n\tz = np.dot(outputVectors, vhat)\t#(V,1)\n\tpreds = softmax(z)\t#yhat: (V,1)\n\n\t#calculate the cost \n\tcost = -np.log(preds[target])\n\n\t#gradients\n\tgrad_pred = preds \n\tgrad_pred[target] -= 1\t#yhat - y\n\n\tgrad = np.outer(grad_pred, vhat)\t#(V, d)\n\tgradPred = np.dot(outputVectors.T, grad_pred)\t#dvc\n\n\treturn cost, gradPred, grad", "def compute_forward_variables(self, normalized_logits, target):\n\n target_length = target.shape[0]\n num_time_steps = normalized_logits.shape[0]\n\n\t\t######################\n\t\t### YOUR CODE HERE ###\n\t\t######################\n \n blank_label = normalized_logits.shape[1] - 1\n l = add_blanks(target, blank_label)\n target_length = l.shape[0]\n\n # init\n alpha = np.zeros((target_length, num_time_steps))\n alpha[0, 0] = normalized_logits[0, blank_label] # where s = 0, t = 0\n alpha[1, 0] = normalized_logits[0, target[0]] # where s = 1, t = 0\n for i in xrange(2, num_time_steps): # for all s >= 2, t = 0\n alpha[i, 0] = 0\n\n # recursive case\n for t in xrange(1, num_time_steps):\n for s in xrange(2, target_length):\n \n a_bar = alpha[s, t-1] + alpha[s-1, t-1] \n\n if l[s] == blank_label or l[s-2] == l[s]:\n alpha[s, t] = normalized_logits[t, l[s]] * a_bar\n else:\n alpha[s, t] = normalized_logits[t, l[s]] * (a_bar + alpha[s-2, t-1])\n return alpha", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n ### YOUR CODE HERE\n # identify the target predicted vector and then find the dot product\n # between the vector and the output vectors\n # outputVector structured as V x D \n # v_c structured as 1xD matrix\n # we are assuming here that the output vector and the \n # predicted vector is structured so that each row represent a word / token in {1, V}\n v_c = predicted\n z_w = np.dot(outputVectors, v_c)\n # the output yhat is a 1xV matrix\n yhat = softmax(z_w)\n # create the one hot vector for the predicted word\n # calculate the difference for gradient\n ydiff = yhat.copy()\n ydiff[target] -= 1.0\n\n # find the cross-entropy cost function based on yhat\n # cost = calc_cost_from_prediction(y, yhat)\n cost = - np.log( yhat[target] )\n\n # calculate the gradient wrt to the v_c (the predicted word vector)\n # the gradient is U(yhat - y)\n # the output should be a D x 1 matrix, same as v_c\n # y is a one-hot vector that represents the actual word\n # and we multiply it by output vector, it can also be calculated\n # by using index to find the vector\n gradPred = np.dot( outputVectors.T, ydiff)\n\n\n # calculate the gradient wrt to all other word vectors\n # the gradient is v_c(yhat - y)\n # we multiple yhat by v_c to get a V x D matrix\n grad = np.outer(ydiff, v_c)\n\n ### END YOUR CODE\n return cost, gradPred, grad", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n 
unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def grad_softmax_cross_entropy_loss(logit, labels):\n return softmax(logit) - labels", "def lossFun(review, target, hprev):\n xs, hs, ys, ps = {}, {}, {}, {}\n hs[-1] = np.copy(hprev)\n loss = 0\n\n # forward pass\n for t in range(len(review)):\n xs[t] = np.zeros((vector_len,1)) # encode in 1-of-k representation\n for j in range(32):\n xs[t][j] = review[t][j]\n hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state\n\n #Many 2 one\n last = len(review) - 1\n ys = np.dot(Why, hs[last]) + by # unnormalized log probabilities for next chars\n ps = np.exp(ys) / np.sum(np.exp(ys)) # probabilities for next chars\n loss = -np.log(ps[target,0]) # softmax (cross-entropy loss)\n\n # backward pass: compute gradients going backwards\n dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)\n dbh, dby = np.zeros_like(bh), np.zeros_like(by)\n dhnext = np.zeros_like(hs[0])\n\n dy = np.subtract(ps,target) # backprop into y. 
see http://cs231n.github.io/neural-networks-case-study/#grad if confused here\n dWhy += np.dot(dy, hs[last].T)\n dby += dy\n dh = np.dot(Why.T, dy) + dhnext # backprop into h\n for t in reversed(range(len(review))):\n dhraw = (1 - (hs[t] * hs[t].T)) * dh # backprop through tanh nonlinearity\n dbh += dhraw\n dWxh += np.dot(dhraw, xs[t].T)\n dWhh += np.dot(dhraw, hs[t-1].T)\n dhnext = np.dot(Whh.T, dhraw)\n for dparam in [dWxh, dWhh, dWhy, dbh, dby]:\n np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients\n return loss, dWxh, dWhh, dWhy, dbh, dby, hs[last]", "def backward(self, input_train, input_train_label):\n batchSize = len(input_train) #liczba obrazow podawanych na wejscie w trakcie jednej iteracji\n weights = self.Weights\n biases = self.Biases\n delta_W = self.delta_W\n delta_B = self.delta_B\n poolParams = self.poolParams\n dW_list = []\n dB_list = []\n dW4 = np.zeros(weights[4].shape)\n dB4 = np.zeros(biases[4].shape)\n dW3 = np.zeros(weights[3].shape)\n dB3 = np.zeros(biases[3].shape)\n dW2 = np.zeros(weights[2].shape)\n dB2 = np.zeros(biases[2].shape)\n dW1 = np.zeros(weights[1].shape)\n dB1 = np.zeros(biases[1].shape)\n dW0 = np.zeros(weights[0].shape)\n dB0 = np.zeros(biases[0].shape)\n loss = 0\n for image in range(batchSize):\n\n X_data = input_train[image]\n X_label = input_train_label[image]\n output_forward, cache = self.forward(X_data) \n loss += -1*sum(X_label - np.log(output_forward)) #obliczenie wartosci funkcji straty [cross entropy]\n\n #Propagacja wsteczna gradientu\n dy = -1*(X_label - output_forward)/2\n #print(\"X_label = {} \\t layer7 = {} \\t dy = {}\".format(X_label, output_forward, dy))\n\n [dy, dW, dB ] = fullycon_b(cache[6], np.asarray([dy]).transpose() , weights[4])\n dW4 += dW\n dB4 += dB.flatten() #wektoryzacja macierzy\n dy = act.relu_b(dy.transpose(), cache[6])\n\n [dy, dW, dB ] = fullycon_b(cache[5][:,0], dy, weights[3])\n dW3 += dW\n dB3 += dB.flatten()\n dy = act.relu_b(dy.transpose(), cache[5][:,0]) \n \n [dy, dW, dB ] = convolution_b(cache[4], dy, weights[2])\n dW2 += dW\n dB2 += dB.flatten()\n \n dy = maxpool_b(cache[3], dy)\n dy = act.relu_b(dy, cache[3])\n\n [dy, dW, dB ] = convolution_b(cache[2], dy, weights[1])\n dW1 += dW\n dB1 += dB.flatten()\n \n dy = maxpool_b(cache[1], dy)\n dy = act.relu_b(dy, cache[1]) \n\n [dy, dW, dB ] = convolution_b(np.asarray([cache[0]]), dy, weights[0])\n dW0 += dW\n dB0 += dB.flatten()\n\t\t\t\n dW_list.append(dW4)\n dB_list.append(dB4)\n dW_list.append(dW3)\n dB_list.append(dB3)\n dW_list.append(dW2)\n dB_list.append(dB2)\n dW_list.append(dW1)\n dB_list.append(dB1)\n dW_list.append(dW0)\n dB_list.append(dB0)\n dW_list = dW_list[::-1]\n dB_list = dB_list[::-1]\n \n #Aktualizacja parametrow kazdej z warstw (o ile takie posiada)\n #uczenie z metoda momentum: learning rate = const; alpha = const\n for x in range(len(dW_list)):\n delta_W[x] = alpha*delta_W[x] - eta*dW_list[x]/batchSize\n weights[x] += delta_W[x]\n delta_B[x] = alpha*delta_B[x] - eta*dB_list[x]/batchSize\n biases[x] += delta_B[x]\n #przypisanie nowych wag po aktualiacji wszystkich parametrow\n self.Weights = weights\n self.Biases = biases\n\n #zwrocenie stosunku wartosci f-cji straty do rozmiaru batch'u\n return loss/batchSize", "def train(input, label, conv, maxpool, softmax, lr=0.005):\n # Forward\n output, loss, accuracy = forward(input, label, conv, maxpool, softmax)\n\n gradient = np.zeros(10)\n gradient[label] = -1 / output[label]\n\n # Backprop\n gradient = softmax.backprop(gradient, lr)\n gradient = 
maxpool.backprop(gradient)\n gradient = conv.backprop(gradient, lr)\n\n return loss, accuracy", "def output_loss_and_grads(self, h, V, c, y):\n\n loss, dh, dV, dc = 0.0, [], np.zeros_like(self.V), np.zeros_like(self.c)\n # calculate the output (o) - unnormalized log probabilities of classes\n # calculate yhat - softmax of the output\n # calculate the cross-entropy loss\n # calculate the derivative of the cross-entropy softmax loss with respect to the output (o)\n # calculate the gradients with respect to the output parameters V and c\n # calculate the gradients with respect to the hidden layer h\n for t in range(self.sequence_length):\n hp = h[:, t, :] # BS x H\n #o = self.output(hp, V, c) # leng x BS\n o = self.output(hp, V, c) # BS x leng\n #exp = np.exp(o) # leng x BS\n exp = np.exp(o) # BS x leng\n #s = exp / np.sum(exp, axis=0, keepdims=True) # leng x BS\n s = exp / np.sum(exp, axis=1, keepdims=True) # BS x leng\n yp = y[:, t, :]\n #dO = s - yp # leng x BS\n dO = s - yp # BS x leng\n #dV += np.dot(dO, hp.T) # ( leng x BS ) * ( H x BS ).T = leng x H\n dV += np.dot(hp.T, dO) # ( BS x H ).T * ( BS x leng ) = H x leng\n #dc += np.sum(dO, axis=1).reshape([-1, 1]) #\n dc += np.sum(dO, axis=0).reshape([1, -1]) #\n #dh.append(np.dot(self.V.T, dO)) # ( leng x H ).T * ( leng x BS ) = ( BS x H )\n dh.append(np.dot(dO, self.V.T)) # ( BS x leng ) * ( H x leng ).T = ( BS x H )\n loss += -np.sum(np.log(s)*yp)\n return loss, np.array(dh), dV, dc", "def optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n\n # freeze all convolution variables\n tvars = tf.trainable_variables()\n trainable_vars = [var for var in tvars if not(var.name.startswith('conv'))]\n\n #print(\"Trainable parameters are: \")\n #for var in trainable_vars:\n # print(var.name + \"\\n\")\n\n logits = tf.reshape(nn_last_layer, (-1, num_classes), name=\"logits\")\n pred = tf.nn.softmax(logits)\n output = tf.identity(pred, 'prediction')\n\n correct_label = tf.reshape(correct_label, (-1, num_classes))\n cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))\n\n tf.summary.scalar('cross_entropy_loss', cross_entropy_loss, collections=['batch'])\n # add regularization to the loss\n reg_losses = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n tf.summary.scalar('regularization loss', reg_losses, collections=['batch'])\n reg_constant = 0.01\n loss = cross_entropy_loss + reg_constant * reg_losses\n\n tf.summary.scalar('total loss', loss, collections=['batch'])\n\n prediction = tf.argmax(logits, 1)\n correct_label_flatten = tf.argmax(correct_label, 1)\n acc = tf.reduce_mean(tf.cast(tf.equal(prediction, correct_label_flatten), tf.float32))\n tf.summary.scalar('train_acc', acc, collections=['epoch_train'])\n tf.summary.scalar('validation_acc', acc, collections=['epoch_validate'])\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n training_operation = optimizer.minimize(cross_entropy_loss, var_list=trainable_vars)\n\n return logits, training_operation, loss", "def hybrid_forward(self, F, logits, labels):\n # computes softmax over the last axis, backpropagates ce gradients. 
Shape: (batch, len, vocab)\n softmax_out = F.SoftmaxOutput(data=logits,\n label=labels,\n ignore_label=self.ignore_label,\n use_ignore=True,\n normalization=self._normalization,\n smooth_alpha=self._alpha,\n # see https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html\n grad_scale=self.weight,\n preserve_shape=True)\n # (batch, len)\n pred = F.log(F.pick(F.BlockGrad(softmax_out), labels, axis=-1, keepdims=False))\n # (batch, len,)\n valid_mask = labels != self.ignore_label\n # (batch, len)\n pred = pred * valid_mask\n # (1,)\n ce = -F.sum(pred)\n return ce, F.sum(valid_mask)", "def grad_wrt_loss(predictions, targets):\n grad = -2 * (targets.float() - predictions)\n return grad", "def get_losses(Y,T,X,ind_kf,ind_kt,num_obs_times,num_obs_values,\n num_tcn_grid_times, cov_grid, input_dim,method, gp_params, tcn, is_training, n_classes, lab_vitals_only, pad_before,\n labels, pos_weight): #med_cov_grid\n \n Z = get_GP_samples(Y,T,X,ind_kf,ind_kt,num_obs_times,num_obs_values,\n num_tcn_grid_times, cov_grid, input_dim, method=method, gp_params=gp_params, lab_vitals_only=lab_vitals_only, pad_before=pad_before) #batchsize*num_MC x batch_maxseqlen x num_inputs ##,med_cov_grid\n Z.set_shape([None,None,input_dim]) #somehow lost shape info, but need this\n N = tf.shape(Z)[0] #number of observations \n\n\n # We only want to consider up tw0 7 timepoints before end\n T_max = 7\n \n # Only during training we want to average over the last few predictions in order to give\n # the model the incentive to predict early\n tcn_out = tcn(Z, training=is_training)[:, -T_max:, :]\n tcn_logits = tf.layers.dense(tcn_out,\n n_classes, activation=None, \n kernel_initializer=tf.orthogonal_initializer(),\n name='last_linear', reuse=False\n )\n\n # Only get a few of the last obs\n #used_grid = tf.reduce_min(tf.stack([num_tcn_grid_times, tf.fill(tf.shape(num_tcn_grid_times), T_max)]), axis=0)\n #tiled = tf.tile(tf.expand_dims(used_grid, axis=-1), [1, gp_params.n_mc_smps])\n #expanded_used_grid = tf.reshape(tiled, [-1])\n tiled_labels = tf.tile(tf.expand_dims(labels, axis=1), tf.stack([1, T_max, 1]))\n all_losses = tf.nn.weighted_cross_entropy_with_logits(logits=tcn_logits,targets=tiled_labels, pos_weight=pos_weight)\n average_losses = tf.reduce_mean(all_losses, axis=-1)\n\n return average_losses", "def forward_backward_prop(data, labels, params, dimensions):\n\n ### Unpack network parameters (do not modify)\n ofs = 0\n Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])\n\n activation = []\n\n W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))\n ofs += Dx * H\n b1 = np.reshape(params[ofs:ofs + H], (1, H))\n ofs += H\n W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))\n ofs += H * Dy\n b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))\n\n ### Forward propagation\n activation.append(data)\n\n # Hidden layer inputs: (N, Dx) * (Dx, H) -> N x H\n z = np.dot(activation[-1], W1) + b1 \n # Activations, inputs to the final layer. 
\n activation.append(sigmoid(z)) # output of the hidden layer, activation\n # Final layer outputs: ( N x H ) * ( H, Dy) -> (N, Dy)\n z = np.dot(activation[-1], W2) + b2\n activation.append( softmax(z) )\n\n # Cross-entropy cost\n\n y_p = activation[-1]\n activation = activation[:-1] # remove activation data (output)\n\n cost = -np.sum(labels * np.log(y_p))\n \n error = []\n \n ### backward propagation\n sigma = (y_p - labels)\n error.append(sigma)\n\n gradb2 = np.sum(error[-1], axis=0)\n gradW2 = np.dot(activation[-1].T, error[-1])\n\n #\n sigma = np.dot(W2, error[-1].T)\n sigma = sigma.T * sigmoid_grad(activation[-1])\n activation = activation[:-1] # remove activation data ( hidden layer )\n\n error.append(sigma)\n\n gradb1 = np.sum(error[-1], axis=0)\n gradW1 = np.dot(activation[-1].T, error[-1])\n\n\n ### Stack gradients (do not modify)\n grad = np.concatenate((gradW1.flatten(), gradb1.flatten(), \n gradW2.flatten(), gradb2.flatten()))\n \n return cost, grad", "def calc_gradients(\n test_file,\n model_name,\n output_file_dir,\n max_iter,\n learning_rate=0.001,\n targets=None,\n weight_loss2=1,\n data_spec=None,\n batch_size=1,\n seq_len=40,\n resolution_x=16,\n resolution_y=32,\n resolution_z=32,\n c_space=cv2.COLOR_BGR2LUV): \n spec = data_spec\n\n modifier = tf.Variable(0.01*np.ones((1, seq_len, spec.crop_size,spec.crop_size,spec.channels),dtype=np.float32))\n \n input_image = tf.placeholder(tf.float32, (batch_size, seq_len, spec.crop_size, spec.crop_size, spec.channels))\n input_label = tf.placeholder(tf.int32, (batch_size))\n #input_image_cs = tf.placeholder(tf.float32, (batch_size, seq_len, spec.crop_size, spec.crop_size, spec.channels))\n params_color = tf.Variable(np.empty_like(construct_identity_param(batch_size,resolution_x, resolution_y, resolution_z)).reshape(batch_size,-1,spec.channels))\n \n trans_color_img = function(input_image,params_color,batch_size, seq_len, spec.crop_size, spec.crop_size, spec.channels,resolution_x,resolution_y, resolution_z)\n #print(tf.shape(trans_color_img))\n #trans_input = np.array(trans_color_img,dtype=np.float32)\n #trans_color_img = cv2.cvtColor( trans_input, cv2.COLOR_LUV2RGB)\n # temporal mask, 1 indicates the selected frame\n indicator = [0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0]\n\n true_image = tf.minimum(tf.maximum(modifier[0,0,:,:,:]+trans_color_img [0,0,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n true_image = tf.expand_dims(true_image, 0)\n for ll in range(seq_len-1):\n if indicator[ll+1] == 1:\n mask_temp = tf.minimum(tf.maximum(modifier[0,ll+1,:,:,:]+input_image[0,ll+1,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n else:\n mask_temp = input_image[0,ll+1,:,:,:]\n mask_temp = tf.expand_dims(mask_temp,0)\n true_image = tf.concat([true_image, mask_temp],0)\n true_image = tf.expand_dims(true_image, 0)\n\n for kk in range(batch_size-1):\n true_image_temp = tf.minimum(tf.maximum(modifier[0,0,:,:,:]+input_image[kk+1,0,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n true_image_temp = tf.expand_dims(true_image_temp, 0)\n for ll in range(seq_len-1):\n if indicator[ll+1] == 1:\n mask_temp = tf.minimum(tf.maximum(modifier[0,ll+1,:,:,:]+input_image[kk+1,ll+1,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n else:\n mask_temp = input_image[kk+1,ll+1,:,:,:]\n mask_temp = tf.expand_dims(mask_temp,0)\n true_image_temp = tf.concat([true_image_temp, mask_temp],0)\n true_image_temp = 
tf.expand_dims(true_image_temp, 0)\n\n true_image = tf.concat([true_image, true_image_temp],0)\n loss2 = tf.reduce_mean(1.0 - tf.image.ssim(true_image, input_image, max_val=255))\n \n #loss2 = tf.reduce_sum(tf.sqrt(tf.reduce_mean(tf.square(true_image-input_image), axis=[0, 2, 3, 4])))\n norm_frame = tf.reduce_mean(tf.abs(modifier), axis=[2,3,4])\n\n sess = tf.Session()\n probs, variable_set, pre_label,ince_output, pre_node = models.get_model(sess, true_image, model_name, False) \n true_label_prob = tf.reduce_sum(probs*tf.one_hot(input_label,101),[1])\n if targets is None:\n loss1 = -tf.log(1 - true_label_prob + 1e-6)\n else:\n loss1 = -tf.log(true_label_prob + 1e-6)\n loss1 = tf.reduce_mean(loss1)\n loss = loss1 + weight_loss2 * loss2\n\n optimizer = tf.train.AdamOptimizer(learning_rate)\n print('optimizer.minimize....')\n train = optimizer.minimize(loss, var_list=[modifier,params_color])\n # initiallize all uninitialized varibales\n init_varibale_list = set(tf.all_variables()) - variable_set\n sess.run(tf.initialize_variables(init_varibale_list))\n\n data = DataSet(test_list=test_file, seq_length=seq_len,image_shape=(spec.crop_size, spec.crop_size, spec.channels))\n all_names = []\n all_images = []\n all_labels = []\n \n def_len = 40\n for video in data.test_data:\n frames = data.get_frames_for_sample(video)\n if len(frames) < def_len:\n continue\n frames = data.rescale_list(frames, def_len)\n frames_data = data.build_image_sequence(frames)\n all_images.append(frames_data)\n label, hot_labels = data.get_class_one_hot(video[1])\n all_labels.append(label)\n all_names.append(frames)\n total = len(all_names)\n all_indices = range(total)\n num_batch = int(total/batch_size)\n print('process data length:', num_batch)\n\n correct_ori = 0\n correct_noi = 0\n tot_image = 0\n \n for ii in range(num_batch): \n images = all_images[ii*batch_size : (ii+1)*batch_size]\n names = all_names[ii*batch_size : (ii+1)*batch_size]\n labels = all_labels[ii*batch_size : (ii+1)*batch_size]\n indices = all_indices[ii*batch_size : (ii+1)*batch_size]\n print('------------------prediction for clean video-------------------')\n print('---video-level prediction---')\n for xx in range(len(indices)):\n print(names[xx][0],'label:', labels[xx], 'indice:',indices[xx], 'size:', len(images[xx]), len(images[xx][0]), len(images[xx][0][0]), len(images[xx][0][0][0]))\n sess.run(tf.initialize_variables(init_varibale_list))\n if targets is not None:\n labels = [targets[e] for e in names]\n \n feed_dict = {input_image: [images[0][0:seq_len]], input_label: labels}\n var_loss, true_prob, var_loss1, var_loss2, var_pre, var_node = sess.run((loss, true_label_prob, loss1, loss2, pre_label, pre_node), feed_dict=feed_dict)\n \n correct_pre = correct_ori\n for xx in range(len(indices)):\n if labels[xx] == var_pre[xx]:\n correct_ori += 1\n\n tot_image += 1\n print('Start!')\n min_loss = var_loss\n last_min = -1\n print('---frame-wise prediction---')\n print('node_label:', var_node, 'label loss:', var_loss1, 'content loss:', var_loss2, 'prediction:', var_pre, 'probib', true_prob)\n # record numer of iteration\n tot_iter = 0\n\n if correct_pre == correct_ori:\n ii += 1\n continue\n \n print('------------------prediction for adversarial video-------------------')\n\n for cur_iter in range(max_iter):\n tot_iter += 1\n sess.run(train, feed_dict=feed_dict)\n var_loss, true_prob, var_loss1, var_loss2, var_pre, var_node = sess.run((loss, true_label_prob, loss1, loss2, pre_label, pre_node), feed_dict=feed_dict)\n print('iter:', cur_iter, 'total loss:', 
var_loss, 'label loss:', var_loss1, 'content loss:', var_loss2, 'prediction:', var_pre, 'probib:', true_prob)\n break_condition = False\n if var_loss < min_loss:\n if np.absolute(var_loss-min_loss) < 0.00001:\n break_condition = True\n print(last_min)\n min_loss = var_loss\n last_min = cur_iter\n\n if cur_iter + 1 == max_iter or break_condition:\n print('iter:', cur_iter, 'node_label:', var_node, 'label loss:', var_loss1, 'content loss:', var_loss2, 'prediction:', var_pre, 'probib:', true_prob)\n var_diff, var_color,var_probs, noise_norm = sess.run((modifier, params_color,probs, norm_frame), feed_dict=feed_dict)\n for pp in range(seq_len):\n # print the map value for each frame\n print(noise_norm[0][pp])\n for i in range(len(indices)):\n top1 = var_probs[i].argmax()\n if labels[i] == top1:\n correct_noi += 1\n break\n print('saved modifier paramters.', ii)\n \n for ll in range(len(indices)):\n for kk in range(def_len):\n if kk < seq_len:\n attack_img = np.clip(images[ll][kk]*255.0+var_diff[0][kk]+data_spec.mean,data_spec.rescale[0],data_spec.rescale[1])\n diff = np.clip(np.absolute(var_diff[0][kk])*255.0, data_spec.rescale[0],data_spec.rescale[1])\n else:\n attack_img = np.clip(images[ll][kk]*255.0+data_spec.mean,data_spec.rescale[0],data_spec.rescale[1])\n diff = np.zeros((spec.crop_size,spec.crop_size,spec.channels))\n im_diff = scipy.misc.toimage(arr=diff, cmin=data_spec.rescale[0], cmax=data_spec.rescale[1])\n im = scipy.misc.toimage(arr=attack_img, cmin=data_spec.rescale[0], cmax=data_spec.rescale[1])\n new_name = names[ll][kk].split('/')\n \n adv_dir = output_file_dir+'/adversarial/'\n dif_dir = output_file_dir+'/noise/'\n if not os.path.exists(adv_dir):\n os.mkdir(adv_dir)\n os.mkdir(dif_dir)\n\n tmp_dir = adv_dir+new_name[-2]\n tmp1_dir = dif_dir+new_name[-2]\n if not os.path.exists(tmp_dir):\n os.mkdir(tmp_dir)\n os.mkdir(tmp1_dir)\n \n new_name = new_name[-1] + '.png'\n im.save(tmp_dir + '/' +new_name)\n im_diff.save(tmp1_dir + '/' +new_name)\n print('saved adversarial frames.', ii)\n print('correct_ori:', correct_ori, 'correct_noi:', correct_noi)", "def delta_cross_entropy_softmax(outputs, labels):\n \n m = labels.shape[0]\n grad = outputs\n \n grad[range(m),labels] -= torch.tensor(1.)\n\n grad = grad/m\n avg_grads = grad\n return avg_grads", "def rnn_backward(dh, cache):\n dx, dh_prev, dWx, dWh, db = None, None, None, None, None\n ##############################################################################\n # TODO: Implement the backward pass for a vanilla RNN running an entire #\n # sequence of data. You should use the rnn_step_backward function that you #\n # defined above. 
#\n ##############################################################################\n \"\"\"\n x, next_h, prev_h, Wx, Wh, b = cache\n dz = (1-next_h*next_h)*dnext_h\n # THIS ERROR IS SPREAD AMONG THE\n # np.dot(x, Wx) + np.dot(prev_h, Wh) + b)\n dx = np.dot(dz,Wx.T)\n dprev_h = np.dot(dz,Wh.T)\n db = np.sum(dz,axis=0)\n dWx = np.dot(x.T,dz)\n dWh = np.dot(prev_h.T,dz)\n #d(tanh) = 1- tanh*tanh\n \"\"\"\n #pdb.set_trace()\n # dh is not result of forward prop\n # but\n N,T,H = dh.shape\n tmp_x, tmp_next_h, tmp_prev_h, tmp_Wx, tmp_Wh, tmp_b = cache[T-1]\n D = tmp_x.shape[1]\n\n\n dx = np.zeros((N,T,D))\n dh_prev = np.zeros((N,H))\n dWx = np.zeros((D,H))\n dWh = np.zeros((H,H))\n db = np.zeros((H))\n\n for i in reversed(list(range(0,T))):\n # current gradient at timestep is the upstream gradient (provided as input)\n # this may be coming from the Y as in the min_char_rnn.py (see line 59)\n # + downstream gradient provided by rnn_step_backward.\n dh_curr = dh[:,i,:] + dh_prev\n dx_, dh_prev, dWx_, dWh_, db_ = rnn_step_backward(dh_curr, cache[i])\n dWx += dWx_\n dWh += dWh_\n db += db_\n dx[:,i,:]=dx_\n\n\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return dx, dh_prev, dWx, dWh, db", "def softmaxCostAndGradient(predicted, target, outputVectors, dataset):\n\n # Implement the cost and gradients for one predicted word vector\n # and one target word vector as a building block for word2vec\n # models, assuming the softmax prediction function and cross\n # entropy loss.\n\n # Inputs:\n # - predicted: numpy ndarray, predicted word vector (\\hat{v} in\n # the written component or \\hat{r} in an earlier version)\n # - target: integer, the index of the target word\n # - outputVectors: \"output\" vectors (as rows) for all tokens\n # - dataset: needed for negative sampling, unused here.\n\n # Outputs:\n # - cost: cross entropy cost for the softmax word prediction\n # - gradPred: the gradient with respect to the predicted word\n # vector\n # - grad: the gradient with respect to all the other word\n # vectors\n\n # We will not provide starter code for this function, but feel\n # free to reference the code you previously wrote for this\n # assignment!\n\n ### YOUR CODE HERE\n yhat = softmax(np.dot(outputVectors, predicted))\n\n cost = -np.log(yhat[target])\n\n yhat_y = yhat.copy()\n yhat_y[target] -= 1\n\n gradPred = np.dot(yhat_y, outputVectors)\n\n grad = yhat_y[:, np.newaxis] * np.tile(predicted, (yhat_y.shape[0], 1))\n ### END YOUR CODE\n\n return cost, gradPred, grad", "def add_loss_op(self, preds):\n ### YOUR CODE HERE (~2-4 lines)\n trans = tf.get_variable('trans',\n shape=[Config.n_classes, Config.n_classes],\n initializer=tf.contrib.layers.xavier_initializer())\n log_likelihood, _ = crf_log_likelihood(preds,\n self.labels_placeholder,\n self.length_placeholder,\n trans)\n #log_likelihood = tf.boolean_mask(log_likelihood, self.mask_placeholder)\n loss = tf.reduce_mean(-1.0 * log_likelihood)\n \n ### END YOUR CODE\n return trans, loss" ]
[ "0.70471126", "0.6771997", "0.6501649", "0.6481051", "0.6475507", "0.64638615", "0.645722", "0.644719", "0.6429101", "0.6332298", "0.6330999", "0.6292698", "0.622307", "0.6192175", "0.6190752", "0.61711174", "0.6128896", "0.61129713", "0.6112532", "0.6099241", "0.60904557", "0.6080622", "0.607248", "0.6041186", "0.6036952", "0.6024581", "0.6020475", "0.59909457", "0.59766996", "0.59700483" ]
0.7260533
0
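Several of the retrieval negatives above (loss_and_grad, fast_loss_and_grad, grad_softmax_cross_entropy_loss, delta_cross_entropy_softmax) implement the same core computation in different styles: the softmax cross-entropy loss and its gradient with respect to the class scores. A minimal vectorized NumPy sketch of that computation, written independently of any class layout in the entries above (shapes and names are illustrative):

import numpy as np

def softmax_xent_loss_and_grad(scores, labels):
    # scores: (N, C) unnormalized class scores; labels: (N,) integer class ids
    N = scores.shape[0]
    shifted = scores - scores.max(axis=1, keepdims=True)   # shift for numerical stability
    probs = np.exp(shifted)
    probs /= probs.sum(axis=1, keepdims=True)               # row-wise softmax
    loss = -np.log(probs[np.arange(N), labels]).mean()      # mean cross-entropy
    dscores = probs                                          # reuse buffer for the gradient
    dscores[np.arange(N), labels] -= 1.0                     # dL/dscores = (softmax - one_hot) / N
    dscores /= N
    return loss, dscores

The max-shift leaves the softmax unchanged but keeps the exponentials from overflowing, which is what the "numerical instability" comments in the entries above are guarding against.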
Deletes text object file, database records and any keywords in the graph.
def TextDelete(texttitle):
    path = app.config['UPLOAD_FOLDER'] + \
        '/objects/' + texttitle + '.txt'
    with Database() as database:
        database.deleteText(texttitle, session['id'])
    # Loads in the file to be deleted and the keyword graph
    with open(path, "rb") as objectfile:
        current_file = pickle.load(objectfile)
    keywords = current_file.stats['Key Words']
    with open("word_graph.txt", "rb") as graphfile:
        word_graph = pickle.load(graphfile)
    # Reduces each edge connected to the current file keywords
    for keyword in keywords:
        word_graph.add_node(keyword[0])
        for k in keywords:
            if k[0] != keyword[0]:
                word_graph.reduce_edge(keyword[0], k[0])
    # Rewrites the graph object file
    with open("word_graph.txt", "wb") as graphfile:
        pickle.dump(word_graph, graphfile)
    # Deletes the object file
    os.remove(path)
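The view above relies on a pickled word_graph object exposing add_node and reduce_edge, but that class is not part of this record. A minimal sketch of what such a weighted keyword co-occurrence graph could look like — hypothetical, the project's actual implementation may differ:

from collections import defaultdict

class WordGraph:
    """Undirected keyword co-occurrence graph with integer edge weights."""

    def __init__(self):
        self.nodes = set()
        self.edges = defaultdict(int)  # frozenset({a, b}) -> co-occurrence count

    def add_node(self, word):
        self.nodes.add(word)

    def add_edge(self, a, b):
        self.edges[frozenset((a, b))] += 1

    def reduce_edge(self, a, b):
        # Decrement the weight between two keywords, dropping the edge at zero
        key = frozenset((a, b))
        if self.edges[key] > 0:
            self.edges[key] -= 1
        if self.edges[key] <= 0:
            self.edges.pop(key, None)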
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete():", "def clear_db():\n humans = Human4j.nodes.all()\n for h in humans:\n h.delete()\n binomes = Binome4j.nodes.all()\n for b in binomes:\n b.delete()\n projects = Project4j.nodes.all()\n for p in projects:\n p.delete()\n sherpas = Sherpa4j.nodes.all()\n for sh in sherpas:\n sh.delete()\n students = Pioupiou4j.nodes.all()\n for piou in students:\n piou.delete()\n partenaires = Partenaire4j.nodes.all()\n for part in partenaires:\n part.delete()\n ps = Planete_Solidaire.nodes.all()\n for misc in ps:\n misc.delete()", "def delete_db(self):\n import os.path\n os.remove(self.filepath)", "def delete(self):\n self.graph._del(handle=self.handle)", "def delete_model(self):\n os.remove(self.filepath)\n self.cmodel = None", "def _delete_all(self):\n logging.info(\"Remove all nodes and relations from database.\")\n self.graph.delete_all()\n return", "def clear_data_base():\n\n\tcommand = 'rm object_models/*.json'\n\tos.system(command)\n\tprint(\"data base cleared\")", "def deleteAll(self):\n self.db.execute(\"DELETE FROM MATCH;\", ())", "def delete(self):\n if not pdbox._args.get(\"dryrun\"):\n result = execute(pdbox.dbx.files_delete_v2, self.path)\n pdbox.debug(\"Metadata response: %s\" % result.metadata)\n pdbox.info(\"Deleted %s\" % self.uri)", "def delete_table_data():\n try:\n print 'delete existing data'\n sql = 'delete from document'\n sql1 = 'delete from clean_keywords'\n sql2 = 'delete from keywords'\n util.executeSQL(conn, sql) # delete the existing data.\n util.executeSQL(conn, sql1)\n util.executeSQL(conn, sql2)\n except Exception as e:\n print e", "def clear_data():\n logger.info(\"Delete Structure instances\")\n Structure.objects.all().delete()\n logger.info(\"Delete StructureType instances\")\n StructureType.objects.all().delete()\n logger.info(\"Delete Industry instances\")\n Industry.objects.all().delete()\n logger.info(\"Delete Price instances\")\n PriceList.objects.all().delete()\n logger.info(\"Delete Stock instances\")\n Stock.objects.all().delete()\n logger.info(\"Delete News instances\")\n News.objects.all().delete()\n logger.info(\"Delete NewsImages instances\")\n NewsImage.objects.all().delete()\n logger.info(\"Delete News Sections instances\")\n NewsCategorySection.objects.all().delete()\n logger.info(\"Delete Analysis instances\")\n AnalysisOpinion.objects.all().delete()\n logger.info(\"Delete Analysis Images instances\")\n AnalysisImage.objects.all().delete()\n logger.info(\"Delete Analysis Sections instances\")\n AnalysisCategorySection.objects.all().delete()", "def _delete_datafile(sender, instance, **kwargs):\n instance.delete_datafile(save_instance=False)", "def remove(self):\n for ref_node in self.node.find_references():\n ref_node.destroy()\n File.remove(self)", "def predio_delete(sender, instance, **kwargs):\n instance.dataFile.delete(False)", "def delete(self):\n\n del self.parent_mirror_dir[self.cvs_path]", "def db_remove():\n\n db.session.close()\n db.drop_all()\n\n path = current_app.config['SNER_VAR']\n for file_object in os.listdir(path):\n file_object_path = os.path.join(path, file_object)\n if os.path.isdir(file_object_path):\n shutil.rmtree(file_object_path)\n else:\n os.unlink(file_object_path)", "def remove(self):\n \n dbpath, config = self._start() \n desc_file = check_file(config.model_descriptions, dbpath,\n \"model_descriptions\", allow_none=False) \n self.logger.msg1(\"Reading model ids\")\n ids = values_in_column(desc_file, \"id\")\n self.logger.msg1(\"Deleting models: \"+str(len(ids)))\n delete_models(dbpath, ids)\n self._end()", "def 
__del__(self):\r\n self.save()\r\n self.close()", "def clear_db():\n from example_data import ExampleDataLoader\n ExampleDataLoader.clean_db()", "def delete_path():\n #TODO delete path from database\n pass", "def delete(self):\n ...", "def deleteDB():\n db = sqlite.connect(db_path)\n db.row_factory = sqlite.Row\n cursor = db.cursor()\n cursor.execute(\"DELETE from rooms\")\n\n cursor.execute(\"DELETE from users\")\n\n cursor.execute(\"DELETE from urls\")\n\n cursor.fetchall()\n db.commit()\n cursor.close()\n db.close()", "def delete(self):\n\n\n try:\n db = getDatabase()\n connection = db.connect()\n\n connection.delete(self)\n except Exception as e:\n raise e\n finally:\n db.dispose()", "def delete(self):\n self._vertex_list.delete()\n self._vertex_list = None", "def do_destroy(self, arg):\n args = shlex.split(arg)\n stored_objects = models.storage.all()\n\n if self.basic_errs(args):\n '''check if instance exists'''\n instance = self.check_instance(args[0], args[1], stored_objects)\n if instance:\n \"\"\"delete from FileStorage.__objects\"\"\"\n del stored_objects[instance]\n \"\"\"overwrite the new data to file.json\"\"\"\n models.storage.save()", "def tearDown(self):\n self.db.close()\n self.dbfile.close()\n os.unlink(self.path)", "def clean(obj):\n clean_up_generated_files(obj)", "def delete(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n shutil.rmtree(self.paths['root'])", "def cleanup(self):\r\n for f in [i for d in self.data.values() for i in d[\"filenames\"]]:\r\n try:\r\n os.unlink(f)\r\n except Exception: pass\r\n self.Destroy()", "def remove_all():\n storage = FileStorage()\n objects = storage.all()\n objects = list(objects.values())\n\n for element in objects:\n storage.delete(element)\n objects = storage.all()" ]
[ "0.66543853", "0.6603808", "0.6559491", "0.6447322", "0.64100724", "0.6389136", "0.6299942", "0.62388057", "0.6218455", "0.6199544", "0.6175071", "0.61286473", "0.6114", "0.6112866", "0.6111774", "0.6104314", "0.60767096", "0.6068975", "0.60340655", "0.6027067", "0.60106134", "0.6002575", "0.6001822", "0.59970397", "0.59881383", "0.59856457", "0.59848326", "0.5979064", "0.5973471", "0.59634006" ]
0.78041196
0
Performs Dijkstra's algorithm starting from a given node.
def dijsktra(graph, initial):
    # Sets the initial node score to 10
    visited = {initial: 10}
    nodes = set(graph.nodes)
    max_weight = graph.distances[max(graph.distances, key=graph.distances.get)]
    min_weight = graph.distances[min(graph.distances, key=graph.distances.get)]
    # Defines the number of nodes to explore as the number of connected nodes
    nodes_to_explore = len(graph.edges[initial]) + 2
    explored = 1
    while explored < nodes_to_explore:
        # Finds the node with the maximum value that has not yet been explored
        max_node = None
        for node in nodes:
            if node in visited:
                if max_node is None:
                    max_node = node
                elif visited[node] > visited[max_node]:
                    max_node = node
        if max_node is None:
            break
        nodes.remove(max_node)
        current_weight = visited[max_node]
        # Finds the score of each neighbouring node; if the node has already been
        # visited, changes its score only if the new one is greater
        for edge in graph.edges[max_node]:
            weight = graph.distances[(max_node, edge)]
            if max_weight - min_weight == 0:
                normalised = 1
            else:
                normalised = ((weight - min_weight) / (max_weight - min_weight)) + 1
            weight = current_weight - (1 / normalised)
            if edge not in visited or weight > visited[edge]:
                visited[edge] = round(weight, 2)
        explored += 1
    return visited
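The function above expects a graph object with a nodes collection, an edges mapping from node to neighbours, and a distances mapping keyed by (node, node) pairs; that container is not included in this record. A minimal hypothetical stand-in and call, assuming dijsktra is in scope, just to show the interface it relies on (names and weights are illustrative):

class Graph:
    """Hypothetical container matching the attributes dijsktra() reads."""

    def __init__(self):
        self.nodes = set()
        self.edges = {}       # node -> list of neighbouring nodes
        self.distances = {}   # (node, node) -> edge weight

    def add_edge(self, a, b, weight):
        self.nodes.update((a, b))
        self.edges.setdefault(a, []).append(b)
        self.edges.setdefault(b, [])  # make sure every node has an adjacency list
        self.distances[(a, b)] = weight

g = Graph()
g.add_edge('A', 'B', 2.0)
g.add_edge('A', 'C', 5.0)
g.add_edge('B', 'C', 1.0)
print(dijsktra(g, 'A'))  # e.g. {'A': 10, 'B': 9.2, 'C': 9.5} -- higher score means closer to 'A'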
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def label_correcting_algo(dt, ori_node, des_node, do_return=False):\n # Convert all labels to string\n ori = str(ori_node)\n des = str(des_node)\n dt[[\"start\", \"end\"]] = dt[[\"start\", \"end\"]].astype(str) \n \n # Initialization\n nodes = set(dt.loc[:,\"start\"].unique()) | set(dt.loc[:,\"end\"].unique())\n dist = {}.fromkeys(nodes, np.inf)\n dist[ori] = 0\n points = {}.fromkeys(nodes, ori)\n iter_set = {ori}\n \n # Main Algo\n while iter_set:\n i = iter_set.pop() # Randomly pop out a node i\n A_i = dt[dt.start == i]\n for row in A_i.index: \n j = A_i.loc[:, \"end\"][row]\n c_ij = A_i.loc[:, \"cost\"][row]\n if dist[j] > dist[i] + c_ij:\n dist[j] = dist[i] + c_ij\n points[j] = i\n iter_set = iter_set | set([j]) # Union\n \n # Print & Return the Answer\n x = pd.concat([pd.Series(points), pd.Series(dist)], axis=1)\n x.columns = [\"Front-node\", \"Distance\"]\n\n current_node = des\n front_node = \"\"\n sp = des\n while front_node != ori:\n front_node = str(x.loc[current_node, \"Front-node\"])\n sp = \"{} -> {}\".format(front_node, sp)\n current_node = front_node\n \n sp = \"From node {} to node {}, total Distance: {}\\n{}\\n\".format(ori, des, x.loc[des, \"Distance\"], sp)\n if do_return:\n print(sp)\n return x\n else:\n return sp", "def computeCLgainRemoveNodeD(self, G, PD, nodes, node, dropLidx, dir):\n CL_I = 0.0\n CL_F = 0.0\n if dir==2:\n for i in nodes:\n pos_I = PD.getPOS(node, i, case=2, isSimple=self.isSimple)\n pos_F = PD.getPOS(node, i, case=4, isSimple=self.isSimple, dropLidx=dropLidx)\n if pos_I < 0.0000000001:\n pos_I = 0.0000000001\n elif pos_I > 1.0 - 0.0000000001:\n pos_I = 1.0 - 0.0000000001\n if pos_F < 0.0000000001:\n pos_F = 0.0000000001\n elif pos_F > 1.0 - 0.0000000001:\n pos_F = 1.0 - 0.0000000001\n numE = G.number_of_edges(node, i)\n if self.isSimple:\n CL_I += math.log2(math.pow(1.0-pos_I, 1.0-numE)*math.pow(pos_I, numE))\n CL_F += math.log2(math.pow(1.0-pos_F, 1.0-numE)*math.pow(pos_F, numE))\n else:\n CL_I += math.log2(math.pow(1.0-pos_I, numE)*pos_I)\n CL_F += math.log2(math.pow(1.0-pos_F, numE)*pos_F)\n elif dir==1:\n for i in nodes:\n pos_I = PD.getPOS(i, node, case=2, isSimple=self.isSimple)\n pos_F = PD.getPOS(i, node, case=4, isSimple=self.isSimple, dropLidx=dropLidx)\n if pos_I < 0.0000000001:\n pos_I = 0.0000000001\n elif pos_I > 1.0 - 0.0000000001:\n pos_I = 1.0 - 0.0000000001\n if pos_F < 0.0000000001:\n pos_F = 0.0000000001\n elif pos_F > 1.0 - 0.0000000001:\n pos_F = 1.0 - 0.0000000001\n numE = G.number_of_edges(i, node)\n if self.isSimple:\n CL_I += math.log2(math.pow(1.0-pos_I, 1.0-numE)*math.pow(pos_I, numE))\n CL_F += math.log2(math.pow(1.0-pos_F, 1.0-numE)*math.pow(pos_F, numE))\n else:\n CL_I += math.log2( math.pow( 1.0-pos_I, numE )*pos_I )\n CL_F += math.log2( math.pow( 1.0-pos_F, numE )*pos_F )\n return -CL_I - (-CL_F)", "def __call__(self, node):\n if not node.children: return;\n if len(node.children) <= 2: return;\n if self.IsGoodTriple(node.children): return;\n if len(node.children) >= 8: raise ValueError(\"Too long to decompose\");\n children = map(lambda x : [self.GetLabel(x)], node.children);\n #print \"Guessing %s\" % children;\n print node.ToPrettyString();\n res = self.path_finder.FindPath(children, self.GetLabel(node));\n if len(res) != 0:\n print res[0];\n tnodes, count = self.Transform(res[0][1], node, 0);\n node.children = tnodes.children;\n else:\n raise ValueError(\"Find no production chains to decompose for %s\" % children);\n print node.ToPrettyString();", "def __dikjstra(self, start_node):\n visited = []\n unvisited = 
[x for x in self.__node]\n shortest_dist_from_start_node = 0\n current_node = start_node\n\n current_node.setShortestDist(shortest_dist_from_start_node)\n\n while current_node:\n #check unvisited neighbor\n for neighbor_node, distance in current_node.getNeighbors().items():\n #print(neighbor_node.getId(), distance) troubleshoot je ni\n if neighbor_node in visited:\n continue\n\n #add up shortest_dist_from_start_node with distance from neighbor distance\n calc_dist = shortest_dist_from_start_node + distance\n\n if calc_dist < neighbor_node.getShortestDist():\n neighbor_node.setShortestDist(calc_dist)\n neighbor_node.setPrevNode(current_node)\n\n # add current node to visited array\n visited.append(current_node)\n unvisited.remove(current_node)\n \n #update next node and next shortest distance\n next_shortest_dist_from_start_node = inf\n next_node = None\n\n for unvisited_node in unvisited:\n if unvisited_node.getShortestDist() < next_shortest_dist_from_start_node:\n next_shortest_dist_from_start_node = unvisited_node.getShortestDist()\n next_node = unvisited_node\n\n # update current node and shortest distance from start vertex\n if next_node:\n current_node = next_node\n shortest_dist_from_start_node = next_shortest_dist_from_start_node\n #if there are left over unvisited node\n else: \n if unvisited:\n current_node = unvisited[0]\n else:\n current_node = None", "def visit(self, node):", "def visit(self, node):", "def kAnonymit(g,k):\n rlist=list()\n degreeH=nx.degree_histogram(g)\n## print degreeH\n li=[[] for i in range(len(degreeH))]\n uNode=dict(zip(range(len(degreeH)),li))\n \n gd=g.degree()\n for e in gd:\n uNode[gd[e]].append(e)\n ad=list()\n mi=list()\n su=list() \n sel3(g,3,ad,mi,su)\n nodeAd=list()\n nodeMi=list()\n nodeSu=list() \n for i in ad:\n nodeAd=nodeAd+uNode[i]\n #nodeAd store the graph ID\n for i in mi:\n nodeMi=nodeMi+uNode[i]\n for i in su:\n nodeSu=nodeSu+uNode[i]\n\n print nodeAd,nodeMi,nodeSu\n\n\n i=0#Add edge \n while i<len(nodeAd)-1:\n j=i+1\n while j<len(nodeAd):\n if not g.has_edge(nodeAd[i],nodeAd[j]):\n g.add_edge(nodeAd[i],nodeAd[j])\n rlist.append(nodeAd[i])\n rlist.append(nodeAd[j])\n nodeAd.pop(j)\n nodeAd.pop(i)\n i=i-1\n break\n else:\n j=j+1\n i=i+1\n print nodeAd\n \n i=0#delet edge\n while i<len(nodeMi)-1:\n j=i+1\n while j<len(nodeMi):\n if g.has_edge(nodeMi[i],nodeMi[j]):\n g.remove_edge(nodeMi[i],nodeMi[j])\n rlist.append(nodeMi[i])\n rlist.append(nodeMi[j])\n nodeMi.pop(j)\n nodeMi.pop(i)\n i=i-1\n break\n else:\n j=j+1\n i=i+1\n print nodeMi\n \n i=0#Add edge \n while i<len(nodeSu):\n j=0\n while j<len(nodeAd):\n if not g.has_edge(nodeSu[i],nodeAd[j]):\n g.add_edge(nodeSu[i],nodeAd[j])\n rlist.append(nodeSu[i])\n rlist.append(nodeAd[j])\n nodeAd.pop(j)\n nodeSu.pop(i)\n \n i=i-1\n break\n else:\n j=j+1\n i=i+1\n print nodeSu\n \n i=0#delet edge\n while i<len(nodeSu):\n j=0\n while j<len(nodeMi):\n if g.has_edge(nodeSu[i],nodeMi[j]):\n g.remove_edge(nodeSu[i],nodeMi[j])\n rlist.append(nodeSu[i])\n rlist.append(nodeMi[j])\n nodeMi.pop(j)\n nodeSu.pop(i)\n i=i-1\n break\n else:\n j=j+1\n i=i+1\n print nodeSu\n return rlist", "def main():\n\n \"\"\"\n nodes, hd3 = erdos_rennie_like(100,8333,5)\n export('d3',hd3)\n\n nodes, hd5 = erdos_rennie_like(100,8333,6)\n export('d5',hd5)\n\n nodes, hd6 = erdos_rennie_like(100,8333,7)\n export('d6',hd6)\n \"\"\"\n\n \"\"\"\n nodes, sparse1 = erdos_rennie_like(600, 1200, 3)\n export('sparse_diag1', sparse1)\n\n nodes, sparse2 = erdos_rennie_like(600, 2400, 3)\n export('sparse_diag2',sparse2)\n\n nodes, sparse3 
= erdos_rennie_like(600, 5800, 3)\n export('sparse_diag3',sparse3)\n\n nodes, sparse4 = erdos_rennie_like(600,11600, 3)\n export('sparse_diag4',sparse4)\n\n nodes, sparse5 = erdos_rennie_like(600,23200, 3)\n export('sparse_diag5',sparse5)\n \"\"\"\n\n nodes, size1 = erdos_rennie_like(100, 500, 3)\n nodes, size2 = erdos_rennie_like(200,1000,3)\n nodes,size3 = erdos_rennie_like(300,1500,3)\n nodes,size4 = erdos_rennie_like(400,2000,3)\n nodes,size5 = erdos_rennie_like(500,2500,3)\n\n export('size_diag1',size1)\n export('size_diag2',size2)\n export('size_diag3',size3)\n export('size_diag4',size4)\n export('size_diag5',size5)", "def test_dijsktra(self):\n input_file = \"/home/andy/code/python/om_task/data/test_data.txt\"\n graph_data = process_input_file(input_file)\n\n graphs = {}\n\n for timestep in graph_data:\n # print(\"Timestep=\", timestep)\n # Create a graph for each timestep\n graphs[timestep] = Graph()\n for edge in graph_data.get(timestep):\n graphs[timestep].add_edge(*edge)\n\n path = dijsktra(graphs[0], 'A0', 'B2')\n\n print(path)\n\n self.assertEqual(['A0', 'A1', 'A2', 'B2'], path)", "def test_dijsktra_2(self):\n input_file = \"/home/andy/code/python/om_task/data/test_data3.txt\"\n graph_data = process_input_file(input_file)\n\n graphs = {}\n\n for timestep in graph_data:\n # print(\"Timestep=\", timestep)\n # Create a graph for each timestep\n graphs[timestep] = Graph()\n for edge in graph_data.get(timestep):\n graphs[timestep].add_edge(*edge)\n\n path = dijsktra(graphs[0], 'A0', 'A1')\n\n print(path)\n\n self.assertEqual(['A0', 'A1'], path)", "def main():\n\n # create graph (DIRECTED), add nodes\n # create graph, add nodes\n api = GraphAPI()\n graph = api.init_graph(graph_type=GraphTypes.DIRECTED)\n\n api.add_node(graph, \"A\")\n api.add_node(graph, \"B\")\n api.add_node(graph, \"C\")\n api.add_node(graph, \"D\")\n api.add_node(graph, \"E\")\n api.add_node(graph, \"F\")\n\n # connect nodes in graph\n api.add_edge(graph, \"A\", \"B\", 5)\n api.add_edge(graph, \"A\", \"C\", 2)\n api.add_edge(graph, \"B\", \"D\", 3)\n api.add_edge(graph, \"B\", \"C\", 9)\n api.add_edge(graph, \"C\", \"E\", 5)\n api.add_edge(graph, \"E\", \"D\", 4)\n api.add_edge(graph, \"E\", \"F\", 6)\n api.add_edge(graph, \"D\", \"F\", 1)\n\n source_node = \"A\"\n\n print(\"Running Dijikstra algorithm starting from node %s. 
\" % source_node)\n shortest_distance_data = api.find_shortest_path_using_dijikstra(graph, start_node=source_node)\n print(\"Shortest distance to all nodes from source node [%s] is: \" % source_node)\n print(shortest_distance_data)", "def dfs(node: TreeNode):\n if not node:\n return\n helper(node, 0, sum)\n dfs(node.left)\n dfs(node.right)", "def felsen( node, column, tm ):\n nucs = tm.alphabet\n # Is it a leaf node?\n if node.edges is None:\n symbol = column[ node.label ]\n if symbol == '*':\n ## Uniform distribution\n # return [ 1 / len( nucs ) ] # len( nucs ) \n ## Equilibrium (\\pi) distribution\n #return tm.background\n # Eliminate entirely\n return None\n return [ int( symbol == nuc ) for nuc in nucs ] \n else:\n # Traverse children and determine likelihoods\n ## l_children = [ felsen( edge.tip, column, tm ) for edge in node.edges ]\n l_children = []\n for edge in node.edges:\n t = felsen( edge.tip, column, tm )\n if t is not None:\n l_children.append( ( t, edge ) )\n if not l_children: return None\n l_self = []\n # Determine liklihood for each possible 'ancestral' sequence\n for i_y in range( len( nucs ) ):\n y = nucs[ i_y ]\n p_y = 1\n # For each child, sum over all paths\n for cl, edge in l_children:\n if edge.length not in matrix_by_time_cache:\n matrix_by_time_cache[ edge.length ] = matrix_for_time( tm.matrix, edge.length )\n psub = matrix_by_time_cache[ edge.length ]\n p_y_c = 0\n for i_a in range( len( nucs ) ):\n p_y_c += psub[i_y,i_a] * cl[ i_a ]\n # Product over all children\n p_y *= p_y_c\n l_self.append( p_y )\n return l_self", "def id_dfts(self):\n \n def dls(node, limit):\n \"\"\"Recursively performs a Depth Limited Search (DLS) on the puzzle's search space\n starting at the given node with given limit.\n \n Returns a DLSResult class instance that contains the problem solution, a flag\n indicating the cutoff was reached, or a flag indicating a search failure.\n \"\"\"\n if self.check_goal_state(node.state):\n # Search success\n # Return final state and list of actions along path to the goal\n # as part of the Solution portion of the DLSResult class\n return DLSResult(solution=Solution(final_state=node.state, actions=self.get_action_path(node)))\n \n elif limit == 0:\n # The cutoff has been reached\n return DLSResult(cutoff=True)\n \n else:\n cutoff_occurred = False\n \n # Generate all possible actions for the given state\n actions = self.get_actions(node.state)\n \n for action in actions:\n # Apply this action to the current state to get the new state\n new_state = self.get_result(node.state, action)\n \n # Create a new child search node with the new state and action\n child_node = SearchNode(new_state, node, action)\n \n # Recursively call DLS on the child node with a reduced limit\n result = dls(child_node, limit - 1)\n \n if result.cutoff:\n # A cutoff occurred\n cutoff_occurred = True\n \n elif not result.failure:\n # Search success\n return result\n \n if cutoff_occurred:\n # A cutoff occurred\n return DLSResult(cutoff=True)\n \n else:\n # This search has failed\n return DLSReturn(failure=True)\n\n\n print('Performing ID-DFTS\\n')\n\n # Iterate through depths from 0 to infinity\n for depth in count(0):\n print('Trying depth', depth)\n\n # Get the DLS result for this depth\n result = dls(SearchNode(self.initial_state), depth)\n \n if not result.cutoff:\n # A solution has been found or a search failure has ocurred\n # Return the result\n return result", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def 
test_dijsktra2(self):\n input_file = \"/home/andy/code/python/om_task/data/test_data4.txt\"\n graph_data = process_input_file(input_file)\n\n graphs = {}\n\n for timestep in graph_data:\n # print(\"Timestep=\", timestep)\n # Create a graph for each timestep\n graphs[timestep] = Graph()\n for edge in graph_data.get(timestep):\n graphs[timestep].add_edge(*edge)\n\n path = dijsktra(graphs[1], 'A0', 'B2')\n\n print(path)\n\n self.assertEqual(['A0', 'B0', 'B1', 'B2'], path)", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n util.raiseNotDefined()", "def dijkstra(graph, node):\n\n color = dict((n, 'white') for n in graph.nodes())\n distance = dict((n, float('inf')) for n in graph.nodes())\n distance[node] = 0\n pred = {}\n\n def visit(node):\n q = deque()\n color[node] = 'grey'\n # q.appendleft(node)\n while len(q) != 0:\n\n for adjacent in graph.outAdjacent(current_node):\n if color[adjacent] is 'white':\n color[adjacent] = 'grey'\n distance[adjacent] = distance[current_node] \\\n + graph.weight(current_node, adjacent)\n pred[adjacent] = current_node\n q.appendleft(adjacent)\n elif color[adjacent] is 'grey':\n new_path_dist = distance[current_node] + \\\n graph.weight(current_node, adjacent)\n if distance[adjacent] > new_path_dist:\n distance[adjacent] = new_path_dist\n\n color[current_node] = 'black'\n\n\n visit(node)\n\n for node, color in color.items():\n if color is 'white':\n visit(node)\n return distance", "def traverseTree(mdsnode,dead_branches=False,depth=float('Nan'),current_depth=0,noisy=False,strict=False,tags=False):\n tagdict={}\n if isinstance(mdsnode,mds.tree.Tree): \n mdsnode=mdsnode.getNode(\"\\\\TOP\")\n \n name = get_mds_shortname(mdsnode) \n me = Branch(mdsnode)#put node information here if you like\n if noisy: print (\" \"*current_depth + name)\n\n #Members are data/signals, put them directly the current Node object\n #if they are arrays\n if mdsnode.getNumMembers()>0:\n leaves=mdsnode.getMembers()\n for leaf in leaves:\n leafname=get_mds_shortname(leaf)\n leafshape=get_mds_shape(leaf)\n if dead_branches or not len(leafshape) ==0:\n if noisy: print (\" \"*(current_depth+1) + leafname +\": array%s\"%str(leafshape))\n setattr(me,leafname,Leaf(leaf,strict))\n tagdict[leafname]=getattr(me,leafname)\n else:\n if noisy: print(\" \"*(current_depth+1) + leafname)\n #Children contain no immediate data, just links to more nodes. 
If depth is\n #not beyond limit, go down these 'branches' and add contents to the current\n #Node object\n if not depth <= current_depth and mdsnode.getNumChildren()>0:\n branches = mdsnode.getChildren()\n for b in branches:\n subname,subnode,subtags=traverseTree(b, dead_branches,depth,current_depth+1,noisy,strict)\n if len(subnode.__getDescendants__())>0:\n setattr(me,subname,subnode)\n tagdict[subname]=getattr(me,subname)\n for k,v in subtags.items(): #merge tags in\n tagdict[k]=v\n \n if current_depth==0:#we are done, returning to user\n if tags: \n for tag,obj in tagdict.items():\n setattr(me,tag,obj)\n else:\n tagbranch=Branch(mdsnode)\n for tag,obj in tagdict.items():\n setattr(tagbranch,tag,obj)\n setattr(me,'tags',tagbranch) \n return me\n return (name, me,tagdict) #else, we are still recursing back down the tree", "def dfs(node):\n nonlocal ans\n if not node: return []\n if node.left is node.right is None: return [0]\n left,right = dfs(node.left), dfs(node.right)\n ans += sum(2 + x + y <= distance for x in left for y in right)\n return [1 + x for x in left + right]", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n #Creamos las estructuras de datos necesarias (priority queue y set)\n openNodes = util.PriorityQueue()\n closedNodes = set([])\n\n #Guardamos el nodo inicial\n node = Node(problem.getStartState(), '', 0, None)\n\n #Calculamos funcion heuristica y el coste acumulado para sacar la funcion de evaluacion del nodo inicial\n fn = problem.getCostOfActions(node.path) + nullHeuristic(node.name, problem);\n\n #Lo metemos en la cola con su funcion de evaluacion como prioridad\n openNodes.push(node, fn)\n\n #Iteramos para cada nodo\n while True:\n if openNodes.isEmpty():\n break #ERROR: throw exception\n else :\n #sacamos el nodo de arriba de la cola\n node = openNodes.pop()\n if problem.isGoalState(node.name): #Comprobamos si el nodo es Goal. 
Si lo es terminamos.\n break\n else: #Expandimos los nodos sucesores del nodo si no estan en closed\n if nodeIsClosed(node, closedNodes) is False:\n for successor in problem.getSuccessors(node.name):\n n, p, c = successor\n succNode = Node(n, p, c, node)\n if nodeIsClosed(succNode, closedNodes) is False:\n fn = problem.getCostOfActions(findPath(succNode)) + nullHeuristic(succNode.name, problem);\n openNodes.push(succNode, fn)\n #Metemos el nodo en closed\n closedNodes.add(node)\n\n #Devolvemos el camino al Goal\n return findPath(node)", "def __follow_node(node, tree_graph, seed_space, seed):\n\n def node_has_filter(x):\n \"\"\"\n Check if a node is a pattern node and has an object filter\n \"\"\"\n p_node = list(self.__plan_graph.objects(subject=x, predicate=AGORA.byPattern))\n try:\n p_node = p_node.pop()\n return 'filter_object' in self.__patterns[p_node] or 'filter_subject' in self.__patterns[p_node]\n except IndexError:\n return False\n\n try:\n # Get the sorted list of current node's successors\n nxt = sorted(list(self.__plan_graph.objects(node, AGORA.next)),\n key=lambda x: node_has_filter(x), reverse=True)\n\n # Per each successor...\n for n in nxt:\n if seed_space in self.__node_spaces[n]:\n node_patterns = self.__node_patterns.get(n, [])\n\n # In case the node is not a leaf, 'onProperty' tells which is the next link to follow\n try:\n link = list(self.__plan_graph.objects(subject=n, predicate=AGORA.onProperty)).pop()\n except IndexError:\n link = None\n\n filter_next_seeds = set([])\n next_seeds = set([])\n # If the current node is a pattern node, it must search for triples to yield\n for pattern in node_patterns:\n pattern_space = self.__patterns[pattern].get('space', None)\n if pattern_space != seed_space or seed in self.__subjects_to_ignore[pattern_space]:\n continue\n\n subject_filter = self.__patterns[pattern].get('filter_subject', None)\n if subject_filter is not None and seed != subject_filter:\n self.__subjects_to_ignore[pattern_space].add(seed)\n continue\n\n pattern_link = self.__patterns[pattern].get('property', None)\n\n # If pattern is of type '?s prop O'...\n if pattern_link is not None:\n if (seed, pattern_link) not in self.__fragment:\n obj_filter = self.__patterns[pattern].get('filter_object', None)\n if on_plink is not None:\n on_plink(pattern_link, [seed], pattern_space)\n\n seed_was_filtered = True\n try:\n for seed_object in list(\n __process_pattern_link_seed(seed, tree_graph, pattern_link)):\n __check_stop()\n quad = (pattern, seed, pattern_link, seed_object)\n if obj_filter is None or u''.join(seed_object).encode(\n 'utf-8') == u''.join(obj_filter.toPython()).encode('utf-8'):\n self.__fragment.add((seed, pattern_link))\n __put_triple_in_queue(quad)\n seed_was_filtered = False\n if isinstance(obj_filter, URIRef):\n filter_next_seeds.add(obj_filter)\n if obj_filter is not None and seed_was_filtered:\n self.__subjects_to_ignore[pattern_space].add(seed)\n except AttributeError as e:\n log.warning('Trying to find {} objects of {}: {}'.format(link, seed, e.message))\n\n # If pattern is of type '?s a Concept'...\n obj_type = self.__patterns[pattern].get('type', None)\n if obj_type is not None:\n check_type = self.__patterns[pattern].get('check', False)\n if on_type is not None:\n on_type(obj_type, [seed], pattern_space)\n\n __dereference_uri(tree_graph, seed)\n try:\n seed_objects = list(tree_graph.objects(subject=seed, predicate=link))\n for seed_object in seed_objects:\n type_triple = (pattern, seed_object, RDF.type, obj_type)\n # In some cases, it is necessary 
to verify the type of the seed\n if (seed_object, obj_type) not in self.__fragment:\n if check_type:\n __dereference_uri(tree_graph, seed_object)\n types = list(\n tree_graph.objects(subject=seed_object, predicate=RDF.type))\n if obj_type in types:\n self.__fragment.add((seed_object, obj_type))\n __put_triple_in_queue(type_triple)\n else:\n self.__subjects_to_ignore[pattern_space].add(seed_object)\n else:\n self.__fragment.add((seed_object, obj_type))\n __put_triple_in_queue(type_triple)\n except AttributeError as e:\n log.warning('Trying to find {} objects of {}: {}'.format(link, seed, e.message))\n\n # If the current node is not a leaf... go on finding seeds for the successors\n if link is not None and seed not in self.__subjects_to_ignore[seed_space]:\n if on_link is not None:\n on_link(link, [seed], seed_space)\n __process_link_seed(seed, tree_graph, link, next_seeds)\n\n if filter_next_seeds:\n next_seeds = set.intersection(next_seeds, filter_next_seeds)\n\n chs = list(chunks(list(next_seeds), min(len(next_seeds), max(1, workers / 2))))\n next_seeds.clear()\n try:\n while True:\n __check_stop()\n chunk = chs.pop()\n threads = []\n for s in chunk:\n try:\n workers_queue.put_nowait(s)\n future = pool.submit(__follow_node, n, tree_graph, seed_space, s)\n threads.append(future)\n except Queue.Full:\n # If all threads are busy...I'll do it myself\n __follow_node(n, tree_graph, seed_space, s)\n except Queue.Empty:\n pass\n\n wait(threads)\n [(workers_queue.get_nowait(), workers_queue.task_done()) for _ in threads]\n except (IndexError, KeyError):\n pass\n except Queue.Full:\n stop_event.set()\n except Exception as e:\n traceback.print_exc()\n log.error(e.message)\n return", "def compute_simple_protein_features(traj, key_res):\n import mdtraj as md\n import numpy as np\n import pandas as pd\n\n topology = traj.topology\n coord = traj.xyz\n\n # JG debug\n #table, bonds = topology.to_dataframe()\n #atoms = table.values\n #print(atoms)\n\n # get the array of atom indices for the calculation of:\n # * 7 ditances (a 7*2 array where each row contains indices of the two atoms for each distance)\n dis = np.zeros(shape=(2, 2), dtype=int, order='C')\n\n # name list of the features\n #feature_names = list()\n\n # parse the topology info\n '''\n The coordinates are located by row number (usually is atom index minus one, which is also why it's zero-based)\n by mdtraj but when the atom indices are not continuous there is a problem so a safer way to locate the coordinates\n is through row number (as a fake atom index) in case the atom indices are not continuous.\n '''\n\n ### distances\n if topology.select(f\"chainid 0 and residue {key_res['group3'][3]} and name CA\"):\n dis[0][0] = topology.select(f\"chainid 0 and residue {key_res['group3'][3]} and name CA\")\n if topology.select(f\"chainid 0 and residue {key_res['group3'][2]} and name CZ\"):\n dis[0][1] = topology.select(f\"chainid 0 and residue {key_res['group3'][2]} and name CZ\")\n if topology.select(f\"chainid 0 and residue {key_res['group2'][3]} and name CA\"):\n dis[1][0] = topology.select(f\"chainid 0 and residue {key_res['group2'][3]} and name CA\")\n dis[1][1] = dis[0][1]\n #feature_names.append('Dunbrack_D1')\n #feature_names.append('Dunbrack_D2')\n\n # check if there is any missing coordinates; if so, skip dihedral/distance calculation for those residues\n check_flag = 1\n\n for i in range(len(dis)):\n if 0 in dis[i]:\n dis[i] = [0,0]\n check_flag = 0\n #if check_flag:\n #print(\"There is no missing coordinates. 
All dihedrals and distances will be computed.\")\n\n\n distances = md.compute_distances(traj, dis)\n\n # clean up\n del traj, dis\n return distances", "def diop_DN(D, N, t=symbols(\"t\", integer=True)):\n if D < 0:\n if N == 0:\n return [(0, 0)]\n elif N < 0:\n return []\n elif N > 0:\n sol = []\n for d in divisors(square_factor(N)):\n sols = cornacchia(1, -D, N // d**2)\n if sols:\n for x, y in sols:\n sol.append((d*x, d*y))\n if D == -1:\n sol.append((d*y, d*x))\n return sol\n\n elif D == 0:\n if N < 0:\n return []\n if N == 0:\n return [(0, t)]\n sN, _exact = integer_nthroot(N, 2)\n if _exact:\n return [(sN, t)]\n else:\n return []\n\n else: # D > 0\n sD, _exact = integer_nthroot(D, 2)\n if _exact:\n if N == 0:\n return [(sD*t, t)]\n else:\n sol = []\n\n for y in range(floor(sign(N)*(N - 1)/(2*sD)) + 1):\n try:\n sq, _exact = integer_nthroot(D*y**2 + N, 2)\n except ValueError:\n _exact = False\n if _exact:\n sol.append((sq, y))\n\n return sol\n\n elif 1 < N**2 < D:\n # It is much faster to call `_special_diop_DN`.\n return _special_diop_DN(D, N)\n\n else:\n if N == 0:\n return [(0, 0)]\n\n elif abs(N) == 1:\n\n pqa = PQa(0, 1, D)\n j = 0\n G = []\n B = []\n\n for i in pqa:\n\n a = i[2]\n G.append(i[5])\n B.append(i[4])\n\n if j != 0 and a == 2*sD:\n break\n j = j + 1\n\n if _odd(j):\n\n if N == -1:\n x = G[j - 1]\n y = B[j - 1]\n else:\n count = j\n while count < 2*j - 1:\n i = next(pqa)\n G.append(i[5])\n B.append(i[4])\n count += 1\n\n x = G[count]\n y = B[count]\n else:\n if N == 1:\n x = G[j - 1]\n y = B[j - 1]\n else:\n return []\n\n return [(x, y)]\n\n else:\n\n fs = []\n sol = []\n div = divisors(N)\n\n for d in div:\n if divisible(N, d**2):\n fs.append(d)\n\n for f in fs:\n m = N // f**2\n\n zs = sqrt_mod(D, abs(m), all_roots=True)\n zs = [i for i in zs if i <= abs(m) // 2 ]\n\n if abs(m) != 2:\n zs = zs + [-i for i in zs if i] # omit dupl 0\n\n for z in zs:\n\n pqa = PQa(z, abs(m), D)\n j = 0\n G = []\n B = []\n\n for i in pqa:\n\n G.append(i[5])\n B.append(i[4])\n\n if j != 0 and abs(i[1]) == 1:\n r = G[j-1]\n s = B[j-1]\n\n if r**2 - D*s**2 == m:\n sol.append((f*r, f*s))\n\n elif diop_DN(D, -1) != []:\n a = diop_DN(D, -1)\n sol.append((f*(r*a[0][0] + a[0][1]*s*D), f*(r*a[0][1] + s*a[0][0])))\n\n break\n\n j = j + 1\n if j == length(z, abs(m), D):\n break\n\n return sol", "def fixptDist(D, ht, chatty=False):\n changed = True\n while changed:\n changed = False\n for kv in ht.items(): \n s0 = kv[0][0]\n s1 = kv[0][1] \n if ht[(s0,s1)] == 0:\n continue \n if (chatty):\n print(\" \")\n print(\"Seeing if states \", s0, \" and \", s1, \" can now be distinguished by any symbol.\") \n for c in D[\"Sigma\"]:\n ns0 = D[\"Delta\"][(s0,c)]\n ns1 = D[\"Delta\"][(s1,c)] \n if (chatty):\n print(\" The next states reached via symbol \", c, \" are: \", ns0, \" and \", ns1) \n if ns0 == ns1:\n if (chatty):\n print(\" Nope. 
Symbol \", c, \" could not distinguish (the next states are the same).\")\n continue \n if (ns0, ns1) in ht:\n if ht[(s0,s1)] == -1:\n if ht[(ns0, ns1)] >= 0: \n if (chatty):\n print(\" Found a distinguishable pair!\") \n ht[(s0,s1)] = ht[(ns0, ns1)] + 1 \n if (chatty):\n print(\" Since \", (ns0,ns1), \" are \", ht[(ns0,ns1)], \" distinguishable, marking \", (s0,s1), \" as \", ht[(s0,s1)], \" distinguishable.\")\n print(\" Hence, must continue through one more sweep of the algorithm.\") \n changed = True \n break\n else:\n if (chatty):\n print(\" Cannot distinguish yet, via \", c) \n continue \n else:\n if (ns1, ns0) in ht: \n if ht[(s0,s1)] == -1:\n if ht[(ns1, ns0)] >= 0: \n if (chatty):\n print(\" Found a distinguishable pair!\")\n ht[(s0,s1)] = ht[(ns1, ns0)] + 1\n\n if (chatty):\n print(\" Since \", (ns0,ns1), \" are \", ht[(ns1,ns0)], \" distinguishable, marking \", (s0,s1), \" as \", ht[(s0,s1)], \" distinguishable.\")\n print(\" Hence, must continue through one more sweep of the algorithm.\") \n changed = True \n break \n else:\n if (chatty):\n print(\" Cannot distinguish yet, via \", c) \n continue\n \n else: \n print(\"ht doesn't cover all reqd state combos. An internal inconsistency!\")\n return ht", "def main():\n G = nx.gnp_random_graph(100, 0.5)\n centrality = nx.eigenvector_centrality(G)\n avg_centrality = sum(centrality.values()) / len(G)\n\n def has_high_centrality(v):\n return centrality[v] >= avg_centrality\n\n source = 0\n value = centrality.get\n condition = has_high_centrality\n\n found_node = progressive_widening_search(G, source, value, condition)\n c = centrality[found_node]\n print('found node {0} with centrality {1}'.format(found_node, c))", "def test_dvidir(self):\n self.chck_triple('dvidir')", "def casdetude_dinardo():\n file_path = PROJECT_PATH + \"/geographycal_data/Monterusciello/MontEdo_buildings\"\n router = Router(building_file=file_path)\n\n router.design_aqueduct(0)\n\n router.solve(router.acqueduct)\n minimal = router.design_minimal_aqueduct(router.acqueduct, \"Q*H\")\n kpi_calculator(minimal)\n\n print(\"N H Z P\")\n for i, (node, datadict) in enumerate(router.acqueduct.nodes.items()):\n print(i, round(datadict[\"H\"]), round(datadict[\"ELEVATION\"]), round(datadict[\"H\"] - datadict[\"ELEVATION\"]))\n\n\n router.write2shp(minimal, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\")\n router.write2epanet(minimal, PROJECT_PATH + \"/Monterusciello_solution/Monterusciello_acqueduct\")", "def dc_nodes(path_dict, num_node, place_type, topo_type):\n \n if topo_type == \"ring\":\n # ring topology\n #node_x = random.choice(range(1, num_node + 1))\n node_x = 1\n if place_type == 'a':\n node_y = (node_x + 1) % num_node\n elif place_type == 'f':\n if num_node % 2 == 1: # odd number of nodes\n node_y = (node_x + (num_node + 1) / 2) % num_node\n else:\n node_y = (node_x + num_node / 2) % num_node\n if node_y == 0:\n node_y = num_node\n if topo_type == \"grid\":\n # grid topology: m-by-m\n top, bottom, left, right = grid_edges(num_node)\n corner = grid_corner(num_node)\n h_edges = deepcopy(top)\n v_edges = deepcopy(left)\n h_edges.extend(bottom)\n v_edges.extend(right)\n #side = set().union(set(top), set(bottom), set(left), set(right))\n \n if place_type == 's':\n #side_nonc = side.difference(set(corner))\n #node_x = random.choice(list(side_nonc))\n #node_x = random.choice(top)\n node_x = 2\n if node_x in h_edges and node_x not in corner:\n node_y = node_x + 1\n elif node_x in v_edges and node_x not in corner:\n node_y = int(node_x + 
math.sqrt(num_node))\n else:\n pass\n print \"Data Center Nodes\", node_x, node_y\n if place_type == 'c':\n #node_x = random.choice(corner)\n node_x = 1\n node_y = num_node + 1 - node_x\n if place_type == 'a':\n # two data centers are adjacent in the center\n if num_node % 2 == 1:\n # odd number of nodes\n node_x = int((1 + num_node) / 2)\n else:\n # even number of nodes\n node_x = int((num_node - math.sqrt(num_node)) / 2)\n node_y = node_x + 1 # inside the grid\n print \"Data Center Nodes\", node_x, node_y \n return (node_x, node_y)", "def dijkstra_convert(rates, value, from_currency, to_currency):\n path = get_shortest_path(from_currency, to_currency)\n counter = 0\n while counter < len(path) - 1:\n print(counter, path[counter], path[counter + 1])\n value = simple_convert(rates, value, path[counter], path[counter + 1])\n counter += 1\n return value" ]
[ "0.59614277", "0.5681578", "0.56544733", "0.55270684", "0.5521709", "0.5521709", "0.5468644", "0.54065055", "0.5363926", "0.53576124", "0.5327712", "0.529251", "0.5275726", "0.5270906", "0.5267602", "0.52594185", "0.5239558", "0.5176163", "0.5163248", "0.51169175", "0.50994235", "0.5092961", "0.50690454", "0.5050751", "0.50444734", "0.5029677", "0.5027903", "0.5025103", "0.50208634", "0.5000304" ]
0.5816856
1
Upload path for raw text; creates a text file with the text in it and creates an analyser object
def raw_text_upload(): try: global current_file if request.method == "POST": raw_text = request.form['raw_text'] # Checks text is not empty raw_text = raw_text.strip('<>') if raw_text != '': if app.config['UPLOAD_FOLDER'] == UPLOAD_FOLDER: app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER + \ session['username'] filepath = filepath = app.config[ 'UPLOAD_FOLDER'] + '/files/raw.txt' filename = 'raw' # Writes file with raw text in with open(filepath, 'w') as f: f.write(raw_text) # Makes actual analyser object current_file = main.Analyser(filepath, filename) analysed_texts = current_file.analysed_texts text_facts = current_file.stats with Database() as db: categories = db.loadCategories() keywords = '' for word in text_facts['Key Words']: keywords += word[0] + ", " keywords = keywords[:-2] return render_template('textdisplay.html', title=current_file.title, texts=analysed_texts, text=analysed_texts['Regular'], facts=text_facts, keywords=keywords, categories=categories, ext=current_file.text.ext, upload=True) except Exception as e: flash(e) return redirect(url_for('index'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_text_file(text, path):\n os.makedirs(os.path.dirname(path), exist_ok=True)\n with open(path, \"w\") as f:\n f.write(text)", "def store_lyrics_text(target_path, track_id, text, extension=\".txt\"):\n file_path = os.path.join(target_path, track_id + extension)\n print(file_path)\n with open(file_path, 'w') as fp_out:\n fp_out.write(text)", "def __init__(self, file_path):\n\n\t\tsuper(Text, self).__init__()\n\n\t\tself.open_file_path(file_path)\n\t\tself.preprocess_raw_text()\n\t\tself.concatenate_processed_text()\n\t\tself.generate_list_of_words()\n\n\t\tself.name = split(file_path)[-1]", "def text_to_file(phase, filename):\n path = \"sons/%s\" % filename # caminho para arquivo\n\n # gera e salva frase pelo gTTS\n voice = gTTS(phase, lang='pt')\n voice.save(path)\n\n return path", "def write_file(text):\n\n\ttempfile.tempdir = UPLOAD_FOLDER\n\ttemp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.txt')\n\n\ttext = text.encode('utf8')\n\n\twith open(temp_file.name, 'w') as temp:\n\t\ttemp.write(text)\n\n\tpathparts = (temp.name).split('/')\n\tpath = \"/\".join(pathparts[5:])\n\n\t#returns the temporary file path\n\treturn path", "def store_file(text: str, file_path: str) -> None:\n with open(file=file_path, mode='w', encoding='utf8') as f:\n f.write(text)", "def text_file_upload(request):\n if request.method == \"POST\":\n txt_files = request.FILES.getlist('text_file')\n fs = [i for i in os.listdir('temp/text_files') if 'txt' in i]\n #delete old files\n for f in fs:\n os.remove('temp/text_files/{}'.format(f))\n for i, f in enumerate(txt_files):\n handle_uploaded_file(f, 'temp/text_files/text_file_{}.txt'.format(i+1))\n\n return _start_analysis(request)\n else:\n return HttpResponse(\n json.dumps({\"error\": \"error, GET request not supported\"}),\n content_type=\"application/json\"\n )", "def save_file(path, text):\n with path.open(mode='w') as f_stream:\n f_stream.write(text)", "def processText(self, text: str, filename: str) :\n execution_time = 0.\n\n directory = os.path.join(self.execution_time_dir, AUDIO_DIR, self.getTTS().getName())\n make_dir(directory)\n time_for_generating_audio_fpath = os.path.join(directory, filename + \".txt\")\n \n audio_fpath = self.getTTS().getAudioPath(\n text=text, audio_dir=self.audio_dir, filename=filename)\n \n if self.recompute or not os.path.exists(audio_fpath):\n # print(audio_fpath)\n start_time = time.time()\n self.getTTS().generateAudio(text=text, audio_fpath=audio_fpath)\n save_execution_time(fpath=time_for_generating_audio_fpath, execution_time=time.time() - start_time)\n \n ## add execution time for generating audio\n execution_time += get_execution_time(\n fpath=time_for_generating_audio_fpath) \n \n transcription_dir = os.path.join(self.transcription_dir, self.getTTS().getName())\n \n transcriptions = {}\n for asr in self.asrs :\n directory = os.path.join(\n self.execution_time_dir, TRANSCRIPTION_DIR, self.getTTS().getName(), asr.getName())\n make_dir(directory)\n time_for_recognizing_audio_fpath = os.path.join(\n directory, filename + \".txt\")\n\n if self.recompute :\n start_time = time.time()\n # TODO: \n # change recognize audio -> input audio instead of fpath\n # audio = asr.loadAudio(audio_fpath=audio_fpath)\n # transcription = asr.recognizeAudio(audio=audio)\n # asr.saveTranscription(transcription_fpath, transcription)\n transcription = asr.recognizeAudio(audio_fpath=audio_fpath)\n asr.setTranscription(transcription)\n asr.saveTranscription(transcription_dir=transcription_dir, filename=filename)\n 
save_execution_time(fpath=time_for_recognizing_audio_fpath, execution_time=time.time() - start_time)\n \n transcription = asr.loadTranscription(\n transcription_dir=transcription_dir, filename=filename)\n num_retry = 0\n while transcription == \"\" and num_retry < self.max_num_retry :\n start_time = time.time()\n asr.recognizeAudio(audio_fpath=audio_fpath)\n asr.saveTranscription(\n transcription_dir=transcription_dir, filename=filename)\n save_execution_time(\n fpath=time_for_recognizing_audio_fpath, execution_time=time.time() - start_time)\n transcription = asr.loadTranscription(\n transcription_dir=transcription_dir, filename=filename)\n\n if asr.getName() == \"wit\" :\n random_number = float(random.randint(9, 47))/10.\n time.sleep(random_number)\n\n num_retry += 1\n\n transcriptions[asr.getName()] = preprocess_text(transcription)\n\n ## add execution time for generating audio\n execution_time += get_execution_time(\n fpath=time_for_recognizing_audio_fpath) \n \n\n cases = self.caseDeterminer(text, transcriptions)\n # if sum(cases.values()) == 0 :\n # print(text)\n # print(transcriptions[\"wav2vec2\"])\n # print(cases)\n # print()\n \n for asr_name, case in cases.items() :\n self.saveCase(self.case_dir, self.getTTS().getName(), asr_name, filename, str(case))\n\n # print(f\"Execution time: {execution_time}\")\n return cases, execution_time", "def _create_file(self, rel_path, text):\n # FIXME: There are better/more secure APIs for creating tmp file paths.\n file_path = self.filesystem.join(self._temp_dir, rel_path)\n self.filesystem.write_text_file(file_path, text)\n return file_path", "def save_text(req: request) -> Tuple[Union[None, str], Union[None, str]]:\n\n json_data = req.get_json()\n if json_data:\n if not isinstance(json_data, dict) or \"text\" not in json_data:\n return (None, \"Incorrect text payload\")\n text = json_data[\"text\"]\n filename = f\"{str(uuid.uuid4())}.txt\"\n filepath = os.path.join(UPLOAD_FOLDER, filename)\n with open(filepath, \"w\") as w:\n w.write(text)\n return (filename, None)\n\n file = req.files[\"file\"]\n if not utils.is_allowed_file(file.filename):\n return (None, \"Incorrect extension\")\n\n ext = file.filename.rsplit(\".\", 1)[1]\n filename = f\"{str(uuid.uuid4())}.{ext}\"\n file.save(os.path.join(UPLOAD_FOLDER, filename))\n\n return (filename, None)", "def open_raw(self, name):\n self._canOperate = False\n self._txt = \"\"\n try:\n with open(name, mode=\"r\", encoding=\"utf-8\") as f:\n for line in f:\n l = line.strip(\"\\n\")\n if l != \"\":\n self._txt += l + \" \"\n else:\n # paragraphing\n self._txt += \"\\n\"\n\n # cut the source into words\n self._words = re.findall(\"[\\w\\dÀÁÂÃÄÅàáâãäåÒÓÔÕÖØòóôõöøÈÉÊËèéêëÇçÌÍÎÏìíîïÙÚÛÜùúûüÿÑñ]+\", self._txt)\n self._length = len(self._words)\n except:\n raise FileNotFound(name)", "def read_raw_text(self, raw_path: str = None):\n\n if raw_path.rsplit(\".\")[-1] == \"json\":\n self.import_from_json(raw_path)\n return\n\n if raw_path is not None:\n self.raw_path = raw_path\n\n if self.raw_path is None:\n raise Exception(\"Found no file to read\")\n\n file = open(raw_path, \"r\")\n raw = file.read()\n file.close()\n\n self.sentences += get_sentences(raw, self.cM.use_spacy)\n\n self.loaded(False)", "def convertToText(self):\n documentType = self._getDocumentType()\n self.tempFilePath = self.getTempFilePath()\n\n callback = getattr(self, TextRepresentation.KNOWNTYPES[documentType])\n\n #Call function to convert\n callback(self.sourceFileName, self.tempFilePath, self.logDir)\n\n return self.tempFilePath", "def 
paste_to_file(self, text):\n self._initialize_file()\n assert os.path.isfile(self.file)\n with open(self.file, 'a') as fid:\n # fid.write((text + '\\n').encode('utf-8'))\n fid.write(text + '\\n')\n return self", "def createTXT(self):\n now = dt.datetime.now().strftime(\"%m-%d %H-%M\")\n self.filename = \"bwcca_tags \" + now\n try:\n if \"/\" in self.dir_lbl[\"text\"]:\n desired_list = self.phraseMaker()\n with open(f\"{self.folder}/{self.filename}.txt\", \"w\") as f:\n for i in desired_list:\n f.write(f\"{i}\\n\")\n self.stat_lbl[\"text\"] = f\"/{self.filename} created!\"\n else:\n self.dir_lbl[\"text\"] = \"Select a folder!\"\n self.dir_btn.focus()\n except Exception as e:\n self.dir_lbl[\"text\"] = e", "def txtWrite(text, path, mode=\"w\"):\n dirMake(os.path.dirname(path))\n textFile = open(path, mode)\n textFile.close()", "def save_file(self, file_name, text):\n\n with open(file_name, 'w') as content_file:\n content = content_file.write(text)", "def _text_to_disk(self, folder=None, filename=None, text=None):\n assert folder and filename\n # unify interface for saving\n text = text or self.text\n assert text, \"There are no text to save !\"\n filepath = os.path.join(folder, filename)\n with open(filepath, 'w') as f:\n f.write(text)\n self.log.debug(\"Written text to file {}\".format(filepath))", "def load_input(self, path):\n f = codecs.open(path, 'r', 'utf-8')\n raw_text = f.read()\n return raw_text", "async def hastebin_upload(self, text: str) -> Union[str, None]:\n req = await self.session.post(\"https://hastebin.com/documents\", data=text)\n reqjson = None\n try:\n reqjson = await req.json()\n key = reqjson[\"key\"]\n except (TypeError, KeyError, aiohttp.ContentTypeError):\n print_error(f\"[red]Could not upload error,[/] Raw Data: {reqjson or 'Could not get raw data'}\")\n url = None\n else:\n url = f\"https://hastebin.com/{key}.txt\"\n return url", "def send_text_file(file_name):\n file_dot_text = file_name + '.txt'\n return views.send_static_file(file_dot_text)", "def save_txt(file_name, url, subdir=''):\n text = get_txt(url)\n create_subdir('static/api/', subdir)\n try:\n with open(file_name, \"w\") as text_file:\n print(text, file=text_file)\n res = True\n except:\n res = False\n return res", "def textdisplay(textTitle, analysis):\n try:\n global current_file\n with Database() as database:\n text_owner = database.getTextOwner(textTitle, session['username'])\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER + text_owner\n path = app.config['UPLOAD_FOLDER'] + '/objects/' + textTitle + '.txt'\n with open(path, 'rb') as f:\n current_file = pickle.load(f)\n analysed_texts = current_file.analysed_texts\n text_facts = current_file.stats\n keywords = ''\n for word in text_facts['Key Words']:\n keywords += word[0] + \", \"\n keywords = keywords[:-2]\n return render_template('textdisplay.html',\n title=current_file.title,\n texts=analysed_texts,\n text=analysed_texts[analysis],\n facts=text_facts,\n keywords=keywords,\n owner=text_owner,\n user=session['username'])\n except Exception as e:\n flash(\"Something went wrong, please try again\")\n return redirect(url_for('profile', username=session['username']))", "def synthesize_text_file(text_file):\n from google.cloud import texttospeech\n client = texttospeech.TextToSpeechClient()\n\n with open(text_file, 'r') as f:\n text = f.read()\n input_text = texttospeech.types.SynthesisInput(text=text)\n\n # Note: the voice can also be specified by name.\n # Names of voices can be retrieved with client.list_voices().\n voice = 
texttospeech.types.VoiceSelectionParams(\n language_code='en-AU',\n name='en-AU-Wavenet-C',\n ssml_gender=texttospeech.enums.SsmlVoiceGender.NEUTRAL)\n\n audio_config = texttospeech.types.AudioConfig(\n audio_encoding=texttospeech.enums.AudioEncoding.MP3,\n speaking_rate=0.80)\n\n response = client.synthesize_speech(input_text, voice, audio_config)\n\n # The response's audio_content is binary.\n filename = text_file\n try:\n filename = filename.replace('.txt', '.mp3')\n filename = filename.replace('../Articles/', '')\n filename = filename.replace(';', ' ')\n filename = filename.replace(\"'\", \" \")\n except Exception as e:\n print(e)\n print('Check replace command in synthesize_file.py file')\n\n with open(filename, 'wb') as out:\n out.write(response.audio_content)\n print(f'Audio content written to file: \\n{filename}\\n')", "def write_text_file(path: Path, data: str) -> None:\n path.write_text(data, encoding='utf-8')", "def Prepend(filepath, text):\n file_data = text\n if os.path.exists(filepath):\n file_data += open(filepath).read()\n f = open(filepath, 'w')\n f.write(file_data)\n f.close()", "def save(self, path, project_name=\"project\"):\n save_path = os.path.join(path, self.save_path)\n save_path = re.sub(r\"/^{}/\".format(self.template.name), project_name, save_path)\n try:\n os.makedirs(os.path.dirname(save_path))\n except FileExistsError:\n pass\n file = open(save_path, \"w\")\n file.write(self.text)\n file.close()\n print(\"save file: \", save_path)", "def text(self):\n for mt in Config.mimes_rtf:\n if mt in self.sub_type:\n self.add_file_string('Rich Text file')\n # TODO: need a way to convert it to plain text\n self.force_ext('.txt')\n return\n for mt in Config.mimes_ooxml:\n if mt in self.sub_type:\n self.add_file_string('OOXML File')\n self._ooxml()\n return\n self.add_file_string('Text file')\n self.force_ext('.txt')", "def saveText(texto, fileName, nameLib): \r\n arq = open(fileName + \"-\" + nameLib + \".txt\", \"w\")\r\n arq.write(texto) \r\n arq.close()" ]
[ "0.63974303", "0.6382405", "0.6246426", "0.62317955", "0.61909753", "0.61618316", "0.6158124", "0.60989183", "0.6093532", "0.6090002", "0.6025201", "0.59735364", "0.59291154", "0.59190583", "0.5890567", "0.5857344", "0.58509505", "0.5835025", "0.5823908", "0.58041954", "0.57996416", "0.5733043", "0.57325506", "0.5710704", "0.5706086", "0.56970596", "0.5688238", "0.5677233", "0.5663925", "0.5661112" ]
0.77678305
0
Shares the text with the user by creating a link in the database
def share_text(texttitle, username): message = session['username'] + \ " shared the text " + texttitle + " with you." with Database() as database: database.share_text(texttitle, username, session["username"]) database.sendNotif(username, message) flash("Text Shared") return redirect(url_for('index'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def share_link(cls, user, link):", "def share_link(cls, user, link):", "def insert_link(self, text, href):\n self.insert_text('\\n<a href=\"%s\">%s</a>' % (href, text))", "def paste_text(text, language=\"text\", paste_expire=8640, paste_user=\"paste.py\",\n return_link=True):\n # costruct url\n data = {\"paste_data\": text,\n \"paste_lang\": language,\n \"api_submit\": \"true\",\n \"mode\": \"json\",\n \"paste_user\": paste_user,\n \"paste_expire\": paste_expire\n }\n try:\n with contextlib.closing(urllib2.urlopen(PASTE_BASE_URL, urllib.urlencode(data))) as query:\n id = json.loads(query.read(), object_hook=Struct).result.id\n return PASTE_BASE_URL + id if return_link else id\n except urllib2.HTTPError as e:\n print(\"Error uploading file:\")\n print(e.reason)", "def send_text_to_user(user):", "def create_link(self):\n self.filename = App.get_running_app().root.ids.camera_screen.capture()\n self.url = FileSharer(self.filename).share()\n self.ids.label.text = self.url", "def content(self, uid, text):\n\n if uid and isinstance(uid, str) and uid.lower().startswith(\"http\"):\n return f\"<a href='{uid}' rel='noopener noreferrer' target='blank'>{text}</a>\"\n\n return text", "def share_email(request, pk):\n template_var = base_template_vals(request)\n subject = 'Your friend shared an event with you on Dons Affairs!'\n from_email = '[email protected]'\n to = '[email protected]'\n to = request.POST[\"email_to\"] #default is sending to self '[email protected]'\n link = request.POST[\"abs_url\"]\n text_content = 'This is an important message.'\n text_content += 'Your friend shared an event link with you. ' + link\n html_content = '<p>Hi Dear,</p>' \n html_content += '<p>Your friend shared an exciting event with you on ' \n html_content += '<a href=\"http://mtk.im/usf\">Don\\'s Affairs</a>!</p>'\n html_content += '<p><a href=\"' + link + '\"> '\n html_content += 'Here is the link to the event.</a>' \n html_content += '<br>Feel free to check it out!</p>' + '<p><br>With love,'\n html_content += '<br>Don\\'s Affairs Team</p>'\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n return redirect('index')", "def writeLink ( text ) :\r\n\r\n if not type( text ) == str : return False\r\n\r\n text = text.strip()\r\n\r\n if not text.endswith( os.sep ) : text = text + os.sep\r\n\r\n text = os.path.abspath( text )\r\n\r\n text = os.path.expanduser( text )\r\n\r\n if not text.endswith( os.sep ) : text = text + os.sep\r\n\r\n try :\r\n \r\n handler = file( text + \"findLibrary.txt\", mode = \"w\" )\r\n\r\n handler.write( text )\r\n\r\n handler.close()\r\n\r\n except Exception, exception :\r\n\r\n return False\r\n \r\n return True", "def copy_link(self):\n try:\n Clipboard.copy(self.url)\n except:\n self.ids.link.text=self.link_message", "def link(self, link, title, text):\n link = escape_link(link)\n return [MdStyleInstructionLink(link)] + text", "def add_link_ref(self, url_from, url_to, link_text):\n words = separate_words(link_text)\n fromid = self.get_entry_id('urllist', 'url', url_from)\n toid = self.get_entry_id('urllist', 'url', url_to)\n if fromid == toid:\n return\n # insert into link table indicating the urls involved\n cur = self.con.execute(\"insert into link(fromid,toid) values (%d,%d)\" % (fromid, toid))\n linkid = cur.lastrowid\n # insert into linkwords the words associated with the link\n for word in words:\n if word in IGNORE_WORDS:\n continue\n wordid = self.get_entry_id('wordlist', 'word', word)\n 
self.con.execute(\"insert into linkwords(linkid,wordid) values (%d,%d)\"\n % (linkid, wordid))", "def edit_link(db_object, text=None):\n if text is None:\n text = 'edit'\n return _make_link(db_object.update_url(), text)", "def link_to_modal(link_text, index, autoescape=True): # pylint: disable=unused-argument\n link = (\n '<a'\n ' href=\"#!\"'\n ' class=\"text-underline view-course-details-link\"'\n ' id=\"view-course-details-link-{index}\"'\n '>{link_text}</a>'\n ).format(\n index=index,\n link_text=link_text,\n )\n return mark_safe(link)", "def link(path):\n abs_path = navigate.get_abs_path(path)\n parent, name = navigate.split_path(abs_path)\n if not db.file_exists(parent, name):\n print \"Error: '\" + path + \"' does not exist.\"\n else:\n dbox_path = '/' + name\n access_token = db.get_access_to_file(parent, name)\n client = dropbox.client.DropboxClient(access_token)\n short_link = client.share(dbox_path)['url']\n normal_link = client.share(dbox_path, short_url=False)['url']\n dl_link = normal_link.replace('www.dropbox.com',\n 'dl.dropboxusercontent.com', 1)\n print \"short link: \" + short_link\n print \"normal link: \" + normal_link\n print \"download link: \" + dl_link", "def add_link():\n return True", "def urlLink(self, text=None, url=None, attrs={}):\n if not text:\n text = self.titleOrId()\n text = escape(text)\n if not self.checkRemotePerm(\"View\", self):\n return text\n if not url:\n url = self.getPrimaryUrlPath()\n if len(attrs):\n return '<a href=\"%s\" %s>%s</a>' % (url,\n ' '.join('%s=\"%s\"' % (x,y) for x,y in attrs.items()),\n text)\n else:\n return '<a href=\"%s\">%s</a>' % (url, text)", "def share(link, emails, from_name = \"\", reply_to = \"\", body = \"\"):\r\n now = datetime.datetime.now(g.tz)\r\n ival = now - timeago(g.new_link_share_delay)\r\n date = max(now,link._date + ival)\r\n Email.handler.add_to_queue(c.user, link, emails, from_name, g.share_reply,\r\n date, request.ip, Email.Kind.SHARE,\r\n body = body, reply_to = reply_to)", "def link(request, link_id):\n bkmrk_id = Bookmark.decode_id(link_id)\n bookmark = get_object_or_404(Bookmark, pk=bkmrk_id)\n if request.user.is_authenticated():\n Click.objects.create(human=request.user, bookmark=bookmark)\n else:\n Click.objects.create(bookmark=bookmark)\n return redirect(bookmark.url)", "def click(cls, user, link):\r\n pass", "def add_links(update: Update, context: CallbackContext):\n urls = update.message.parse_entities([\"url\", \"text_link\"]).values()\n\n if urls:\n logging.info(f\"Got content of type url, text_link: {urls}\")\n\n with db.connect() as connection:\n existing_links = db.get_links(connection, update.message.from_user.id)\n if existing_links:\n distinct_links = set([url.casefold() for url in urls]) - set(\n [link.url for link in existing_links]\n )\n else:\n distinct_links = set([url.casefold() for url in urls])\n\n if distinct_links:\n success = context.bot.send_message(\n chat_id=update.message.chat_id,\n text=f\"⏳ Saving your link{'s' if len(distinct_links) > 1 else ''}... ⏳\",\n disable_notification=True,\n )\n\n db.add_links(connection, distinct_links, update.message.from_user.id)\n\n context.bot.edit_message_text(\n chat_id=update.message.chat_id,\n message_id=success.message_id,\n text=f\"✨ {len(distinct_links)} link{'s' if len(distinct_links) > 1 else ''} saved ✨\",\n )\n else:\n context.bot.send_message(\n chat_id=update.message.chat_id,\n text=f\"You already have that link saved! 
Look it up with *View all* or */all*\",\n parse_mode=telegram.ParseMode.MARKDOWN,\n )", "def setTextLink (self, text):\n self.textUpQLabel.setText(text)", "async def link(self, ctx):\n if not is_linked(ctx.author.id):\n token = str(uuid.uuid4())\n valid_until = int((datetime.utcnow() + timedelta(days=1)).timestamp())\n add_token(ctx.author.display_name, ctx.author.id, token, valid_until, str(ctx.author.avatar_url))\n web_base_url = get_setting('web_base_url')\n await ctx.author.send(f\"Please visit {web_base_url}/link/{token} to link your Spotify account. \"\n f\"This link will expire after 24 hours.\")\n if ctx.guild is not None:\n await ctx.message.add_reaction('📬')\n else:\n await ctx.reply(\"You have already linked a spotify account!\")", "def user2Link(user): \n # could also look up mail addrs via a table lookup, etc\n return '<a href=\"mailto:%(user)[email protected]\">%(user)s</a>' % {\"user\": user}", "async def paste(text: str) -> str:\n\n async with aiohttp.ClientSession() as aioclient:\n post = await aioclient.post(\"https://hastebin.com/documents\", data=text)\n if post.status == 200:\n response = await post.text()\n return f\"https://hastebin.com/{response[8:-2]}\"\n\n # Fallback bin\n post = await aioclient.post(\"https://bin.drlazor.be\", data={\"val\": text})\n if post.status == 200:\n return post.url", "def addContent(text):", "def contact_linkup(self, request, pk):\n obj_api = api()\n title_contact = \"Tu contacto Linkup\"\n token = request.session['token']\n resp = obj_api.get(slug='sellers/' + pk + \"/\", token=token)\n return render(request, 'frontend/actors/client/my_account.html', {'data_user': resp, \n 'title_contact': title_contact})", "def link_click(_):\r\n\r\n tag_name = about_content.tag_names(tkinter.CURRENT)[0]\r\n about_content.tag_config(tag_name, foreground=\"#551A8B\")\r\n if tag_name == 'hyper':\r\n webbrowser.open(\"https://www.facebook.com/nihal.agarwal.14\")\r\n else:\r\n webbrowser.open(\"https://github.com/NihalAgarwal/Windows-Wi-Fi-Manager\")", "def add_link(self, text, link, doc=None):\n if doc is None:\n doc = self.doc\n\n attributes = dict(height=13, width=800, align=None,\n style={'width': '800px',\n 'font-size': '100%',\n 'font-style': 'italic',\n 'font-weight': 'lighter',\n 'color': self.palette['hover'],\n 'text-align': 'center'})\n\n color = self.palette['hover']\n style = f\"style=\\\"text-decoration: none; color: {color};\\\"\"\n\n doc.add_root(Div(text=f\"<a href=\\\"{link}\\\" {style}>{text}</a>\",\n **attributes))\n return doc", "async def link_to(self, *args):\n pass" ]
[ "0.6997458", "0.6997458", "0.6394542", "0.6387845", "0.63727754", "0.6336196", "0.62323594", "0.59182006", "0.59175485", "0.5866517", "0.585186", "0.5835037", "0.58334965", "0.57938176", "0.5787896", "0.5775299", "0.57594526", "0.5759272", "0.5726179", "0.57058954", "0.5695367", "0.568566", "0.56709146", "0.56678915", "0.5648084", "0.5647578", "0.5646238", "0.5633367", "0.56195134", "0.5613913" ]
0.7278323
0
Displays the text on upload and allows the analysis to be selected
def textdisplay(textTitle, analysis): try: global current_file with Database() as database: text_owner = database.getTextOwner(textTitle, session['username']) app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER + text_owner path = app.config['UPLOAD_FOLDER'] + '/objects/' + textTitle + '.txt' with open(path, 'rb') as f: current_file = pickle.load(f) analysed_texts = current_file.analysed_texts text_facts = current_file.stats keywords = '' for word in text_facts['Key Words']: keywords += word[0] + ", " keywords = keywords[:-2] return render_template('textdisplay.html', title=current_file.title, texts=analysed_texts, text=analysed_texts[analysis], facts=text_facts, keywords=keywords, owner=text_owner, user=session['username']) except Exception as e: flash("Something went wrong, please try again") return redirect(url_for('profile', username=session['username']))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def raw_text_upload():\n try:\n global current_file\n if request.method == \"POST\":\n raw_text = request.form['raw_text']\n # Checks text is not empty\n raw_text = raw_text.strip('<>')\n if raw_text != '':\n if app.config['UPLOAD_FOLDER'] == UPLOAD_FOLDER:\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER + \\\n session['username']\n filepath = filepath = app.config[\n 'UPLOAD_FOLDER'] + '/files/raw.txt'\n filename = 'raw'\n # Writes file with raw text in\n with open(filepath, 'w') as f:\n f.write(raw_text)\n\n # Makes actual analyser object\n current_file = main.Analyser(filepath, filename)\n analysed_texts = current_file.analysed_texts\n text_facts = current_file.stats\n with Database() as db:\n categories = db.loadCategories()\n keywords = ''\n for word in text_facts['Key Words']:\n keywords += word[0] + \", \"\n keywords = keywords[:-2]\n return render_template('textdisplay.html',\n title=current_file.title,\n texts=analysed_texts,\n text=analysed_texts['Regular'],\n facts=text_facts,\n keywords=keywords,\n categories=categories,\n ext=current_file.text.ext,\n upload=True)\n except Exception as e:\n flash(e)\n return redirect(url_for('index'))", "def webtext(analysis):\n global current_file\n try:\n if request.form[\"url\"] == \"\":\n flash(\"No URL given\")\n return redirect(url_for('index'))\n url = request.form['url']\n current_file = main.Analyser(url)\n analysed_texts = current_file.analysed_texts\n text_facts = current_file.stats\n with Database() as database:\n categories = database.loadCategories()\n keywords = ''\n for word in text_facts['Key Words']:\n keywords += word[0] + \", \"\n keywords = keywords[:-2]\n return render_template('textdisplay.html',\n title=current_file.title,\n texts=analysed_texts,\n text=analysed_texts[analysis],\n ext=current_file.text.ext,\n keywords=keywords,\n categories=categories,\n facts=text_facts,\n upload=True)\n\n except:\n flash(\"Web address not found!\")\n return redirect(url_for('index'))", "def changeview(analysis):\n try:\n analysed_texts = current_file.analysed_texts\n text_facts = current_file.stats\n with Database() as db:\n categories = db.loadCategories()\n keywords = ''\n for word in text_facts['Key Words']:\n keywords += word[0] + \", \"\n keywords = keywords[:-2]\n return render_template('textdisplay.html',\n title=current_file.title,\n ext=current_file.text.ext,\n texts=analysed_texts,\n keywords=keywords,\n text=analysed_texts[analysis],\n categories=categories,\n facts=text_facts,\n upload=True)\n except Exception as e:\n flash(\"Something went wrong, please try again\")\n return redirect(url_for('profile', username=session['username']))", "def text_file_upload(request):\n if request.method == \"POST\":\n txt_files = request.FILES.getlist('text_file')\n fs = [i for i in os.listdir('temp/text_files') if 'txt' in i]\n #delete old files\n for f in fs:\n os.remove('temp/text_files/{}'.format(f))\n for i, f in enumerate(txt_files):\n handle_uploaded_file(f, 'temp/text_files/text_file_{}.txt'.format(i+1))\n\n return _start_analysis(request)\n else:\n return HttpResponse(\n json.dumps({\"error\": \"error, GET request not supported\"}),\n content_type=\"application/json\"\n )", "def browse( self ):\n Tk.Tk().withdraw()\n filename = askopenfilename( initialdir = self.initialdir,\n title = self.title ,\n filetypes = self.filetypes )\n\n if filename == \"\":\n return\n\n self.set_text( filename )\n #rint( f\"get_text = {self.get_text()}\", flush = True )", "def ocr():\n return render_template('upload.html')", "def upload():\n\treturn 
render_template(\"upload.html\", title=\"Upload a file\")", "def upload_file():\n try:\n global current_file\n if request.method == \"POST\":\n # Validates a file has been uploaded\n if 'file' not in request.files:\n flash(\"No file submitted\")\n return redirect(url_for('index'))\n\n f = request.files['file']\n if f.filename == '':\n flash(\"No file submitted\")\n return redirect(url_for('index'))\n\n if app.config['UPLOAD_FOLDER'] == UPLOAD_FOLDER:\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER + \\\n session['username']\n\n if check_extension(f.filename):\n # Makes sure filename is safe\n filename = secure_filename(f.filename)\n filepath = app.config['UPLOAD_FOLDER'] + '/files/' + filename\n # Saves the uploaded file\n f.save(filepath)\n # Removes extension from filename\n filename = filename.replace('.txt', '')\n filename = filename.replace('.pdf', '')\n filename = filename.replace('.docx', '')\n\n current_file = main.Analyser(filepath, filename)\n analysed_texts = current_file.analysed_texts\n text_facts = current_file.stats\n with Database() as db:\n categories = db.loadCategories()\n keywords = ''\n for word in text_facts['Key Words']:\n keywords += word[0] + \", \"\n keywords = keywords[:-2]\n return render_template('textdisplay.html',\n title=current_file.title,\n texts=analysed_texts,\n text=analysed_texts['Regular'],\n facts=text_facts,\n ext=current_file.text.ext,\n categories=categories,\n keywords=keywords,\n upload=True)\n\n else:\n flash(\"File type not allowed\")\n return redirect(url_for('index'))\n\n else:\n return redirect(url_for('index'))\n except Exception as e:\n flash(\"Something went wrong, please try again\")\n return redirect(url_for('index'))", "def display_text(target_text):\n\n print('Text to analyze:')\n print('')\n print('-------TEXT BELOW-------')\n print(target_text)\n print('-------TEXT ENDS-------')\n print('')", "def main():\r\n activities = [\"EDA\",\"Plots\"]\t\r\n choice = st.sidebar.selectbox(\"Select Activities\",activities)\r\n\r\n if choice == 'EDA':\r\n result = st.file_uploader(\"Upload\", type=\"txt\")\r\n\r\n # filename =st.text_input('Enter a file path:')\r\n try:\r\n if result:\r\n # Process you file here\r\n data = result.getvalue()\r\n # file1 = open(filename,\"r\") \r\n # data=file1.read()\r\n data=data.lower().replace('\\n','')\r\n # file1.close() \r\n st.write(data[:200])\r\n obj=Lyrics()\r\n add_split = st.sidebar.slider(\r\n 'Select a split of values',\r\n 2, 25\r\n )\r\n st.write(\"Select Split from Left Slider .\")\r\n if add_split>3:\r\n # split=st.text_input(\"Enter String split for Prediction :\")\r\n gen=obj.generator(data=data,split=int(add_split))\r\n if gen:\r\n startString=st.text_input(\"Enter Starting String for Prediction :\")\r\n if len(startString)>0:\r\n val=st.sidebar.slider(\r\n \"How many char's want's to Prediction :\",\r\n 100, 1000\r\n )\r\n st.write(\"Select no of char's want's to Prediction from Left Slider .\")\r\n if val>100:\r\n final_op=obj.future_data(startString,val,add_split)\r\n st.write(final_op)\r\n except FileNotFoundError:\r\n st.error('File not found.')\r\n except IndexError:\r\n st.error('Select only one Author. ')\r\n except KeyError:\r\n st.error(\"Enter correct Integer. 
\")", "def browse_files_in(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring.delete(0,tk.END)\n self.docstring.insert(0,path_to_data)\n #use chosen value as self.data_file\n self.data_file.set(path_to_data)", "def upload():\n\n file = request.files['query']\n filepath = upload_filepath(secure_filename(file.filename))\n file.save(filepath)\n classification = classify(filepath)\n classification['filename'] = file.filename\n return render_template('index.html', classification=classification)", "def form_valid(self, form):\n file_in_memory = form.cleaned_data \n xml_text = forms.handle_upload(file_in_memory)\n data = parse_txt(xml_text)\n return render(self.request, 'esfviewer/output.html', {'data': data})", "def on_loadFile(self):\n self.stored = self.teText.toHtml()\n self.rf_widgetVis(state=True)", "def file_open(self):\n filename, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File')\n\n with open(filename, 'r', encoding=\"utf8\") as file:\n self.file_cont = file.readlines()\n self.textToAnalize.setText(''.join(self.file_cont))", "def main():\n st.info(\n \"This webpage lets you upload wav audio file and transribe it to Amharic, CHECK THAT OUT !!\")\n st.markdown(STYLE, unsafe_allow_html=True)\n st.header(\"Upload audio file\")\n file = st.file_uploader(\"Audio file\", type=FILE_TYPES)\n show_file = st.empty()\n if not file:\n show_file.info(\"Please upload a file of type: \" +\n \", \".join(FILE_TYPES))\n return\n\n file_type = get_file_type(file)\n if file_type == FileType.PYTHON:\n st.code(file.getvalue())\n\n elif file_type == FileType.SOUND:\n # st.code(file.getvalue())\n audio_bytes = file.read()\n st.audio(audio_bytes, format=\"audio/ogg\")\n\n else:\n data = pd.read_csv(file)\n st.dataframe(data.head(10))\n\n with open(os.path.join(\"./tempfile\", file.name), \"wb\") as f:\n f.write(file.getbuffer())\n st.success(\"Processing File..\")\n\n st.header(\"Transcribe audio\")\n if st.button('Transcribe'):\n st.write(\"\")\n with st.spinner('wait for it ...'):\n time.sleep(60)\n st.success('Done!')\n else:\n st.write('')\n\n # if file:\n # token, t_id = upload_file(file)\n # result = {}\n # #polling\n # sleep_duration = 1\n # percent_complete = 0\n # progress_bar = st.progress(percent_complete)\n # st.text(\"Currently in queue\")\n # while result.get(\"status\") != \"processing\":\n # percent_complete += sleep_duration\n # time.sleep(sleep_duration)\n # progress_bar.progress(percent_complete/10)\n # result = get_text(token,t_id)\n\n # sleep_duration = 0.01\n\n # for percent in range(percent_complete,101):\n # time.sleep(sleep_duration)\n # progress_bar.progress(percent)\n\n # with st.spinner(\"Processing.....\"):\n # while result.get(\"status\") != 'completed':\n # result = get_text(token,t_id)\n\n # st.balloons()\n # st.header(\"Transcribed Text\")\n # st.subheader(result['text'])\n\n file.close()", "def show(self):\n self.set_text(self.read())", "def open_dialog1(self):\n file_name = QtGui.QFileDialog.getOpenFileName()\n self.change_task(\"Computing recency vectors of text 1\")\n txt = self.controller.process_raw_text(file_name, LEFT_TEXT)\n self.end_task()\n self._window.column1.align_disp.set_text(txt)", "def main():\n\n\tst.title(\"Sentiment Analysis Emoji App\")\n\n\tactivities = [\"Sentiment\",\"Text Analysis on URL\",\"About\"]\n\tchoice = st.sidebar.selectbox(\"Choice\",activities)\n\n\tif choice == 'Sentiment':\n\t\tst.subheader(\"Sentiment Analysis\")\n\t\tst.write(emoji.emojize('Everyone :red_heart: 
Streamlit ',use_aliases=True))\n\t\traw_text = st.text_area(\"Enter Your Text\",\"Type Here\")\n\t\tif st.button(\"Analyze\"):\n\t\t\tblob = TextBlob(raw_text)\n\t\t\tresult = blob.sentiment.polarity\n\t\t\tif result > 0.0:\n\t\t\t\tcustom_emoji = ':smile:'\n\t\t\t\tst.write(emoji.emojize(custom_emoji,use_aliases=True))\n\t\t\telif result < 0.0:\n\t\t\t\tcustom_emoji = ':disappointed:'\n\t\t\t\tst.write(emoji.emojize(custom_emoji,use_aliases=True))\n\t\t\telse:\n\t\t\t\tst.write(emoji.emojize(':expressionless:',use_aliases=True))\n\t\t\tst.info(\"Polarity Score is:: {}\".format(result))\n\t\t\t\n\tif choice == 'Text Analysis on URL':\n\t\tst.subheader(\"Analysis on Text From URL\")\n\t\traw_url = st.text_input(\"Enter URL Here\",\"Type here\")\n\t\ttext_preview_length = st.slider(\"Length to Preview\",50,100)\n\t\tif st.button(\"Analyze\"):\n\t\t\tif raw_url != \"Type here\":\n\t\t\t\tresult = get_text(raw_url)\n\t\t\t\tblob = TextBlob(result)\n\t\t\t\tlen_of_full_text = len(result)\n\t\t\t\tlen_of_short_text = round(len(result)/text_preview_length)\n\t\t\t\tst.success(\"Length of Full Text::{}\".format(len_of_full_text))\n\t\t\t\tst.success(\"Length of Short Text::{}\".format(len_of_short_text))\n\t\t\t\tst.info(result[:len_of_short_text])\n\t\t\t\tc_sentences = [ sent for sent in blob.sentences ]\n\t\t\t\tc_sentiment = [sent.sentiment.polarity for sent in blob.sentences]\n\t\t\t\t\n\t\t\t\tnew_df = pd.DataFrame(zip(c_sentences,c_sentiment),columns=['Sentence','Sentiment'])\n\t\t\t\tst.dataframe(new_df)\n\n\tif choice == 'About':\n\t\tst.subheader(\"About:Sentiment Analysis Emoji App\")\n\t\tst.info(\"Built with Streamlit,Textblob and Emoji\")\n\t\tst.text(\"Jesse E.Agbe(JCharis\")\n\t\tst.text(\"Jesus Saves@JCharisTech\")", "def browse_files_out(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring_offers.delete(0,tk.END)\n self.docstring_offers.insert(0,path_to_data)\n #use chosen value as self.exchanged_offers_filepa\n self.exchanged_offers_filepath.set(path_to_data)", "def on_open_text(self, event):\r\n self.text_id = event.EventObject.text_id\r\n data = self.data[self.text_id]\r\n self.edit_text.Value = data[\"text\"]\r\n self.list_lang.Value = data[\"lang_text\"]\r\n if data[\"filenames\"]:\r\n self.mediactrl.Load(data[\"filenames\"][0])\r\n if self.mc_hack:\r\n wx.CallLater(500, self.mediactrl.Play)", "def displayText(self):\n if self.entryWidget.get().strip() == \"\":\n tkMessageBox.showerror(\"Tkinter Entry Widget\", \"Enter a text value\")\n else:\n self.file_com.write(self.entryWidget.get().strip()+'\\n')", "def upload_file(self):\n self.master.switch_frame(UploadFileView)", "def analyse() -> 'html':\n storeFileName = ''\n if request.method == 'POST':\n \n if 'file' not in request.files:\n return render_mainpageerror('No file selected')\n \n file = request.files['file']\n if file.filename == '':\n return render_mainpageerror('Empty file name Please select a File to upload')\n \n if file and not allowed_file(file.filename):\n storeFileName = filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return render_mainpageerror('Not allowed file format')\n \n if ' ' in file.filename:\n return render_mainpageerror('Please remove withespace from file name before uploading')\n \n if file and allowed_file(file.filename):\n originalFilename = file.filename\n storeFileName = filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], 
filename))\n \n \n if len(storeFileName) == 0:\n return render_template('mainpage.html',\n title='Word association')\n else:\n wordsfromfile = readFile.red_uploaded_file(storeFileName)\n #print(wordsfromfile, 'The ans from pdf')\n if wordsfromfile == 'STOP':\n return render_mainpageerror('Cant find Encoding on PDF document')\n size = get_store()\n if not check_fileName(size):\n \"\"\"Analyse first time building up JSON file\"\"\"\n keyWords = readFile.read_JSON()\n allJson = readFile.load_JSON()\n data = Counter(wordsfromfile)\n printResultsExluds = []\n container = {}\n container['size'] = size\n container['file'] = storeFileName\n toDB = []\n for k, v in sorted(data.items()):\n if k in keyWords:\n string = allJson[k][:3]\n toDB.append({k : v})\n toDB.append(string)\n else:\n value = str(v)\n keystr = k + '(' + value + ')'\n printResultsExluds.append(keystr)\n for w in printResultsExluds:\n toDB.append({w:'was not found in the associations list'})\n\n container['files'] = toDB \n js = json.dumps(container)\n print(js)\n c = get_client()\n c.insert(json.loads(js))\n print('insert')\n \"\"\"If analysed first time\"\"\"\n return pr(size, 'Analysed first time')\n else:\n \"\"\"If already Analysed\"\"\"\n return pr(size,'Already analysed')", "def browse( self ):\n Tk.Tk().withdraw()\n dirname = askdirectory()\n\n self.set_text( dirname )\n #rint( f\"get_text = {self.get_text()}\", flush = True )", "def buttonClick(self):\n \n self.fpath=filedialog.askopenfilename()\n self.label_fpath.config(text=self.fpath)\n self.err_label.config(text='')\n pass", "def text(self):\n for mt in Config.mimes_rtf:\n if mt in self.sub_type:\n self.add_file_string('Rich Text file')\n # TODO: need a way to convert it to plain text\n self.force_ext('.txt')\n return\n for mt in Config.mimes_ooxml:\n if mt in self.sub_type:\n self.add_file_string('OOXML File')\n self._ooxml()\n return\n self.add_file_string('Text file')\n self.force_ext('.txt')", "def open_file(self):\n files = [('Text Document', '*.txt'), ('PDF Document', '*.pdf'), ('Word Document', '*.docx')]\n text_file = askopenfile(mode='r', title=\"Open your file\", filetypes=files,\n defaultextension=files)\n if text_file is not None:\n self.file_path = text_file.name\n text_inside = self.file.load_file(text_file.name)\n text_file.close()\n self.textbox.delete(\"1.0\", tk.END)\n self.textbox.insert(\"1.0\", text_inside)\n self.text = self.textbox", "def update(self, *_):\n if not self.input_main.edit_modified():\n return\n\n analyze_text = self.create_analysis()\n self.output_main[\"state\"] = tk.NORMAL\n self.output_main.delete(\"1.0\", tk.END)\n self.output_main.insert(\"1.0\", analyze_text)\n self.output_main[\"state\"] = tk.DISABLED\n self.input_main.edit_modified(False)", "def selectTrial(self):\r\n # Select interactively\r\n self.trcFilePath = utils.FileUtils.getInstance().browseForFilename('.trc','Select the file to preview',1)" ]
[ "0.7159925", "0.70026785", "0.6800656", "0.6480692", "0.6478881", "0.63340646", "0.6268505", "0.62629604", "0.6173394", "0.61475825", "0.6066088", "0.60618997", "0.60281646", "0.5980928", "0.5969213", "0.5954582", "0.5953171", "0.59319204", "0.59178823", "0.5866974", "0.5828872", "0.5811747", "0.5793154", "0.57854456", "0.57580507", "0.5755527", "0.57498574", "0.5743698", "0.57331926", "0.5728825" ]
0.7547743
0
Saves the text object in a text file and saves it in the db
def save_text(): try: global current_file if request.method == "POST": current_file.title = request.form['title'].replace(' ', '') with Database() as database: category = database.getCategory(request.form['Category']) current_file.category = category session['id'] = database.getID(session['username']) owned, shared = database.getUsersTexts(session['id']) result = [x[0] for x in owned] + [x[0] for x in shared] # Checks that the user does not already have # access to a text with the same name if current_file.title not in result and current_file.title != "": object_file_path = app.config[ 'UPLOAD_FOLDER'] + '/objects/' + current_file.title + '.txt' # Puts the object in the file pickle.dump(current_file, open(object_file_path, 'wb')) fhc = current_file.text.content[:97] + '...' data = [session['id'], current_file.title, fhc, current_file.category] + current_file.has_features keywords = current_file.stats['Key Words'] # Saves to database database.addText(data, keywords) # Adds keywords to graph with open("word_graph.txt", "rb") as f: G = pickle.load(f) for keyword in keywords: G.add_node(keyword[0]) for k in keywords: if k[0] != keyword[0]: G.add_edge(keyword[0], k[0]) # Saves graph in file again with open("word_graph.txt", "wb") as f: pickle.dump(G, f) current_file = None return redirect(url_for('profile', username=session['username'])) else: print("LONEOWDBOFHNEROSFOEBFWEBFWOD") flash("A file with this name already exists.") categories = database.loadCategories() analysed_texts = current_file.analysed_texts text_facts = current_file.stats keywords = '' for word in text_facts['Key Words']: keywords += word[0] + ", " keywords = keywords[:-2] return render_template('textdisplay.html', title=current_file.title, texts=analysed_texts, text=analysed_texts['Regular'], facts=text_facts, keywords=keywords, categories=categories, ext=current_file.text.ext, upload=True) else: flash("Page does not exist") return redirect(url_for('index')) except Exception as e: flash("Something went wrong, please try again") return redirect(url_for('index'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_text(self):\n if self.tab_control.index(\"current\") == 0:\n text = self.textbox.get(\"1.0\", tk.END)\n if text is not None:\n files = [('Text Document', '*.txt')]\n text_file = asksaveasfile(title=\"Save your text as .txt\", filetypes=files,\n defaultextension=files)\n if text_file is not None:\n text_file.write(text)\n text_file.close()\n else:\n msg.showwarning(title=\"Warning\", message=\"There is no data to save!\")\n elif self.tab_control.index(\"current\") == 1:\n text = self.words\n if len(text) > 0:\n result = msg.askyesno(title=\"Save words\", message=\"Do you want to save top 5 words in SQL database?\")\n if result:\n if self.file_path is not None:\n TextReaderInterface.start_sql_gui(words=text, path=self.file_path)\n else:\n msg.showwarning(message=\"You have to save your file as txt before saving in SQL!\")\n files = [('Text Document', '*.txt')]\n text_file = asksaveasfile(title=\"Save your text as .txt\", filetypes=files,\n defaultextension=files)\n if text_file is not None:\n self.file_path = text_file.name\n text_file.write(str(text))\n text_file.close()\n TextReaderInterface.start_sql_gui(words=text, path=self.file_path)\n else:\n msg.showwarning(title=\"Warning\", message=\"There is no data to save!\")", "def save(self, file_path):\n with open(file_path, 'w') as file:\n file.write(self.text)\n file.close()", "def save_to_text(self, file_name, data):\n\n valid_data = ''\n for item in data:\n valid_data = valid_data + item.get_data()+'\\n'\n\n file_save = open(file_name, 'w')\n file_save.write(valid_data)\n file_save.close()", "def save_file(self, file_name, text):\n\n with open(file_name, 'w') as content_file:\n content = content_file.write(text)", "def saveText(texto, fileName, nameLib): \r\n arq = open(fileName + \"-\" + nameLib + \".txt\", \"w\")\r\n arq.write(texto) \r\n arq.close()", "def _text_to_disk(self, folder=None, filename=None, text=None):\n assert folder and filename\n # unify interface for saving\n text = text or self.text\n assert text, \"There are no text to save !\"\n filepath = os.path.join(folder, filename)\n with open(filepath, 'w') as f:\n f.write(text)\n self.log.debug(\"Written text to file {}\".format(filepath))", "def save_txt(filename, data, encoding):\n with open(filename, \"w\") as f:\n f.write(dump(data, encoding))", "def save2txt(obj, file: str):\n with open(file, \"w\") as f:\n print(obj, file=f)", "def save_file(path, text):\n with path.open(mode='w') as f_stream:\n f_stream.write(text)", "def saveText(self):\n self.rsubject.saveOnChanged(self.edCursor.getPos())\n\n # Allows saving after a certain number of delete operations:\n self.deleteCount = 0", "def save():", "def save_in_db(self):\n self.sql_database.table_name = self.table_db\n self.sql_database.db_name = self.db\n if self.sql_database.insert_item(text_path=self.path, word_first=self.word_1.get(),\n word_second=self.word_2.get(),\n word_third=self.word_3.get(), word_fourth=self.word_4.get(),\n word_fifth=self.word_5.get()):\n msg.showinfo(message=\"Done\")", "def store_file(text: str, file_path: str) -> None:\n with open(file=file_path, mode='w', encoding='utf8') as f:\n f.write(text)", "def save_text_file(text, path):\n os.makedirs(os.path.dirname(path), exist_ok=True)\n with open(path, \"w\") as f:\n f.write(text)", "def save(self,file):\n\n with open(file,\"w\") as f:\n f.write(self.to_string())", "def save(self):\n with open(self.__file_path, \"w\", encoding=\"UTF-8\") as file:\n parsed_dict = {\n key: value.to_dict()\n for key, value in self.__objects.items()\n }\n 
save_data(parsed_dict, file)", "def save(self, db):\n # Create new if no id is given\n if self._id is None:\n self.collection(db).insert_one(\n document={'text': self.text})\n\n # Else update old\n else:\n self.collection(db).update_one(\n filter={'_id': ObjectId(self._id)},\n update={'$set': {'text': self.text}})", "def storeText(ffile):\n print '\\nProcessing ' + str(ffile) + '...'\n try:\n meifile = convert(str(ffile))\n except Exception, e:\n lg.debug(\"Could not process file {0}. Threw exception: {1}\".format(ffile, e))\n\n page = meifile.search('page')\n pagen = int(page[0].attribute_by_name('n').value)\n\n lines = meifile.search('l')\n zones = meifile.search('zone')\n\n textdocs = []\n for line in lines:\n text = line.value\n facs = str(line.attribute_by_name('facs').value)\n zone = findbyID(zones, facs)\n ulx = int(zone.ulx)\n uly = int(zone.uly)\n lrx = int(zone.lrx)\n lry = int(zone.lry)\n \n textdocs.append({'id': str(uuid.uuid4()), 'pagen': pagen, 'text': text, 'location': {\"ulx\": ulx ,\"uly\": uly, \"height\": abs(uly - lry), \"width\": abs(ulx - lrx)}})\n \n solrconn.add_many(textdocs)\n solrconn.commit()", "def save_text_file(i):\n\n fn = i['text_file']\n\n s = i['string']\n\n try:\n s = s.replace('\\r', '')\n except Exception as e:\n pass\n\n try:\n s = s.replace(b'\\r', b'')\n except Exception as e:\n pass\n\n m = 'w'\n if i.get('append', '') == 'yes':\n m = 'a'\n\n try:\n s = s.encode('utf8')\n except Exception as e:\n pass\n\n try:\n # if sys.version_info[0]>2:\n # f=open(fn, m+'b')\n # f.write(s)\n # else:\n f = open(fn, m+'b')\n f.write(s)\n except Exception as e:\n return {'return': 1, 'error': 'problem writing text file='+fn+' ('+format(e)+')'}\n\n f.close()\n\n return {'return': 0}", "def save(self):\n if PYTHON3:\n fileobj = open(self.filename, 'w', encoding=self.ENCODING, errors=\"replace\")\n else:\n fileobj = open(self.filename, 'w')\n self.save_to_fileobj(fileobj)\n fileobj.close()", "def save_txt_file():\n global output_on_display\n if data_base == '':\n mistake_load_table()\n else:\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"TXT\", \"*.txt\"), (\"all files\", \"*.*\")),\n defaultextension='.txt')\n if Path(save_name).suffix == '.txt':\n data_txt = output_on_display.get('1.0', 'end')\n f = open(save_name, 'w')\n f.write(data_txt)\n f.close()", "def save(self, path: str):\n\n\t\tinfo_dict = {\n\t\t\t\"n_gram_size\": self.n_gram_size,\n\t\t\t\"caseless\": self.caseless,\n\t\t\t\"ignore_punctuation\": self.ignore_punctuation,\n\t\t\t\"add_pos_tags\": self.add_pos_tags,\n\t\t\t\"uses_lemma\": self.uses_lemma,\n\t\t\t\"uses_sentences\": self.uses_sentences\n\t\t}\n\n\t\twith open(path, \"wt\", encoding=\"utf8\") as f:\n\t\t\tjson.dump(info_dict, f)", "def saveTeachersData():\n with open(\"TeacherData.txt\",\"wb\") as teacherData:\n pickle.dump(teacherEntities,teacherData)", "def save_file(self):\n f = open(self._file_name, \"w\")\n try:\n for rental in self.list:\n rental_str = self.obj_to_string(rental)\n f.write(rental_str)\n f.close()\n except Exception as e:\n raise e", "def save_db(self) -> None:", "def save_txt(txt_fname, line):\n \n #if os.path.isfile(txt_fname):\n # print(\"File {} already exists ...\\n\".format(txt_fname))\n # print(\"Adding line: {} to file: {}\".format(line, txt_fname))\n \n #else:\n # print('Creating new text file ... 
\\n') \n # print(\"Adding line: {} to file: {}\".format(line, txt_fname))\n\n try:\n \n txt_file = open(txt_fname, 'a+')\n \n line_string = \"{}\\n\".format(line)\n\n txt_file.write(line_string)\n \n txt_file.close()\n \n \n #print(\"\\nLine added to file {} correctly\".format(txt_fname))\n \n except Exception as e:\n a = e", "def save_text(req: request) -> Tuple[Union[None, str], Union[None, str]]:\n\n json_data = req.get_json()\n if json_data:\n if not isinstance(json_data, dict) or \"text\" not in json_data:\n return (None, \"Incorrect text payload\")\n text = json_data[\"text\"]\n filename = f\"{str(uuid.uuid4())}.txt\"\n filepath = os.path.join(UPLOAD_FOLDER, filename)\n with open(filepath, \"w\") as w:\n w.write(text)\n return (filename, None)\n\n file = req.files[\"file\"]\n if not utils.is_allowed_file(file.filename):\n return (None, \"Incorrect extension\")\n\n ext = file.filename.rsplit(\".\", 1)[1]\n filename = f\"{str(uuid.uuid4())}.{ext}\"\n file.save(os.path.join(UPLOAD_FOLDER, filename))\n\n return (filename, None)", "def save_text(self, text: Text):\n self.text_versions.append(deepcopy(text))", "def paste_to_file(self, text):\n self._initialize_file()\n assert os.path.isfile(self.file)\n with open(self.file, 'a') as fid:\n # fid.write((text + '\\n').encode('utf-8'))\n fid.write(text + '\\n')\n return self", "def save(self, obj):" ]
[ "0.7108115", "0.69423765", "0.6938336", "0.68713033", "0.677555", "0.6758945", "0.6651387", "0.659884", "0.65734136", "0.6515245", "0.6488028", "0.64555424", "0.64316326", "0.6394388", "0.6353932", "0.63499653", "0.6315648", "0.6307501", "0.630571", "0.62945545", "0.62737465", "0.62269294", "0.6224288", "0.62043214", "0.6191111", "0.6183335", "0.6157821", "0.61370355", "0.61049473", "0.61017317" ]
0.7057494
1
Deletes the users account, their files and texts from the database
def deleteaccount(): try: if app.config['UPLOAD_FOLDER'] == UPLOAD_FOLDER: app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER + \ session['username'] with Database() as db: texts = db.getOwnedTexts(session['id']) for text in texts: TextDelete(text[0]) db.deleteUser(session['id']) shutil.rmtree(app.config['UPLOAD_FOLDER']) session.clear() app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER flash("Account has been deleted") return redirect(url_for('index')) except Exception as e: flash("Something went wrong, please try again") return redirect(url_for('index'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_user():", "def db_delete_user_data(self):\n util.log(\"Clearing all user data\", util.LogLevel.Info)\n self.db.db_clear_data_user()\n util.log(\"Done\", util.LogLevel.Info)", "def delete_user():\n #TODO user delete\n pass", "def delete_user(self) -> None:\n table_dictionary = {\n 'Apple': {\n 'table': 'AppleReceipts',\n 'user_id': 'User_id'\n },\n 'ESL': {\n 'table': 'ESLReceipts',\n 'user_id': 'User_id'\n },\n 'Transactions': {\n 'table': 'Transactions',\n 'user_id': 'User_id'\n },\n 'Users': {\n 'table': 'Users',\n 'user_id': 'id'\n },\n }\n\n # delete the current user's information from the db.\n for key in table_dictionary:\n query = f\"\"\"\n DELETE\n FROM {table_dictionary[key]['table']}\n WHERE {table_dictionary[key]['user_id']}=?;\n \"\"\"\n self.db.commit(query, values=(self.id,))\n\n # perform a sign out\n self.sign_out()\n\n log(f\"User:{self.id} has deleted their account.\")", "def delete_user(self, username): #WORKS\n try:\n self.cur.execute(\"SELECT video_ID FROM videos WHERE uploader = \\\"{}\\\"\".format(username))\n videos_to_delete = []\n for row in self.cur.fetchall():\n videos_to_delete.append(row[0]) # Get video IDs of all videos uploaded by the user.\n for ID in videos_to_delete:\n os.remove('static/videos/' + str(ID) + '.mp4') # Deletes the video from the static/videos directory.\n os.remove('static/images/' + str(ID) + '.jpg') # Deletes the image from the static/images directory.\n self.cur.execute(\"DELETE FROM users WHERE username = \\\"{}\\\"\".format(username))\n self.db.commit()\n except:\n self.db.rollback()", "def delete(self):\n with sqlite3.connect(self.dbpath) as connection: \n cursor = connection.cursor()\n DELETESQL = \"\"\"DELETE FROM accounts WHERE id=:id \"\"\"\n cursor.execute(DELETESQL, {\"id\": self.id})\n self.id = None", "def delete_all(cls):\n with sqlite3.connect(cls.dbpath) as connection:\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n SQL = \"DELETE FROM accounts;\"\n cursor.execute(SQL)", "def delete_account(request):\n ubanks = request.user.userbank.all()\n for ubank in ubanks:\n ubank.delete()\n user = request.user\n log_out(request)\n user.delete()\n return HttpResponse(\"Account succesfully deleted\")", "def DelteUser(database):\n firstname=str(input(\"what is the name of the user you want to delete : \"))\n delusr,find =getByName(database,firstname)\n if not find:\n return\n del database[delusr.key]\n for key,usr in database.items():\n if delusr.key in usr.folow:\n usr.folow.remove(delusr.key)\n if delusr.key in usr.folowed:\n usr.folowed.remove(delusr.key)\n \n os.remove(f\"Users/{delusr.key}\")", "def delete_my_account():\n # Remove user ownerships\n for p in current_user.projects:\n p.user_id = None\n p.save()\n # Delete user posts\n [ a.delete() for a in current_user.activities ]\n # Delete user account\n current_user.delete()\n logout_user()\n flash('We are sorry to see you go. 
Your profile has been deleted.', 'info')\n return redirect(url_for('public.home'))", "def delete_users(self, filename):\n f_id = self.face.FACES.files.find_one({ \"filename\" : filename }, { \"_id\" : 1 })\n self.face_fs.delete(f_id['_id'])", "def delete(bot, update):\n chatID = update.message.chat_id\n username = get_user_info(chatID)['PID']\n logger.info(\"Deleting user credentials for {}!\".format(username))\n Chat.query.filter(Chat.chatID == chatID).delete() # Delete the user's record referenced by their ChatID\n Misc.query.filter(Misc.chatID == chatID).delete()\n db_session.commit()\n messageContent = \"Your credentials have been deleted, {}\\nHope to see you back soon!\".format(username[3:-4].title())\n bot.sendMessage(chat_id=update.message.chat_id, text=messageContent)\n \n mp.track(username, 'User Left')\n mp.people_set(username, {'active': False })", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def delete_all_users():\n\tUser.drop_collection()", "def delete_user(username, table, db_file):\n \n try:\n conn, c = connect_to_db(db_file)\n command = \"DELETE FROM {table} WHERE username =?\".format(table=safe(table))\n c.execute(command, (username,))\n conn.commit()\n conn.close()\n except Exception as e:\n print(\"Error when trying to delete user \" + username + \\\n \" from table \" + table + \" in file \" + db_file)\n print(e)\n return False\n else:\n return True", "def delete():\r\n if 'username' not in flask.session:\r\n return flask.redirect(flask.url_for('login'))\r\n\r\n # Delete Account\r\n if flask.request.method == 'POST':\r\n # Delete Rows\r\n get_db().cursor().execute(''' DELETE FROM users\r\n WHERE username=? ''',\r\n (flask.session['username'],))\r\n\r\n flask.session.pop('username', None)\r\n return flask.redirect(flask.url_for('create'))\r\n\r\n context = {}\r\n context[\"logname\"] = flask.session[\"username\"]\r\n return flask.render_template('delete.html', **context)", "def deleteDB():\n db = sqlite.connect(db_path)\n db.row_factory = sqlite.Row\n cursor = db.cursor()\n cursor.execute(\"DELETE from rooms\")\n\n cursor.execute(\"DELETE from users\")\n\n cursor.execute(\"DELETE from urls\")\n\n cursor.fetchall()\n db.commit()\n cursor.close()\n db.close()", "def delete_all_users(self):\n\n User.query.delete()", "def delete(self):\n self.deleted = True\n # Deactivate the user to disallow authentication and also\n # to let the user verify the email again after recovery.\n self.is_active = False\n self.save()\n self.history.create(user_id=self.pk, action=user_history.DELETION)", "def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200", "def delete_user(self):\n db_acces = DbMethods()\n username = self.result_table.currentItem().text()\n response = db_acces.delete_user(username)\n\n if response == True:\n self.populate_table()\n else:\n message = Message(\n self.language[\"error\"], self.language[\"inf_error\"])\n warning_message = message.create_iw_message(\n self.language[\"ok\"], \"warning\")\n warning_message.exec()", "def delete_user_account(connection,user):\r\n with connection:\r\n connection.execute(DELETE_SPECIFIC_USER,(user,))", "def del_user(self, username):\n pass", "def delete_user(username, user_id):\r\n global sql_cursor\r\n global database\r\n\r\n print(\"Are you absolutely sure that you want to delete your account.\")\r\n conf_del = input(\"(y/n) : \").lower()\r\n\r\n if conf_del == \"y\":\r\n\r\n print(\"Deleting...\")\r\n\r\n 
sql_cursor.execute(f\"DELETE FROM passwords WHERE user_id={user_id};\")\r\n sql_cursor.execute(f'DELETE FROM users WHERE username=\"{username}\";')\r\n database.commit()\r\n\r\n print(\"Account successfully deleted\")\r\n print(\"You need to start the program again\")\r\n print(\"Exiting now\")\r\n sleep(5)\r\n quit()\r\n\r\n else:\r\n print(\"Cancelling deletion ...\")\r\n return", "def tearDown(self):\n account_models.User.objects.all().delete()", "def delete_user(id):\n pass", "def delete(request):\n if request.method != 'POST':\n return HttpResponseNotAllowed(['POST'])\n username = request.user.get_username()\n context = RequestContext(request, {\n 'username': username})\n try:\n DataHubManager.remove_user(username=username, remove_db=True)\n django_logout(request)\n return render(request, 'delete-done.html', context)\n except User.DoesNotExist:\n return HttpResponseNotFound('User {0} not found.'.format(username))", "def delete(self, userinformation):\n self.db.remove(userinformation)", "def delete_account(username):\n set_user_group(username, \"basic\")\n password = generate_password_hash(\"deleted\")\n set_password(username, password)\n sql = \"UPDATE users \" \\\n \"SET is_active=FALSE, username='[deleted]' \" \\\n \"WHERE username=:username\"\n db.session.execute(sql, {\"username\": username})\n db.session.commit()", "def deleteUser(self):\r\n #Find name and ID column\r\n userData = self.getCurrentUserData()\r\n\r\n #Prompt for confirmation\r\n deleteChoice = QMessageBox.question(self.view, 'Confirm user deletion', \r\n 'Are you sure you want to delete user ' \r\n + userData['Name'] + \" with ID \" + userData['User_ID'] + \r\n \" from database permanently?\", \r\n QMessageBox.Yes | QMessageBox.No)\r\n \r\n if (deleteChoice == QMessageBox.Yes):\r\n DBController().deleteUser(userData['User_ID'] )\r\n self.updateUserTable() #Re-fill table\r" ]
[ "0.75881016", "0.7571063", "0.73386544", "0.72856486", "0.71325874", "0.70421135", "0.70142746", "0.6962373", "0.6958195", "0.69209486", "0.6919454", "0.6880083", "0.68616986", "0.67589813", "0.6717374", "0.6716456", "0.66849625", "0.6683709", "0.6657697", "0.66400325", "0.6621631", "0.66195685", "0.6612583", "0.659985", "0.65996873", "0.657567", "0.6571092", "0.65608126", "0.655507", "0.6547388" ]
0.76813954
0
204 responses must not return some entity headers
def get204(self): bad = ('content-length', 'content-type') for h in bad: bottle.response.set_header(h, 'foo') bottle.status = 204 for h, v in bottle.response.headerlist: self.assertFalse(h.lower() in bad, "Header %s not deleted" % h)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_204_response() -> bytes:\n date = datetime.datetime.now(datetime.timezone.utc).strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n\n header = \"HTTP/1.1 204 No Content\" + \"\\r\\nDate: \" + date + \"\\r\\n\\r\\n\"\n\n print(header)\n return header.encode(HttpServer.FORMAT)", "def assertHttpNoContent(self, resp):\r\n return self.assertEqual(resp.status_code, 204)", "def err_204(e):\n return jsonify(dict(\n errno=0,\n alert=\"Not found.\"\n )), 204", "def check_valid_cancel_response(response: HTTPResponse) -> bool:\n return response.status_code == 204", "def response_204(description=None):\n description = description or \"Item deleted\"\n return response(204, {\n \"description\": description,\n \"schema\": {\"type\": \"null\"}\n })", "def verify(self, request):\n return Response(None, status=status.HTTP_204_NO_CONTENT)", "def delete_response(self):\n deriva_ctx.deriva_response.status = '204 No Content'\n return deriva_ctx.deriva_response", "def get304(self):\n bad = ('allow', 'content-encoding', 'content-language',\n 'content-length', 'content-md5', 'content-range',\n 'content-type', 'last-modified') # + c-location, expires?\n for h in bad:\n bottle.response.set_header(h, 'foo')\n bottle.status = 304\n for h, v in bottle.response.headerlist:\n self.assertFalse(h.lower() in bad, \"Header %s not deleted\" % h)", "def clean_headers(status):\r\n import cherrypy\r\n \r\n response = cherrypy.response\r\n \r\n # Remove headers which applied to the original content,\r\n # but do not apply to the error page.\r\n respheaders = response.headers\r\n for key in [\"Accept-Ranges\", \"Age\", \"ETag\", \"Location\", \"Retry-After\",\r\n \"Vary\", \"Content-Encoding\", \"Content-Length\", \"Expires\",\r\n \"Content-Location\", \"Content-MD5\", \"Last-Modified\"]:\r\n if respheaders.has_key(key):\r\n del respheaders[key]\r\n \r\n if status != 416:\r\n # A server sending a response with status code 416 (Requested\r\n # range not satisfiable) SHOULD include a Content-Range field\r\n # with a byte-range-resp-spec of \"*\". 
The instance-length\r\n # specifies the current length of the selected resource.\r\n # A response with status code 206 (Partial Content) MUST NOT\r\n # include a Content-Range field with a byte-range- resp-spec of \"*\".\r\n if respheaders.has_key(\"Content-Range\"):\r\n del respheaders[\"Content-Range\"]", "def update_response(self):\n deriva_ctx.deriva_response.status = '204 No Content'\n return deriva_ctx.deriva_response", "def assertHttpGone(self, resp):\r\n return self.assertEqual(resp.status_code, 410)", "def check_no_header_response(response: HTTPResponse) -> bool:\n return response.status_code == 422", "def test_no_unsafe(self):\n\n def get_200_response(req):\n return HttpResponse(status=200)\n\n response = ConditionalGetMiddleware(self.get_response)(self.req)\n etag = response.headers[\"ETag\"]\n put_request = self.request_factory.put(\"/\", headers={\"if-match\": etag})\n conditional_get_response = ConditionalGetMiddleware(get_200_response)(\n put_request\n )\n self.assertEqual(\n conditional_get_response.status_code, 200\n ) # should never be a 412", "def __handle_response(self, response, json_content=True):\n if 200 <= response.status_code <= 204:\n if json_content:\n return response.json()\n else:\n return response\n else:\n raise ScraperApiException(response.json()['detail'])", "def options(self):\n self.set_status(204)\n self.finish()", "def options(self):\n self.set_status(204)\n self.finish()", "def test_no_head(self):\n\n def get_200_response(req):\n return HttpResponse(status=200)\n\n request = self.request_factory.head(\"/\")\n conditional_get_response = ConditionalGetMiddleware(get_200_response)(request)\n self.assertNotIn(\"ETag\", conditional_get_response)", "def write_empty_response(self, status_code):\n self.send_response(status_code)\n self.end_headers()", "def test_vary_on_headers_disabled(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Vary\")", "def assertHttpNotModified(self, resp):\r\n return self.assertEqual(resp.status_code, 304)", "def assertHttpNotModified(self, response):\n self.assertEqual(response.status_code, 304)\n self.assertEqual(response.content, b'')", "def _respond_unauthorized(self, request, message=\"Unauthorized\"):\n resp = Response()\n resp.status = 401\n resp.headers = self.forget(request.environ, {})\n resp.content_type = \"text/plain\"\n resp.body = message\n request.environ[\"repoze.who.application\"] = resp\n return None", "def process_response(self, response):\n self.set_status(response.code) # code\n for name, value in response.headers.items(): # headers except restricted\n if name.lower() not in self.RESTRICTED_HEADERS:\n self.set_header(name, value)\n if response.code not in self.RESTRICT_SEND_BODY_ON_CODE: # body\n self.write(response.body)", "def emptyresponse():\n return get_response(\"\")", "def test_response_200_on_get(self):\n pass", "async def test_endpoint_delete_share_correct(self):\n with self.patch_json_dump:\n resp = await delete_share_handler(self.mock_request)\n self.assertEqual(resp.status, 204)", "def _process_not_ok_response(content, status):\n if status == codes.bad:\n length = len(content)\n err_msg = (content if length > 0 else str(status))\n raise NoSQLException('Error response: ' + err_msg)\n raise NoSQLException('Error response = ' + str(status))", "def maybe_raise_304(request, response):\n if request.method not in (consts.METHOD_HEAD, consts.METHOD_GET):\n LOG.warning(\n 'check If-None-Match in non-standard request 
method: %s %s',\n request.method,\n request.path_str,\n )\n if_none_match = request.get_header(consts.HEADER_IF_NONE_MATCH)\n if if_none_match is None:\n return\n etag = response.headers.get(consts.HEADER_ETAG)\n if etag is None:\n return\n # TODO: Handle W/\"...\" weak validator.\n if etag in _parse_etags(if_none_match):\n raise wsgi_apps.HttpError(\n consts.Statuses.NOT_MODIFIED,\n 'etag matches: %s vs %s' % (etag, if_none_match),\n response.headers,\n )", "def _output(content):\n serve = True\n # check modifications and etag\n if 'If-Modified-Since' in request.headers:\n last_seen = datetime.datetime.strptime(\n request.headers['If-Modified-Since'], HTTP_DATE_FMT)\n if last_seen >= content.modified.replace(microsecond=0):\n serve = False\n if 'If-None-Match' in request.headers:\n etags = [x.strip('\" ')\n for x in request.headers['If-None-Match'].split(',')]\n if content.etag in etags:\n serve = False\n\n headers = {}\n if content.content_type:\n headers['Content-Type'] = content.content_type\n last_modified = content.modified.strftime(HTTP_DATE_FMT)\n headers['Last-Modified'] = last_modified\n headers['ETag']= '\"%s\"' % (content.etag,)\n for header in content.headers:\n key, value = header.split(':', 1)\n headers[key] = value.strip()\n if serve:\n response.body = content.body\n for key, value in headers.iteritems():\n response.set_header(key, value)\n response.content_type=content.content_type\n response.status=int(content.status)\n else:\n response.status=304\n return response", "def get202_none204_none_default_error204_none( # pylint: disable=inconsistent-return-statements,name-too-long\n self, **kwargs: Any\n ) -> None:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = kwargs.pop(\"headers\", {}) or {}\n _params = kwargs.pop(\"params\", {}) or {}\n\n cls: ClsType[None] = kwargs.pop(\"cls\", None)\n\n request = build_get202_none204_none_default_error204_none_request(\n template_url=self.get202_none204_none_default_error204_none.metadata[\"url\"],\n headers=_headers,\n params=_params,\n )\n request = _convert_request(request)\n request.url = self._client.format_url(request.url)\n\n _stream = False\n pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [202, 204]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.Error, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n\n if cls:\n return cls(pipeline_response, None, {})" ]
[ "0.7504536", "0.7456752", "0.70702946", "0.7007137", "0.6838798", "0.6740226", "0.66936654", "0.66851264", "0.66362447", "0.65761197", "0.648202", "0.6477876", "0.64216065", "0.6242951", "0.623777", "0.623777", "0.6185526", "0.61398613", "0.61072254", "0.6086671", "0.60757756", "0.6031335", "0.5965463", "0.59336287", "0.5925962", "0.5921395", "0.58926773", "0.5889033", "0.5860246", "0.5855489" ]
0.83296347
0
304 responses must not return entity headers
def get304(self): bad = ('allow', 'content-encoding', 'content-language', 'content-length', 'content-md5', 'content-range', 'content-type', 'last-modified') # + c-location, expires? for h in bad: bottle.response.set_header(h, 'foo') bottle.status = 304 for h, v in bottle.response.headerlist: self.assertFalse(h.lower() in bad, "Header %s not deleted" % h)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_304_response() -> bytes:\n content_data = HttpServer.get_content_data(\"/not_modified.html\")\n date = datetime.datetime.now(datetime.timezone.utc).strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n\n header = \"HTTP/1.1 304 Not Modified\" + \"\\r\\nDate: \" + date + \"\\r\\n\" + content_data + \"\\r\\n\\r\\n\"\n raw_header = header.encode(HttpServer.FORMAT)\n print(header)\n raw_body = HttpServer.create_body(\"/not_modified.html\")\n response = raw_header + raw_body\n\n return response", "def maybe_raise_304(request, response):\n if request.method not in (consts.METHOD_HEAD, consts.METHOD_GET):\n LOG.warning(\n 'check If-None-Match in non-standard request method: %s %s',\n request.method,\n request.path_str,\n )\n if_none_match = request.get_header(consts.HEADER_IF_NONE_MATCH)\n if if_none_match is None:\n return\n etag = response.headers.get(consts.HEADER_ETAG)\n if etag is None:\n return\n # TODO: Handle W/\"...\" weak validator.\n if etag in _parse_etags(if_none_match):\n raise wsgi_apps.HttpError(\n consts.Statuses.NOT_MODIFIED,\n 'etag matches: %s vs %s' % (etag, if_none_match),\n response.headers,\n )", "def assertHttpNotModified(self, resp):\r\n return self.assertEqual(resp.status_code, 304)", "def test_not_modified_headers(self):\n\n def get_response(req):\n resp = self.client.get(req.path_info)\n resp[\"Date\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Last-Modified\"] = \"Sat, 12 Feb 2011 17:35:44 GMT\"\n resp[\"Expires\"] = \"Sun, 13 Feb 2011 17:35:44 GMT\"\n resp[\"Vary\"] = \"Cookie\"\n resp[\"Cache-Control\"] = \"public\"\n resp[\"Content-Location\"] = \"/alt\"\n resp[\"Content-Language\"] = \"en\" # shouldn't be preserved\n resp[\"ETag\"] = '\"spam\"'\n resp.set_cookie(\"key\", \"value\")\n return resp\n\n self.req.META[\"HTTP_IF_NONE_MATCH\"] = '\"spam\"'\n\n new_response = ConditionalGetMiddleware(get_response)(self.req)\n self.assertEqual(new_response.status_code, 304)\n base_response = get_response(self.req)\n for header in (\n \"Cache-Control\",\n \"Content-Location\",\n \"Date\",\n \"ETag\",\n \"Expires\",\n \"Last-Modified\",\n \"Vary\",\n ):\n self.assertEqual(\n new_response.headers[header], base_response.headers[header]\n )\n self.assertEqual(new_response.cookies, base_response.cookies)\n self.assertNotIn(\"Content-Language\", new_response)", "def assertHttpNotModified(self, response):\n self.assertEqual(response.status_code, 304)\n self.assertEqual(response.content, b'')", "def test_no_unsafe(self):\n\n def get_200_response(req):\n return HttpResponse(status=200)\n\n response = ConditionalGetMiddleware(self.get_response)(self.req)\n etag = response.headers[\"ETag\"]\n put_request = self.request_factory.put(\"/\", headers={\"if-match\": etag})\n conditional_get_response = ConditionalGetMiddleware(get_200_response)(\n put_request\n )\n self.assertEqual(\n conditional_get_response.status_code, 200\n ) # should never be a 412", "def not_modified(self):\n self.status = 304", "def get204(self):\n bad = ('content-length', 'content-type')\n for h in bad:\n bottle.response.set_header(h, 'foo')\n bottle.status = 204\n for h, v in bottle.response.headerlist:\n self.assertFalse(h.lower() in bad, \"Header %s not deleted\" % h)", "def _is_not_modified_result(result):\n return result.get('status', None) == 304", "def test_no_head(self):\n\n def get_200_response(req):\n return HttpResponse(status=200)\n\n request = self.request_factory.head(\"/\")\n conditional_get_response = ConditionalGetMiddleware(get_200_response)(request)\n self.assertNotIn(\"ETag\", 
conditional_get_response)", "def _output(content):\n serve = True\n # check modifications and etag\n if 'If-Modified-Since' in request.headers:\n last_seen = datetime.datetime.strptime(\n request.headers['If-Modified-Since'], HTTP_DATE_FMT)\n if last_seen >= content.modified.replace(microsecond=0):\n serve = False\n if 'If-None-Match' in request.headers:\n etags = [x.strip('\" ')\n for x in request.headers['If-None-Match'].split(',')]\n if content.etag in etags:\n serve = False\n\n headers = {}\n if content.content_type:\n headers['Content-Type'] = content.content_type\n last_modified = content.modified.strftime(HTTP_DATE_FMT)\n headers['Last-Modified'] = last_modified\n headers['ETag']= '\"%s\"' % (content.etag,)\n for header in content.headers:\n key, value = header.split(':', 1)\n headers[key] = value.strip()\n if serve:\n response.body = content.body\n for key, value in headers.iteritems():\n response.set_header(key, value)\n response.content_type=content.content_type\n response.status=int(content.status)\n else:\n response.status=304\n return response", "def clean_headers(status):\r\n import cherrypy\r\n \r\n response = cherrypy.response\r\n \r\n # Remove headers which applied to the original content,\r\n # but do not apply to the error page.\r\n respheaders = response.headers\r\n for key in [\"Accept-Ranges\", \"Age\", \"ETag\", \"Location\", \"Retry-After\",\r\n \"Vary\", \"Content-Encoding\", \"Content-Length\", \"Expires\",\r\n \"Content-Location\", \"Content-MD5\", \"Last-Modified\"]:\r\n if respheaders.has_key(key):\r\n del respheaders[key]\r\n \r\n if status != 416:\r\n # A server sending a response with status code 416 (Requested\r\n # range not satisfiable) SHOULD include a Content-Range field\r\n # with a byte-range-resp-spec of \"*\". 
The instance-length\r\n # specifies the current length of the selected resource.\r\n # A response with status code 206 (Partial Content) MUST NOT\r\n # include a Content-Range field with a byte-range- resp-spec of \"*\".\r\n if respheaders.has_key(\"Content-Range\"):\r\n del respheaders[\"Content-Range\"]", "def test_vary_on_headers_disabled(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"get\": content},\n status_code=200,\n content=content,\n headers_exclude=\"Vary\")", "def cache():\n is_conditional = request.headers.get(\"If-Modified-Since\") or request.headers.get(\n \"If-None-Match\"\n )\n\n if is_conditional is None:\n response = view_get()\n response.headers[\"Last-Modified\"] = http_date()\n response.headers[\"ETag\"] = uuid.uuid4().hex\n return response\n else:\n return status_code(304)", "def nocache(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, max-age=0'\n return response", "def add_header(response):\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control\n response.cache_control.no_store = True\n return response", "def add_header(response):\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control\n response.cache_control.no_store = True\n return response", "def test_unmodified(self):\n self.channel.lineReceived(\"If-Modified-Since: %s\"\n % http.datetimeToString(100))\n self.channel.lineReceived('')\n result = self.transport.getvalue()\n self.failUnlessEqual(httpCode(result), http.NOT_MODIFIED)\n self.failUnlessEqual(httpBody(result), \"\")", "def create_204_response() -> bytes:\n date = datetime.datetime.now(datetime.timezone.utc).strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n\n header = \"HTTP/1.1 204 No Content\" + \"\\r\\nDate: \" + date + \"\\r\\n\\r\\n\"\n\n print(header)\n return header.encode(HttpServer.FORMAT)", "def test_weak_etag_not_modified(self):\n\n def get_response(req):\n response = HttpResponse(self.compressible_string)\n response.headers[\"ETag\"] = 'W/\"eggs\"'\n return response\n\n request = self.rf.get(\"/\", headers={\"accept-encoding\": \"gzip, deflate\"})\n gzip_response = GZipMiddleware(get_response)(request)\n self.assertEqual(gzip_response.headers[\"ETag\"], 'W/\"eggs\"')", "def add_headers(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n return r", "def test_etag_match(self):\n\n def get_response(req):\n return HttpResponse(self.compressible_string)\n\n def get_cond_response(req):\n return ConditionalGetMiddleware(get_response)(req)\n\n request = self.rf.get(\"/\", headers={\"accept-encoding\": \"gzip, deflate\"})\n response = GZipMiddleware(get_cond_response)(request)\n gzip_etag = response.headers[\"ETag\"]\n next_request = self.rf.get(\n \"/\",\n headers={\"accept-encoding\": \"gzip, deflate\", \"if-none-match\": gzip_etag},\n )\n next_response = ConditionalGetMiddleware(get_response)(next_request)\n self.assertEqual(next_response.status_code, 304)", "def test_cache_control_headers_on_apis(flask_app):\n rv = flask_app.get('api/v1/')\n headers = rv.headers\n assert headers.get('Cache-Control') == 'no-cache, no-store, must-revalidate, max-age=0'\n assert headers.get('Pragma') == 'no-cache'", "def add_header(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response", "def assertHttpSeeOther(self, resp):\r\n return 
self.assertEqual(resp.status_code, 303)", "def assertHttpNoContent(self, resp):\r\n return self.assertEqual(resp.status_code, 204)", "def add_header(r):\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers[\"Cache-Control\"] = \"public, max-age=0\"\n return r", "def add_header(response):\n response.cache_control.public = True\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Pragma\"] = \"no-cache\"\n response.headers[\"Expires\"] = \"0\"\n return response", "def test_strong_etag_modified(self):\n\n def get_response(req):\n response = HttpResponse(self.compressible_string)\n response.headers[\"ETag\"] = '\"eggs\"'\n return response\n\n request = self.rf.get(\"/\", headers={\"accept-encoding\": \"gzip, deflate\"})\n gzip_response = GZipMiddleware(get_response)(request)\n self.assertEqual(gzip_response.headers[\"ETag\"], 'W/\"eggs\"')", "def check_no_header_response(response: HTTPResponse) -> bool:\n return response.status_code == 422" ]
[ "0.75092345", "0.7419917", "0.72726953", "0.7164188", "0.7087629", "0.6714187", "0.66934466", "0.6550176", "0.65363", "0.6527774", "0.65198684", "0.6495292", "0.6457777", "0.63518214", "0.6306506", "0.6283255", "0.6283255", "0.6245512", "0.6245413", "0.6172141", "0.6148629", "0.61303014", "0.612773", "0.61217546", "0.61051154", "0.6058441", "0.6039961", "0.60293114", "0.60280204", "0.6026342" ]
0.82604736
0
Returns an amended copy of the proxies dictionary used by `requests`, it will disable the proxy if the uri provided is to be reached directly.
def config_proxy_skip(proxies, uri, skip_proxy=False): parsed_uri = urlparse(uri) # disable proxy if necessary if skip_proxy: if 'http' in proxies: proxies.pop('http') if 'https' in proxies: proxies.pop('https') elif proxies.get('no'): urls = [] if isinstance(proxies['no'], basestring): urls = proxies['no'].replace(';', ',').split(",") elif isinstance(proxies['no'], list): urls = proxies['no'] for url in urls: if url in parsed_uri.netloc: if 'http' in proxies: proxies.pop('http') if 'https' in proxies: proxies.pop('https') return proxies
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_proxies(self) -> dict:\n return self._proxies.copy() if self._proxies else None", "def _proxies_dict(proxy):\r\n if not proxy:\r\n return None\r\n return {'http': proxy, 'https': proxy}", "def proxies(self):\n\n proxies = APIConsumer.get(\"/proxies\").json()\n proxies_dict = {}\n\n for name, values in viewitems(proxies):\n # Lets create a Proxy object to hold all its data\n proxy = Proxy(**values)\n\n # Add the new proxy to the toxiproxy proxies collection\n proxies_dict.update({name: proxy})\n\n return proxies_dict", "def set_proxies(proxy_dict={}):\n global proxies\n proxies = proxy_dict", "def _init_proxies(self):\n url = 'https://free-proxy-list.net/'\n log.debug('Init proxies: Getting proxy list from web...')\n try:\n soup = BeautifulSoup(get(url).text, \"html5lib\")\n proxies = list()\n for tr in soup.select('#proxylisttable > tbody > tr'):\n td = tr.select('td')\n if (td[4].text == 'elite proxy') & (td[6].text == 'yes'):\n proxies.append(':'.join([td[0].text, td[1].text]))\n return proxies\n except:\n log.exception('Failed to download proxy list.')\n raise", "def _ReadNoProxy(uri):\n return urllib2.build_opener(urllib2.ProxyHandler({})).open(\n urllib2.Request(uri), timeout=1).read()", "def proxies_to_dict(proxies):\r\n if isinstance(proxies, str):\r\n proxies = proxies.split(\"\\n\")\r\n\r\n proxies_dict = {}\r\n for proxy_line in proxies:\r\n proxy_line = proxy_line.strip()\r\n if not proxy_line:\r\n continue\r\n\r\n proxy_pair = proxy_line.split(\"=\", maxsplit=1)\r\n if len(proxy_pair) == 1:\r\n proxy_pair.insert(0, \"\")\r\n\r\n proxy_scheme = proxy_pair[0].strip().lower()\r\n proxy_url = proxy_pair[1].strip().lstrip(\"=\").strip(\"/\")\r\n\r\n # urlsplit() doesn't work well if no scheme is provided, try to\r\n # circumvent that first\r\n if \"://\" not in proxy_url:\r\n if not proxy_scheme:\r\n raise ValueError(\"missing scheme for proxy: {}\".format(proxy_line))\r\n proxy_url = proxy_scheme + \"://\" + proxy_url\r\n\r\n proxy_info = urllib.parse.urlsplit(proxy_url)\r\n if not proxy_info.hostname or not proxy_info.scheme:\r\n raise ValueError(\"malformed proxy url: {}\".format(proxy_line))\r\n if not proxy_info.port:\r\n raise ValueError(\"missing port number for proxy: {}\".format(proxy_line))\r\n\r\n if not proxy_scheme:\r\n proxy_scheme = proxy_info.scheme.lower()\r\n\r\n # create/overwrite proxy entry by recomposing its url\r\n proxies_dict[proxy_scheme] = \"{}://{}\".format(\r\n proxy_info.scheme.lower(), proxy_info.netloc)\r\n\r\n return proxies_dict", "def get_proxies():\n scrapper = Scrapper(category='ALL', print_err_trace=False)\n data = scrapper.getProxies()\n\n proxies = []\n for item in data.proxies:\n proxies.append('{}:{}'.format(item.ip, item.port))\n return proxies", "def proxy_addresses(self):\n if \"proxyAddresses\" in self._prop_dict:\n return self._prop_dict[\"proxyAddresses\"]\n else:\n return None", "def test_client_request_can_take_proxies_directly(self):\n bad_proxies = {'http': 'http://20.10.1.11:1080', 'https': 'https://20.10.1.11:1080'}\n self.assertRaises(requests.exceptions.ConnectTimeout, self.httpbin_5.test_requests_delete_method, proxies=bad_proxies )", "def proxy_settings(self):\n if config.proxy_host is None or config.proxy_host == \"\":\n return\n\n proxy = urllib2.ProxyHandler({\"http\": config.proxy_host})\n opener = urllib2.build_opener(proxy)\n urllib2.install_opener(opener)", "def proxies_pool(self):\n \n PROXY_URL = 'https://www.sslproxies.org/'\n\n # Retrieve the site's page. 
The 'with'(Python closure) is used here in order to automatically close the session\n # when done\n with requests.Session() as res:\n proxies_page = res.get(PROXY_URL)\n\n # Create a BeutifulSoup object and find the table element which consists of all proxies\n soup = BeautifulSoup(proxies_page.content, 'html.parser')\n proxies_table = soup.find(id='proxylisttable')\n\n # Go through all rows in the proxies table and store them in the right format (IP:port) in our proxies list\n proxies = []\n for row in proxies_table.tbody.find_all('tr'):\n proxies.append('{}:{}'.format(row.find_all('td')[utils['MAGIC_ZERO']].string, row.find_all('td')[MAGIC_ONE].string))\n return proxies", "def proxies_get(self) -> bool:\n return True", "def get_proxies():\n # url = 'http://nntime.com//'\n url = 'https://free-proxy-list.net/'\n\n response = requests.get(url)\n parser = fromstring(response.text)\n proxies = set()\n for i in parser.xpath('//tbody/tr'):\n if i.xpath('.//td[7][contains(text(),\"yes\")]'):\n proxy = \":\".join([i.xpath('.//td[1]/text()')[0], i.xpath('.//td[2]/text()')[0]])\n proxies.add(proxy)\n return proxies", "def service_proxy_settings(private_base_url):\n return rawobj.Proxy(private_base_url(\"echo_api\"))", "def proxy_scrape(self):\n print(\"Getting new live proxies\")\n url = 'https://free-proxy-list.net/'\n response = requests.get(url)\n parser = fromstring(response.text)\n proxies = set()\n for i in parser.xpath('//tbody/tr')[:20]:\n # if i.xpath('.//td[7][contains(text(),\"yes\")]'):\n proxy = \":\".join([i.xpath('.//td[1]/text()')\n [0], i.xpath('.//td[2]/text()')[0]])\n proxies.add(proxy)\n # return proxies\n # proxies=[]\n print(\"Obtained proxied are as : \", proxies)\n proxy_pool = cycle(proxies)\n proxy_list = [proxy for proxy in proxies]\n return proxy_pool, proxy_list", "def get_proxy_address(self):\n proxies = self.get_to_use_proxies()\n\n if not proxies:\n return None\n\n quality_proxy_quantities = max(6, int(len(proxies) * 0.5))\n quality_proxy_quantities = min(quality_proxy_quantities, len(proxies))\n\n proxy = random.choice(proxies[0:quality_proxy_quantities])\n _logger.debug(\"Using %s proxy\", proxy[\"http\"])\n return copy.deepcopy(proxy)", "def randomize_request_proxies(renewed_proxy=''):\n if renewed_proxy:\n random_number = renewed_proxy\n else:\n first_random_number = random.randrange(0, len(proxies))\n random_number = first_random_number\n\n print(random_number)\n proxy = proxies[random_number]\n\n return {'http': f'http://{config.PROXY_USERNAME}:{config.PROXY_PASS}@{proxy}',\n 'https': f'http://{config.PROXY_USERNAME}:{config.PROXY_PASS}@{proxy}'}", "def get_all(self):\n return self.proxies", "def _simple_proxy_request_get(*args, **kwargs):\n proxy = get_proxy_ip()\n kwargs[\"proxies\"] = kwargs.get(\n \"proxies\", {\"http\": \"http://{}\".format(proxy)}\n )\n kwargs[\"timeout\"] = kwargs.get(\"timeout\", 30)\n return proxy, requests.get(*args, **kwargs)", "def selenium_proxy(self):\n return webdriver.Proxy({\n \"httpProxy\": self.proxy(),\n \"sslProxy\": self.proxy(),\n })", "def get(self, pages=pages):\n try:\n self.pool.map(self.proxyPage,pages)\n except urllib.error.HTTPError as e:\n self.run(e.geturl().split('/')[-1])\n return self.proxys", "def ssl_proxies():\n # Category = 'PROXYLIST_DOWNLOAD_HTTPS'\n Category = 'PROXYLIST_DOWNLOAD_HTTP'\n\n # Initialize the Scrapper\n scrapper = Scrapper(category=Category, print_err_trace=False)\n\n # Get ALL Proxies According to your Choice\n data = scrapper.getProxies()\n\n proxies_list = []\n https = \"http\"\n for 
item in data.proxies:\n proxies_list.append({https : '{}:{}'.format(item.ip, item.port)})\n return proxies_list", "def test_proxy(mocker, proxy):\n get = mocker.patch(\"requests.get\", return_value=Mock(text=\"Foo\"))\n crawler = Crawler(proxies=[proxy] if proxy else None)\n\n url = 'http://foo.bar/'\n crawler.get(url)\n get.assert_called_once_with(url, proxies={'http': proxy})", "def reset_proxy_from_bag(self):\n if len(self.proxy_bag) == 0:\n self.logger.debug(\"Changing proxy\")\n self.logger.warning(\"Proxy bag is empty! Cannot reset Proxy from Proxy Bag.\")\n raise errors.EmptyProxyBag\n\n # Remove the current proxy from the proxy bag if one is set.\n if self.proxy:\n self.logger.debug(\"Changing proxy\")\n del self.proxy_bag[0]\n else:\n self.logger.debug(\"Selecting proxy\")\n\n if len(self.proxy_bag) == 0:\n self.logger.debug(\"Changing proxy\")\n self.logger.error(\"Proxy bag is empty! Cannot reset Proxy from Proxy Bag.\")\n raise errors.EmptyProxyBag\n\n self.proxy_current = self.proxy_bag[0]\n if \"http\" in self.proxy:\n self.proxy.pop(\"http\")\n if \"https\" in self.proxy:\n self.proxy.pop(\"https\")\n\n self.logger.debug(\"New Proxy: %s (%s - %s)\" % (\n self.proxy_current[\"address\"],\n self.proxy_current[\"continent\"],\n self.proxy_current[\"country\"]))\n\n if self.proxy_current[\"ssl\"]:\n self.proxy = {\"https\": self.proxy_current[\"address\"]}\n else:\n self.proxy = {\"http\": self.proxy_current[\"address\"]}", "def proxy(self):\n return self.get('proxy', None)", "def http_proxy_config(self) -> Optional[pulumi.Input['HttpProxyConfigArgs']]:\n return pulumi.get(self, \"http_proxy_config\")", "def get_dict(self):\n self.headers = self.hm.getRequestHeaders()\n self.proxies = self.hm.getRandomProxyDict(self.num)\n return {'link':self.link,\n 'headers':headers,\n 'proxies':proxies,\n 't0':datetime.now(),\n 't1':None,\n 'response': None,}", "def get_proxy():\n response = requests.get(\"http://127.0.0.1:5010/get/\")\n json_response = response.json()\n proxy = json_response.get(\"proxy\")\n return 'http://{}'.format(proxy)", "def get_proxy(self):\n return self.proxy()" ]
[ "0.66343987", "0.6391098", "0.6165451", "0.6008237", "0.55292976", "0.5497907", "0.5492077", "0.5427683", "0.5362546", "0.5328795", "0.52967983", "0.52951497", "0.52471906", "0.5230866", "0.52142024", "0.5200751", "0.5157269", "0.5149122", "0.5092851", "0.5078898", "0.5063718", "0.5062427", "0.50591475", "0.5029346", "0.5028545", "0.49568224", "0.49368054", "0.49331006", "0.49254644", "0.48973733" ]
0.6825937
0
Creates a new Thoth release instance
def make_release(self, **kwargs) -> ThothRelease: snapshot_date = make_snapshot_date(**kwargs) release = ThothRelease(dag_id=self.dag_id, run_id=kwargs["run_id"], snapshot_date=snapshot_date) return release
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_release(self, **kwargs) -> CrossrefEventsRelease:\n\n start_date, end_date, first_release = self.get_release_info(**kwargs)\n\n release = CrossrefEventsRelease(\n self.dag_id, start_date, end_date, first_release, self.mailto, self.max_threads, self.max_processes\n )\n return release", "def create_release(config, args):\n yield config.repo.create_release(args.tag_name, name=args.name,\n target_commitish=args.get(\"target_commitish\"), body=args.get(\"body\"),\n draft=args.get_bool(\"draft\"), prerelease=args.get_bool(\"prerelease\"))", "def create_release(ctx, sha):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo('Creating a GitHub release')\n release = gh.create_release(sha=sha)\n log.echo('Release created: {}'.format(release.url))\n return release\n\n except exceptions.SetupPyNotFoundException as e:\n e.possible_solutions = [solutions.create_setup_py()]\n raise", "def test_create_release(self):\n releases_before = self.hello_world_project.get_releases()\n latest_release = releases_before[0].tag_name\n count_before = len(releases_before)\n increased_release = \".\".join(\n [\n latest_release.rsplit(\".\", 1)[0],\n str(int(latest_release.rsplit(\".\", 1)[1]) + 1),\n ]\n )\n release = self.hello_world_project.create_release(\n tag=increased_release, name=\"test\", message=\"testing release\"\n )\n count_after = len(self.hello_world_project.get_releases())\n assert release.tag_name == increased_release\n assert release.title == \"test\"\n assert release.body == \"testing release\"\n assert count_before + 1 == count_after", "def create_release(ctx):\n # Get the head of master\n r = _get_repo()\n b = r.get_branch(branch=\"master\")\n head = b.commit\n\n faasm_ver = get_faasm_version()\n\n # Create a tag from the head\n tag_name = _tag_name(faasm_ver)\n r.create_git_tag(\n tag_name,\n \"Release {}\\n\".format(faasm_ver),\n head.sha,\n \"commit\",\n )\n\n r.create_git_release(\n tag_name,\n \"Faasm {}\".format(faasm_ver),\n \"Release {}\\n\".format(faasm_ver),\n draft=True\n )", "def create_release(\n self,\n ) -> Callable[[cloud_deploy.CreateReleaseRequest], operations_pb2.Operation]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"create_release\" not in self._stubs:\n self._stubs[\"create_release\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.deploy.v1.CloudDeploy/CreateRelease\",\n request_serializer=cloud_deploy.CreateReleaseRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"create_release\"]", "def create_release(\n repository, tag, release_version, changelog_url, *, additional_assets=None\n):\n api_key = os.environ.get(\"GITHUB_API_KEY\")\n if api_key is None:\n api_key = getpass(\"API key: \")\n\n git = Github(api_key)\n\n repo = git.get_repo(repository.path.strip(\"/\"))\n\n release = repo.create_git_release(\n tag,\n f\"Version {release_version}\",\n f\"See {changelog_url}\",\n draft=True,\n )\n\n for asset in [] if additional_assets is None else additional_assets:\n release.upload_asset(asset, label=asset)\n\n return release", "def create_stp_instance(self, instance, priority):\n pass", "def __init__(__self__,\n resource_name: str,\n args: ReleaseArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def create_instance(c_instance):\n return AumPC40(c_instance)", "def new(name, template, version):\n NewCommandExecutor().new(name, template, version)", "def 
create_release_event(sender, instance, name, source, target, **kwargs):\n ev_type = \"error\" if target in [\"failed\", \"rejected\"] else \"info\"\n ev = ReleaseEvent(\n event_type=ev_type,\n message=\"release {}, version {} changed from {} to {}\".format(\n instance.kf_id, instance.version, source, target\n ),\n release=instance,\n )\n ev.save()", "def create(cls, _):\n return cls", "def create_instance(test_id, config, args):\n return TestT1Detail(test_id, config, args)", "def make_release(self, **kwargs) -> CrossrefMetadataRelease:\n\n # The release date is always the end of the execution_date month\n snapshot_date = kwargs[\"data_interval_start\"].end_of(\"month\")\n run_id = kwargs[\"run_id\"]\n return CrossrefMetadataRelease(dag_id=self.dag_id, run_id=run_id, snapshot_date=snapshot_date)", "def _new_instance(self):\n return self.__class__(self._vmodule)", "def new(self, *args, **kwargs):\n return flattrclient.things.Thing(session=self._session, **kwargs)", "def create_strongswan_object(linux_handle, **kwargs):\r\n\r\n return Strongswan(linux_handle, **kwargs)", "def create(cls):\n pass\n return cls()", "def create_releases(name):\n data = get_object(name)\n if not data:\n return None\n\n data = copy.deepcopy(data)\n\n # pop out the works\n works = data.pop('work_version', None)\n\n # create a dictionary of the object parameters with release as the key\n dd = {}\n dd[data.get('release', 'DR15')] = data\n\n if works:\n # add any other work objects found\n for work in works:\n work_object = create_work_version(work, data)\n dd[f\"WORK-{work_object['version_info']['number']}\"] = work_object\n\n # expand the path envvars\n for k, v in dd.items():\n release = 'SDSSWORK' if 'WORK' in k else k\n tree.replant_tree(release.lower())\n dd[k]['path'] = os.path.expandvars(v.get('path', ''))\n\n return dd", "def _new_instance(self):\n return self.__class__(self._vmodule, self._tensor_rank)", "def create():", "def create():", "def create_instance(c_instance):\n\treturn MattLooper(c_instance)", "def _make_release_branch(self):\n user = getpass.getuser()\n if not user == self._user:\n raise Error('the command should only be run as user %s' % self._user)\n branch = self._branch\n # get the latest master updates\n subprocess.check_call('git remote update', shell=True)\n subprocess.check_call('git checkout master', shell=True)\n # does a git pull and updates the submodules\n GitUtil.update_submodules()\n # get the latest commit before the release is cut\n self._latest_commit = GitUtil.get_latest_commit()\n print 'Making release branch %s' % branch\n # create the new release branch\n GitUtil.create_branch(branch)\n print TermColor.ColorStr('Created remote branch %s' % branch, 'GREEN')", "def create_instance(c_instance):\n return LemaurPad(c_instance)", "def create_instance(c_instance):\n return OpenLabs(c_instance)", "def create_rythm(self, dev):\n new_rythm = rythm.Rythm()\n new_rythm.rythm_from_distr(duration, self.density)\n\n return new_rythm", "def test_create_release_monitor():\n assert ReleaseMonitor()", "def create_instance(c_instance):\n return VCM600(c_instance)" ]
[ "0.64280033", "0.63345087", "0.6219137", "0.62178385", "0.59829724", "0.5792777", "0.5772973", "0.56713486", "0.5646336", "0.56411284", "0.556297", "0.55392003", "0.5535768", "0.55193186", "0.54974574", "0.54897374", "0.54813004", "0.5434569", "0.5398685", "0.5379647", "0.5372119", "0.53556365", "0.53556365", "0.53254", "0.53217435", "0.53190583", "0.531711", "0.5291502", "0.52668333", "0.52537036" ]
0.74462247
0
Task to download the ONIX release from Thoth.
def download(self, release: ThothRelease, **kwargs) -> None: thoth_download_onix( publisher_id=self.publisher_id, format_spec=self.format_specification, download_path=release.download_path, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _download(self):\n self._system.download(\"http://geant4.web.cern.ch/geant4/support/source/\" + self._tar_name)", "def thoth_download_onix(\n publisher_id: str,\n download_path: str,\n format_spec: str,\n host_name: str = DEFAULT_HOST_NAME,\n num_retries: int = 3,\n) -> None:\n url = THOTH_URL.format(host_name=host_name, format_specification=format_spec, publisher_id=publisher_id)\n logging.info(f\"Downloading ONIX XML from {url}\")\n response = retry_get_url(url, num_retries=num_retries)\n if response.status_code != 200:\n raise AirflowException(\n f\"Request for URL {url} was unsuccessful with code: {response.status_code}\\nContent response: {response.content.decode('utf-8')}\"\n )\n with open(download_path, \"wb\") as f:\n f.write(response.content)", "def _download(self):\n self._system.download_file(\"http://curl.haxx.se/download/\" + self._tar_name)", "def __download(self):\n\n # Use the default repository if set to True\n if self.repository is True:\n self.repository = self.__default_repository\n\n if not self.repository and not self.url:\n tarball = 'ucx-{}.tar.gz'.format(self.__version)\n self.url = '{0}/v{1}/{2}'.format(self.__baseurl, self.__version,\n tarball)", "def dowload_vt():\n print get_date_time_now() + \" ==> Download VT Samples started!\"\n print get_date_time_now() + \" ==> Nothing downloaded\"", "def x_download():\n\t#_loadconfig()\n\tconf = _get_config()\n\t#print conf['xplane']\n\tdownload_url = conf['xplane']['download']\n\tlocal(\"wget -P %s %s\" % (navimport.conf.work_dir(\"/xplane_zips\"), download_url))", "def download():\n env_banner()\n\n download_data = Download()\n download_data()\n click.echo('Download done.')", "def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None", "def main():\n get_obofoundry(force_download=True)", "def cli(date, path, mission):\n download.main(path, mission, date)", "def download():\n raise NotImplementedError", "def _download_pkg(self, context):\n pkg_url = context.package.arg\n dst_file_path = context.package.full_path\n log.debug('downloading {0} to {1}'.format(pkg_url, dst_file_path))\n download_file(pkg_url, dst_file_path, context.package.get('timeout', 1), verify_https=context.get('verify_https', False))", "def download(self, release: DoabRelease, **kwargs):\n # Download release\n release.download()", "def download(self):\n logger.info(f\"downloading project {self}\")\n self.project.storage.download(f\"{self.path}/releasemanifest\", None)\n self.extract()", "def download_release(self):\n if self.cache_dir is not None:\n download = Download(cache=self.cache_dir)\n else:\n self.log.warning(\"not using a download cache for uwsgi\")\n download = Download()\n\n download_url = self.options.get(\"download-url\", DOWNLOAD_URL)\n download_path, is_temp = download(\n download_url.format(self.uwsgi_version), md5sum=self.md5sum)\n return download_path", "def download(ctx, index, tstep, oformat, param, prod, experiment,\n model, queue, urgent):\n # ensure that ctx.obj exists and is a dict (in case `cli()` is called\n # by means other than the `if` block below)\n ctx.ensure_object(dict)\n ctx.obj['log'] = cdslog\n valid_format = list(iproduct(['tgz','zip'],['etccdi','hsi']))\n if (oformat,index) not in valid_format:\n cdslog.info(f'Download format {oformat} not available for {index} product')\n sys.exit()\n valid_tstep = list(iproduct(['mon','yr'],['etccdi']))\n valid_tstep.append(('day','hsi'))\n prod = expand_prod(prod)\n if (tstep,index) not in valid_tstep:\n cdslog.info(f\"Timestep 
{tstep} not available for {index} product\")\n sys.exit()\n args = {'format': oformat,\n 'index': index,\n 'params': list(param),\n 'prod': prod,\n 'experiment': list(experiment),\n 'model': list(model),\n 'tstep': tstep}\n if queue:\n dump_args(args, urgent)\n else: \n api_request(ctx, args)", "def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)", "def fetch_zenodo(self):\n\n # retrieve content from URL\n try:\n logging.info(f\"Downloading example data from {self.url}\")\n r = requests.get(self.url, stream=True)\n with io.BytesIO() as stream:\n with tqdm.wrapattr(\n stream,\n 'write',\n file=sys.stdout,\n miniters=1,\n desc=self.url,\n total=int(r.headers.get('content-length', 0))\n ) as file:\n for chunk in r.iter_content(chunk_size=4096):\n file.write(chunk)\n with zipfile.ZipFile(stream) as zipped:\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n logging.info(\"Unzipped: {}\".format(os.path.join(self.destination, f)))\n zipped.extract(f, self.destination)\n\n logging.info(\"Download and install complete.\")\n\n self.close_logger()\n\n except requests.exceptions.MissingSchema:\n msg = f\"Unable to download data from {self.url}\"\n logging.exception(msg)\n self.close_logger()\n raise", "def download(self, args):\n\n\t\t\"\"\" Default argument for Architecture \"\"\"\n\t\tif len(args) >= 4:\n\t\t\tarch = args[3]\n\t\telse:\n\t\t\tarch = platform.processor()\n\n\t\t\"\"\" Default argument for Version \"\"\"\n\t\tif len(args) >= 3:\n\t\t\tif args[2] == \"latest\":\n\t\t\t\tversion = \"Latest\"\n\t\t\telse:\n\t\t\t\tversion = args[2]\n\t\telse:\n\t\t\tversion = \"Latest\"\n\n\t\t\"\"\" Find package path from package list, based on prev. arguments \"\"\"\n\t\tif len(args) >= 2:\n\t\t\tpackage = args[1]\n\t\t\tfilename = False\n\t\t\t\n\t\t\tversions = self.master.Dump(package)\n\t\t\tfor d in versions:\n\t\t\t\tif d[\"Version\"] == version:\n\t\t\t\t\tif d[\"Version\"] != \"Latest\" and d[\"Architecture\"] == arch:\n\t\t\t\t\t\tfilename = d[\"Filename\"]\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor e in versions:\n\t\t\t\t\t\t\tif e[\"Version\"] == d[\"LatestVersion\"] and e[\"Architecture\"] == arch:\n\t\t\t\t\t\t\t\tfilename = e[\"Filename\"]\n\t\t\t\t\t\t\t\tversion = d[\"LatestVersion\"];\n\t\t\tif not filename:\n\t\t\t\tself.write_line(\"ERROR XXX: Package not found.\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Find chunks to download \"\"\"\n\t\t\tid = 0\n\t\t\tto_download = False\n\t\t\tfor f in self.torrent_info.files():\n\t\t\t\tprint(f.path.replace(\"packages/\", \"\") + \" = \" + filename);\n\t\t\t\tif f.path.replace(\"packages/\", \"\") == filename:\n\t\t\t\t\tto_download = f\n\t\t\t\t\tbreak;\n\t\t\t\tid += 1\n\t\t\tif not to_download:\n\t\t\t\tprint(\"ERROR XXX: dunno\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Set chunks priority to 7? 
(download max priority) \"\"\"\n\t\t\tpr = self.torrent_info.map_file(id, 0, to_download.size);\n\t\t\tn_pieces = math.ceil(pr.length / self.torrent_info.piece_length() + 1);\n\n\t\t\tfor i in range(self.torrent_info.num_pieces()):\n\t\t\t\tif i in range(pr.piece, pr.piece + n_pieces):\n\t\t\t\t\tself.handler.piece_priority(i, 7)\n\n\n\t\t\t\"\"\" Print download of package status \"\"\"\n\t\t\tself.print_status(id, pr, package, version, filename)\n\t\t\t\t\n\t\t\t\"\"\" Check the server for hash validation \"\"\"\n\t\t\tif self.valid_tpkg_file(to_download.path):\n\t\t\t\tself.write_line(\"DONE {0} {1} {2} {3}\".format(package, version, arch, self.config[\"daemon\"][\"rootdir\"] + \"/\" + to_download.path).replace('//', '/'))\n\t\t\telse:\n\t\t\t\tself.write_line(\"ERROR XXX: Hash verification failed.\")\n\t\telse:\n\t\t\tself.write_line(\"INVALID ARGUMENTS\");", "async def test_ota_url_generic_x86_64_rename(coresys: CoreSys) -> None:\n coresys.os._board = \"intel-nuc\"\n coresys.os._version = AwesomeVersion(\"5.13\")\n await coresys.updater.fetch_data()\n\n version6 = AwesomeVersion(\"6.0\")\n url = coresys.updater.ota_url.format(\n version=str(version6), board=\"generic-x86-64\", os_name=\"haos\"\n )\n\n assert coresys.os._get_download_url(version6) == url", "def _Download( self ):\n self._DownloadPipe += PackageUtil.ExecuteSimpleCommand( \"git\", [\"clone\", \"[email protected]:mastbaum/avalanche.git\", self.GetInstallPath()], None, os.getcwd() )\n return", "def download_release(self):\n cache = tempfile.mkdtemp('download-cache')\n download = Download(cache=cache)\n download_path, is_temp = download(self.download_url)\n return download_path", "def download(self, release: CrossrefEventsRelease, **kwargs):\n release.download()", "def download_mission(self):\n cmds = self.vehicle.commands\n cmds.download()\n # Wait until download is complete.\n cmds.wait_valid()", "def download():\n basedir = os.path.dirname(os.path.dirname(__file__))\n print(basedir)\n datadir = os.path.join(basedir,\"data/NeonTreeEvaluation/\")\n print(\"Downloading data files to {}\".format(datadir)) \n eval_url = zenodo_url(concept_rec_id=\"3723356\", datadir=datadir)", "def get_software(self):\n\n logging.info('downloading OCP 4.3 software bits into {}'.format(self.software_dir))\n for url_key in self.ocp_urls.keys():\n url = self.ocp_urls[url_key]\n dest_name = url.split('/')[-1]\n dest_path = self.software_dir + '/' + dest_name\n dest_path_exist = check_path(dest_path, isfile=True)\n url_check = ''\n if dest_path_exist:\n logging.info('file {} already exists in {}'.format(dest_name, self.software_dir))\n self.inventory_dict['csah']['vars'][url_key] = dest_name\n else:\n url_check = validate_url(url)\n if url_check == '':\n logging.error('file {} in {} is not available'.format(dest_name, url_key))\n self.inventory_dict['csah']['vars'][url_key] = ''\n\n if url_check != '' and url_check.code == 200:\n logging.info('downloading {}'.format(dest_name))\n urlretrieve('{}'.format(url),'{}/{}'.format(self.software_dir, dest_name))\n self.inventory_dict['csah']['vars'][url_key] = dest_name", "def download(ctx: click.Context, **kwargs):\n root_commands.cmd_download(ctx.obj, **kwargs)", "def download(self):\n pass", "def download(self):\n pass", "def download_latest(conn, logger):\n download_intraday_extended(conn, logger)" ]
[ "0.6697182", "0.6280148", "0.62222373", "0.61106557", "0.6080942", "0.5996796", "0.5958443", "0.59322697", "0.5895125", "0.5894148", "0.58753026", "0.580345", "0.57798254", "0.57530046", "0.57020843", "0.5632765", "0.5632071", "0.5620086", "0.56162405", "0.5600442", "0.5587876", "0.55726606", "0.5544313", "0.55202776", "0.5488919", "0.5453822", "0.54492766", "0.54483426", "0.54483426", "0.5415254" ]
0.69096416
0
Hits the Thoth API and requests the ONIX feed for a particular publisher. Creates a file called onix.xml at the specified location
def thoth_download_onix( publisher_id: str, download_path: str, format_spec: str, host_name: str = DEFAULT_HOST_NAME, num_retries: int = 3, ) -> None: url = THOTH_URL.format(host_name=host_name, format_specification=format_spec, publisher_id=publisher_id) logging.info(f"Downloading ONIX XML from {url}") response = retry_get_url(url, num_retries=num_retries) if response.status_code != 200: raise AirflowException( f"Request for URL {url} was unsuccessful with code: {response.status_code}\nContent response: {response.content.decode('utf-8')}" ) with open(download_path, "wb") as f: f.write(response.content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def oai_harvest(basic_url, metadata_prefix=None, oai_set=None, processing=None, out_file_suffix=None):\n collection_xpath = \".//oai_2_0:metadata//intact:collection\"\n record_xpath = \".//oai_2_0:record\"\n identifier_xpath = \".//oai_2_0:header//oai_2_0:identifier\"\n token_xpath = \".//oai_2_0:resumptionToken\"\n processing_regex = re.compile(r\"'(?P<target>\\w*?)':'(?P<generator>.*?)'\")\n variable_regex = re.compile(r\"%(\\w*?)%\")\n #institution_xpath =\n namespaces = {\n \"oai_2_0\": \"http://www.openarchives.org/OAI/2.0/\",\n \"intact\": \"http://intact-project.org\"\n }\n url = basic_url + \"?verb=ListRecords\"\n if metadata_prefix:\n url += \"&metadataPrefix=\" + metadata_prefix\n if oai_set:\n url += \"&set=\" + oai_set\n if processing:\n match = processing_regex.match(processing)\n if match:\n groupdict = match.groupdict()\n target = groupdict[\"target\"]\n generator = groupdict[\"generator\"]\n variables = variable_regex.search(generator).groups()\n else:\n print_r(\"Error: Unable to parse processing instruction!\")\n processing = None\n print_b(\"Harvesting from \" + url)\n articles = []\n file_output = \"\"\n while url is not None:\n try:\n request = Request(url)\n url = None\n response = urlopen(request)\n content_string = response.read()\n if out_file_suffix:\n file_output += content_string.decode()\n root = ET.fromstring(content_string)\n records = root.findall(record_xpath, namespaces)\n counter = 0\n for record in records:\n article = {}\n identifier = record.find(identifier_xpath, namespaces)\n article[\"identifier\"] = identifier.text\n collection = record.find(collection_xpath, namespaces)\n if collection is None:\n # Might happen with deleted records\n continue\n for elem, xpath in OAI_COLLECTION_CONTENT.items():\n article[elem] = \"NA\"\n if xpath is not None:\n result = collection.find(xpath, namespaces)\n if result is not None and result.text is not None:\n article[elem] = result.text\n if processing:\n target_string = generator\n for variable in variables:\n target_string = target_string.replace(\"%\" + variable + \"%\", article[variable])\n article[target] = target_string\n if article[\"euro\"] in [\"NA\", \"0\"]:\n print_r(\"Article skipped, no APC amount found.\")\n continue\n if article[\"doi\"] != \"NA\":\n norm_doi = get_normalised_DOI(article[\"doi\"])\n if norm_doi is None:\n article[\"doi\"] = \"NA\"\n else:\n article[\"doi\"] = norm_doi\n articles.append(article)\n counter += 1\n token = root.find(token_xpath, namespaces)\n if token is not None and token.text is not None:\n url = basic_url + \"?verb=ListRecords&resumptionToken=\" + token.text\n print_g(str(counter) + \" articles harvested.\")\n except HTTPError as httpe:\n code = str(httpe.getcode())\n print(\"HTTPError: {} - {}\".format(code, httpe.reason))\n except URLError as urle:\n print(\"URLError: {}\".format(urle.reason))\n if out_file_suffix:\n with open(\"raw_harvest_data_\" + out_file_suffix, \"w\") as out:\n out.write(file_output)\n return articles", "def main(url, ip):\n\n doc = {'url' : url}\n #get the whois and domain info\n print('[*] Getting whois')\n doc = get_whois(doc)\n\n #only continue checking stuff if we can actually resolve the address\n if doc['ip'] != '':\n #get ssl information if available\n print('[*] Get certificate information')\n doc = get_certinfo(doc)\n #browse to the site and get metrics\n print('[*] Interrogating homepage')\n doc = interrogate_homepage(doc)\n\n #now it is time to parse the ids logs\n doc = get_ids_logs(doc)\n\n #strip out ip if we don't have one\n 
if doc['ip'] == '':\n doc.pop('ip')\n \n\n try:\n print('[*] Adding information to elastiseach as %s:9200' % ip)\n es = Elasticsearch([ip])\n res = es.index(index='flurb', doc_type='site', body=doc)\n #if res == 'OK then return some sort of success response\n\n #there is no explicit close so we are going to delete\n #our es object to trigger the socket cleanup\n del(es)\n\n except:\n print('[*] Failed to add document to elasticsearch at %s' % ip)\n del(es)\n return doc\n\n return doc", "def do_request(xml_location):\n request = open(xml_location,\"r\").read()\n webservice = httplib.HTTP(HOST,PORT)\n webservice.putrequest(\"POST\", API_URL)\n webservice.putheader(\"Host\", HOST)\n webservice.putheader(\"User-Agent\",\"Python post\")\n webservice.putheader(\"Content-type\", \"text/xml; charset=\\\"UTF-8\\\"\")\n webservice.putheader(\"Content-length\", \"%d\" % len(request))\n webservice.endheaders()\n webservice.send(request)\n statuscode, statusmessage, header = webservice.getreply()\n result = webservice.getfile().read()\n print statuscode, statusmessage, header\n print result", "def get_xml_file_url(self, elife_id):\n\t\txml_url = \"https://s3.amazonaws.com/\" + self.settings.cdn_bucket + \"/elife-articles/\" + elife_id + \"/elife\" + elife_id + \".xml\"\n\t\t\n\t\treturn xml_url", "def _collect_sai_test_report_xml(ptfhost, request):\n logger.info(\"Collecting xunit SAI tests log from ptf\")\n ptfhost.shell(\"cd {0} && tar -czvf result.tar.gz *\".format(SAI_TEST_REPORT_DIR_ON_PTF))\n ptfhost.fetch(\n src=\"{0}/result.tar.gz\".format(SAI_TEST_REPORT_DIR_ON_PTF), \n dest=request.config.option.sai_test_report_dir + \"/\", \n flat=True)", "def download_article(doi):\n if doi.lower().startswith('doi:'):\n doi = doi[4:]\n url = '%s/%s' % (elsevier_article_url, doi)\n if api_key is None:\n logging.error('Missing API key at %s, could not download article.' 
%\n api_key_file)\n return None\n params = {'APIKey': api_key, 'httpAccept': 'text/xml'}\n try:\n res = urllib2.urlopen(url, data=urllib.urlencode(params))\n except urllib2.HTTPError:\n logging.error('Cound not download article %s' % doi)\n return None\n xml = res.read()\n return xml", "def __init__(self, endpoint='https://www.wikidata.org/w/api.php'):\n self.endpoint = endpoint", "def cli():\n fire.Fire(fetch_rss_file)", "def download_second_request(url=None, filename=None, **kwargs):\n with open(filename, 'w') as output:\n output.write('some successful second response XML')", "def request_xml(self):\n xml_filename = pkg_resources.resource_filename(__name__, 'data/request.xml')\n with open(xml_filename, 'r') as xml_file:\n xml = xml_file.read()\n xml = xml.format(username=self.username,\n password=self.password,\n timestamp=time.time(),\n hardware_id=self.hardware_id(),\n advertisement_id=self.advertisement_id(),\n locale=self.locale)\n return xml", "def getxml(url, **kwargs):\n xml = fetch_resource(url, **kwargs)\n return etree.fromstring(xml)", "def send_request(date):\n base_url = 'https://api.nytimes.com/svc/archive/v1/'\n url = base_url + '/' + date[0] + '/' + date[1] + f'.json?api-key={key}'\n response = requests.get(url).json()\n time.sleep(5)\n return response", "def init(depot):\n global tlookup\n pkg.server.feed.init(depot)\n tlookup = mako.lookup.TemplateLookup(directories=[depot.web_root])", "def respond(depot, request, response, pub, http_depot=None):\n path = request.path_info.strip(\"/\")\n if pub and os.path.exists(os.path.join(depot.web_root, pub)):\n # If an item exists under the web root\n # with this name, it isn't a publisher\n # prefix.\n pub = None\n elif pub and pub not in depot.repo.publishers:\n raise cherrypy.NotFound()\n\n if pub:\n # Strip publisher from path as it can't be used to determine\n # resource locations.\n path = path.replace(pub, \"\").strip(\"/\")\n else:\n # No publisher specified in request, so assume default.\n pub = depot.repo.cfg.get_property(\"publisher\", \"prefix\")\n if not pub:\n pub = None\n\n if path == \"\":\n path = \"index.shtml\"\n elif path.split(\"/\")[0] == \"feed\":\n response.headers.update({ \"Expires\": 0, \"Pragma\": \"no-cache\",\n \"Cache-Control\": \"no-cache, no-transform, must-revalidate\"\n })\n return feed(depot, request, response, pub)\n\n if not path.endswith(\".shtml\"):\n spath = urllib.unquote(path)\n fname = os.path.join(depot.web_root, spath)\n if not os.path.normpath(fname).startswith(\n os.path.normpath(depot.web_root)):\n # Ignore requests for files outside of the web root.\n return __handle_error(path, httplib.NOT_FOUND)\n else:\n return cherrypy.lib.static.serve_file(os.path.join(\n depot.web_root, spath))\n\n try:\n response.headers.update({ \"Expires\": 0, \"Pragma\": \"no-cache\",\n \"Cache-Control\": \"no-cache, no-transform, must-revalidate\"\n })\n return __render_template(depot, request, path, pub, http_depot)\n except sae.VersionException as e:\n # The user shouldn't see why we can't render a template, but\n # the reason should be logged (cleanly).\n cherrypy.log(\"Template '{path}' is incompatible with current \"\n \"server api: {error}\".format(path=path,\n error=str(e)))\n cherrypy.log(\"Ensure that the correct --content-root has been \"\n \"provided to pkg.depotd.\")\n return __handle_error(request.path_info, httplib.NOT_FOUND)\n except IOError as e:\n return __handle_error(path, httplib.INTERNAL_SERVER_ERROR)\n except mako.exceptions.TemplateLookupException as e:\n # The above exception 
indicates that mako could not locate the\n # template (in most cases, Mako doesn't seem to always clearly\n # differentiate).\n return __handle_error(path, httplib.NOT_FOUND)\n except sae.RedirectException as e:\n raise cherrypy.HTTPRedirect(e.data)\n except:\n return __handle_error(path, httplib.INTERNAL_SERVER_ERROR)", "def getrawxml(fp,fn):\n print(\"starting to get the NRE XML Data from historical file\")\n infile = open(fp+fn,\"r\",encoding=\"utf-8\")\n xml_file = infile.read()\n return xml_file", "def lineup_xml() -> Response:\n watch = \"watch_direct\" if config.direct else \"watch\"\n xml = render_template('lineup.xml',\n stations=locast_service.get_stations(),\n url_base=host_and_port,\n watch=watch).encode(\"utf-8\")\n return Response(xml, mimetype='text/xml')", "def request(self, host, handler, request_body, verbose=0):\n self.verbose = verbose\n\n headers = {'Content-type': 'text/xml'}\n data = request_body\n req = urllib2.Request('http://' + host + handler, data, headers)\n\n response = self.opener.open(req)\n\n return self.parse_response(response)", "def getIdentifierMetaXML(base_url, identifier):\n\n\tquery_url = base_url + \"/meta/\" + urllib.quote_plus(identifier)\n\tprint(\"\\t\\t%s\" % query_url)\n\n\ttry:\n\t\trequest = urllib2.urlopen(query_url)\n\t\tresponse = request.read()\n\t\tresponse_xml = ET.fromstring(response)\n\texcept:\n\t\tprint \"\\t\\tFailed request: %s\" % query_url\n\t\tresponse_xml = None\n\n\treturn response_xml", "def epg_xml() -> Response:\n xml = render_template('epg.xml',\n stations=locast_service.get_stations(),\n url_base=host_and_port)\n return Response(xml, mimetype='text/xml')", "def get_full_metadata(self, nuxeo_id):\n parts = urlparse.urlsplit(self.nx.conf[\"api\"])\n url = '{}://{}/Merritt/{}.xml'.format(parts.scheme, parts.netloc, nuxeo_id)\n \n return url", "def get(self):\n f1 = Feed()\n f1.name = 'Test_Feed'\n # Change the feed url here to create your test feed.\n f1.url = 'http://news.google.com/?output=rss'\n f1.put()\n t2 = Topic()\n # A topic of interest.\n t2.name = 'internet'\n t2.put()", "def buildtransportxml(self):\n try:\n subprocess.check_call([\"emanegentransportxml\", \"platform.xml\"], cwd=self.session.session_dir)\n except subprocess.CalledProcessError:\n logger.exception(\"error running emanegentransportxml\")", "def main():\n try:\n res = requests.get('http://localhost:9200')\n pprint(json.loads(res.content.decode('utf-8')))\n except requests.exceptions.ConnectionError:\n print(\"ERROR: ELASTICSEARCH Server is not running!\")\n exit(-1)\n\n # scrapeAndSaveNewsArticles()\n # generateNewsDocsCSV() # may need to be modified based on how scrapeAndSave function file output\n if not es_client.indices.exists(index='huffpost_news_index'):\n print(\"PLEASE WAIT... 
LOADING DOCUMENTS INTO INVERTED INDEX\")\n indexDocsToES('huffpost_news_index')", "def pubone(file_name,alg,host):\n\n hash_alg=alg\n scheme=\"ni\"\n rform=\"json\"\n ext=\"{ \\\"meta\\\": { \\\"pubdirs\\\" : \\\"yep\\\" } }\"\n\n # record start time of this\n stime=time.time()\n\n # Create NIdigester for use with form encoder and StreamingHTTP\n ni_digester = NIdigester()\n # Install the template URL built from the scheme, the authority and the digest algorithm\n rv = ni_digester.set_url((scheme, host, \"/%s\" % hash_alg))\n if rv != ni_errs.niSUCCESS:\n nilog(\"Cannot construct valid ni URL: %s\" % ni_errs_txt[rv])\n return\n debug(ni_digester.get_url())\n # Open the file if possible\n try:\n f = open(file_name, \"rb\")\n except Exception, e :\n debug(\"Cannot open file %s: Error: %s\" %(file_name, str(e)))\n return\n # Guess the mimetype of the file\n m = magic.Magic(mime=True)\n ctype = m.from_file(file_name)\n debug(\"Content-Type: %s\" % ctype)\n if ctype is None:\n # Guessing didn't work - default\n ctype = \"application/octet-stream\"\n # Set up HTTP form data for publish request\n # Make parameter for file with digester\n octet_param = MultipartParam(\"octets\",\n fileobj=f,\n filetype=ctype,\n filename=file_name,\n digester = ni_digester)\n # Make dictionary that will dynamically retrieve ni URI when it has been made\n uri_dict = { \"generator\": octet_param.get_url,\n \"length\": (len(ni_digester.get_url()) + len(\";\") +\n ni_digester.get_b64_encoded_length())}\n msgid=str(random.randint(1, 2**64)) \n param_list = [octet_param,\n (\"URI\", uri_dict),\n (\"msgid\", msgid),\n (\"ext\", ext),\n (\"fullPut\", \"yes\"),\n (\"rform\", rform)]\n # Construct data generator and header strings\n datagen, headers = multipart_encode(param_list)\n if verbose:\n debug(\"Parameters prepared: %s\"% \"\".join(datagen))\n\n # Set up streaming HTTP mechanism - register handlers with urllib2\n # get out for now, don't do it\n opener = streaminghttp.register_openers()\n # Where to send the publish request.\n http_url = \"http://%s/netinfproto/publish\" % host\n # debug(\"Accessing: %s\" % http_url)\n # Send POST request to destination server\n fsize=os.path.getsize(file_name)\n nilog(\"%s,PUBLISH tx,file,%s,size,%d,to,%s\" % (msgid,file_name,fsize,host))\n try:\n req = urllib2.Request(http_url, datagen, headers)\n except Exception, e:\n nilog(\"%s,PUBLISH tx error\" % msgid);\n if verbose:\n nilog(\"Error: Unable to create request for http URL %s: %s\" %\n (http_url, str(e)))\n f.close()\n return\n # Get HTTP results\n try:\n http_object = urllib2.urlopen(req)\n except Exception, e:\n nilog(\"%s,PUBLISH rx error\" % msgid);\n if verbose:\n nilog(\"Error: Unable to access http URL %s: %s\" % (http_url, str(e)))\n f.close()\n return\n f.close()\n if verbose:\n nilog(\"Digester result: %s\" % octet_param.get_url())\n # Get message headers\n http_info = http_object.info()\n http_result = http_object.getcode()\n if verbose:\n debug(\"HTTP result: %d\" % http_result)\n debug(\"Response info: %s\" % http_info)\n debug(\"Response type: %s\" % http_info.gettype())\n\n # Read results into buffer\n payload = http_object.read()\n http_object.close()\n # debug(payload)\n # Report outcome\n if (http_result != 200):\n if verbose:\n debug(\"Unsuccessful publish request returned HTTP code %d\" %\n http_result) \n nilog(\"%s,PUBLISH rx error bad response status,%d\" % (msgid,http_result));\n return\n # Check content type of returned message matches requested response type\n ct = 
http_object.headers[\"content-type\"]\n if ct != \"application/json\":\n if verbose:\n debug(\"Error: Expecting JSON coded (application/json) \"\n \"response but received Content-Type: %s\" % ct)\n nilog(\"%s,PUBLISH rx error bad content type,%s\" % (msgid,ct));\n return\n # If output of response is expected, print in the requested format\n if verbose:\n nilog( \"Publication of %s successful:\" % target)\n\n # JSON cases\n try:\n json_report = json.loads(payload)\n except Exception, e:\n if verbose:\n nilog(\"Error: Could not decode JSON report '%s': %s\" % (payload,\n str(e)))\n nilog(\"%s, PUBLISH rx error bad json decode\" % msgid);\n return\n\n if verbose: \n print json.dumps(json_report, indent = 4)\n etime=time.time()\n duration=etime-stime\n niuri=json_report[\"ni\"]\n nilog(\"%s,PUBLISH rx fine,ni,%s,size,%d,time,%10.10f\" % (msgid,niuri,fsize,duration*1000))\n\n return niuri", "def fourohfour(error):\n response = make_response('<?xml version=\"1.0\"?>\\n<updates>\\n</updates>')\n response.mimetype = 'text/xml'\n return response", "def main():\n\n # parses arguments\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-s', action='store', dest='start_index', type=int,\n help='The starting index for events. Default is 0')\n\n parser.add_argument('-e', action='store', dest='end_index', type=int,\n help='The starting index for events. Default is 5,000')\n\n results = parser.parse_args()\n\n start_index = results.start_index or 0\n\n end_index = results.end_index or 5000\n\n scraper = Scraper()\n\n # these are the event column titles from the sample import csv given by localist\n event_column_titles = [\n 'Title','Description','Date From','Date To','Recurrence','Start Time','End Time',\n 'Location','Address','City','State','Event Website','Room','Keywords','Tags',\n 'Photo URL','Ticket URL','Cost','Hashtag','Facebook URL','Group','Department',\n 'Allow User Activity','Allow User Attendance','Visibility','Featured Tabs',\n 'Sponsored','Venue Page Only','Exclude From Trending','Event Types','Invited Audience', 'Original URL',\n 'Location Details'\n ]\n\n out_stream = open('event_import.csv', 'w')\n\n writer = Writer(event_column_titles, out_stream)\n\n writer.write_headers()\n\n # iterates through the specified event numbers and scrapes each one and writes\n # it to the output file\n for i in range(start_index, end_index + 1):\n current_url = 'http://test-ucscevents.pantheonsite.io/event/' + str(i)\n print(\"processing url: \" + current_url)\n r = requests.get(current_url)\n if r.status_code != requests.codes.ok:\n print(' 404')\n else:\n soup = get_soup_from_url(current_url)\n events = scraper.scrape_event(soup)\n for event in events:\n event['Original URL'] = current_url\n\n writer.write_object(event) # event written to output file here\n\n out_stream.close()", "def make_request_xml(self):\n #print (self.url)\n try:\n with closing(get(self.url, stream=True)) as resp: #returns b`xml`\n if self.is_good_enough_xml(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('Error during requests to {0} : {1}'.format(url, str(e)))\n return None", "def pull_articles(ls):\n # pull articles\n doi = self.search_articles(file)\n els_key = self.els_key\n\n for i in doi:\n els_url = 'https://api.elsevier.com/content/article/doi/' + doi + '?APIKey=' + els_key\n r = requests.get(els_url)\n for num in range(len(ls)):\n with open(folder + f'/write_test_els_paper{num}.xml', 'wb') as file:\n file.write(r.content)", "def genStixDoc(\n outputDir_,\n 
targetFileSha1_,\n targetFileSha256_,\n targetFileSha512_,\n targetFileSsdeep_,\n targetFileMd5_,\n targetFileSize_,\n targetFileName_,\n ipv4Addresses_,\n hostNames_):\n parsedTargetFileName = reFileName(targetFileName_)[1]\n parsedTargetFilePrefix = reFileName(targetFileName_)[0]\n stix.utils.set_id_namespace({\"http://www.nickdriver.com/cuckoo2CRITs\" : \"cuckoo2CRITs\"})\n NS = cybox.utils.Namespace(\"http://www.nickdriver.com/cuckoo2CRITs\", \"cuckoo2CRITs\")\n cybox.utils.set_id_namespace(NS)\n stix_package = STIXPackage()\n\n stix_header = STIXHeader()\n stix_header.title = 'File: ' + parsedTargetFileName + ' with the associated hashes, network indicators'\n stix_header.description = 'File: ' + parsedTargetFileName + ' with the associated hashes, network indicators'\n stix_package.stix_header = stix_header\n\n #Will take this out later\n # Create the ttp\n malware_instance = MalwareInstance()\n malware_instance.add_name(parsedTargetFileName)\n malware_instance.description = targetFileSha1_\n ttp = TTP(title='TTP: ' + parsedTargetFileName)\n ttp.behavior = Behavior()\n ttp.behavior.add_malware_instance(malware_instance)\n #stix_package.add_ttp(ttp)\n \n #Trying to create an array that will be added later...\n stix_observables = []\n \n #This works - leaving intact until the new portion works\n '''\n # Create the indicator for the ipv4 addresses\n ipv4Object = Address(ipv4Addresses_, Address.CAT_IPV4)\n #stix_msg['stix_observables'].extend(Observables([ipv4Object]))\n stix_observables.extend([ipv4Object])\n '''\n for ip in ipv4Addresses_:\n\t\tipv4Object = Address(ip, Address.CAT_IPV4)\n\t\tstix_observables.extend([ipv4Object])\n \n \n '''\n #This works - leaving intact until the new portion works\n # Create the indicator for the domain names\n domainNameObject = DomainName()\n domainNameObject.value = hostNames_\n '''\n for name in hostNames_:\n\t\tdomainNameObject = DomainName()\n\t\tdomainNameObject.value = name\n\t\tstix_observables.extend([domainNameObject])\n\t\t\n \n\n \n # Create the observable for the file\n fileObject = File()\n fileObject.file_name = parsedTargetFileName\n #fileObject.file_name.condition = 'Equals'\n fileObject.size_in_bytes = targetFileSize_\n #fileObject.size_in_bytes.condition = 'Equals'\n fileObject.add_hash(Hash(targetFileSha1_, type_='SHA1', exact=True))\n fileObject.add_hash(Hash(targetFileSha256_, type_='SHA256', exact=True))\n fileObject.add_hash(Hash(targetFileSha512_, type_='SHA512', exact=True))\n fileObject.add_hash(Hash(targetFileSsdeep_, type_='SSDEEP', exact=True))\n fileObject.add_hash(Hash(targetFileMd5_, type_='MD5', exact=True))\n \n stix_observables.extend([fileObject])\n \n \n stix_package.observables = Observables(stix_observables)\n \n #DEBUG\n #stagedStixDoc = stix_package.to_xml()\n #pp = pprint.PrettyPrinter(indent=4)\n #pp.pprint(stagedStixDoc)\n\t\t\n #print \"stix_observables list\"\n\n #pp.pprint(stix_observables)\n \n '''\n #VERY BASIC STIX ATTEMPT - THIS WORKS!\n a = Address(\"1.2.3.4\", Address.CAT_IPV4)\n d = DomainName()\n d.value = \"cybox.mitre.org\"\n stix_package.observables = Observables([a, d])\n #concensus - Observable does not work - ObservableS does\n '''\n\t\n\t\n\t###UNCOMMENT THIS WHEN DONE###\n\t\n \n stagedStixDoc = stix_package.to_xml()\n stagedStixDoc = fixAddressObject(stagedStixDoc)\n stagedStixDoc = fixDomainObject(stagedStixDoc)\n today = datetime.datetime.now()\n now = today.strftime('%Y-%m-%d_%H%M%S')\n if not os.path.exists(outputDir_):\n os.makedirs(outputDir_)\n with open (outputDir_ + '/' + now + 
'-' + targetFileSha1_ + '.stix.xml', 'a') as myfile:\n myfile.write(stagedStixDoc)\n _l.debug('Wrote file: ' + now + '-' + targetFileSha1_ + '.stix.xml')\n \n return", "def _generate_atom_feed(self, director, blog_posts):\n logger.info(_('Generating Atom XML feed ...'))\n builder = FeedBuilder(self.atom_metadata)\n builder.add(blog_posts)\n output_file = os.path.join(director.outdir, self.atom_output)\n builder.write_to(output_file)" ]
[ "0.50976276", "0.50819564", "0.5062179", "0.505473", "0.49140215", "0.47646818", "0.47354826", "0.4707565", "0.46446255", "0.46394545", "0.46037316", "0.4576128", "0.45600936", "0.4554681", "0.45524687", "0.4548541", "0.45293865", "0.4528018", "0.4522511", "0.45112112", "0.45067647", "0.45004085", "0.4497475", "0.44759002", "0.44734922", "0.44706154", "0.44237947", "0.44235876", "0.44229606", "0.44065076" ]
0.5873819
0
This function will produce a batch of features and labels for each epoch step to reduce the memory usage.
def generator(features, labels, batch_size): # Create empty arrays to contain batch of features and labels# batch_features = np.zeros((batch_size, 160, 320, 3)) batch_labels = np.zeros((batch_size, 1)) while True: for i in range(batch_size): # choose random index in features index = random.choice(range(len(features))) batch_features[i] = features[index] batch_labels[i] = labels[index] yield batch_features, batch_labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n #print(labels[start:end])\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\r\n for start in range(0, len(features), batch_size):\r\n end = min(start + batch_size, len(features))\r\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n # 用 yield迭代器。\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def _generate_training_batch(ground_truth_data, representation_function,\n batch_size, num_points, random_state):\n points = None # Dimensionality depends on the representation function.\n labels = np.zeros(num_points, dtype=np.int64)\n for i in range(num_points):\n labels[i], feature_vector = _generate_training_sample(\n ground_truth_data, representation_function, batch_size, random_state)\n if points is None:\n points = np.zeros((num_points, feature_vector.shape[0]))\n points[i, :] = feature_vector\n return points, labels", "def generate_batches(data, labels, batch_size):\n for start in range(0, len(data), batch_size):\n yield Tensor(data[start:start+batch_size, ...]), Tensor(labels[start:start+batch_size, ...])", "def train(self, num_batches: int):", "def batch_data(why = 'train'):\r\n if why == 'train':\r\n all_paths = all_train_paths\r\n if why == 'test':\r\n all_paths = all_test_paths\r\n paths_labels = get_image_path_label(all_paths)\r\n keys = set([path_label[1] for path_label in paths_labels[0]])\r\n values = [i for i in range(len(keys))]\r\n label_dict = dict(zip(keys, values))\r\n total_list = []\r\n true_labels = []\r\n for b in range(batch_size):\r\n dummy_first_set = []\r\n dummy_second_set = []\r\n dummy_true_labels = []\r\n for samp_no, path_label in enumerate(paths_labels[b]):\r\n path = path_label[0]\r\n label = path_label[1]\r\n img = Image.open(path)\r\n img = img.resize((size, size))\r\n img = np.array(img).flatten()/ 255.0\r\n feat_label = torch.zeros([n_way])\r\n feat_label[label_dict[label]] = 1\r\n if samp_no % (k_shot + 1) == 0:\r\n feature = np.concatenate((img,torch.zeros([n_way])))\r\n dummy_second_set.append(feature)\r\n dummy_true_labels.append(label_dict[label])\r\n else:\r\n feature = np.concatenate((img, feat_label))\r\n dummy_first_set.append(feature)\r\n \r\n dummy_total_list = np.concatenate((dummy_first_set, dummy_second_set))\r\n total_list.append(torch.tensor(dummy_total_list))\r\n true_labels.append(torch.tensor(dummy_true_labels))\r\n\r\n total_list = torch.stack(total_list).float()\r\n true_labels = torch.stack(true_labels).float()\r\n return total_list, true_labels", "def generate_train_batch(self):\n\n patients_indices = self.get_indices()\n patients_for_batch = [self._data[i] for 
i in patients_indices]\n\n data = np.zeros((self.batch_size, 1, *self.patch_size), dtype=np.short)\n labels = np.empty(self.batch_size, dtype=np.float32)\n\n # iterate over patients_for_batch and include them in the batch\n for i, j in enumerate(patients_for_batch):\n patient_data_ct = np.load(j).astype(np.short)\n\n data[i] = self.preprocess_func(patient_data_ct).astype(np.short)\n path = str(j).split('/')[-1].replace('.npy', '')\n labels[i] = float(self.age_info[path])\n\n return {'data': np.array(data), 'label': np.array(labels)}", "def next(self):\n #print('next')\n batch_size = self.batch_size\n batch_data = nd.empty((batch_size,)+self.data_shape)\n batch_label = nd.empty((batch_size,)+self.label_shape)\n i = 0\n #self.cutoff = random.randint(800,1280)\n try:\n while i < batch_size:\n #print('N', i)\n data, label, annot = self.next_sample()\n R = self.get_data(data, label, annot)\n if R is None:\n continue\n data_out, label_out, flip_data_out, flip_label_out = R\n if not self.use_coherent:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n i += 1\n else:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n data2 = nd.array(flip_data_out)\n data2 = nd.transpose(data2, axes=(2, 0, 1))\n label2 = nd.array(flip_label_out)\n #M = nd.array(M)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n #i+=1\n j = i+self.per_batch_size//2\n batch_data[j][:] = data2\n batch_label[j][:] = label2\n i += 1\n if j%self.per_batch_size==self.per_batch_size-1:\n i = j+1\n except StopIteration:\n if i<batch_size:\n raise StopIteration\n\n #return {self.data_name : batch_data,\n # self.label_name : batch_label}\n #print(batch_data.shape, batch_label.shape)\n return mx.io.DataBatch([batch_data], [batch_label], batch_size - i)", "def features_from_CNN(self):\n\n dataloader = self.datasetManager.get_dataloader()\n print(\"\\nFeatures obtention with CNN\")\n print(\"-\"*15)\n for i, batch in tqdm.tqdm(enumerate(dataloader)):\n img = self.to_device(batch[0])\n img_name = batch[2][0]\n \n temp = re.findall(r'\\d+', img_name)\n res = list(map(int, temp))\n X = res[-2]\n Y = res[-1]\n \n savepath = os.path.join(self.output_dir, 'data%i'%X)\n create_folder(savepath)\n \n out_CNN = self.network(img) \n \n torch.save(out_CNN, os.path.join(savepath,'features_tensor%i.pt'%Y))", "def save_all_features(nb_samples, source=\"./datasets/D1/images/\", dest=\"./datasets/D1/features/\", input_size=(416, 416), batch_size=16):\n\n # check if the directory exists, and if not make it\n if not os.path.exists(dest):\n os.makedirs(dest)\n\n # define image height and width\n (img_height, img_width) = input_size\n\n # build the VGG16 network and extract features after every MaxPool layer\n model = VGG16(weights='imagenet', include_top=False)\n\n c1 = model.layers[-16].output\n c1 = GlobalAveragePooling2D()(c1)\n\n c2 = model.layers[-13].output\n c2 = GlobalAveragePooling2D()(c2)\n\n c3 = model.layers[-9].output\n c3 = GlobalAveragePooling2D()(c3)\n\n c4 = model.layers[-5].output\n c4 = GlobalAveragePooling2D()(c4)\n\n c5 = model.layers[-1].output\n c5 = GlobalAveragePooling2D()(c5)\n\n\n model = Model(inputs=model.input, outputs=(c1, c2, c3, c4, c5))\n\n # always save your weights after training or during training\n model.save_weights('first_try.h5')\n model.save('model_save')\n\n # define image generator without 
augmentation\n datagen = ImageDataGenerator(rescale=1. / 255.)\n\n generator = datagen.flow_from_directory(\n source,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode=\"sparse\",\n shuffle=False)\n\n # generate and save features, labels and respective filenames\n steps = nb_samples / batch_size + 1\n X = model.predict_generator(generator, steps)\n Y = np.concatenate([generator.next()[1] for i in range(0, generator.samples, batch_size)])\n names = generator.filenames\n\n for n, i in enumerate(X):\n print(\"Saving \" + n + \" and \" + i)\n with open(dest + \"X-\" + str(img_height) + \"-c\" + str(n + 1) + \"-AVG.npy\", 'w') as f:\n np.save(f.name, i)\n\n if not os.path.exists(dest + \"Y.npy\"):\n with open(dest + \"Y.npy\", 'w') as f:\n np.save(f.name, Y)\n\n if not os.path.exists(dest + \"filenames.npy\"):\n with open(dest + \"filenames.npy\", 'w') as f:\n np.save(f.name, names)", "def generate_batch(\n batch: Tuple[Dict[str, Sequence[int]], List[Sequence[int]]]\n ) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:\n input_ids = torch.tensor([b[0][\"input_ids\"] for b in batch])\n attention_mask = torch.tensor([b[0][\"attention_mask\"] for b in batch])\n token_type_ids = torch.tensor([b[0][\"token_type_ids\"] for b in batch])\n labels = torch.tensor([b[1] for b in batch])\n features = {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n return features, labels", "def batch_generator(labels_df, set_kind):\n # Generate training batches\n if set_kind == \"train\" and (labels_df.shape[0] == 32384 or labels_df.shape[0] == 3120 or labels_df.shape[0] == 64):\n while 1:\n\n for i in range(labels_df.shape[0]//8):\n x_train = np.load('data/train-npy/npdatasetX{}.npy'.format(i))\n y_train = np.load('data/train-npy/npdatasetY{}.npy'.format(i))\n\n for j in range(1):\n x_trainj = x_train[j*8:j*8-1,:]\n y_trainj = y_train[j*8:j*8-1,:]\n\n yield (x_trainj, y_trainj)\n\n\n # Generate validation batches\n if set_kind == \"valid\" and (labels_df.shape[0] == 8080 or labels_df.shape[0] == 1920 or labels_df.shape[0] == 8):\n while 1:\n\n for i in range(labels_df.shape[0]//4): \n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n y_valid = np.load('data/valid-npy/npdatasetY{}.npy'.format(i))\n\n for j in range(1): \n x_validj = x_valid[j*4:j*4-1,:]\n y_validj = y_valid[j*4:j*4-1,:]\n\n yield (x_validj, y_validj)\n\n\n # Generate test batches\n if set_kind == \"test\" and labels_df.shape[0] == 40669:\n while 1:\n\n for i in range(labels_df.shape[0]//4): #REPLACE 1 by 3\n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n\n for j in range(1): #REPLACE 2 by 2816\n x_validj = x_valid[j*4:j*4-1,:]\n \n yield (x_validj, y_validj)\n\n if set_kind == \"test\" and (labels_df.shape[0] == 8080 or labels_df.shape[0] == 8):\n while 1:\n\n for i in range(labels_df.shape[0]//8): #REPLACE 1 by 3\n x_valid = np.load('data/valid-npy/npdatasetX{}.npy'.format(i))\n\n for j in range(2): #REPLACE 2 by 2816\n x_validj = x_valid[j*4:j*4-1,:]\n\n yield x_validj", "def batch_generator(self, num_epochs=1, shuffle=False):\n def parse_fn(tfrecord):\n return parse_mnist_tfrec(\n tfrecord, self.name, self.features_shape, True\n )\n dataset = tf.data.TFRecordDataset(\n self.filenames_list, compression_type=self.compression_type\n )\n if shuffle:\n dataset = dataset.shuffle(buffer_size=256)\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.map(parse_fn).prefetch(self.batch_size)\n dataset = dataset.batch(self.batch_size)\n 
iterator = dataset.make_one_shot_iterator()\n batch_features, batch_labels = iterator.get_next()\n return batch_features, batch_labels", "def generator(numbers, number_labels, batch_size=32):\n while True: # Loop forever so the generator never terminates\n\n images = []\n labels = []\n\n for batch_sample in range(batch_size):\n img, label = create_numbers(numbers, number_labels, return_label=True)\n\n # Here we will convert the label to a format that Keras API can process:\n n_label = np.zeros((5, 11), dtype='int')\n for i, digit in enumerate(label):\n if digit == \".\":\n n_digit = 10\n else:\n n_digit = int(digit)\n\n n_label[i][n_digit] = 1\n\n images.append(img)\n # labels.append(label)\n labels.append(n_label)\n\n X_train = np.array(images)\n if len(X_train.shape) == 3:\n X_train = np.expand_dims(X_train, -1)\n\n y_temp = np.array(labels)\n\n y1 = y_temp[:, 0, :]\n y2 = y_temp[:, 1, :]\n y3 = y_temp[:, 2, :]\n y4 = y_temp[:, 3, :]\n y5 = y_temp[:, 4, :]\n\n yield X_train, [y1, y2, y3, y4, y5]", "def next(self):\n\n if self.index - self.batch_size <= 0:\n if not self.repeater:\n raise StopIteration\n else:\n self.index = self.dataset.shape[0] - 1\n self.index = self.index - self.batch_size\n\n features = np.array([\n resize(image=imread(self.dataset.iloc[i, 0])[:, :, :3],\n output_shape=(480, 640))\n for i in range(self.index, self.index + self.batch_size)\n ])\n # features = np.array(features / 255., dtype=np.float32)\n\n if self.label_type == 'depth':\n labels = np.array([\n resize(image=imread(self.dataset.iloc[i, 1]),\n output_shape=(480, 640))\n for i in range(self.index, self.index + self.batch_size)\n ])\n labels = img_as_ubyte(labels)\n labels = np.array(labels, dtype=np.float)\n labels = (labels[:, :, :, 0] + labels[:, :, :, 1] * 256 +\n labels[:, :, :, 2] * 256 * 256) / (\n (256 * 256 * 256) - 1) * self.max_distance\n\n elif self.label_type == 'segmentation':\n labels = np.array([\n resize(image=imread(self.dataset.iloc[i, 2])[:, :, 0],\n output_shape=(480, 640))\n for i in range(self.index, self.index + self.batch_size)\n ])\n labels = img_as_ubyte(labels)\n\n new_segmentation = np.ndarray(shape=(self.batch_size, 480, 640,\n 22))\n\n for i in range(self.batch_size):\n for j in range(480):\n for k in range(640):\n if labels[i, j, k] < 22:\n new_segmentation[i, j, k, int(labels[i, j, k])] = 1\n labels = new_segmentation\n\n elif self.label_type == 'sparse_segmentation':\n labels = np.array([\n resize(image=imread(self.dataset.iloc[i, 2])[:, :, 0],\n output_shape=(480, 640, 1))\n for i in range(self.index, self.index + self.batch_size)\n ])\n labels = img_as_ubyte(labels)\n else:\n raise ValueError('invalid label type')\n\n return features, labels", "def next_batch(self, batch_size, fake_data=False, shuffle=True):\r\n if fake_data:\r\n #fake_image = [1] * 784\r\n fake_image = [1]*6400\r\n if self.one_hot:\r\n #fake_label = [1] + [0] * 9\r\n fake_label = [1]+[0]*(people-1)\r\n else:\r\n fake_label = 0\r\n return [fake_image for _ in xrange(batch_size)], [\r\n fake_label for _ in xrange(batch_size)\r\n ]\r\n start = self._index_in_epoch\r\n # Shuffle for the first epoch\r\n if self._epochs_completed == 0 and start == 0 and shuffle:\r\n perm0 = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm0)\r\n self._images = self.images[perm0]\r\n self._labels = self.labels[perm0]\r\n # Go to the next epoch\r\n if start + batch_size > self._num_examples:\r\n # Finished epoch\r\n self._epochs_completed += 1\r\n # Get the rest examples in this epoch\r\n rest_num_examples = 
self._num_examples - start\r\n images_rest_part = self._images[start:self._num_examples]\r\n labels_rest_part = self._labels[start:self._num_examples]\r\n # Shuffle the data\r\n if shuffle:\r\n perm = numpy.arange(self._num_examples)\r\n numpy.random.shuffle(perm)\r\n self._images = self.images[perm]\r\n self._labels = self.labels[perm]\r\n # Start next epoch\r\n start = 0\r\n self._index_in_epoch = batch_size - rest_num_examples\r\n end = self._index_in_epoch\r\n images_new_part = self._images[start:end]\r\n labels_new_part = self._labels[start:end]\r\n return numpy.concatenate((images_rest_part, images_new_part), axis=0) , numpy.concatenate((labels_rest_part, labels_new_part), axis=0)\r\n else:\r\n self._index_in_epoch += batch_size\r\n end = self._index_in_epoch\r\n return self._images[start:end], self._labels[start:end]", "def batch_creator(batch_size, dataset_length, dataset_name):\n # batch_size = 128\n # dataset_length = 6000\n batch_mask = rng.choice(dataset_length, batch_size)\n\n batch_x = eval('x_' + dataset_name)[[batch_mask]].reshape(-1, input_num_units)\n batch_x = preproc(batch_x)\n\n if dataset_name == 'train':\n batch_y = eval('y_' + dataset_name)[[batch_mask]]\n batch_y = dense_to_one_hot(batch_y)\n\n return batch_x, batch_y", "def create_train_feats():\n features = read_process_labelled(AUDIO_DIR, debug=True)\n df = pd.DataFrame(features)\n p = './Features/dataset_features/data_features.csv'\n df.to_csv(p, index=False)\n return p", "def create_batches(data, label, max_seq_length, batch_size, rand_idx, mode='train'):\n num_samples = len(data)\n num_batches = num_samples // batch_size\n for i in xrange(num_batches):\n batch_start_pos = i * batch_size\n batch_end_pos = min((i + 1) * batch_size, num_samples)\n batch_idx = rand_idx[batch_start_pos:batch_end_pos]\n label_in_batch = to_sparse_representation(label, batch_idx)\n data_in_batch = np.zeros((max_seq_length, batch_size, num_features))\n seq_lengths = np.zeros(batch_size)\n for j, idx in enumerate(batch_idx):\n x = data[idx]\n data_in_batch[0:x.shape[1], j, :] = np.reshape(x, (x.shape[1], num_features))\n seq_lengths[j] = x.shape[1]\n yield ((data_in_batch, seq_lengths), label_in_batch)", "def gen_batches_functions(data_folder, image_paths, image_shape, out_shape,\n label_folder):\n\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n id_road = 7\n id_lane = 6\n id_car = 10\n\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n # Get corresponding label img path\n gt_image_file = image_file.replace('CameraRGB', 'CameraSeg')\n # Read rgb and label images\n img_in = scipy.misc.imread(image_file, mode='RGB')\n gt_in = scipy.misc.imread(gt_image_file)\n # Crop sky part of the image\n image = img_in[-out_shape[0]:, :]\n gt_image = gt_in[-out_shape[0]:, :, 0]\n # Obtain labels\n gt_road = ((gt_image == id_road) | (gt_image == id_lane))\n gt_car = (gt_image == id_car)\n gt_car[-105:, :] = False\n gt_bg = np.invert(gt_car | gt_road)\n # Augmentation\n if bool(random.getrandbits(1)):\n image, gt_bg, gt_car, gt_road = flip_img(\n image, gt_bg, gt_car, gt_road)\n\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_car = gt_car.reshape(*gt_car.shape, 1)\n gt_road = gt_road.reshape(*gt_road.shape, 1)\n\n gt_image = np.concatenate((gt_bg, gt_car, gt_road), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield 
np.array(images), np.array(gt_images)\n\n return get_batches_fn", "def gen_batch(img_dir, id_label_dict, batch_size, num_class, shuffle=True):\n img_file_path = gen_img_files(img_dir, shuffle)\n num_images = len(img_file_path)\n while True:\n for i in range(0, num_images-batch_size, batch_size):\n X, y = gen_data_file(img_file_path[i:i+batch_size], id_label_dict, num_class)\n yield X, y", "def batch_data(cls, train_data, train_labels, batch_size):\n for batch in range(int(np.ceil(train_data.shape[0] / batch_size))):\n start = batch_size * batch\n end = start + batch_size\n if end > train_data.shape[0]:\n yield batch, (train_data[start:train_data.shape[0]], \\\n train_labels[start:train_data.shape[0]])\n else:\n yield batch, (train_data[start:end], \\\n train_labels[start:end])", "def _create_chunks(opts, inputs, idx1, idx2):\n # idx2 = 75\n # idx1 = 71\n num_batch = idx2 - idx1\n # img1 = torch.zeros(num_batch, 1, 10, 224, 224)\n # img2 = torch.zeros(num_batch, 1, 10, 224, 224)\n # labels = torch.zeros(num_batch)\n\n feat1_list = []\n label_list = []\n for i in range(num_batch):\n curr_idx = i + idx1\n frames = range(curr_idx - 5, curr_idx + 5)\n temp1 = _load_chunk(opts, inputs, frames)\n feat1_list.append(temp1)\n\n temp_label = inputs[1][curr_idx, :].nonzero()\n if len(temp_label.size()) == 0:\n temp_label = 6\n else:\n if temp_label.size()[0] != 0:\n temp_label = temp_label[0][0]\n label_list.append(temp_label)\n\n feat1 = torch.cat(feat1_list, dim=0)\n labels = torch.LongTensor(label_list)\n return feat1, labels", "def generate_new_batches(Gs, features, y, idx, graph_window, shift, batch_size, device, test_sample):\n\n # print(\"...create batches...\")\n N = len(idx)\n n_nodes = Gs[0].number_of_nodes()\n n_state = 50 \n \n adj_lst = list()\n features_lst = list()\n y_lst = list()\n node_lst = list()\n\n batch_data = []\n for i in range(0, N, batch_size):\n if i+batch_size >= N:\n batch_data += [[idx[x] for x in range(i, N)]]\n else:\n batch_data += [[idx[x] for x in range(i, i+batch_size)]]\n\n for batch in batch_data:\n adj_tmp = list()\n features_tmp = list()\n y_tmp = list()\n num_tmp = list()\n line_idx = 0\n for val in batch:\n for k in range(val-graph_window+1,val+1):\n adj_tmp.append(nx.adjacency_matrix(Gs[k-1]).toarray()) \n for feat in features[k]:\n features_tmp.append(feat)\n num_tmp += list(range(line_idx, line_idx+50))\n line_idx += len(features[k])\n y_tmp.append(y[val+shift])\n\n adj_tmp = sparse_mx_to_torch_sparse_tensor(sp.block_diag(adj_tmp))\n adj_lst.append(adj_tmp.to(device))\n features_tmp = torch.FloatTensor(features_tmp)\n features_lst.append(features_tmp.to(device))\n y_tmp = torch.FloatTensor(y_tmp).reshape(-1)\n y_lst.append(y_tmp.to(device))\n node_lst.append(num_tmp)\n\n\n return adj_lst, features_lst, y_lst, node_lst", "def batch_features_labels2(features, labels_1, labels_2, batch_size):\n # 用 yield迭代器。\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels_1[start:end], labels_2[start:end]", "def convert_examples_to_features(tokens_set, labels_set, max_seq_length, tokenizer):\r\n\r\n #label_map = {label: i for i, label in enumerate(label_list, 1)}\r\n\r\n input_ids, input_masks, segment_ids, labels = [], [], [], []\r\n for index in tqdm_notebook(range(len(tokens_set)),desc=\"Converting examples to features\"):\r\n textlist = tokens_set[index] #example.text_a.split(' ')\r\n labellist = labels_set[index]\r\n input_id, input_mask, segment_id,label = convert_single_example(\r\n 
textlist, labellist,max_seq_length,tokenizer\r\n )\r\n input_ids.append(input_id)\r\n input_masks.append(input_mask)\r\n segment_ids.append(segment_id)\r\n labels.append(label)\r\n return (\r\n np.array(input_ids),\r\n np.array(input_masks),\r\n np.array(segment_ids),\r\n np.array(labels)\r\n )" ]
[ "0.76270497", "0.7307724", "0.7307669", "0.7307669", "0.7307669", "0.72704774", "0.70929223", "0.69568336", "0.69184935", "0.68820596", "0.6748234", "0.67144907", "0.66818917", "0.66654253", "0.66291654", "0.6627662", "0.66152316", "0.6594451", "0.6584446", "0.6561161", "0.6542363", "0.6528654", "0.6525476", "0.65244156", "0.65191656", "0.65160275", "0.6494509", "0.64886415", "0.6485266", "0.6470586" ]
0.7333913
1
Validate and update field value against validator. Raise NoValidatorError if no validator was set.
def validate(self): if self.validator is None: raise NoValidatorError('Field %s has no validator assigned.' % self.id) self.value = self.validator(self.value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validator(self, value: Optional[Dict[str, Any]]):\n self._validator = value", "def validate(self):\n for field in self.fields:\n if field.validate():\n self.model.set(field.name, field.model_value)\n else:\n self.errors.append(field.error())\n return len(self.errors) == 0", "def run_validation(self, data=empty):\n (is_empty_value, data) = self.validate_empty_values(data)\n if is_empty_value:\n return data\n\n value = self.to_internal_value(data)\n try:\n self.run_validators(value)\n value = self.validate(value)\n assert value is not None, '.validate() should return the validated data'\n except ValidationError as exc:\n raise ValidationError(detail=as_serializer_error(exc))\n\n return value", "def run_validation(self, data=empty):\n (is_empty_value, data) = self.validate_empty_values(data)\n if is_empty_value:\n return data\n\n value = self.to_internal_value(data)\n try:\n self.run_validators(value)\n value = self.validate(value)\n assert value is not None, '.validate() should return the validated data'\n except ValidationError as exc:\n raise ValidationError(detail=as_serializer_error(exc))\n\n return value", "def __call__(self, value):\n self.data_validator(value)", "def __set__(self, instance, value):\n if value is None:\n new_value = value\n else:\n # check whether it's the default value for the field, which we also\n # don't clean because of charfields etc.\n field_default = self.field.get_default()\n if value == field_default:\n new_value = field_default\n else:\n # if not None/the field's default, validate it ...\n try:\n new_value = self.field.clean(value=value, model_instance=instance)\n except ValidationError as exc:\n # catch and re-raise it as a dict mapping key: exception\n # so that forms will attribute it to the correct field.\n raise ValidationError(message={\n self.field.name: exc.messages,\n }, code=getattr(exc, 'code', None))\n instance.__dict__[self.field.name] = new_value\n return new_value", "def _validate(self, instance, value):", "def update(self, instance, validated_data):", "def _validate(self, value): # type: (Any) -> bool\n if self._validator is None:\n return True\n return self._validator(value)", "def update(self, instance, validated_data):\n pass", "def run_validator(self, validator):\n if validator is None:\n return\n\n value = self.value\n\n # Boolean validator\n if validator is bool:\n # Value must \"look like\" a boolean value\n if InvenTree.helpers.is_bool(value):\n # Coerce into either \"True\" or \"False\"\n value = InvenTree.helpers.str2bool(value)\n else:\n raise ValidationError({\n 'value': _('Value must be a boolean value')\n })\n\n # Integer validator\n if validator is int:\n\n try:\n # Coerce into an integer value\n value = int(value)\n except (ValueError, TypeError):\n raise ValidationError({\n 'value': _('Value must be an integer value'),\n })\n\n # If a list of validators is supplied, iterate through each one\n if type(validator) in [list, tuple]:\n for v in validator:\n self.run_validator(v)\n\n if callable(validator):\n # We can accept function validators with a single argument\n\n if self.is_bool():\n value = self.as_bool()\n\n if self.is_int():\n value = self.as_int()\n\n validator(value)", "def run_validation(self, data=empty):\r\n # 验证空值\r\n (is_empty_value, data) = self.validate_empty_values(data)\r\n if is_empty_value:\r\n return data\r\n\r\n value = self.to_internal_value(data)\r\n try:\r\n self.run_validators(value)\r\n value = self.validate(value)\r\n assert value is not None, '.validate() should return the validated data'\r\n 
except (ValidationError, DjangoValidationError) as exc:\r\n raise ValidationError(detail=as_serializer_error(exc))\r\n\r\n return value", "def validate(self):\n self.valid = True\n\n if self._value is None and self._strict:\n self.valid = False\n raise self.MissingFieldData\n\n elif self._value is not None:\n self._type_specific_validation()", "def validate(self, instance, value):", "def validate(self, instance, value):", "def validate(self, value, model_instance):\n return super(self.__class__, self).validate(value.value, model_instance)", "def validator(self):\n return self._validator", "def validate(self, value):\r\n return value", "def __call__(self, value):\n return self.validate(value)", "def validate(self, obj, name, value):\n if self.validation_trait:\n return self.validation_trait.validate(obj, name, value)\n return value", "def _validate(self, model_instance, value):\r\n if self.empty(value) and self.is_required:\r\n raise ValidationError(\"Field '%s' is required.\", self.name)\r\n\r\n if self._selection and value not in self._selection_list:\r\n raise ValidationError(\r\n _(\"Field '%(name)s' is '%(value)s'; must be one of %(selection)s\",\r\n name=self.name, value=value, selection=self._selection_list))\r\n\r\n if self._validator:\r\n self._validator(model_instance, value)\r\n\r\n if value is None:\r\n return value\r\n\r\n return self.validate(value)", "def validate(self, value):\n return value", "def run_validation(self, data=empty):\n self._validated_data = super().run_validation(data)\n return self._validated_data", "def validate_value(self):\n raise NotImplementedError('validate_value() must implement in subclass.')", "def _validate(self, value, name):\n validated = self._validate_impl(value, name)\n return self._validate_post(value, name, validated)", "def validate(self):\n for fieldname in getattr(self, '_body_fields', []):\n val_name = 'validate_{fieldname}'.format(fieldname=fieldname)\n field = getattr(self, fieldname)\n val = getattr(self, val_name, None)\n if val is not None:\n val()\n elif isinstance(\n field,\n BaseAgaveResource\n ):\n field.validate()", "def data_value_validator(field, presentation, context):\n\n field.default_validate(presentation, context)\n\n value = getattr(presentation, field.name)\n if value is not None:\n the_type = presentation._get_type(context)\n entry_schema = presentation.entry_schema\n # AttributeDefinition does not have this:\n constraints = presentation._get_constraints(context) \\\n if hasattr(presentation, '_get_constraints') else None\n coerce_value(context, presentation, the_type, entry_schema, constraints, value, field.name)", "def validate(self, value):\n return self.__validate(value, self.validate_element)", "def adjust_parameter_validator(self, param):\n if not isinstance(param.vals, Numbers):\n raise Exception('Only the Numbers validator is supported.')\n min_val = param.vals._min_value\n max_val = param.vals._max_value\n\n min_val_upd = self.round_dac(min_val, param.name)\n max_val_upd = self.round_dac(max_val, param.name)\n\n param.vals = Numbers(min_val_upd, max_val_upd)", "def run_validators(self, value):\r\n if value is self.Empty:\r\n return\r\n\r\n return super(CourseKeyField, self).run_validators(value)" ]
[ "0.62739575", "0.608497", "0.6071332", "0.6071332", "0.6062489", "0.60375136", "0.59934586", "0.5940005", "0.59261507", "0.5883836", "0.5870587", "0.5867741", "0.58644885", "0.5827615", "0.5827615", "0.57252115", "0.5696865", "0.565482", "0.5627075", "0.5613289", "0.5612839", "0.55476093", "0.5546789", "0.5483489", "0.54658896", "0.54645675", "0.545996", "0.54552585", "0.54471296", "0.5447025" ]
0.83141893
0
Shortcut for field.renderer.render(). Raise NoRendererError if no renderer is set.
def render(self):
    if not self.renderer:
        raise NoRendererError('Field %s has no renderer assigned.' % self.id)
    return self.renderer.render(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render(self):\n try:\n if self.permit():\n return self.renderer.render(self)\n except AttributeError:\n if self.renderer is None:\n raise NotImplementedError(\"Should have implemented a renderer for {0}\".format(self.name))\n else:\n raise\n return ''", "def render(self):\n raise NotImplementedError(\"Renderer is an abstract class\")", "def render(self, renderer, right=False):\n pass # pragma: no cover", "def get_renderer(self, name, value, attrs=None):\n if value is None:\n value = self._empty_value\n final_attrs = self.build_attrs(attrs)\n return self.renderer(name, value, final_attrs, self.choices)", "def get_renderer ( self, object ):\n return self.renderer", "def _get_renderer(self) :\n \n return self._renderer", "def render(self):\n raise RenderNotImplemented('Render function is not implemented.')", "def renderer(self, ctx, name):\n\t\tif name in self.service.nevowRenderers:\n\t\t\treturn self.service.nevowRenderers[name]\n\t\treturn rend.Page.renderer(self, ctx, name)", "def render(self, name, value, attrs=None, renderer=None):\n\n repr_value = repr(value)\n\n # analyze represented value to see how big the field should be\n if attrs is not None:\n attrs[\"name\"] = name\n else:\n attrs = {\"name\": name}\n attrs[\"cols\"] = 30\n # adapt number of rows to number of lines in string\n rows = 1\n if isinstance(value, str) and \"\\n\" in repr_value:\n rows = max(1, len(value.split(\"\\n\")))\n attrs[\"rows\"] = rows\n attrs = self.build_attrs(attrs)\n\n try:\n # necessary to convert it back after repr(), otherwise validation errors will mutate it\n value = literal_eval(repr_value)\n except (ValueError, SyntaxError):\n # we could not eval it, just show its prepresentation\n value = repr_value\n return super().render(name, value, attrs=attrs, renderer=renderer)", "def render(self, **kwargs) -> str:\n return self.renderable(**kwargs).render()", "def render(self):\n self.rendered = self.value\n return self.rendered", "def render(self, name, value, attrs=None):\n raise NotImplementedError('subclasses of Widget must provide a render() method')", "def render(self, renderer: RenderingManager):\n self.grader.render(renderer)", "def render(self, value, renderer, workload, incognito=False):\n # get the name to use\n name = None if incognito else self.name\n # delegate to my protocol\n yield from self.protocol.pyre_render(renderer=renderer,\n name=name, component=value, workload=workload)\n # all done\n return", "def register_render_tag(renderer):\n def tag(parser, token):\n class TagNode(template.Node):\n def render(self, context):\n return renderer(context, token)\n return TagNode()\n for copy_attr in (\"__dict__\", \"__doc__\", \"__name__\"):\n setattr(tag, copy_attr, getattr(renderer, copy_attr))\n return register.tag(tag)", "def render(self, data, accepted_media_type=None, renderer_context=None):\n renderer_context = renderer_context or {}\n form = data.serializer\n\n style = renderer_context.get('style', {})\n if 'template_pack' not in style:\n style['template_pack'] = self.template_pack\n style['renderer'] = self\n\n template_pack = style['template_pack'].strip('/')\n template_name = template_pack + '/' + self.base_template\n template = loader.get_template(template_name)\n context = {\n 'form': form,\n 'style': style\n }\n return template.render(context)", "def renderer(self, func):\n self.renderers.append(func)\n return func", "def render_field(field, form, form_style, context, template, labelclass=None, layout_object=None, attrs=None):\n FAIL_SILENTLY = False #getattr(settings, 
'CRISPY_FAIL_SILENTLY', True)\n\n if not hasattr(form, 'rendered_fields'):\n form.rendered_fields = set()\n\n if hasattr(field, 'render'):\n return field.render(form, form_style, context)\n else:\n # This allows fields to be unicode strings, always they don't use non ASCII\n try:\n if isinstance(field, basestr):\n field = str(field)\n # If `field` is not unicode then we turn it into a unicode string, otherwise doing\n # str(field) would give no error and the field would not be resolved, causing confusion \n else:\n field = str(field)\n \n except (UnicodeEncodeError, UnicodeDecodeError):\n raise Exception(\"Field '%s' is using forbidden unicode characters\" % field)\n\n try:\n # Injecting HTML attributes into field's widget, Django handles rendering these\n field_instance = form.fields[field]\n if attrs is not None:\n field_instance.widget.attrs.update(attrs)\n except KeyError:\n if not FAIL_SILENTLY:\n raise Exception(\"Could not resolve form field '%s'.\" % field)\n else:\n field_instance = None\n logging.warning(\"Could not resolve form field '%s'.\" % field, exc_info=sys.exc_info())\n \n if not field in form.rendered_fields:\n form.rendered_fields.add(field)\n else:\n if not FAIL_SILENTLY:\n raise Exception(\"A field should only be rendered once: %s\" % field)\n else:\n logging.warning(\"A field should only be rendered once: %s\" % field, exc_info=sys.exc_info())\n\n if field_instance is None:\n html = ''\n else:\n bound_field = BoundField(form, field_instance, field)\n\n template = get_template(template)\n\n # We save the Layout object's bound fields in the layout object's `bound_fields` list\n if layout_object is not None:\n layout_object.bound_fields.append(bound_field)\n\n context.update({'field': bound_field, 'labelclass': labelclass, 'flat_attrs': flatatt(attrs or {})})\n html = template.render(context)\n\n return html", "def render(self):\n raise NotImplementedError()", "def render(self):\n raise NotImplementedError", "def _create_renderer(self, yaml_renderer, name):\n return partial(\n self.chart_factory.get_renderer(class_name=yaml_renderer[\"class_name\"]),\n name=name,\n )", "def render(self, **kwargs):\r\n return h.text_field(self.name, value=self.value, **kwargs)", "def render(self, r):\n raise NotImplementedError", "def SyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return Renderer(*args, **kw)", "def SyncRenderer(self, *args, **kw):\n # If no arguments are given, this renderer becomes the parent of the\n # newly created renderer\n if not args and not kw:\n args = (self,)\n\n return Renderer(*args, **kw)", "def render(self, data, accepted_media_type=None, renderer_context=None):\n\n if '(e.g:bbox=xmin,ymin,xmax,ymax)' in str(data):\n rendered = {'error': str(data)}\n return json.dumps(rendered)\n if data is None:\n return ''\n\n if 'error' in data:\n rendered = data\n elif isinstance(data, dict):\n rendered = self.render_single(data)\n else:\n rendered = self.render_many(data)\n\n return json.dumps(rendered, separators=self.separators)", "def render(self, value, context=None):\n template = value.template\n if template:\n return render_to_string(template, {'self': value})\n else:\n return self.render_basic(value)", "def define_log_renderer(self):\n # it must accept a logger, method_name and event_dict (just like processors)\n # but must return the rendered string, not a dictionary.\n # TODO tty logic\n if self.args.log_format == \"json\":\n 
return structlog.processors.JSONRenderer()\n\n if self.args.log_format == \"pretty\":\n return structlog.dev.ConsoleRenderer()\n\n if self.args.log_file is not None:\n return structlog.processors.JSONRenderer()\n\n if sys.stderr.isatty() and not self.args.quiet:\n return structlog.dev.ConsoleRenderer()\n\n return structlog.processors.JSONRenderer()", "def render(self, field, key, value, REQUEST, render_prefix=None):\n return self._render(field, key, value, REQUEST, render_prefix=render_prefix)", "def GetRenderer(*args, **kwargs):\n return _gdi_.GraphicsObject_GetRenderer(*args, **kwargs)" ]
[ "0.7004743", "0.65256923", "0.64937174", "0.63230234", "0.6244154", "0.6143011", "0.60672754", "0.60345244", "0.5998662", "0.59685975", "0.5790698", "0.57647145", "0.57602614", "0.57167757", "0.5699508", "0.5687103", "0.56720227", "0.5661371", "0.563945", "0.5615098", "0.56054896", "0.55789846", "0.55721897", "0.55636", "0.55636", "0.5560846", "0.5536059", "0.5506738", "0.5500855", "0.5499886" ]
0.77539104
0
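A minimal, self-contained sketch of the renderer-delegation pattern behind the render() shortcut above. The Field and TextRenderer classes here are illustrative stand-ins, not the original form library's API.

class NoRendererError(Exception):
    """Raised when a field is rendered without a renderer assigned."""


class TextRenderer:
    def render(self, field):
        # Toy renderer: emit a simple HTML input for the field.
        return '<input type="text" name="%s">' % field.id


class Field:
    def __init__(self, id, renderer=None):
        self.id = id
        self.renderer = renderer

    def render(self):
        # Same shortcut as above: fail loudly without a renderer, else delegate.
        if not self.renderer:
            raise NoRendererError('Field %s has no renderer assigned.' % self.id)
        return self.renderer.render(self)


print(Field('email', renderer=TextRenderer()).render())   # <input type="text" name="email">
try:
    Field('email').render()
except NoRendererError as exc:
    print(exc)                                             # Field email has no renderer assigned.
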
Transform each geometry to a different coordinate reference system. The ``crs`` attribute on the current GeoSeries must be set.
def to_crs(self, crs):
    if crs is None:
        raise ValueError("Can not transform with invalid crs")
    if self.crs is None:
        raise ValueError("Can not transform geometries without crs. Set crs for this GeoSeries first.")
    if self.crs == crs:
        return self
    return _unary_geo(arctern.ST_Transform, self, self.crs, crs, crs=crs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transform_geometries(datasource, src_epsg, dst_epsg):\n # Part 1\n src_srs = osr.SpatialReference()\n src_srs.ImportFromEPSG(src_epsg)\n dst_srs = osr.SpatialReference()\n dst_srs.ImportFromEPSG(dst_epsg)\n transformation = osr.CoordinateTransformation(src_srs, dst_srs)\n layer = datasource.GetLayerByIndex(0)\n \n # Part 2\n geoms = []\n layer.ResetReading()\n for feature in layer:\n geom = feature.GetGeometryRef().Clone()\n geom.Transform(transformation)\n geoms.append(geom)\n return geoms", "def transform_geometry(geom, crs=wgs84, to_crs=wgs84):\n\n from_crs = check_crs(crs)\n to_crs = check_crs(to_crs)\n\n if isinstance(to_crs, pyproj.Proj) and isinstance(from_crs, pyproj.Proj):\n project = partial(transform_proj, from_crs, to_crs)\n elif isinstance(to_crs, Grid):\n project = partial(to_crs.transform, crs=from_crs)\n elif isinstance(from_crs, Grid):\n project = partial(from_crs.ij_to_crs, crs=to_crs)\n else:\n raise NotImplementedError()\n\n from shapely.ops import transform\n return transform(project, geom)", "def shapely_to_cf(geometries: xr.DataArray | Sequence, grid_mapping: str | None = None):\n # Get all types to call the appropriate translation function.\n types = {\n geom.item().geom_type if isinstance(geom, xr.DataArray) else geom.geom_type\n for geom in geometries\n }\n if types.issubset({\"Point\", \"MultiPoint\"}):\n ds = points_to_cf(geometries)\n elif types.issubset({\"Polygon\", \"MultiPolygon\"}) or types.issubset(\n {\"LineString\", \"MultiLineString\"}\n ):\n raise NotImplementedError(\"Only point geometries conversion is implemented.\")\n else:\n raise ValueError(\n f\"Mixed geometry types are not supported in CF-compliant datasets. Got {types}\"\n )\n\n # Special treatment of selected grid mappings\n if grid_mapping == \"longitude_latitude\":\n # Special case for longitude_latitude grid mapping\n ds = ds.rename(crd_x=\"lon\", crd_y=\"lat\")\n ds.lon.attrs.update(units=\"degrees_east\", standard_name=\"longitude\")\n ds.lat.attrs.update(units=\"degrees_north\", standard_name=\"latitude\")\n ds.geometry_container.attrs.update(coordinates=\"lon lat\")\n ds.x.attrs.update(units=\"degrees_east\", standard_name=\"longitude\")\n ds.y.attrs.update(units=\"degrees_north\", standard_name=\"latitude\")\n elif grid_mapping is not None:\n raise NotImplementedError(\n f\"Only grid mapping longitude_latitude is implemented. 
Got {grid_mapping}.\"\n )\n\n return ds", "def clean_geometries(self, geo_nodes):\n for node in geo_nodes:\n if mc.nodeType(node) != 'mesh':\n node = mc.listRelatives(node, shapes=True, fullPath=True)[0]\n\n # Do the in mesh out mesh connection and the blendshape between\n # a cube and the original geometry\n cube = mc.polyCube()[0]\n cubeShape = mc.listRelatives(cube, s=True)[0]\n mc.connectAttr(\n \"{0}.outMesh\".format(node),\n \"{0}.inMesh\".format(cubeShape),\n f=True\n )\n mc.blendShape(node, cubeShape, w=(0, 1), o='world')\n\n # Rename the old object and all of it's shapes\n # This is a workaround to rename the shapeDeformed as well\n transform = mc.listRelatives(node, parent=True, fullPath=True)[0]\n renamed = mc.rename(\n transform,\n \"{0}_OM\".format(transform.split('|')[-1]),\n ignoreShape=True\n )\n for shape in mc.listRelatives(renamed, s=True, f=True):\n mc.rename(shape, \"{0}_OM\".format(shape.split('|')[-1]))\n\n # Rename the cube and it's shapes to orignial geo name\n new_node = mc.rename(\n cube,\n transform.split('|')[-1],\n ignoreShape=True\n )\n mc.rename(\n mc.listRelatives(new_node, s=True)[0],\n node.split('|')[-1]\n )\n\n # Unparent the old object and parent the new one\n parent = mc.listRelatives(renamed, parent=True, fullPath=True)\n if parent is not None:\n mc.parent(new_node, parent[0])\n mc.parent(renamed, world=True)", "def convert_spatial_reference_system(coordinates, source_epsg, target_epsg):\n source_spatial_reference = osr.SpatialReference()\n source_spatial_reference.ImportFromEPSG(source_epsg)\n target_spatial_reference = osr.SpatialReference()\n target_spatial_reference.ImportFromEPSG(target_epsg)\n coordinate_transformation = osr.CoordinateTransformation(source_spatial_reference, target_spatial_reference)\n\n transformed_coordinates = []\n for coordinate in coordinates:\n point = ogr.Geometry(ogr.wkbPoint)\n point.AddPoint(coordinate[0], coordinate[1])\n point.Transform(coordinate_transformation)\n transformed_coordinates.append((point.GetX(), point.GetY()))\n return transformed_coordinates", "def geo_transform(self):\n pass", "def set_crs(self, input_crs=None):\n crs_names = [\"crs_wkt\", \"crs\", \"epsg\"]\n names = list(self._obj.coords.keys())\n if isinstance(self._obj, xr.Dataset):\n names = names + list(self._obj.data_vars.keys())\n # user defined\n if input_crs is not None:\n input_crs = pyproj.CRS.from_user_input(input_crs)\n # look in grid_mapping and data variable attributes\n else:\n for name in crs_names:\n # check default > GEO_MAP_COORDS attrs\n crs = self._obj.coords[GEO_MAP_COORD].attrs.get(name, None)\n if crs is None: # global attrs\n crs = self._obj.attrs.pop(name, None)\n for var in names: # data var and coords attrs\n if name in self._obj[var].attrs:\n crs = self._obj[var].attrs.pop(name)\n break\n if crs is not None:\n # avoid Warning 1: +init=epsg:XXXX syntax is deprecated\n crs = crs.strip(\"+init=\") if isinstance(crs, str) else crs\n try:\n input_crs = pyproj.CRS.from_user_input(crs)\n break\n except RuntimeError:\n pass\n if input_crs is not None:\n grid_map_attrs = input_crs.to_cf()\n crs_wkt = input_crs.to_wkt()\n grid_map_attrs[\"spatial_ref\"] = crs_wkt\n grid_map_attrs[\"crs_wkt\"] = crs_wkt\n self.set_attrs(**grid_map_attrs)", "def map_coordinates(self,geometry):\n\t\tg = self.geomatrix\n\t\tdef project_coord(x,y,z=None):\n\t\t\tx = g[0] + g[1] * x + g[2] * y\n\t\t\ty = g[3] + g[4] * x + g[5] * y\n\t\t\tif z is None:\n\t\t\t\treturn x,y\n\t\t\telse:\n\t\t\t\treturn x,y,z\n\t\treturn transform(project_coord, 
geometry)", "def crs(self, crs):\n self.set_crs(crs)", "def transform_to(self, dst_crs):\n self.nodes.transform_to(dst_crs)", "def change_crs(data, crs, data_name):\n data_name = str(data_name)\n if data.crs != crs:\n print(\"translating {0} data\".format(data_name))\n data = data.to_crs(crs=crs)\n else:\n print(\"correct crs for {0}\".format(data_name))\n return data", "def get_coordinate_lists(self, crs=None):\n x, y = self.vertices.vectors()[:2]\n if crs is not None and (crs != self.crs):\n x, y = _reproject((x,y), self.crs, crs)\n return x, y", "def feature_transform(feature, crs_out, crs_in={'init': 'epsg:4326'}):\n p_in = Proj(crs_in)\n p_out = Proj(crs_out)\n feature_out = copy.deepcopy(feature)\n new_coords = []\n if feature['geometry']['type'] == 'Polygon':\n # Probably also work for multypolygons\n for ring in feature['geometry']['coordinates']:\n x2, y2 = transform(p_in, p_out, *zip(*ring))\n new_coords.append(zip(x2, y2))\n feature_out['geometry']['coordinates'] = new_coords\n elif feature['geometry']['type'] == 'Point':\n # Probably doesn't work for multipoints\n new_coords = transform(p_in, p_out, *feature['geometry']['coordinates'])\n feature_out['geometry']['coordinates'] = new_coords\n else:\n raise ValueError('Unsuported feature type')\n return feature_out", "def warp_geometry(geom, src_crs, dst_crs):\n return shapely.geometry.shape(rasterio.warp.transform_geom(src_crs, dst_crs, shapely.geometry.mapping(geom)))", "def obtain_geometries(self):\n\n assert isinstance(self.ts, TS)\n\n \n symbol_dict = {\n 17: \"Cl\",\n 9: \"F\",\n 8: \"O\",\n 7: \"N\",\n 6: \"C\",\n 1: \"H\",\n }\n atoms = []\n\n parser = ccread(self.log_file, loglevel=logging.ERROR)\n\n for atom_num, coords in zip(parser.atomnos, parser.atomcoords[-1]):\n atoms.append(Atom(symbol=symbol_dict[atom_num], position=coords))\n \n self.ts._ase_molecule = Atoms(atoms)\n self.ts.update_coords_from(\"ase\")\n\n self.pre_geometry = self.ts.ase_molecule.copy()\n self.post_geometry = self.ts.ase_molecule.copy()\n\n for vib, displacements in self.vibrations:\n if vib < 0: # Finding the imaginary frequency\n self.post_geometry.arrays[\"positions\"] -= displacements\n\n return self.pre_geometry, self.post_geometry", "def transform_geopandas(gdf, from_crs=None, to_crs=wgs84, inplace=False):\n from shapely.ops import transform\n import geopandas as gpd\n\n if from_crs is None:\n from_crs = check_crs(gdf.crs)\n else:\n from_crs = check_crs(from_crs)\n to_crs = check_crs(to_crs)\n\n if inplace:\n out = gdf\n else:\n out = gdf.copy()\n\n if isinstance(to_crs, pyproj.Proj) and isinstance(from_crs, pyproj.Proj):\n project = partial(transform_proj, from_crs, to_crs)\n elif isinstance(to_crs, Grid):\n project = partial(to_crs.transform, crs=from_crs)\n elif isinstance(from_crs, Grid):\n project = partial(from_crs.ij_to_crs, crs=to_crs)\n else:\n raise NotImplementedError()\n\n # Do the job and set the new attributes\n result = out.geometry.apply(lambda geom: transform(project, geom))\n result.__class__ = gpd.GeoSeries\n if isinstance(to_crs, pyproj.Proj):\n to_crs = to_crs.srs\n elif isinstance(to_crs, Grid):\n to_crs = None\n result.crs = to_crs\n out.geometry = result\n out.crs = to_crs\n out['min_x'] = [g.bounds[0] for g in out.geometry]\n out['max_x'] = [g.bounds[2] for g in out.geometry]\n out['min_y'] = [g.bounds[1] for g in out.geometry]\n out['max_y'] = [g.bounds[3] for g in out.geometry]\n return out", "def mergeGeometries(self):\n self.geometry = reduce(lambda p1,p2 : p1.union(p2) ,map(lambda tax : 
tax.biomeGeometry,self.taxonomies))\n return self.geometry", "def ReprojectCoords(coords,src_srs,tgt_srs):\n trans_coords = []\n transform = osr.CoordinateTransformation( src_srs, tgt_srs)\n for x,y in coords:\n x,y,z = transform.TransformPoint(x,y)\n trans_coords.append([x,y])\n return trans_coords", "def getMultiGeometry(geometry):\n geom = arcpy.Array()\n for feature in geometry:\n array = arcpy.Array()\n for point in feature:\n point = arcpy.Point(float(point[0]), float(point[1]))\n array.add(point)\n geom.add(array)\n return geom", "def _set_geotransform(self, xvals, yvals):\n xdim = len(xvals)\n ydim = len(yvals)\n\n x0 = xvals[0]\n y0 = yvals[0]\n\n dx = xvals[1] - xvals[0]\n dy = yvals[1] - yvals[0]\n\n x_leftedge = x0 - dx / 2\n y_topedge = y0 + dx / 2\n\n xlast = x0 + (xdim -1) * dx\n ylast = y0 + (ydim -1) * dy\n\n assert abs(xlast - xvals[xdim - 1]) < \\\n abs(max(xlast, xvals[xdim - 1])) / 1000.\n\n self._geotransform = (x_leftedge, dx, 0., y_topedge, 0., dy)", "def reproject(self, lon, lat):\n if self.xform is None:\n # if the CRS hasn't been determined yet, we set it from the first image's lat/lon (take the UTM crs)\n utm_i = str(int(math.floor((self.images[0].lon + 180) / 6 ) % 60) + 1).zfill(2)\n epsg_code = int('326' + utm_i) if (self.images[0].lat >= 0) else int('327' + utm_i)\n self.crs_dest = QgsCoordinateReferenceSystem(epsg_code)\n self.xform = QgsCoordinateTransform(self.crs_src, self.crs_dest, QgsProject.instance())\n return self.xform.transform(QgsPointXY(lon, lat))", "def _process_crs(self, data, crs):\n # get the proj string: either the value of data.attrs[crs] or crs itself\n _crs = getattr(data, 'attrs', {}).get(crs or 'crs', crs)\n try:\n return process_crs(_crs)\n except ValueError:\n # only raise error if crs was specified in kwargs\n if crs:\n raise ValueError(\n \"'{}' must be either a valid crs or an reference to \"\n \"a `data.attr` containing a valid crs.\".format(crs))", "def convertor(geometry, method=\"wgs2gcj\"):\n if geometry['type'] == 'Point':\n coords = geometry['coordinates']\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'LineString' or geometry['type'] == 'MutliPoint':\n coordinates = geometry['coordinates']\n for coords in coordinates:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'Polygon' or geometry['type'] == 'MultiLineString':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for coords in rings:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'MultiPolygon':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for lines in rings:\n for coords in lines:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n return geometry", "def makeTransformations(epsg1, epsg2):\n sr1 = osr.SpatialReference()\n sr1.ImportFromEPSG(epsg1)\n preventGdal3axisSwap(sr1)\n sr2 = osr.SpatialReference()\n sr2.ImportFromEPSG(epsg2)\n preventGdal3axisSwap(sr2)\n tr1to2 = osr.CoordinateTransformation(sr1, sr2)\n tr2to1 = osr.CoordinateTransformation(sr2, sr1)\n return (tr1to2, tr2to1)", "def convertmany(self, *args, **kwargs):\n return _coordsys.coordsys_convertmany(self, *args, **kwargs)", "def crs(self, value):\n self._crs = value", "def rescale_refgeos(self, core_atom, rcovmetal=None):\n if isinstance(rcovmetal,float):\n interatomic_dist_ideal = rcovmetal + io_ptable.rcov1[io_ptable.elements.index('O')]\n else:\n interatomic_dist_ideal = \\\n 
io_ptable.rcov1[io_ptable.elements.index(core_atom)] + io_ptable.rcov1[io_ptable.elements.index('O')]\n geodict_copy = self.geometry_dict.copy()\n for key in self.geometry_dict.keys():\n geo_dict = self.geometry_dict[key]\n dists = np.linalg.norm(geo_dict, axis=1)\n avg_dist = np.mean(dists)\n re_scale = interatomic_dist_ideal/avg_dist\n scaled_geo = []\n for xyz in geo_dict:\n out = [xyz[0]*re_scale, xyz[1]*re_scale, xyz[2]*re_scale]\n scaled_geo.append(out)\n geodict_copy.update({key: scaled_geo})\n self.geometry_dict = geodict_copy", "def set_crs(self, crs):\n crs = _validate_crs(crs)\n self._crs = crs", "def transform(self, srid: ir.IntegerValue) -> GeoSpatialValue:\n return ops.GeoTransform(self, srid).to_expr()", "def transforming_coordinates(self, coordinates_lists, transform): \n \n transformed_coordinates_lists = []\n for coordinates_list in coordinates_lists:\n transformed_coordinates_list = []\n for coordinate in coordinates_list:\n coordinate = tuple(coordinate)\n transformed_coordinate = list(transform(coordinate[0], coordinate[1]))\n transformed_coordinates_list.append(transformed_coordinate)\n transformed_coordinates_lists.append(transformed_coordinates_list)\n \n \n return transformed_coordinates_lists" ]
[ "0.6502866", "0.62526053", "0.61256564", "0.6086267", "0.60835266", "0.6010616", "0.5969945", "0.5863563", "0.5861438", "0.5789306", "0.5703812", "0.56789464", "0.5665309", "0.5656348", "0.56265503", "0.5570779", "0.5527477", "0.5501872", "0.5500846", "0.5494727", "0.5475225", "0.5462632", "0.5382871", "0.53821194", "0.53735983", "0.5365203", "0.53648984", "0.5341813", "0.5340184", "0.5331893" ]
0.7170733
0
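A hypothetical usage sketch for the to_crs accessor above. It assumes the arctern package is installed and that its GeoSeries constructor accepts WKT strings plus an EPSG-style crs argument, mirroring the GeoPandas API.

from arctern import GeoSeries

points = GeoSeries(["POINT (116.39 39.91)", "POINT (121.47 31.23)"], crs="EPSG:4326")
projected = points.to_crs("EPSG:3857")   # re-project lon/lat to Web Mercator
print(projected.crs)                     # expected: EPSG:3857
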
Return a geometry that represents the union of all geometries in the GeoSeries.
def unary_union(self):
    return GeoSeries(arctern.ST_Union_Aggr(self))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mergeGeometries(self):\n self.geometry = reduce(lambda p1,p2 : p1.union(p2) ,map(lambda tax : tax.biomeGeometry,self.taxonomies))\n return self.geometry", "def union(self, other):\n return self._geomgen(capi.geom_union, other)", "def unary_union(self) -> ir.GeoSpatialScalar:\n return ops.GeoUnaryUnion(self).to_expr().name(\"union\")", "def union(feature):\n\n mp = MultiPolygon([Polygon([tuple(z) for z in y]) for y in feature.coord])\n union = ops.unary_union(mp)\n \n coords = [] \n if union.geom_type == 'Polygon':\n coords.append(np.array(union.exterior.coords))\n if union.geom_type == 'MultiPolygon':\n for x in union.geoms:\n coords.append(np.array(x.exterior.coords))\n\n new_feature = Feature()\n new_feature.coord = coords\n return new_feature", "def union(self, other): # -> BaseGeometry:\n ...", "def getMultiGeometry(geometry):\n geom = arcpy.Array()\n for feature in geometry:\n array = arcpy.Array()\n for point in feature:\n point = arcpy.Point(float(point[0]), float(point[1]))\n array.add(point)\n geom.add(array)\n return geom", "def union(self, right: GeoSpatialValue) -> GeoSpatialValue:\n return ops.GeoUnion(self, right).to_expr()", "def boundary_polygon_by_union(self):\n cell_geoms = [None]*self.Ncells()\n\n for i in self.valid_cell_iter():\n xy = self.nodes['x'][self.cell_to_nodes(i)]\n cell_geoms[i] = geometry.Polygon(xy)\n return ops.cascaded_union(cell_geoms)", "def Union(*args, **kwargs):\n return _gdi_.Region_Union(*args, **kwargs)", "def test_self_union():\n gdf = GeoDataFrame(\n {\n \"geometry\": GeoSeries(\n [\n Polygon([(0, 0), (0, 2), (2, 2), (2, 0)]),\n Polygon([(1, 1), (3, 1), (3, 3), (1, 3)]),\n Polygon([(1, 1), (1, 2), (2, 2), (2, 1)]),\n ]\n ),\n \"x\": [0, 1, 2],\n \"y\": [4.0, 8.0, 1.0],\n }\n )\n\n result_one = self_union(gdf)\n expected_one = GeoDataFrame(\n {\n \"geometry\": GeoSeries(\n [\n Polygon([(0, 0), (0, 2), (1, 2), (1, 1), (2, 1), (2, 0)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(2, 2), (1, 2), (1, 3), (3, 3), (3, 1), (2, 1)]),\n ],\n index=[(0,), (0, 1, 2), (0, 1, 2), (0, 1, 2), (1,)],\n ),\n \"x\": [0, 0, 1, 2, 1],\n \"y\": [4.0, 4.0, 8.0, 1.0, 8.0],\n }\n )\n assert_geodataframe_equal(result_one, expected_one)\n\n result_two = self_union(gdf, ratios=[\"y\"])\n expected_two = GeoDataFrame(\n {\n \"geometry\": GeoSeries(\n [\n Polygon([(0, 0), (0, 2), (1, 2), (1, 1), (2, 1), (2, 0)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(1, 2), (2, 2), (2, 1), (1, 1)]),\n Polygon([(2, 2), (1, 2), (1, 3), (3, 3), (3, 1), (2, 1)]),\n ],\n index=[(0,), (0, 1, 2), (0, 1, 2), (0, 1, 2), (1,)],\n ),\n \"x\": [0, 0, 1, 2, 1],\n \"y\": [3.0, 1.0, 2.0, 1.0, 6.0],\n }\n )\n assert_geodataframe_equal(result_two, expected_two)", "def exterior(self):\n if isinstance(self.substrates, MultiPolygon):\n geoms = self.substrates.geoms\n elif isinstance(self.substrates, Polygon):\n geoms = [self.substrates]\n else:\n raise RuntimeError(\"Uknown type '{}' of substrate geometry\".format(type(self.substrates)))\n polygons = [Polygon(p.exterior) for p in geoms]\n return unary_union(polygons)", "def unionFeatureCollections(*collections):\n features = []\n for collection in collections:\n if collection[\"type\"] == \"FeatureCollection\":\n collectionFeatures = collection[\"features\"]\n features.extend(collectionFeatures)\n if collection[\"type\"] == \"Feature\":\n features.append(collection)\n return 
geojson.FeatureCollection(features)", "def obtain_geometries(self):\n\n assert isinstance(self.ts, TS)\n\n \n symbol_dict = {\n 17: \"Cl\",\n 9: \"F\",\n 8: \"O\",\n 7: \"N\",\n 6: \"C\",\n 1: \"H\",\n }\n atoms = []\n\n parser = ccread(self.log_file, loglevel=logging.ERROR)\n\n for atom_num, coords in zip(parser.atomnos, parser.atomcoords[-1]):\n atoms.append(Atom(symbol=symbol_dict[atom_num], position=coords))\n \n self.ts._ase_molecule = Atoms(atoms)\n self.ts.update_coords_from(\"ase\")\n\n self.pre_geometry = self.ts.ase_molecule.copy()\n self.post_geometry = self.ts.ase_molecule.copy()\n\n for vib, displacements in self.vibrations:\n if vib < 0: # Finding the imaginary frequency\n self.post_geometry.arrays[\"positions\"] -= displacements\n\n return self.pre_geometry, self.post_geometry", "def union(self, StdVectorFst other):\n cdef StdVectorFst result = self.copy()\n result.set_union(other)\n return result", "def _eval_rewrite_as_Union(self, *sets, **kwargs):\n\n dj_union = S.EmptySet\n index = 0\n for set_i in sets:\n if isinstance(set_i, Set):\n cross = ProductSet(set_i, FiniteSet(index))\n dj_union = Union(dj_union, cross)\n index = index + 1\n return dj_union", "def union(self, *args):\n _ub = None\n for _obj in args:\n if _ub is None:\n _ub = self.bbox(_obj)\n else:\n _b = self.bbox(_obj)\n _x = np.sort(np.array([_b[:, 0], _ub[:, 0]]), axis=None)\n _y = np.sort(np.array([_b[:, 1], _ub[:, 1]]), axis=None)\n _ub = np.array([[_x[0], _y[0]], [_x[3], _y[3]]])\n return _ub", "def flatten_geoms(geoms):\n geometries = []\n for g in geoms:\n if hasattr(g, \"geoms\"):\n geometries.extend(flatten_geoms(g))\n else:\n geometries.append(g)\n return geometries", "def poly_merge(s0, label):\n if s0.geom_type == 'Polygon':\n return s0\n ff = copy(s0)\n try:\n nc = len(s0.geoms)\n buffer_size = 100.0\n\n while ff.geom_type == 'MultiPolygon' and len(ff.geoms) > 1 and buffer_size <= 500.0:\n tmp0 = copy(s0)\n tmp1 = tmp0.buffer(+buffer_size)\n tmp2 = tmp1.buffer(-buffer_size)\n ff = shapely.ops.cascaded_union((tmp2, s0))\n buffer_size += 50.0\n except ValueError:\n print('!!! 
Error in poly_merge')\n return ff", "def union(self, other):\n from sage.misc.misc import deprecation\n deprecation('The function union is replaced by convex_hull.', 'Sage Version 4.4.4')\n return self.convex_hull(other)", "def _full_structure_geometry(self):\n # Characterized borehole structures\n borehole_structures = self._characterize_shearzones()\n\n # Tunnel shearzone data\n tunnel_structures = self.tunnel_structures\n\n structures = pd.concat(\n [borehole_structures, tunnel_structures], ignore_index=True, sort=False\n )\n\n # Fill NaN-values in all columns to 0 except in column 'shearzone', for which we do nothing.\n structures = structures.fillna(\n value={**{s: 0 for s in borehole_structures}, **{\"shearzone\": np.nan}}\n )\n\n mapping = {\n \"x\": \"x\",\n \"y\": \"y\",\n \"z\": \"z\",\n \"depth\": \"depth\",\n \"upward_gradient\": \"upward_gradient\",\n \"azimuth\": \"azimuth_bh\",\n }\n borehole_to_global_coords(structures, **mapping)\n\n return structures", "def get_geometry(self):\n geometry = self._geometry\n for geo in self._holes:\n geometry = geometry.difference(geo) \n return geometry", "def to_geom(self):\n return [\n self.create_poly(bbox)\n for group_idx, indices, bbox in self.sindex.leaves()\n ]", "def union(self, other):\n\n return self.intersect(other, op=np.union1d)", "def union(self, rng_set: Union[Rangelike, Iterable[Rangelike]]) -> 'RangeSet':\n # convert to RangeSet\n rng_set = RangeSet._to_rangeset(rng_set)\n # simply merge lists\n return RangeSet(self._ranges + rng_set._ranges)", "def union_all(self, query):\n return self.union(query, True)", "def geometry():\n return Geometry()", "def union(bs: Iterable['BBox']) -> Optional['BBox']:\n b = BBox.spanning(\n chain.from_iterable([b.corners() for b in bs]))\n return b", "def get_geometries ( self, object_class_table, spatial_column, select_column, select_id ) :\n stmt = 'select sdo_util.to_wktgeometry(' + str(spatial_column) + ') from ' + str(object_class_table) + ' where ' + str(select_column) + ' = ' + str(select_id)\n self.oracle_cursor.execute( stmt )\n resultset = self.oracle_cursor.fetchall()\n return resultset", "def union(cls, *sels):\n\n out = cls('')\n tmp = set()\n for s in sels:\n if s.nonempty:\n tmp = tmp.union(s.expanded)\n if tmp:\n out._expanded = tuple(sorted(tmp))\n else:\n out._expanded = ((),)\n try:\n out._max_levels = max([s.max_levels for s in sels if s.nonempty])\n except ValueError:\n out._max_levels = 0\n return out", "def get_outer_rings(feature_or_geometry):\n mp = Geometry.get_multipolygon(feature_or_geometry)\n return [[t[0]] for t in mp]" ]
[ "0.73016036", "0.685745", "0.6835243", "0.6767258", "0.67599726", "0.64897877", "0.63472724", "0.634694", "0.60967267", "0.60640794", "0.60346997", "0.5986849", "0.59392625", "0.58512485", "0.5828639", "0.57456577", "0.5710652", "0.568876", "0.5681035", "0.56726", "0.5660516", "0.5659101", "0.5651132", "0.56470066", "0.5621594", "0.55977213", "0.55928206", "0.5583894", "0.5582574", "0.5552656" ]
0.7909888
0
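A short sketch of unary_union on two overlapping boxes, under the same assumption that arctern's GeoSeries can be built from WKT strings; the expected result is a single aggregated geometry.

from arctern import GeoSeries

boxes = GeoSeries([
    "POLYGON ((0 0, 0 2, 2 2, 2 0, 0 0))",
    "POLYGON ((1 1, 1 3, 3 3, 3 1, 1 1))",
])
merged = boxes.unary_union()   # aggregates the whole series
print(len(merged))             # expected: 1, a single geometry covering both boxes
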
Returns the Hausdorff distance between each geometry and other. This is a measure of how similar or dissimilar 2 geometries are.
def hausdorff_distance(self, other):
    return _binary_op(arctern.ST_HausdorffDistance, self, other)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hausdorff_distance(self, other):\n ...", "def hausdorff_distance(image1, image2):\n image1_int = image1.clone(\"unsigned int\")\n image2_int = image2.clone(\"unsigned int\")\n\n libfn = utils.get_lib_fn(\"hausdorffDistance%iD\" % image1_int.dimension)\n d = libfn(image1_int.pointer, image2_int.pointer)\n\n return d", "def hausdorffDistance(self, id1, id2):\r\n # productive #math\r\n if frequent: profprint()\r\n node1 = slicer.mrmlScene.GetNodeByID(id1)\r\n polydata1 = node1.GetPolyData()\r\n node2 = slicer.mrmlScene.GetNodeByID(id2)\r\n polydata2 = node2.GetPolyData()\r\n nb1 = polydata1.GetNumberOfPoints()\r\n nb2 = polydata2.GetNumberOfPoints()\r\n minimum = None\r\n maximum = None\r\n JJ, jj = None, None\r\n II, ii = None, None\r\n pt1 = [0, 0, 0]\r\n pt2 = [0, 0, 0]\r\n polydata1.GetPoint(1, pt1)\r\n polydata1.GetPoint(nb1 - 1, pt2)\r\n minVal1 = min(pt1[2], pt2[2])\r\n maxVal1 = max(pt1[2], pt2[2])\r\n pt1 = [0, 0, 0]\r\n pt2 = [0, 0, 0]\r\n pt1b, pt2b = None, None\r\n polydata2.GetPoint(1, pt1)\r\n polydata2.GetPoint(nb2 - 1, pt2)\r\n minVal2 = min(pt1[2], pt2[2])\r\n maxVal2 = max(pt1[2], pt2[2])\r\n valueBase = max(minVal1, minVal2)\r\n valueTip = min(maxVal1, maxVal2)\r\n\r\n # truncate polydatas\r\n truncatedPolydata1 = self.clipPolyData(node1, valueBase)\r\n truncatedPolydata2 = self.clipPolyData(node2, valueBase)\r\n\r\n cellId = vtk.mutable(1)\r\n subid = vtk.mutable(1)\r\n dist = vtk.mutable(1)\r\n cl2 = vtk.vtkCellLocator()\r\n cl2.SetDataSet(truncatedPolydata2)\r\n cl2.BuildLocator()\r\n # Hausforff 1 -> 2\r\n minima = []\r\n for i in range(int(nb1 / float(10))):\r\n pt = [0, 0, 0]\r\n polydata1.GetPoint(10 * i, pt)\r\n closest = [0, 0, 0]\r\n cl2.FindClosestPoint(pt, closest, cellId, subid, dist)\r\n if abs(closest[2] - pt[2]) <= 1:\r\n minima.append(self.distance(pt, closest))\r\n else:\r\n minima.append(0)\r\n hausdorff12 = max(minima)\r\n\r\n # Hausforff 2 -> 1\r\n minima = []\r\n cl1 = vtk.vtkCellLocator()\r\n cl1.SetDataSet(truncatedPolydata1)\r\n cl1.BuildLocator()\r\n for i in range(int(nb2 / float(10))):\r\n pt = [0, 0, 0]\r\n polydata2.GetPoint(10 * i, pt)\r\n closest = [0, 0, 0]\r\n cl1.FindClosestPoint(pt, closest, cellId, subid, dist)\r\n if abs(closest[2] - pt[2]) <= 1:\r\n minima.append(self.distance(pt, closest))\r\n else:\r\n minima.append(0)\r\n hausdorff21 = max(minima)\r\n return max(hausdorff12, hausdorff21)", "def hamming_dist(self):\r\n distance = 0\r\n distance = abs(len(self.s1) - len(self.s2))\r\n distance += sum(i1 != i2 for i1,i2 in zip(self.s2,self.s1))\r\n return distance", "def hausdorffDistance(self,fiber1,fiber2):\n polyA = fiber1.GetPolyData()\n polyB = fiber2.GetPolyData()\n\n locA = vtk.vtkMergePoints()\n locB = vtk.vtkMergePoints()\n\n locA.SetDataSet(polyA)\n locB.SetDataSet(polyB)\n\n locs = (locA,locB)\n for loc in locs:\n loc.AutomaticOn()\n loc.BuildLocator()\n\n ptsA = polyA.GetPoints()\n ptsB = polyB.GetPoints()\n\n rangeA = ptsA.GetNumberOfPoints()\n rangeB = ptsB.GetNumberOfPoints()\n\n maxd = 0.0\n maxd1 = 0.0\n avgd = 0.0\n avgd1 = 0.0\n\n distanceA = vtk.vtkFloatArray()\n distanceA.SetName(\"Distance\")\n for i in range(rangeA):\n pt = ptsA.GetPoint(i)\n bid = locB.FindClosestPoint(pt)\n ptb = ptsB.GetPoint(bid)\n d = self.pointDistance(pt,ptb)\n distanceA.InsertNextValue(d)\n avgd += d\n if d > maxd:\n maxd = d\n avgd = avgd / rangeA\n\n distanceB = vtk.vtkFloatArray()\n distanceB.SetName(\"Distance\")\n for i in range(rangeB):\n pt = ptsB.GetPoint(i)\n bid = locA.FindClosestPoint(pt)\n ptb = ptsA.GetPoint(bid)\n d = 
self.pointDistance(pt,ptb)\n distanceB.InsertNextValue(d)\n avgd1 += d\n if d > maxd1:\n maxd1 = d\n avgd1 = avgd1 / rangeB\n\n polyA.GetPointData().SetScalars(distanceA)\n polyB.GetPointData().SetScalars(distanceB)\n\n return max(maxd,maxd1)", "def hausdorffDistance13(self, id1, id2):\r\n # productive #math\r\n if frequent: profprint()\r\n node1 = slicer.mrmlScene.GetNodeByID(id1)\r\n polydata1 = node1.GetPolyData()\r\n node2 = slicer.mrmlScene.GetNodeByID(id2)\r\n polydata2 = node2.GetPolyData()\r\n nb1 = polydata1.GetNumberOfPoints()\r\n nb2 = polydata2.GetNumberOfPoints()\r\n minimum = None\r\n maximum = None\r\n JJ, jj = None, None\r\n II, ii = None, None\r\n pt1 = [0, 0, 0]\r\n pt2 = [0, 0, 0]\r\n polydata1.GetPoint(1, pt1)\r\n polydata1.GetPoint(nb1 - 1, pt2)\r\n minVal1 = min(pt1[2], pt2[2])\r\n maxVal1 = max(pt1[2], pt2[2])\r\n pt1 = [0, 0, 0]\r\n pt2 = [0, 0, 0]\r\n pt1b, pt2b = None, None\r\n polydata2.GetPoint(1, pt1)\r\n polydata2.GetPoint(nb2 - 1, pt2)\r\n minVal2 = min(pt1[2], pt2[2])\r\n maxVal2 = max(pt1[2], pt2[2])\r\n valueBase = max(minVal1, minVal2)\r\n valueTip = min(maxVal1, maxVal2)\r\n cellId = vtk.mutable(1)\r\n subid = vtk.mutable(1)\r\n dist = vtk.mutable(1)\r\n cl2 = vtk.vtkCellLocator()\r\n cl2.SetDataSet(polydata2)\r\n cl2.BuildLocator()\r\n # Hausforff 1 -> 2\r\n minima = []\r\n for i in range(int(nb1 / float(100))):\r\n pt = [0, 0, 0]\r\n polydata1.GetPoint(100 * i, pt)\r\n closest = [0, 0, 0]\r\n cl2.FindClosestPoint(pt, closest, cellId, subid, dist)\r\n if abs(closest[2] - pt[2]) <= 1:\r\n minima.append(self.distance(pt, closest))\r\n else:\r\n minima.append(0)\r\n hausdorff12 = max(minima)\r\n\r\n # Hausforff 2 -> 1\r\n minima = []\r\n cl1 = vtk.vtkCellLocator()\r\n cl1.SetDataSet(polydata1)\r\n cl1.BuildLocator()\r\n for i in range(int(nb2 / float(10))):\r\n pt = [0, 0, 0]\r\n polydata2.GetPoint(10 * i, pt)\r\n closest = [0, 0, 0]\r\n cl1.FindClosestPoint(pt, closest, cellId, subid, dist)\r\n if abs(closest[2] - pt[2]) <= 1:\r\n minima.append(self.distance(pt, closest))\r\n else:\r\n minima.append(0)\r\n hausdorff21 = max(minima)\r\n return max(hausdorff12, hausdorff21)", "def Hausdorff_distance(clust1, clust2, forward, dir):\n if forward == None:\n return max(Hausdorff_distance(clust1,clust2,True,dir),Hausdorff_distance(clust1,clust2,False,dir))\n else:\n clstart, clend = (clust1,clust2) if forward else (clust2,clust1)\n dx, dy = dir if forward else (-dir[0],-dir[1])\n return sum([min([Dist((p1[0]+dx,p1[1]+dy),p2) for p2 in clend]) for p1 in clstart])/len(clstart)", "def hausdorffDistance(self,id1,id2):\n #productive #math\n profprint()\n node1 = slicer.mrmlScene.GetNodeByID(id1)\n polydata1=node1.GetPolyData()\n node2 = slicer.mrmlScene.GetNodeByID(id2)\n polydata2=node2.GetPolyData()\n nb1 = polydata1.GetNumberOfPoints()\n nb2 = polydata2.GetNumberOfPoints()\n minimum=None\n maximum=None\n JJ,jj=None,None\n II,ii=None,None\n pt1=[0,0,0]\n pt2=[0,0,0]\n polydata1.GetPoint(1,pt1)\n polydata1.GetPoint(nb1-1,pt2)\n minVal1=min(pt1[2],pt2[2])\n maxVal1=max(pt1[2],pt2[2])\n pt1=[0,0,0]\n pt2=[0,0,0]\n pt1b,pt2b=None,None\n polydata2.GetPoint(1,pt1)\n polydata2.GetPoint(nb2-1,pt2)\n minVal2 = min(pt1[2],pt2[2])\n maxVal2 = max(pt1[2],pt2[2])\n valueBase=max(minVal1,minVal2)\n valueTip=min(maxVal1,maxVal2)\n\n # truncate polydatas\n truncatedPolydata1 = self.clipPolyData(node1,valueBase)\n truncatedPolydata2 = self.clipPolyData(node2,valueBase)\n\n cellId=vtk.mutable(1)\n subid=vtk.mutable(1)\n dist=vtk.mutable(1)\n cl2=vtk.vtkCellLocator()\n 
cl2.SetDataSet(truncatedPolydata2)\n cl2.BuildLocator()\n # Hausforff 1 -> 2\n minima=[]\n for i in range(int(nb1/float(10))):\n pt=[0,0,0]\n polydata1.GetPoint(10*i,pt)\n closest=[0,0,0]\n cl2.FindClosestPoint(pt,closest,cellId,subid,dist)\n if abs(closest[2]-pt[2])<=1:\n minima.append(self.distance(pt,closest))\n else:\n minima.append(0)\n hausdorff12 = max(minima)\n \n # Hausforff 2 -> 1\n minima=[]\n cl1=vtk.vtkCellLocator()\n cl1.SetDataSet(truncatedPolydata1)\n cl1.BuildLocator()\n for i in range(int(nb2/float(10))):\n pt=[0,0,0]\n polydata2.GetPoint(10*i,pt)\n closest=[0,0,0]\n cl1.FindClosestPoint(pt,closest,cellId,subid,dist)\n if abs(closest[2]-pt[2])<=1:\n minima.append(self.distance(pt,closest))\n else:\n minima.append(0)\n hausdorff21 = max(minima)\n return max(hausdorff12,hausdorff21)", "def compute_feature_distances(features1: np.ndarray, \r\n features2: np.ndarray) -> np.ndarray:\r\n #broadcasting trick\r\n a = features1[:, np.newaxis, :]\r\n b = features2[np.newaxis, :, :]\r\n \r\n return np.linalg.norm( (a-b), axis=-1)", "def structural_hamming_distance(self,\n other,\n penalty_edge_mismatch_func=None):\n\n edges_1 = self.edges\n edges_2 = other.edges\n if penalty_edge_mismatch_func is None:\n penalty_edge_mismatch_func = GraphViaEdges.compute_penalty\n\n if set(edges_1.keys()) != set(edges_2.keys()):\n msg = 'The Structural Hamming Distances cannot be computed : the '\n msg += 'graphs cannot be compared.'\n raise GraphsCannotBeCompared(msg)\n\n shd = 0\n\n for key in edges_1.keys():\n\n shd += penalty_edge_mismatch_func(\n edge_1=edges_1[key],\n edge_2=edges_2[key]\n )\n\n return shd", "def feature_distance(feat1, feat2, eps=1e-7, sqrt=True):\n diff = torch.pow((feat1 - feat2), 2).sum(-1)\n if sqrt:\n diff = (diff + eps).sqrt()\n return diff", "def GetDist(feature_1, feature_2):\n return np.linalg.norm(feature_1 - feature_2)", "def calculate_distance(self, other):\n return math.sqrt((self.center[0] - other.center[0]) ** 2 + (self.center[1] - other.center[1]) ** 2)", "def dist(self, one, two):\n return np.sqrt((one[0] - two[0]) ** 2 + (one[1] - two[1]) ** 2)", "def hellinger_distance(x, y):\n assert (x.dtype == np.float64 and y.dtype == np.float64) or (\n x.dtype == np.float32 and y.dtype == np.float32)\n assert (np.all(x.sum(1) != 0.) and np.all(y.sum(1) != 0.))\n x /= x.sum(1).reshape(x.shape[0], 1)\n y /= y.sum(1).reshape(y.shape[0], 1)\n x = np.sqrt(x)\n y = np.sqrt(y)\n # x (120, 40), y (100, 40), H(x,y) (120, 100)\n xx = np.tile(x, (y.shape[0], 1, 1)).transpose((1, 0, 2))\n yy = np.tile(y, (x.shape[0], 1, 1))\n xx_yy = xx - yy\n res = np.sqrt(np.sum(xx_yy ** 2, axis=-1))\n return np.float64((1. 
/ np.sqrt(2)) * res)", "def calculate(self):\n\n distance_filter = sitk.HausdorffDistanceImageFilter()\n distance_filter.Execute(self.ground_truth, self.segmentation)\n return distance_filter.GetHausdorffDistance()", "def directed_Hausdorff_hyperbox(b1,b2): \n return max(0,np.max(np.hstack((b1.u-b2.u,b2.l-b1.l))))", "def hamming_dist(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.HammingDistance()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n\n # Call the function to compute the distance\n return measure.get_raw_score(s1, s2)", "def distance(self, other):\n x_diff_sq = (self.x-other.x)**2\n y_diff_sq = (self.y-other.y)**2\n return (x_diff_sq + y_diff_sq)**0.5", "def distance_to(self, other):\n p_self, p_other = self.closest_points(other)\n return np.linalg.norm(p_self - p_other)", "def _object_distance(self, object1, object2):\n return np.linalg.norm(np.array(object1) - np.array(object2))", "def hausdorffDistance13(self,id1,id2):\n #productive #math\n profprint()\n node1 = slicer.mrmlScene.GetNodeByID(id1)\n polydata1=node1.GetPolyData()\n node2 = slicer.mrmlScene.GetNodeByID(id2)\n polydata2=node2.GetPolyData()\n nb1 = polydata1.GetNumberOfPoints()\n nb2 = polydata2.GetNumberOfPoints()\n minimum=None\n maximum=None\n JJ,jj=None,None\n II,ii=None,None\n pt1=[0,0,0]\n pt2=[0,0,0]\n polydata1.GetPoint(1,pt1)\n polydata1.GetPoint(nb1-1,pt2)\n minVal1=min(pt1[2],pt2[2])\n maxVal1=max(pt1[2],pt2[2])\n pt1=[0,0,0]\n pt2=[0,0,0]\n pt1b,pt2b=None,None\n polydata2.GetPoint(1,pt1)\n polydata2.GetPoint(nb2-1,pt2)\n minVal2 = min(pt1[2],pt2[2])\n maxVal2 = max(pt1[2],pt2[2])\n valueBase=max(minVal1,minVal2)\n valueTip=min(maxVal1,maxVal2)\n cellId=vtk.mutable(1)\n subid=vtk.mutable(1)\n dist=vtk.mutable(1)\n cl2=vtk.vtkCellLocator()\n cl2.SetDataSet(polydata2)\n cl2.BuildLocator()\n # Hausforff 1 -> 2\n minima=[]\n for i in range(int(nb1/float(100))):\n pt=[0,0,0]\n polydata1.GetPoint(100*i,pt)\n closest=[0,0,0]\n cl2.FindClosestPoint(pt,closest,cellId,subid,dist)\n if abs(closest[2]-pt[2])<=1:\n minima.append(self.distance(pt,closest))\n else:\n minima.append(0)\n hausdorff12 = max(minima)\n \n # Hausforff 2 -> 1\n minima=[]\n cl1=vtk.vtkCellLocator()\n cl1.SetDataSet(polydata1)\n cl1.BuildLocator()\n for i in range(int(nb2/float(10))):\n pt=[0,0,0]\n polydata2.GetPoint(10*i,pt)\n closest=[0,0,0]\n cl1.FindClosestPoint(pt,closest,cellId,subid,dist)\n if abs(closest[2]-pt[2])<=1:\n minima.append(self.distance(pt,closest))\n else:\n minima.append(0)\n hausdorff21 = max(minima)\n return max(hausdorff12,hausdorff21)", "def distance(self, other_cluster):\n vert_dist = self._vert_center - other_cluster.vert_center()\n horiz_dist = self._horiz_center - other_cluster.horiz_center()\n return math.sqrt(vert_dist ** 2 + horiz_dist ** 2)", "def distance(self, other_cluster):\n vert_dist = self._vert_center - other_cluster.vert_center()\n horiz_dist = self._horiz_center - other_cluster.horiz_center()\n return math.sqrt(vert_dist ** 2 + horiz_dist ** 2)", "def distance(self, other):\n x, y, z = (self.x-other.x), (self.y-other.y), (self.z-other.z)\n return math.sqrt(x**2 + y**2 + z**2)", "def distance(self, other):\n xd, yd = self.x-other.x, self.y-other.y\n return math.sqrt(xd**2 + yd**2)", "def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))", "def distance(self, other):\n return float(abs(self.x - other.x) + abs(self.y - other.y))", "def 
hamming_distance(h1, h2):\n b1 = bitarray.bitarray()\n b1.frombytes(h1)\n b2 = bitarray.bitarray()\n b2.frombytes(h2)\n return bitarray.bitdiff(b1, b2)", "def hamming_distance(s1, s2):\n assert(len(s1) == len(s2))\n return np.sum([1 if c1 != c2 else 0 for c1, c2 in zip(s1, s2)])" ]
[ "0.759283", "0.7179202", "0.7101328", "0.689909", "0.6858431", "0.6837336", "0.67604476", "0.6683294", "0.66194665", "0.66013527", "0.6569919", "0.65585876", "0.6527269", "0.6473583", "0.6433952", "0.6392746", "0.63679177", "0.63628966", "0.6316012", "0.63065404", "0.6305005", "0.6286384", "0.628265", "0.628265", "0.6252878", "0.6240276", "0.6234582", "0.6229081", "0.62040716", "0.6190958" ]
0.76239663
0
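An element-wise sketch for the hausdorff_distance accessor above (one distance per aligned geometry pair); the WKT inputs and the arctern import are assumptions.

from arctern import GeoSeries

a = GeoSeries(["LINESTRING (0 0, 1 0)", "POINT (0 0)"])
b = GeoSeries(["LINESTRING (0 1, 1 1)", "POINT (3 4)"])
distances = a.hausdorff_distance(b)   # a[0] vs b[0], a[1] vs b[1]
print(distances)                      # expected values: 1.0 and 5.0
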
Transform each arctern GeoSeries to GeoPandas GeoSeries.
def to_geopandas(self):
    import geopandas
    import shapely
    return geopandas.GeoSeries(self.apply(lambda x: shapely.wkb.loads(x) if x is not None else None), crs=self.crs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def raster_to_geodataframe(*a, **kw) -> gpd.GeoDataFrame:\n kw[\"geo\"] = True\n return raster_to_dataframe(*a, **kw)", "def convert_to_geopandas(df):\n df['geometry'] = [Point(xy) for xy in zip(df.latitude, df.longitude)]\n crs = {'init': 'epsg:4326'}\n df = gpd.GeoDataFrame(df, crs=crs, geometry=df['geometry'])\n\n return df", "def build_geoseries(self, dataframe):\n geo_list = []\n with click.progressbar(dataframe.iterrows(), label='Pulling site plans and geographic title data', length=len(dataframe)) as d:\n for index, row in d:\n geo_list.append(self.map_property(row['linc']))\n\n geo_series = gpd.GeoSeries([Point(mark) for mark in geo_list], index=dataframe.index)\n\n return geo_series", "def from_shapely(data):\n\n from spatialpandas import GeoDataFrame, GeoSeries\n from shapely.geometry.base import BaseGeometry\n\n if not data:\n pass\n elif all(isinstance(d, BaseGeometry) for d in data):\n data = GeoSeries(data).to_frame()\n elif all(isinstance(d, dict) and 'geometry' in d and isinstance(d['geometry'], BaseGeometry)\n for d in data):\n new_data = {col: [] for col in data[0]}\n for d in data:\n for col, val in d.items():\n new_data[col].append(val if isscalar(val) or isinstance(val, BaseGeometry) else np.asarray(val))\n new_data['geometry'] = GeoSeries(new_data['geometry'])\n data = GeoDataFrame(new_data)\n return data", "def vectorize(self, connectivity=8):\n data = self._obj.values\n data_isnan = True if self.nodata is None else np.isnan(self.nodata)\n mask = ~np.isnan(data) if data_isnan else data != self.nodata\n feats_gen = features.shapes(\n data,\n mask=mask,\n transform=self.transform,\n connectivity=connectivity,\n )\n feats = [\n {\"geometry\": geom, \"properties\": {\"value\": idx}}\n for geom, idx in list(feats_gen)\n ]\n if len(feats) == 0: # return empty GeoDataFrame\n return gpd.GeoDataFrame()\n gdf = gpd.GeoDataFrame.from_features(feats, crs=self.crs)\n gdf.index = gdf.index.astype(self._obj.dtype)\n return gdf", "def from_geopandas(cls, ga):\n return super(PointArray, cls).from_geopandas(ga)", "def transform_series(obj):\n vals = obj.values\n return transform_array(vals)", "def to_real_series(self, data: pd.Series) -> pd.Series:\n ...", "def vectorize(df):\n\tt = calc_affine(df)\n\ta = df.values\n\t# zeros an nan are left open space, means mask = True!\n\tmaske = (df != 0).fillna(True)\n\tgdf = gpd.GeoDataFrame()\n\tgeoms = []\n\tvalue = []\n\tfor s,v in rasterio.features.shapes(a,transform=t,mask=maske.values):\n\t\tgeoms.append(shape(s))\n\t\tvalue.append(v)\n\tgdf['geometry'] = geoms\n\tgdf = gdf.set_geometry('geometry')\n\tgdf['val']=value\n\treturn gdf", "def _reindex_spatial_data_to_regions(ds, df):\n\n # use vectorized indexing in xarray >= 0.10\n if LooseVersion(xr.__version__) > LooseVersion(\"0.9.999\"):\n\n lon_indexer = xr.DataArray(df.lon.values, dims=(\"reshape_index\",))\n lat_indexer = xr.DataArray(df.lat.values, dims=(\"reshape_index\",))\n\n return ds.sel(lon=lon_indexer, lat=lat_indexer)\n\n else:\n res = ds.sel_points(\"reshape_index\", lat=df.lat.values, lon=df.lon.values)\n\n return res", "def makeGeoDf(self, arr: dict):\n geometry_points = [Point(x, y) for x, y in zip(arr[\"X\"], arr[\"Y\"])]\n elevetions = arr[\"Z\"]\n df = gpd.GeoDataFrame(columns=[\"elevation\", \"geometry\"])\n df['elevation'] = elevetions\n df['geometry'] = geometry_points\n df = df.set_geometry(\"geometry\")\n df.set_crs(self.output_epsg, inplace=True)\n return df", "def transform_geometries(datasource, src_epsg, dst_epsg):\n # Part 1\n src_srs = 
osr.SpatialReference()\n src_srs.ImportFromEPSG(src_epsg)\n dst_srs = osr.SpatialReference()\n dst_srs.ImportFromEPSG(dst_epsg)\n transformation = osr.CoordinateTransformation(src_srs, dst_srs)\n layer = datasource.GetLayerByIndex(0)\n \n # Part 2\n geoms = []\n layer.ResetReading()\n for feature in layer:\n geom = feature.GetGeometryRef().Clone()\n geom.Transform(transformation)\n geoms.append(geom)\n return geoms", "def list_to_gdf (lis):\r\n gdf = gpd.GeoDataFrame(lis)\r\n # rename the column \r\n gdf.rename(columns ={0:\"geometry\"},inplace=True)\r\n # define crs to dataframe\r\n gdf.crs = {'init' :'epsg:{}'.format(4326)} \r\n gdf = gdf.to_crs(epsg = 4326)\r\n \r\n return gdf", "def transform_geopandas(gdf, from_crs=None, to_crs=wgs84, inplace=False):\n from shapely.ops import transform\n import geopandas as gpd\n\n if from_crs is None:\n from_crs = check_crs(gdf.crs)\n else:\n from_crs = check_crs(from_crs)\n to_crs = check_crs(to_crs)\n\n if inplace:\n out = gdf\n else:\n out = gdf.copy()\n\n if isinstance(to_crs, pyproj.Proj) and isinstance(from_crs, pyproj.Proj):\n project = partial(transform_proj, from_crs, to_crs)\n elif isinstance(to_crs, Grid):\n project = partial(to_crs.transform, crs=from_crs)\n elif isinstance(from_crs, Grid):\n project = partial(from_crs.ij_to_crs, crs=to_crs)\n else:\n raise NotImplementedError()\n\n # Do the job and set the new attributes\n result = out.geometry.apply(lambda geom: transform(project, geom))\n result.__class__ = gpd.GeoSeries\n if isinstance(to_crs, pyproj.Proj):\n to_crs = to_crs.srs\n elif isinstance(to_crs, Grid):\n to_crs = None\n result.crs = to_crs\n out.geometry = result\n out.crs = to_crs\n out['min_x'] = [g.bounds[0] for g in out.geometry]\n out['max_x'] = [g.bounds[2] for g in out.geometry]\n out['min_y'] = [g.bounds[1] for g in out.geometry]\n out['max_y'] = [g.bounds[3] for g in out.geometry]\n return out", "def to_crs(self, crs):\n if crs is None:\n raise ValueError(\"Can not transform with invalid crs\")\n if self.crs is None:\n raise ValueError(\"Can not transform geometries without crs. 
Set crs for this GeoSeries first.\")\n if self.crs == crs:\n return self\n return _unary_geo(arctern.ST_Transform, self, self.crs, crs, crs=crs)", "def create_geodata(x):\n list_len = len(x)\n pilot_log = pd.concat(x[i][['time','Cn0DbHz','svid','geometry']] for i in range(list_len))\n \n return pilot_log", "def to_spatialpandas(data, xdim, ydim, columns=[], geom='point'):\n from spatialpandas import GeoSeries, GeoDataFrame\n from spatialpandas.geometry import (\n Point, Line, Polygon, Ring, LineArray, PolygonArray, PointArray,\n MultiLineArray, MultiPolygonArray, MultiPointArray, RingArray\n )\n from ...element import Polygons\n poly = any(Polygons._hole_key in d for d in data) or geom == 'Polygon'\n if poly:\n geom_type = Polygon\n single_array, multi_array = PolygonArray, MultiPolygonArray\n elif geom == 'Line':\n geom_type = Line\n single_array, multi_array = LineArray, MultiLineArray\n elif geom == 'Ring':\n geom_type = Ring\n single_array, multi_array = RingArray, MultiLineArray\n else:\n geom_type = Point\n single_array, multi_array = PointArray, MultiPointArray\n\n array_type = None\n hole_arrays, geom_arrays = [], []\n for geom in data:\n geom = dict(geom)\n if xdim not in geom or ydim not in geom:\n raise ValueError('Could not find geometry dimensions')\n xs, ys = geom.pop(xdim), geom.pop(ydim)\n xscalar, yscalar = isscalar(xs), isscalar(ys)\n if xscalar and yscalar:\n xs, ys = np.array([xs]), np.array([ys])\n elif xscalar:\n xs = np.full_like(ys, xs)\n elif yscalar:\n ys = np.full_like(xs, ys)\n geom_array = np.column_stack([xs, ys])\n\n if geom_type in (Polygon, Ring):\n geom_array = ensure_ring(geom_array)\n\n splits = np.where(np.isnan(geom_array[:, :2].astype('float')).sum(axis=1))[0]\n split_geoms = np.split(geom_array, splits+1) if len(splits) else [geom_array]\n split_holes = geom.pop(Polygons._hole_key, None)\n if split_holes is not None:\n if len(split_holes) != len(split_geoms):\n raise DataError('Polygons with holes containing multi-geometries '\n 'must declare a list of holes for each geometry.',\n SpatialPandasInterface)\n else:\n split_holes = [[ensure_ring(np.asarray(h)) for h in hs] for hs in split_holes]\n\n geom_arrays.append(split_geoms)\n hole_arrays.append(split_holes)\n if geom_type is Point:\n if len(splits) > 1 or any(len(g) > 1 for g in split_geoms):\n array_type = multi_array\n elif array_type is None:\n array_type = single_array\n elif len(splits):\n array_type = multi_array\n elif array_type is None:\n array_type = single_array\n\n converted = defaultdict(list)\n for geom, arrays, holes in zip(data, geom_arrays, hole_arrays):\n parts = []\n for i, g in enumerate(arrays):\n if i != (len(arrays)-1):\n g = g[:-1]\n if len(g) < (3 if poly else 2) and geom_type is not Point:\n continue\n if poly:\n parts.append([])\n subparts = parts[-1]\n else:\n subparts = parts\n subparts.append(g[:, :2])\n if poly and holes is not None:\n subparts += [np.array(h) for h in holes[i]]\n\n for c, v in geom.items():\n converted[c].append(v)\n\n if array_type is PointArray:\n parts = parts[0].flatten()\n elif array_type is MultiPointArray:\n parts = np.concatenate([sp.flatten() for sp in parts])\n elif array_type is multi_array:\n parts = [[ssp.flatten() for ssp in sp] if poly else sp.flatten() for sp in parts]\n else:\n parts = [np.asarray(sp).flatten() for sp in parts[0]] if poly else parts[0].flatten()\n converted['geometry'].append(parts)\n\n if converted:\n geometries = converted['geometry']\n if array_type is PointArray:\n geometries = np.concatenate(geometries)\n 
geom_array = array_type(geometries)\n if poly:\n geom_array = geom_array.oriented()\n converted['geometry'] = GeoSeries(geom_array)\n else:\n converted['geometry'] = GeoSeries(single_array([]))\n return GeoDataFrame(converted, columns=['geometry']+columns)", "def prepare_arrays(series: pd.Series) -> np.array:\n\n series = series.map(string_to_array)\n\n # transform the array of array into a 2d-array\n return np.stack(np.array(series.array))", "def __geo_interface__(self):\r\n if HASARCPY:\r\n template = {\r\n \"type\": \"FeatureCollection\",\r\n \"features\": []\r\n }\r\n geom_type = self.geometry_type\r\n if geom_type.lower() == \"point\":\r\n geom_type = \"Point\"\r\n elif geom_type.lower() == \"polyline\":\r\n geom_type = \"LineString\"\r\n elif geom_type.lower() == \"polygon\":\r\n geom_type = \"Polygon\"\r\n df_copy = self.copy(deep=True)\r\n df_copy['geom_json'] = self.geometry.JSON\r\n df_copy['SHAPE'] = df_copy['geom_json']\r\n del df_copy['geom_json']\r\n for index, row in df_copy.iterrows():\r\n geom = row['SHAPE']\r\n del row['SHAPE']\r\n template['features'].append(\r\n {\"type\" : geom_type,\r\n \"geometry\" : pd.io.json.loads(geom),\r\n \"attributes\":row}\r\n )\r\n return pd.io.json.dumps(template)", "def pull_data_from_geodb(sids, verbose=True):\n api_key = \"f125f4c130e61d9f4ad5874aadfe07ff\"\n geodb = dpf.data(api_key).geo\n\n df_a = pd.DataFrame() # for annual features\n df_m = pd.DataFrame() # for monthly features\n\n # gets meta data for each feature from its series id, \n # then uses meta data to pull all of the feature's regional data\n for sid in sids:\n print(f\"Series ID: {sid}\")\n\n print(\" > Collecting meta data...\")\n meta = geodb['meta'](series_id=sid).iloc[0, :]\n\n for col in ['min_date', 'max_date']:\n meta[col] = pd.to_datetime(meta[col]).date()\n\n print(\" > Pulling dataframe...\")\n data = geodb['data'](\n series_group=meta['series_group'],\n date=meta['max_date'],\n start_date=meta['min_date'],\n region_type=meta['region_type'],\n units=meta['units'],\n frequency=meta['frequency'],\n season=meta['season']\n )\n # swap series id for its feature name (change colname later)\n data['series_id'] = meta['title']\n\n if verbose:\n print(f\" > Title: {meta['title']}\")\n print(f\" > Min date: {meta['min_date']}\")\n print(f\" > Max date: {meta['max_date']}\")\n print(f\" > Units: {meta['units']}\")\n print(f\" > Season: {meta['season']}\")\n print(f\" > Frequency: {meta['frequency']}\")\n print(f\" > Region type: {meta['region_type']}\")\n\n print(f\" > Appending to df_{meta['frequency']}...\")\n if meta['frequency'] == 'a':\n df_a = df_a.append(data)\n else:\n df_m = df_m.append(data)\n continue\n\n df_a = Wrangler.transform_fresh_columns(df_a)\n df_m = Wrangler.transform_fresh_columns(df_m)\n\n return df_a, df_m", "def disagg(vec:gpd.GeoDataFrame):\n\t\t# Split GeometryCollections\n\t\tno_coll = []\n\t\tfor i, row in vec.iterrows():\n\t\t\tgeom = row.geometry\n\t\t\tif geom.type == 'GeometryCollection':\n\t\t\t\tfor part in geom:\n\t\t\t\t\trow2 = row.copy()\n\t\t\t\t\trow2.geometry = part\n\t\t\t\t\tno_coll.append(row2)\n\n\t\t\telse:\n\t\t\t\t\tno_coll.append(row) \n\n\t\t# Split Multi geomries\n\t\tres = []\n\t\tfor row in no_coll:\n\t\t\tgeom = row.geometry\n\t\t\tif geom.type.startswith('Multi'):\n\t\t\t\tfor part in geom:\n\t\t\t\t\trow2 = row.copy()\n\t\t\t\t\trow2.geometry = part\n\t\t\t\t\tres.append(row2)\n\t\t\telse:\n\t\t\t\t\tres.append(row)\n\n\t\treturn gpd.GeoDataFrame(res, crs=vec.crs).reset_index(drop=True)", "def from_geopandas(cls, 
data):\n\n import geopandas as gpd\n import shapely.wkb\n if not isinstance(data, gpd.GeoSeries):\n raise TypeError(f\"data must be {gpd.GeoSeries}, got {type(data)}\")\n\n if data.crs is not None:\n crs = data.crs.to_authority() or data.crs.source_crs.to_authority()\n crs = crs[0] + ':' + crs[1]\n else:\n crs = None\n\n def f(x):\n if x is None:\n return x\n return shapely.wkb.dumps(x)\n\n return cls(data.apply(f), crs=crs)", "def create_airports(data):\n \n airport_cities = pd.DataFrame(data)\n geometry = [Point(xy) for xy in zip(airport_cities.lon, airport_cities.lat)]\n airport_cities = airport_cities.drop(['lon', 'lat'], axis=1)\n crs = {'init': 'epsg:4326'}\n geo_airport_cities = gpd.GeoDataFrame(airport_cities, crs=crs, geometry=geometry)\n return geo_airport_cities", "def as_series(self, arraylike: Iterable) -> pd.Series:\n return pd.Series(arraylike, index=self.data.index)", "def convert_coordinates(self, coordinates):\n return np.array(zip(*self.basemap(*zip(*coordinates))))", "def convert_GeoPandas_to_Bokeh_format(gdf):\r\n gdf_new = gdf.drop('geometry', axis=1).copy()\r\n gdf_new['x'] = gdf.apply(getGeometryCoords, \r\n geom='geometry', \r\n coord_type='x', \r\n shape_type='polygon', \r\n axis=1)\r\n \r\n gdf_new['y'] = gdf.apply(getGeometryCoords, \r\n geom='geometry', \r\n coord_type='y', \r\n shape_type='polygon', \r\n axis=1)\r\n \r\n return ColumnDataSource(gdf_new)", "def geo_transform(self):\n pass", "def to_work_series(self, data: pd.Series) -> pd.Series:\n ...", "def createCoordTuples(data):\n data['xy'] = None\n for i, row in data.iterrows():\n data['xy'][i] = [np.round(row['geometry'].x, decimals=5), np.round(row['geometry'].y, decimals=5)]\n return data", "def to_pandas_series_rdd(self):\n pd_index = self.index().to_pandas_index()\n return self.map(lambda x: (x[0], pd.Series(x[1], pd_index)))" ]
[ "0.63202363", "0.6096996", "0.603656", "0.598663", "0.59274143", "0.5879244", "0.5840413", "0.5799306", "0.5705398", "0.56461185", "0.5643855", "0.55626243", "0.55617464", "0.5537935", "0.5530619", "0.5516425", "0.5465296", "0.5460588", "0.54439163", "0.5427812", "0.5421134", "0.5408094", "0.54045767", "0.5402654", "0.5382602", "0.53735256", "0.5345623", "0.5321549", "0.53133494", "0.53093433" ]
0.6296897
1
Construct polygon(rectangle) geometries from arr_min_x, arr_min_y, arr_max_x, arr_max_y and special coordinate system. The edges of polygon are parallel to coordinate axis.
def polygon_from_envelope(cls, min_x, min_y, max_x, max_y, crs=None): crs = _validate_crs(crs) return cls(arctern.ST_PolygonFromEnvelope(min_x, min_y, max_x, max_y), crs=crs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generatePolygons():", "def draw_polygon(\n i, sx, tx, sy, ty, xmin, xmax, ymin, ymax,\n offsets, values, xs, ys, yincreasing, eligible,\n *aggs_and_cols\n ):\n # Initialize values of pre-allocated buffers\n xs.fill(np.nan)\n ys.fill(np.nan)\n yincreasing.fill(0)\n eligible.fill(1)\n\n # First pass, compute bounding box of polygon vertices in data coordinates\n start_index = offsets[0]\n stop_index = offsets[-1]\n # num_edges = stop_index - start_index - 2\n poly_xmin = np.min(values[start_index:stop_index:2])\n poly_ymin = np.min(values[start_index + 1:stop_index:2])\n poly_xmax = np.max(values[start_index:stop_index:2])\n poly_ymax = np.max(values[start_index + 1:stop_index:2])\n\n # skip polygon if outside viewport\n if (poly_xmax < xmin or poly_xmin > xmax\n or poly_ymax < ymin or poly_ymin > ymax):\n return\n\n # Compute pixel bounds for polygon\n startxi, startyi = map_onto_pixel(\n sx, tx, sy, ty, xmin, xmax, ymin, ymax,\n max(poly_xmin, xmin), max(poly_ymin, ymin)\n )\n stopxi, stopyi = map_onto_pixel(\n sx, tx, sy, ty, xmin, xmax, ymin, ymax,\n min(poly_xmax, xmax), min(poly_ymax, ymax)\n )\n stopxi += 1\n stopyi += 1\n\n # Handle subpixel polygons (pixel width and/or height of polygon is 1)\n if (stopxi - startxi) == 1 and (stopyi - startyi) == 1:\n append(i, startxi, startyi, *aggs_and_cols)\n return\n elif (stopxi - startxi) == 1:\n for yi in range(min(startyi, stopyi) + 1, max(startyi, stopyi)):\n append(i, startxi, yi, *aggs_and_cols)\n return\n elif (stopyi - startyi) == 1:\n for xi in range(min(startxi, stopxi) + 1, max(startxi, stopxi)):\n append(i, xi, startyi, *aggs_and_cols)\n return\n\n # Build arrays of edges in canvas coordinates\n ei = 0\n for j in range(len(offsets) - 1):\n start = offsets[j]\n stop = offsets[j + 1]\n for k in range(start, stop - 2, 2):\n x0 = values[k]\n y0 = values[k + 1]\n x1 = values[k + 2]\n y1 = values[k + 3]\n\n # Map to canvas coordinates without rounding\n x0c = x_mapper(x0) * sx + tx - 0.5\n y0c = y_mapper(y0) * sy + ty - 0.5\n x1c = x_mapper(x1) * sx + tx - 0.5\n y1c = y_mapper(y1) * sy + ty - 0.5\n\n if y1c > y0c:\n xs[ei, 0] = x0c\n ys[ei, 0] = y0c\n xs[ei, 1] = x1c\n ys[ei, 1] = y1c\n yincreasing[ei] = 1\n elif y1c < y0c:\n xs[ei, 1] = x0c\n ys[ei, 1] = y0c\n xs[ei, 0] = x1c\n ys[ei, 0] = y1c\n yincreasing[ei] = -1\n else:\n # Skip horizontal edges\n continue\n\n ei += 1\n\n # Perform scan-line algorithm\n num_edges = ei\n for yi in range(startyi, stopyi):\n # All edges eligible at start of new row\n eligible.fill(1)\n for xi in range(startxi, stopxi):\n # Init winding number\n winding_number = 0\n for ei in range(num_edges):\n if eligible[ei] == 0:\n # We've already determined that edge is above, below, or left\n # of edge for the current pixel\n continue\n\n # Get edge coordinates.\n # Note: y1c > y0c due to how xs/ys were populated\n x0c = xs[ei, 0]\n x1c = xs[ei, 1]\n y0c = ys[ei, 0]\n y1c = ys[ei, 1]\n\n # Reject edges that are above, below, or left of current pixel.\n # Note: Edge skipped if lower vertex overlaps,\n # but is kept if upper vertex overlaps\n if (y0c >= yi or y1c < yi\n or (x0c < xi and x1c < xi)\n ):\n # Edge not eligible for any remaining pixel in this row\n eligible[ei] = 0\n continue\n\n if xi <= x0c and xi <= x1c:\n # Edge is fully to the right of the pixel, so we know ray to the\n # the right of pixel intersects edge.\n winding_number += yincreasing[ei]\n else:\n # Now check if edge is to the right of pixel using cross product\n # A is vector from pixel to first vertex\n ax = x0c - xi\n ay = y0c - 
yi\n\n # B is vector from pixel to second vertex\n bx = x1c - xi\n by = y1c - yi\n\n # Compute cross product of B and A\n bxa = (bx * ay - by * ax)\n\n if bxa < 0 or (bxa == 0 and yincreasing[ei]):\n # Edge to the right\n winding_number += yincreasing[ei]\n else:\n # Edge to left, not eligible for any remaining pixel in row\n eligible[ei] = 0\n continue\n\n if winding_number != 0:\n # If winding number is not zero, point\n # is inside polygon\n append(i, xi, yi, *aggs_and_cols)", "def rectpolyctl(xmin,xmax,ymin,ymax):\n pc=[]\n pc.append((xmin,ymin))\n pc.append((xmin,ymax))\n pc.append((xmax,ymax))\n pc.append((xmax,ymin))\n pc.append((xmin,ymin))\n return pc", "def convert_lane_boundaries_to_polygon(right_lane_bounds: np.ndarray, left_lane_bounds: np.ndarray) -> np.ndarray:\n assert right_lane_bounds.shape[0] == left_lane_bounds.shape[0]\n polygon = np.vstack([right_lane_bounds, left_lane_bounds[::-1]])\n polygon = np.vstack([polygon, right_lane_bounds[0]])\n return polygon", "def create_poly(self, bounds):\n\n left, bottom, right, top = bounds\n\n return Polygon(\n [\n (left, bottom),\n (left, top),\n (right, top),\n (right, bottom),\n (left, bottom),\n ]\n )", "def areap(minRA, maxRA, minDec, maxDec):\n poly = [[minRA, minDec], [minRA, maxDec], [maxRA, maxDec], [maxRA, minDec]]\n\n return geometry.Polygon(poly)", "def get_boundary_as_polygon(self, do_geo=True):\n xhor, yhor = self.get_coordinates()\n dimensions = xhor.shape\n xbottom = xhor[0, :]\n xright = xhor[:, dimensions[1]-1]\n xtop = xhor[dimensions[0]-1, :][::-1]\n xleft = xhor[:, 0][::-1]\n\n ybottom = yhor[0, :]\n yright = yhor[:, dimensions[1]-1]\n ytop = yhor[dimensions[0]-1, :][::-1]\n yleft = yhor[:, 0][::-1]\n\n lon_square = np.concatenate((xbottom, xright, xtop, xleft))\n lat_square = np.concatenate((ybottom, yright, ytop, yleft))\n\n return lon_square, lat_square", "def _polygons_to_bboxes(polygons):\n# Build bounding boxes\n bboxes = np.empty([len(polygons), 4])\n for n, p in enumerate(polygons):\n try:\n left, bottom = np.min(p, axis = 0)\n except:\n import pdb\n pdb.set_trace()\n right, top = np.max(p, axis = 0)\n bboxes[n] = [left, bottom, right, top]\n return bboxes", "def polybbox(a):\n if len(a) == 0:\n return False\n elif len(a) == 1:\n return pointbbox(a[0])\n else:\n minx = maxx = a[0][0]\n miny = maxy = a[0][1]\n for i in range(1,len(a)):\n x=a[i][0]\n y=a[i][1]\n if x < minx:\n minx =x\n elif x > maxx:\n maxx = x\n if y < miny:\n miny = y\n elif y > maxy:\n maxy = y\n return [ point(minx,miny),point(maxx,maxy)]", "def make_polygon(*coords):\n global GEOMETRY_SURF, POLYGONS,col\n if len(coords) < 3:\n print(\"Warning: Invalid polygon passed, ignoring...\")\n return\n start = coords[0]\n prev = coords[0]\n for coord in coords:\n POLYGONS |= {coord}\n line = Boundary(prev[0],prev[1],coord[0],coord[1]) #add segment to WALL list\n prev = coord\n line = Boundary(prev[0], prev[1],start[0],start[1])\n #now draw poly\n pygame.draw.polygon(GEOMETRY_SURF,col[\"SHAPECOL\"], coords)\n return", "def compute_bb(self):\n all_shapes = list(self.parts.values()) + list(self.edges.values())\n bbox_vertices = cascaded_union(all_shapes).envelope.exterior.coords.xy\n min_x = min(bbox_vertices[0])\n max_x = max(bbox_vertices[0])\n min_y = min(bbox_vertices[1])\n max_y = max(bbox_vertices[1])\n return [min_x, max_x,min_y, max_y]", "def polygon_array(cls, polygon, num, space, space_series, n_series, origin, subsampling=1):\n if (subsampling == 0) or (subsampling == -1):\n raise ValueError('Subsampling cannot be 0 or -1') \n \n # 
take care of subsampling\n n_series_np = np.arange(0,n_series)\n if subsampling>0:\n num_np = [x for x in range(num) if np.mod(x, subsampling)==0]\n else:\n num_np = [x for x in range(num) if np.mod(x, subsampling)!=0]\n\n # create arrays with combinations of objects and series positions \n m1, m2 = np.meshgrid(n_series_np, num_np, indexing='ij')\n\n # compute all x locations\n all_coords = np.ravel(origin[0] + m2*space+m1*(space*num+space_series))\n num_obj_after_sampling = len(all_coords)\n \n # combine x with y locations\n all_coords = np.stack([all_coords, origin[1]*np.ones_like(all_coords)])\n\n # concatenate all polygons and keep their length in memory\n poly_len = [len(p) for p in polygon.coord]\n poly_concat = np.concatenate(polygon.coord)\n\n # compute final coordinates using broadcasting\n # num_poly_edges x 2 x 1\n # x 2 x num_new_coords\n # num_poly_edges x 2 x num_new_coords\n complete = np.moveaxis(poly_concat[:,:, np.newaxis] + all_coords, 2,0)\n\n # reshape as long 2d list of length num_new_coords * num_poly_edges\n commplete_reshaped = np.reshape(complete, (complete.shape[0]*complete.shape[1], 2))\n\n # split into correct polygon lists\n split_pos=np.cumsum(num_obj_after_sampling * poly_len)\n pg_array = np.split(commplete_reshaped, split_pos[:-1])\n\n pg_array_obj = cls()\n pg_array_obj.coord = pg_array\n pg_array_obj.params = {'num':num, 'space':space, 'space_series':space_series, 'n_series':n_series, 'origin':origin, 'subsampling':subsampling}\n \n return pg_array_obj", "def bounding_box(self):\n box_min = []\n box_max = []\n if self.is_empty():\n raise ValueError('empty polytope is not allowed')\n for i in range(0, self.space_dimension()):\n x = Variable(i)\n coords = [ v.coefficient(x) for v in self.generators() ]\n max_coord = max(coords)\n min_coord = min(coords)\n box_max.append(max_coord)\n box_min.append(min_coord)\n return (tuple(box_min), tuple(box_max))", "def polygon_weights(polygon, xrange=None, yrange=None,\n center=True): # pragma: no cover\n poly = np.array(polygon)\n if poly.ndim != 2 or poly.shape[-1] != 2 or poly.shape[0] < 3:\n log.warning(\"invalid polygon shape\")\n return []\n\n xlims = [poly[:, 1].min(), poly[:, 1].max()]\n ylims = [poly[:, 0].min(), poly[:, 0].max()]\n\n if xrange is not None:\n xlims[0] = np.nanmax((xlims[0], np.nanmin(xrange)))\n xlims[1] = np.nanmin((xlims[1], np.nanmax(xrange)))\n if yrange is not None:\n ylims[0] = np.nanmax((ylims[0], np.nanmin(yrange)))\n ylims[1] = np.nanmin((ylims[1], np.nanmax(yrange)))\n\n if xlims[0] >= xlims[1] or ylims[0] >= ylims[1]:\n log.debug(\"out of bounds\")\n return []\n\n xlims = [int(np.floor(xlims[0])), int(np.ceil(xlims[1]))]\n ylims = [int(np.floor(ylims[0])), int(np.ceil(ylims[1]))]\n\n if center:\n dx = -0.5, 0.5\n dy = -0.5, 0.5\n else:\n dx = 0, 1\n dy = 0, 1\n\n gy, gx = np.mgrid[ylims[0]:ylims[1] + 1, xlims[0]:xlims[1] + 1]\n p = path.Path(poly)\n result = []\n for ycen, xcen in zip(gy.ravel(), gx.ravel()):\n bbox = Bbox([[ycen + dy[0], xcen + dx[0]],\n [ycen + dy[1], xcen + dy[1]]])\n area = polygon_area(p.clip_to_bbox(bbox))\n if area != 0:\n result.append(((ycen, xcen), area))\n\n return result", "def getPolygonBoundaries(self, polygon: Polygon):\n polygon_df = gpd.GeoDataFrame([polygon], columns=['geometry'])\n\n polygon_df.set_crs(epsg=self.output_epsg, inplace=True)\n polygon_df['geometry'] = polygon_df['geometry'].to_crs(epsg=self.input_epsg)\n minx, miny, maxx, maxy = polygon_df['geometry'][0].bounds\n\n polygon_input = 'POLYGON(('\n xcords, ycords = 
polygon_df['geometry'][0].exterior.coords.xy\n for x, y in zip(list(xcords), list(ycords)):\n polygon_input += f'{x} {y}, '\n polygon_input = polygon_input[:-2]\n polygon_input += '))'\n\n return f\"({[minx, maxx]},{[miny,maxy]})\", polygon_input", "def bounding_box(vertices, (height, width), extend=5):\n x_min = min(x for x, y in vertices) - extend\n x_max = max(x for x, y in vertices) + extend\n y_min = min(y for x, y in vertices) - extend\n y_max = max(y for x, y in vertices) + extend\n \n return max(x_min, 0), min(x_max, width), max(y_min, 0), min(y_max, height)", "def fitRectangle(self):\n \n #TODO MAKE SOMETHING MORE GENERIC!!\n \n fA, (fXg, fYg) = self.getArea_and_CenterOfMass()\n \n x1,y1, x2,y2 = self.getBoundingBox()\n #build a rectangle with same \"width\" as the polygon... is-it good enough??\n w = x2 - x1\n \n #but this width should not lead to go out of the bounding box!\n fW = min(w, (x2-fXg)*2, (fXg-x1)*2)\n \n #same area\n fH = fA / fW\n \n x1,y1, x2,y2 = [ int(round(v)) for v in [ fXg - fW/2.0, fYg - fH/2\n , fXg + fW/2.0, fYg + fH/2 ]]\n \n return x1,y1, x2,y2", "def create_spatial_grid(spatial_domain, grid_length, offset_coords=None, convert_to_shapely=True):\n\n if HAS_GEODJANGO and isinstance(spatial_domain, geos.GEOSGeometry):\n if convert_to_shapely:\n spatial_domain = geodjango_to_shapely(spatial_domain)\n polygon = geometry.Polygon\n bounds = lambda x: x.bounds\n else:\n polygon = geos.Polygon\n bounds = lambda x: x.extent\n else:\n polygon = geometry.Polygon\n bounds = lambda x: x.bounds\n\n intersect_polys = []\n full_extents = []\n full_grid_square = []\n edges_x, edges_y = bounding_box_grid(spatial_domain, grid_length, offset_coords=offset_coords)\n\n for ix in range(len(edges_x) - 1):\n for iy in range(len(edges_y) - 1):\n p = polygon((\n (edges_x[ix], edges_y[iy]),\n (edges_x[ix+1], edges_y[iy]),\n (edges_x[ix+1], edges_y[iy+1]),\n (edges_x[ix], edges_y[iy+1]),\n (edges_x[ix], edges_y[iy]),\n ))\n if p.within(spatial_domain):\n intersect_polys.append(p)\n full_extents.append(bounds(p))\n full_grid_square.append(True)\n elif spatial_domain.intersects(p):\n intersect_polys.append(spatial_domain.intersection(p))\n full_extents.append(bounds(p))\n full_grid_square.append(False)\n\n return intersect_polys, full_extents, full_grid_square", "def create_bounds(dict, number_of_nodes):\n\n x_min = dict[\"x_min\"]\n x_max = dict[\"x_max\"]\n u_min = dict[\"u_min\"]\n u_max = dict[\"u_max\"]\n\n v_min = []\n v_max = []\n for k in range(number_of_nodes - 1):\n v_min += x_min\n v_max += x_max\n v_min += u_min\n v_max += u_max\n\n if \"tf_min\" in dict:\n if \"tf_max\" in dict:\n tf_min = dict[\"tf_min\"]\n tf_max = dict[\"tf_max\"]\n v_min.append(tf_min)\n v_max.append(tf_max)\n\n v_min += x_min\n v_max += x_max\n\n return vertcat(*v_min), vertcat(*v_max)", "def rectangular(m, n, len1=1.0, len2=1.0, origin = (0.0, 0.0)):\n\n from anuga.config import epsilon\n\n delta1 = float(len1)/m\n delta2 = float(len2)/n\n\n #Calculate number of points\n Np = (m+1)*(n+1)\n\n class Index(object):\n\n def __init__(self, n,m):\n self.n = n\n self.m = m\n\n def __call__(self, i,j):\n return j+i*(self.n+1)\n\n\n index = Index(n,m)\n\n points = num.zeros((Np, 2), float)\n\n for i in range(m+1):\n for j in range(n+1):\n\n points[index(i,j),:] = [i*delta1 + origin[0], j*delta2 + origin[1]]\n\n #Construct 2 triangles per rectangular element and assign tags to boundary\n #Calculate number of triangles\n Nt = 2*m*n\n\n\n elements = num.zeros((Nt, 3), int)\n boundary = {}\n nt = -1\n for i in 
range(m):\n for j in range(n):\n nt = nt + 1\n i1 = index(i,j+1)\n i2 = index(i,j)\n i3 = index(i+1,j+1)\n i4 = index(i+1,j)\n\n\n #Update boundary dictionary and create elements\n if i == m-1:\n boundary[nt, 2] = 'right'\n if j == 0:\n boundary[nt, 1] = 'bottom'\n elements[nt,:] = [i4,i3,i2] #Lower element\n nt = nt + 1\n\n if i == 0:\n boundary[nt, 2] = 'left'\n if j == n-1:\n boundary[nt, 1] = 'top'\n elements[nt,:] = [i1,i2,i3] #Upper element\n\n return points, elements, boundary", "def boundary_polygon(self):\n try:\n return self.boundary_polygon_by_edges()\n except Exception as exc:\n self.log.warning('Warning, boundary_polygon() failed using edges! Trying polygon union method')\n self.log.warning(exc,exc_info=True)\n return self.boundary_polygon_by_union()", "def bisect_rectange(numSplits, minlat, minlong, maxlat, maxlong):\n #initialize function variables\n longpoints = []\n latpoints = []\n extents = []\n\n #Get a list of the split lat/long locations in the rectangle\n for i in range(numSplits+1):\n latpoints.append( (minlat + ((maxlat-minlat)/numSplits)*i) )\n longpoints.append( (minlong + ((maxlong-minlong)/numSplits)*i) )\n\n #Loop through the line locations and create a list of sub-rectangles\n for latindex, latmin in enumerate(latpoints):\n for longindex, longmin in enumerate(longpoints):\n if latindex<(len(latpoints)-1) and longindex<(len(longpoints)-1):\n newextent = [latmin, longmin, latpoints[latindex+1], longpoints[longindex+1]]\n extents.append(newextent)\n return extents", "def draw_grid(self, min_x, max_x, min_y, max_y, min_z, max_z) -> None:\n from pymol import cmd\n from math import sin, cos\n \n # Prepare dimensions\n angle1 = 0.0\n angle2 = 0.0\n min_x = x - min_x\n max_x = max_x - x \n min_y = y - min_y \n max_y = max_y - y \n min_z = z - min_z \n max_z = max_z - z \n\n # Get positions of grid vertices\n # P1\n x1 = -min_x * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y1 = -min_y * cos(angle1) + (-min_z) * sin(angle1) + y\n\n z1 = min_x * sin(angle2) + min_y * sin(angle1) * cos(angle2) - min_z * cos(angle1) * cos(angle2) + z\n \n # P2\n x2 = max_x * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y2 = (-min_y) * cos(angle1) + (-min_z) * sin(angle1) + y\n \n z2 = (-max_x) * sin(angle2) - (-min_y) * sin(angle1) * cos(angle2) + (-min_z) * cos(angle1) * cos(angle2) + z\n \n # P3\n x3 = (-min_x) * cos(angle2) - max_y * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y3 = max_y * cos(angle1) + (-min_z) * sin(angle1) + y\n\n z3 = -(-min_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + (-min_z) * cos(angle1) * cos(angle2) + z\n \n # P4\n x4 = (-min_x) * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + max_z * cos(angle1) * sin(angle2) + x\n\n y4 = (-min_y) * cos(angle1) + max_z * sin(angle1) + y\n\n z4 = -(-min_x) * sin(angle2) - (-min_y) * sin(angle1) * cos(angle2) + max_z * cos(angle1) * cos(angle2) + z\n\n \n # P5\n x5 = max_x * cos(angle2) - max_y * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y5 = max_y * cos(angle1) + (-min_z) * sin(angle1) + y\n\n z5 = (-max_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + (-min_z) * cos(angle1) * cos(angle2) + z\n \n # P6\n x6 = max_x * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + max_z * cos(angle1) * sin(angle2) + x\n\n y6 = (-min_y) * cos(angle1) + max_z * sin(angle1) + y\n\n z6 = (-max_x) * sin(angle2) - (-min_y) * sin(angle1) * cos(angle2) + max_z 
* cos(angle1) * cos(angle2) + z\n \n # P7\n x7 = (-min_x) * cos(angle2) - max_y * sin(angle1) * sin(angle2) + max_z * cos(angle1) * sin(angle2) + x\n\n y7 = max_y * cos(angle1) + max_z * sin(angle1) + y\n\n z7 = -(-min_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + max_z * cos(angle1) * cos(angle2) + z\n\n # P8\n x8 = max_x * cos(angle2) - max_y * sin(angle1) * sin(angle2) + max_z * cos(angle1) * sin(angle2) + x\n\n y8 = max_y * cos(angle1) + max_z * sin(angle1) + y\n\n z8 = (-max_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + max_z * cos(angle1) * cos(angle2) + z \n\n # Create box object\n if \"grid\" in cmd.get_names(\"objects\"):\n cmd.delete(\"grid\")\n\n # Create vertices\n cmd.pseudoatom(\"grid\", name=\"v2\", pos=[x2, y2, z2], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v3\", pos=[x3, y3, z3], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v4\", pos=[x4, y4, z4], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v5\", pos=[x5, y5, z5], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v6\", pos=[x6, y6, z6], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v7\", pos=[x7, y7, z7], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v8\", pos=[x8, y8, z8], color=\"white\")\n\n # Connect vertices\n cmd.select(\"vertices\", \"(name v3,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v5,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v3,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v6,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v7,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"grid\", name=\"v1x\", pos=[x1, y1, z1], color='white')\n cmd.pseudoatom(\"grid\", name=\"v2x\", pos=[x2, y2, z2], color='white')\n cmd.select(\"vertices\", \"(name v1x,v2x)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"grid\", name=\"v1y\", pos=[x1, y1, z1], color='white')\n cmd.pseudoatom(\"grid\", name=\"v3y\", pos=[x3, y3, z3], color='white')\n cmd.select(\"vertices\", \"(name v1y,v3y)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"grid\", name=\"v4z\", pos=[x4, y4, z4], color='white')\n cmd.pseudoatom(\"grid\", name=\"v1z\", pos=[x1, y1, z1], color='white')\n cmd.select(\"vertices\", \"(name v1z,v4z)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.delete(\"vertices\")", "def polygonal(resolution, in_vertices, out_vertices_list = None):\n in_vertices = [Point(in_vertices[k,0],in_vertices[k,1]) for k in range(in_vertices.shape[0])] \n\n domain = mshr.Polygon(in_vertices) # https://bitbucket.org/fenics-project/mshr/wiki/API/Polygon\n # Create polygon defined by the given vertices. 
Vertices must be in counter-clockwise order and free of self-intersections.\n \n if(out_vertices_list is not None):\n for out_vertices in out_vertices_list:\n out_vertices = [Point(out_vertices[k,0],out_vertices[k,1]) for k in range(out_vertices.shape[0])]\n domain -= mshr.Polygon(out_vertices)\n \n mesh=mshr.generate_mesh(domain, resolution)\n\n # TODO : add refined mesh\n # if(refine_mesh):\n # d = mesh.topology().dim()\n \n # class To_refine(SubDomain):\n # def inside(self, x, on_boundary):\n # return x[1]<=0 and x[1]>= -l_mot/2-h_grid-l_vacuum/4\n\n # to_refine = To_refine()\n # marker = MeshFunction(\"bool\", mesh, d, False)\n # to_refine.mark(marker, True)\n # mesh = refine(mesh,marker)\n\n return mesh", "def get_polygon_constraints(self, range_polygones=range(3, 5), print_out=False):\n rows_A = []\n rows_b = []\n for m in range_polygones:\n if (print_out):\n print('checking {}-polygones'.format(m))\n polygons = self.get_convex_polygons(m)\n row_A, row_b = self.get_polygon_constraints_m(polygons, print_out)\n rows_A.append(row_A)\n rows_b.append(row_b)\n return np.vstack(rows_A), np.hstack(rows_b)", "def _bounding_box_to_polytope(lower, upper):\n intervals = [(a[0], b[0]) for a, b in zip(lower, upper)]\n return box2poly(intervals)", "def __init__(self, min_x, min_y, max_x=0, max_y=0,\n width=0, height=0):\n self.min_x = min_x\n self.min_y = min_y\n if width > 0:\n self.max_x = min_x + width\n else:\n self.max_x = max_x\n if height > 0:\n self.max_y = min_y + height\n else:\n self.max_y = max_y", "def get_bounding_rect(polygon):\n x1, y1, x2, y2 = float('inf'), float('inf'), float('-inf'), float('-inf')\n for x, y in polygon:\n if x < x1:\n x1 = x\n if y < y1:\n y1 = y\n if x > x2:\n x2 = x\n if y > y2:\n y2 = y\n return x1, y1, x2, y2", "def boundary_polygon_by_edges(self):\n lines=self.boundary_linestrings()\n polys=join_features.lines_to_polygons(lines,close_arc=False)\n if len(polys)>1:\n raise GridException(\"somehow there are multiple boundary polygons\")\n return polys[0]", "def rectangular_old(m, n, len1=1.0, len2=1.0, origin = (0.0, 0.0)):\n\n from anuga.config import epsilon\n\n deltax = float(len1)/m\n deltay = float(len2)/n\n\n #Dictionary of vertex objects\n vertices = {}\n points = []\n\n for i in range(m+1):\n for j in range(n+1):\n vertices[i,j] = len(points)\n points.append([i*delta1 + origin[0], j*delta2 + origin[1]])\n\n\n #Construct 2 triangles per rectangular element and assign tags to boundary\n elements = []\n boundary = {}\n for i in range(m):\n for j in range(n):\n v1 = vertices[i,j+1]\n v2 = vertices[i,j]\n v3 = vertices[i+1,j+1]\n v4 = vertices[i+1,j]\n\n #Update boundary dictionary and create elements\n if i == m-1:\n boundary[(len(elements), 2)] = 'right'\n if j == 0:\n boundary[(len(elements), 1)] = 'bottom'\n elements.append([v4,v3,v2]) #Lower element\n\n if i == 0:\n boundary[(len(elements), 2)] = 'left'\n if j == n-1:\n boundary[(len(elements), 1)] = 'top'\n elements.append([v1,v2,v3]) #Upper element\n\n return points, elements, boundary" ]
[ "0.6298787", "0.6130665", "0.5995941", "0.59403425", "0.59305", "0.5916149", "0.58777076", "0.58749014", "0.5831342", "0.5804383", "0.57978517", "0.578452", "0.5773578", "0.5768583", "0.5755375", "0.57550496", "0.57409936", "0.5726229", "0.57090235", "0.5684887", "0.5683477", "0.56779313", "0.56674206", "0.56373835", "0.56250376", "0.55948156", "0.55709535", "0.5563227", "0.5559222", "0.5554882" ]
0.6215077
1
Construct geometry from the GeoJSON representation string.
def geom_from_geojson(cls, json, crs=None): crs = _validate_crs(crs) return cls(arctern.ST_GeomFromGeoJSON(json), crs=crs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_geometry(self, val):\n g = OGRGeometry(val)\n return json.loads(g.json)", "def json2polygon(geojson_str):\n geojson_object = geojson.loads(geojson_str)\n return geometry.shape(geojson_object)", "def get_geojson_feature(id, raw_bbox_string, properties_dict):\n coords = raw_bbox_string.split()\n \n # Tesseract uses ints, but allow floats\n for i, val in enumerate(coords):\n coords[i] = float(val)\n # bbox order = # x0 y0 x1 y1\n \n bbox_json_obj = geojson.Polygon([[\n (coords[0], coords[1]), \n (coords[0], coords[3]), \n (coords[2], coords[3]), \n (coords[2], coords[1]),\n (coords[0], coords[1])\n ]])\n return geojson.Feature(id, bbox_json_obj, properties=properties_dict)", "def __init__(self, geom_input, srs=None):\n str_instance = isinstance(geom_input, str)\n\n # If HEX, unpack input to a binary buffer.\n if str_instance and hex_regex.match(geom_input):\n geom_input = memoryview(bytes.fromhex(geom_input))\n str_instance = False\n\n # Constructing the geometry,\n if str_instance:\n wkt_m = wkt_regex.match(geom_input)\n json_m = json_regex.match(geom_input)\n if wkt_m:\n if wkt_m[\"srid\"]:\n # If there's EWKT, set the SRS w/value of the SRID.\n srs = int(wkt_m[\"srid\"])\n if wkt_m[\"type\"].upper() == \"LINEARRING\":\n # OGR_G_CreateFromWkt doesn't work with LINEARRING WKT.\n # See https://trac.osgeo.org/gdal/ticket/1992.\n g = capi.create_geom(OGRGeomType(wkt_m[\"type\"]).num)\n capi.import_wkt(g, byref(c_char_p(wkt_m[\"wkt\"].encode())))\n else:\n g = capi.from_wkt(\n byref(c_char_p(wkt_m[\"wkt\"].encode())), None, byref(c_void_p())\n )\n elif json_m:\n g = self._from_json(geom_input.encode())\n else:\n # Seeing if the input is a valid short-hand string\n # (e.g., 'Point', 'POLYGON').\n OGRGeomType(geom_input)\n g = capi.create_geom(OGRGeomType(geom_input).num)\n elif isinstance(geom_input, memoryview):\n # WKB was passed in\n g = self._from_wkb(geom_input)\n elif isinstance(geom_input, OGRGeomType):\n # OGRGeomType was passed in, an empty geometry will be created.\n g = capi.create_geom(geom_input.num)\n elif isinstance(geom_input, self.ptr_type):\n # OGR pointer (c_void_p) was the input.\n g = geom_input\n else:\n raise GDALException(\n \"Invalid input type for OGR Geometry construction: %s\"\n % type(geom_input)\n )\n\n # Now checking the Geometry pointer before finishing initialization\n # by setting the pointer for the object.\n if not g:\n raise GDALException(\n \"Cannot create OGR Geometry from input: %s\" % geom_input\n )\n self.ptr = g\n\n # Assigning the SpatialReference object to the geometry, if valid.\n if srs:\n self.srs = srs\n\n # Setting the class depending upon the OGR Geometry Type\n self.__class__ = GEO_CLASSES[self.geom_type.num]", "def decode_geometry(geom: str) -> BasePolygon:\n return shape(geobuf.decode(bytes.fromhex(geom))).buffer(0)", "def normalizeGeometry(geom):\n\t# Convert string GEOSGeometry object to python dict\n\tgeom = json.loads(geom)\n\n\t# Normalize longitude to range [-180, 180) using saw tooth function\n\tc = geom['coordinates'][0]\n\tgeom['coordinates'][0] = (c+180 - ( math.floor( (c+180)/360 ) )*360) - 180\n\n\t# Normalize latitude to range [-90, 90) using saw tooth function\n\tc = geom['coordinates'][1]\n\tgeom['coordinates'][1] = (c+90 - ( math.floor( (c+90)/180 ) )*180) - 90\n\n\t# Encode and return GEOSGeometry object\n\treturn GEOSGeometry(json.dumps(geom))", "def from_json_str(cls, json_str):\n return cls.from_json(simplejson.loads(json_str))", "def as_geom(data: dict) -> dict:\n geom = geom_from_geojson(data)\n 
validate_geom(geom)\n return geom", "def from_json(cls, string):\n dct = json.loads(string)\n return cls.from_dict(dct)", "def parse_geometry(gj_geom):\n try:\n esri_geom = arcpy.AsShape(gj_geom)\n if gj_geom['type'] == 'Point':\n return {\n \"type\": \"POINT\",\n \"esri_geom\": esri_geom\n }\n elif gj_geom['type'] in ['LineString', 'MultiLineString']:\n return {\n \"type\": \"POLYLINE\",\n \"esri_geom\": esri_geom\n }\n elif gj_geom['type'] in ['Polygon', 'MultiPolygon']:\n return {\n \"type\": \"POLYGON\",\n \"esri_geom\": esri_geom\n }\n else:\n print(\"Not a Point, Line or Polygon feature.\")\n return False\n except KeyError as ke:\n print(f\"Error: {ke}. Malformed JSON. Could not parse.\")\n return False", "def loads(string):\n endianness = string[0:1]\n if endianness == BIG_ENDIAN:\n big_endian = True\n elif endianness == LITTLE_ENDIAN:\n big_endian = False\n else:\n raise ValueError(\"Invalid endian byte: '0x%s'. Expected 0x00 or 0x01\"\n % binascii.hexlify(endianness.encode()).decode())\n\n type_bytes = string[1:5]\n if not big_endian:\n # To identify the type, order the type bytes in big endian:\n type_bytes = type_bytes[::-1]\n\n geom_type = __BINARY_TO_GEOM_TYPE.get(type_bytes)\n data_bytes = string[5:] # FIXME: This won't work for GeometryCollections\n\n importer = __loads_registry.get(geom_type)\n\n if importer is None:\n __unsupported_geom_type(geom_type)\n return importer(big_endian, type_bytes, data_bytes)", "def from_json(cls, json_str: str) -> FormatTest:\n return cls.from_dict(json.loads(json_str))", "def geometry(self, objectId):\n\n objectId = GeometryReference(objectId, self)\n req = urllib2.Request(self.baseUri + 'geometry/%d' % objectId.id)\n r = urllib2.urlopen(req)\n\n data = json.load(r)\n r.close()\n return data", "def from_geometry(cls, geometry):\n if not isinstance(geometry, Rhino.Geometry.Point3d):\n geometry = Rhino.Geometry.Point3d(* geometry)\n point = cls()\n point.geometry = geometry\n return point", "def fromTkStr(cls, geomStr):\n match = cls._geomRE.match(geomStr)\n if not match:\n raise RuntimeError(\"Could not parse geomStr string %r\" % (geomStr,))\n\n groupDict = match.groupdict()\n\n return cls(\n offset = tuple(groupDict[name] for name in (\"x\", \"y\")),\n offsetFlipped = tuple(cls._flippedFromChar(groupDict[name]) for name in (\"xsign\", \"ysign\")),\n extent = tuple(groupDict[name] for name in (\"width\", \"height\")),\n )", "def from_json(cls, json_str: str) -> 'LightlyModelV3':\n return LightlyModelV3(json.loads(json_str))", "def fromjson(cls, jsonstr: ty.Text) -> \"NDArray\":\n return cls(**json.loads(jsonstr))", "def Deserializer(stream_or_string, **options):\n def GEOJsonToEWKT(dict):\n \"\"\" \n Convert to a string that GEOSGeometry class constructor can accept. \n \n The default decoder would pass our geo dict object to the constructor which \n would result in a TypeError; using the below hook we are forcing it into a \n ewkt format. 
This is accomplished with a class hint as per JSON-RPC \n \"\"\" \n if '__GEOSGeometry__' in dict: # using class hint catch a GEOSGeometry definition \n return dict['__GEOSGeometry__'][1][0]\n \n return dict\n if isinstance(stream_or_string, basestring):\n stream = StringIO(stream_or_string)\n else:\n stream = stream_or_string\n for obj in PythonDeserializer(simplejson.load(stream, object_hook=GEOJsonToEWKT), **options):\n yield obj", "def read_geojson(cls, path_or_json_or_string_or_url):\n assert path_or_json_or_string_or_url\n data = None\n if isinstance(path_or_json_or_string_or_url, (dict, list)):\n data = path_or_json_or_string_or_url\n try:\n data = json.loads(path_or_json_or_string_or_url)\n except ValueError:\n pass\n try:\n path = path_or_json_or_string_or_url\n if path.endswith('.gz') or path.endswith('.gzip'):\n import gzip\n contents = gzip.open(path, 'r').read().decode('utf-8')\n else:\n contents = open(path, 'r').read()\n data = json.loads(contents)\n except FileNotFoundError:\n pass\n if not data:\n import urllib.request\n with urllib.request.urlopen(path_or_json_or_string_or_url) as url:\n data = json.loads(url.read().decode())\n assert data, 'MapData accepts a valid geoJSON object, geoJSON string, path to a geoJSON file, or URL'\n return cls(cls._read_geojson_features(data))", "def from_geometry(cls, geometry):\n mesh = cls()\n mesh.geometry = geometry\n return mesh", "def from_json(cls, json_string:str):\n data = json.loads(json_string)\n validate(data, schema)\n instance = cls.from_dict(data)\n return instance", "def from_json_string(my_str):\n rep = json.loads(my_str)\n return rep", "def GEOJsonToEWKT(dict): \n if '__GEOSGeometry__' in dict: # using class hint catch a GEOSGeometry definition \n return dict['__GEOSGeometry__'][1][0]\n \n return dict", "def geom_from_geojson(data: dict) -> dict:\n if set(('coordinates', 'type')).issubset(set(data.keys())):\n # already a geom\n ret = data\n else:\n try:\n # feature\n ret = as_geom(data['geometry'])\n except KeyError:\n try:\n # FeatureCollection\n features = data['features']\n except KeyError:\n raise GeoJSONError(f'Invalid GeoJSON: {data}')\n\n if len(features) > 1:\n raise GeoJSONError(\n 'FeatureCollection has multiple features. Only one feature'\n ' can be used to get geometry.')\n\n ret = as_geom(features[0])\n return ret", "def from_json_string(my_str):\n import json\n return json.loads(my_str)", "def from_json_string(my_str):\n return loads(my_str)", "def create_ogr_geom(geom) -> ogr.Geometry:\n if isinstance(geom, ogr.Geometry):\n return geom\n\n # Converte os tipos para diferentes situações (python 2.7).\n # if isinstance(geom, str):\n # geom = str(geom)\n # elif isinstance(geom, unicode):\n # geom = str(geom)\n try:\n ogr_geom = ogr.CreateGeometryFromWkb(geom)\n except RuntimeError:\n ogr_geom = ogr.CreateGeometryFromWkt(geom)\n if not ogr_geom:\n ogr_geom = ogr.CreateGeometryFromWkt(geom)\n return ogr_geom", "def from_string(string):\n # in order to complete this lab we are going to use the python lib json in which we have the function json.loads\n # which will automatically load a json from a string\n return json.loads(string)", "def from_json(cls, json_str: str) -> AreaTile:\n return cls.from_dict(json.loads(json_str))", "def from_json(cls, json_str: str):\n\n def read_input(x: dict):\n return TensorSpec.from_json_dict(**x) if x[\"type\"] == \"tensor\" else ColSpec(**x)\n\n return cls([read_input(x) for x in json.loads(json_str)])" ]
[ "0.64928246", "0.6445108", "0.62764484", "0.62726635", "0.6204472", "0.61289346", "0.59531116", "0.5860503", "0.5817557", "0.5810382", "0.5739932", "0.5705384", "0.56882906", "0.5613755", "0.56050944", "0.5557749", "0.5536399", "0.55300456", "0.5526873", "0.5519496", "0.54749906", "0.54732317", "0.5471958", "0.5457804", "0.5444451", "0.5440401", "0.54330266", "0.5413172", "0.53972626", "0.53971136" ]
0.6517472
0
Decorator to load common data used by all views
def base_data_manager(wrapped): @check_session def wrapper(request, *arg, **kwargs): @cache_region.cache_on_arguments() def get_data_manager(collection, journal, document, range_start, range_end): code = document or journal or collection data = {} xylose_doc = request.stats.articlemeta.document(document, collection) if document else None if xylose_doc and xylose_doc.publisher_id: data['selected_document'] = xylose_doc data['selected_document_code'] = document journal = document[1:10] collections = request.stats.articlemeta.certified_collections() journals = request.stats.articlemeta.collections_journals(collection) selected_journal = journals.get(journal, None) selected_journal_code = journal if journal in journals else None today = datetime.datetime.now() y3 = today - datetime.timedelta(365*3) y2 = today - datetime.timedelta(365*2) y1 = today - datetime.timedelta(365*1) data.update({ 'collections': collections, 'selected_code': code, 'selected_journal': selected_journal, 'selected_journal_code': selected_journal_code, 'selected_document_code': document or None, 'selected_collection': collections[collection], 'selected_collection_code': collection, 'journals': journals, 'range_start': range_start, 'range_end': range_end, 'today': today.isoformat()[0:10], 'y3': y3.isoformat()[0:10], 'y2': y2.isoformat()[0:10], 'y1': y1.isoformat()[0:10] }) return data collection_code = request.session.get('collection', None) journal_code = request.session.get('journal', None) under_development = request.session.get('under_development', '') range_end = request.session.get('range_end', datetime.datetime.now().isoformat()[0:10]) range_start = request.session.get('range_start', (datetime.datetime.now() - datetime.timedelta(365*3)).isoformat()[0:10]) document_code = utils.REGEX_ARTICLE.match(request.session.get('document', '')) if document_code: document_code = document_code.string data = get_data_manager(collection_code, journal_code, document_code, range_start, range_end) data['locale'] = request.session.get('_LOCALE_', request.locale_name) data['under_development'] = [i for i in aslist(request.registry.settings.get('under_development', '')) if i != under_development] data['google_analytics_code'] = os.environ.get( 'GOOGLE_ANALYTICS_CODE', request.registry.settings.get('google_analytics_code', None) ) data['google_analytics_sample_rate'] = os.environ.get( 'GOOGLE_ANALYTICS_SAMPLE_RATE', request.registry.settings.get('google_analytics_sample_rate', '100') ) data['subject_areas'] = request.stats.publication.list_subject_areas(data['selected_code'], data['selected_collection_code']) data['languages'] = [(i, choices.ISO_639_1.get(i.upper(), 'undefined')) for i in request.stats.publication.list_languages(data['selected_code'], data['selected_collection_code'])] data['publication_years'] = request.stats.publication.list_publication_years(data['selected_code'], data['selected_collection_code']) if len(data['publication_years']) == 0: data['publication_years'] = [str(datetime.datetime.now().year)] py = '-'.join([data['publication_years'][0], data['publication_years'][-1]]) data['py_range'] = request.session.get('py_range', py).split('-') data['sa_scope'] = request.session.get('sa_scope', data['subject_areas']) data['la_scope'] = request.session.get('la_scope', [k for k,v in data['languages']]) data['content_scope'] = 'document' if data['selected_document_code'] else 'journal' if data['selected_journal_code'] else 'collection' if data['selected_collection_code'] else 'network' data['share_this_url'] = 
current_url(request.url, data) setattr(request, 'data_manager', data) return wrapped(request, *arg, **kwargs) wrapper.__doc__ = wrapped.__doc__ return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data():\n return app_views", "def data_for_all(request):\n data = common_data(request)\n data.update({\"tags\": Tag.used_tags(),\n \"archive_qualifier\": \"\",\n \"recent_active_months\": Blog.recent_active_months()})\n return data", "def common_context(request):\n c = {\n 'lessons': get_lesson_numbers(),\n }\n return c", "def a_shared_view(request):\n return view(a_shared_view)", "def all_common_variables(request):\n articles = Article.objects.all()\n random_article = Article.objects.order_by('?')[0:4]\n return {\n 'articles':articles,\n 'random_article':random_article,\n }", "def shared_view(request):\n return view(shared_view, template=\"a_shared_view\")", "def decorator(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n dao = DatasetDBDAO()\n query, total_resources = dao.query(account_id=request.account.id, language=request.user.language)\n if total_resources == 0 or request.GET.get('test-no-datasets', False) == '1':\n raise AnyDatasetRequiredException()\n return view_func(request, *args, **kwargs)\n\n return _wrapped_view", "def local_views():\n\tpass", "def decorator(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n dao = DataStreamDBDAO()\n query, total_resources = dao.query(account_id=request.account.id, language=request.user.language)\n if total_resources == 0 or request.GET.get('test-no-dataviews', False) == '1':\n raise AnyDatastreamRequiredException()\n return view_func(request, *args, **kwargs)\n\n return _wrapped_view", "def initView(self):\n return {}", "def init_static_data(log_to_console=False):\n # These are annoyingly necessary to live in the DB, currently. \n # Really this should be app logic, I think.\n load_report_types()\n load_roles()\n loc_file = getattr(settings, \"STATIC_LOCATIONS\")\n if loc_file:\n load_locations(loc_file, log_to_console=log_to_console)\n product_file = getattr(settings, \"STATIC_PRODUCTS\")\n if product_file:\n load_products(product_file, log_to_console=log_to_console)", "def requires_any_dataset():\n def decorator(view_func):\n \"\"\" for registred and logged user. 
NO redirect to login\"\"\"\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n dao = DatasetDBDAO()\n query, total_resources = dao.query(account_id=request.account.id, language=request.user.language)\n if total_resources == 0 or request.GET.get('test-no-datasets', False) == '1':\n raise AnyDatasetRequiredException()\n return view_func(request, *args, **kwargs)\n\n return _wrapped_view\n return decorator", "def decorate_views(self):\n for view in self.get_login_views():\n self.decorate_view(view)", "def includeme(config):\n add_view(config)", "def __call__(request):", "def load_data(schema):\n\n def wrapper(func):\n @functools.wraps(func)\n def second_wrapper(*args, **kwargs):\n data = schema().load(request.get_json()).data\n return func(data=data, *args, **kwargs)\n\n return second_wrapper\n\n return wrapper", "def init_view(self):\n self.view_map = self.ctx.clientmap", "def inject(self, request: BaseRequest, args_view: list, kwargs_view: dict):", "def get_context_data(self, **kwargs):\n # Making an object of 'HomeView class'\n # 'context' variable is an object of Parent class 'HomeView'.\n # 'super()' identifies that, 'get_context_data' is a parent class's function.\n # Here, we are using Recursion\n context = super().get_context_data(**kwargs)\n # '['injection']' is a key and 'Basic Injection!' is the value which will be shown on Template.\n context['injection'] = \"Basic Injection!\"\n return context", "def _wrapped_view(request, *args, **kwargs):\n return view_func(request, *args, **kwargs)", "def common_template_data(request, revision=None, mime_type=None):\n\n cfg = request.cfg\n\n # Initialize data dictionary members (sorted alphanumerically)\n data = TemplateData(\n {\n \"annotate_href\": None,\n \"cfg\": cfg,\n \"docroot\": (\n cfg.options.docroot is None\n and request.script_name + \"/\" + docroot_magic_path\n or cfg.options.docroot\n ),\n \"download_href\": None,\n \"download_text_href\": None,\n \"graph_href\": None,\n \"home_href\": request.script_name or \"/\",\n \"kv\": request.kv,\n \"lockinfo\": None,\n \"log_href\": None,\n \"nav_path\": nav_path(request),\n \"pathtype\": None,\n \"prefer_markup\": ezt.boolean(0),\n \"queryform_href\": None,\n \"rev\": None,\n \"revision_href\": None,\n \"rootname\": (request.rootname and request.server.escape(request.rootname) or None),\n \"rootpath\": request.rootpath,\n \"roots_href\": None,\n \"roottype\": request.roottype,\n \"rss_href\": None,\n \"tarball_href\": None,\n \"up_href\": None,\n \"username\": request.username,\n \"view\": _view_codes[request.view_func],\n \"view_href\": None,\n \"vsn\": __version__,\n \"where\": request.server.escape(request.where),\n }\n )\n\n rev = revision\n if not rev:\n rev = request.query_dict.get(\"annotate\")\n if not rev:\n rev = request.query_dict.get(\"revision\")\n if not rev and request.roottype == \"svn\":\n rev = request.query_dict.get(\"pathrev\")\n try:\n data[\"rev\"] = hasattr(request.repos, \"_getrev\") and request.repos._getrev(rev) or rev\n except vclib.InvalidRevision:\n raise ViewVCException(\"Invalid revision\", \"404 Not Found\")\n\n if request.pathtype == vclib.DIR:\n data[\"pathtype\"] = \"dir\"\n elif request.pathtype == vclib.FILE:\n data[\"pathtype\"] = \"file\"\n\n if request.path_parts:\n dir = _path_join(request.path_parts[:-1])\n data[\"up_href\"] = request.get_url(\n view_func=view_directory, where=dir, pathtype=vclib.DIR, params={}, escape=1\n )\n\n if \"roots\" in cfg.options.allowed_views:\n data[\"roots_href\"] 
= request.get_url(view_func=view_roots, escape=1, params={})\n\n if request.pathtype == vclib.FILE:\n fvi = get_file_view_info(request, request.where, data[\"rev\"], mime_type)\n data[\"view_href\"] = fvi.view_href\n data[\"download_href\"] = fvi.download_href\n data[\"download_text_href\"] = fvi.download_text_href\n data[\"annotate_href\"] = fvi.annotate_href\n data[\"revision_href\"] = fvi.revision_href\n data[\"prefer_markup\"] = fvi.prefer_markup\n data[\"log_href\"] = request.get_url(view_func=view_log, params={}, escape=1)\n if request.roottype == \"cvs\" and cfg.options.use_cvsgraph:\n data[\"graph_href\"] = request.get_url(view_func=view_cvsgraph, params={}, escape=1)\n file_data = request.repos.listdir(request.path_parts[:-1], request.pathrev, {})\n entries = [item for item in file_data if item.name == request.path_parts[-1]]\n if len(entries) == 1:\n request.repos.dirlogs(request.path_parts[:-1], request.pathrev, entries, {})\n data[\"lockinfo\"] = entries[0].lockinfo\n elif request.pathtype == vclib.DIR:\n data[\"view_href\"] = request.get_url(view_func=view_directory, params={}, escape=1)\n if \"tar\" in cfg.options.allowed_views:\n data[\"tarball_href\"] = request.get_url(view_func=download_tarball, params={}, escape=1)\n if request.roottype == \"svn\":\n data[\"revision_href\"] = request.get_url(\n view_func=view_revision, params={\"revision\": data[\"rev\"]}, escape=1\n )\n\n data[\"log_href\"] = request.get_url(view_func=view_log, params={}, escape=1)\n\n if is_querydb_nonempty_for_root(request):\n if request.pathtype == vclib.DIR:\n params = {}\n if request.roottype == \"cvs\" and request.pathrev:\n params[\"branch\"] = request.pathrev\n data[\"queryform_href\"] = request.get_url(\n view_func=view_queryform, params=params, escape=1\n )\n data[\"rss_href\"] = request.get_url(\n view_func=view_query, params={\"date\": \"month\", \"format\": \"rss\"}, escape=1\n )\n elif request.pathtype == vclib.FILE:\n parts = _path_parts(request.where)\n where = _path_join(parts[:-1])\n data[\"rss_href\"] = request.get_url(\n view_func=view_query,\n where=where,\n pathtype=request.pathtype,\n params={\"date\": \"month\", \"format\": \"rss\", \"file\": parts[-1], \"file_match\": \"exact\"},\n escape=1,\n )\n return data", "def common(self):", "def decorator(request, *dargs, **dkwargs):\n _set_language_by_user(request)\n return view_func(request, *dargs, **dkwargs)", "def test_get_context_data(self):\n i = IndexView()\n self.assertIsInstance(i, IndexView, \"Should be an instance of IndexView\")\n context = i.get_context_data()\n\n self.assertIsNotNone(context, \"Context should not be None\")\n self.assertIsNotNone(context['sponsors'], \"Sponsors was None\")\n self.assertIsNotNone(context['communities'], \"Communities was None\")\n self.assertIsNotNone(context['news_items'], \"News items was none\")\n self.assertIsNotNone(context['images'], \"Images was none\")\n self.assertIsNotNone(context['journal_entries'], \"Journal entries was none\")", "def hydrate_arguments(cls, view_io: ViewIO) -> Dict:\n return {\n **super().hydrate_arguments(view_io),\n # TODO: should we add this here? 
probably not: \"software_system\"\n \"paper_size\": view_io.paper_size,\n \"automatic_layout\": AutomaticLayout.hydrate(view_io.automatic_layout)\n if view_io.automatic_layout\n else None,\n \"element_views\": map(ElementView.hydrate, view_io.element_views),\n \"relationship_views\": map(\n RelationshipView.hydrate, view_io.relationship_views\n ),\n }", "def getViews(read):\n ...", "def _localWhatDoINeed(self):\n needDict = super()._localWhatDoINeed()\n\n return needDict", "def requires_any_datastream():\n def decorator(view_func):\n \"\"\" for registred and logged user. NO redirect to login\"\"\"\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n dao = DataStreamDBDAO()\n query, total_resources = dao.query(account_id=request.account.id, language=request.user.language)\n if total_resources == 0 or request.GET.get('test-no-dataviews', False) == '1':\n raise AnyDatastreamRequiredException()\n return view_func(request, *args, **kwargs)\n\n return _wrapped_view\n return decorator", "def setup_view(self, view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def init_context_data(self):\n pass" ]
[ "0.6341154", "0.5741006", "0.5648095", "0.559915", "0.5597389", "0.55743283", "0.5551376", "0.5519761", "0.5490871", "0.54798126", "0.5479319", "0.5315314", "0.5302181", "0.5232341", "0.522839", "0.52071565", "0.5182045", "0.5180877", "0.51765704", "0.51497936", "0.51259214", "0.51148534", "0.51068836", "0.5103992", "0.51012576", "0.50993866", "0.5098858", "0.5095276", "0.50949687", "0.5076419" ]
0.5870001
1
Constructor. Unless otherwise specified it has a perfect quantum efficiency, samples at a rate of once per second and has a 0.1s integration time
def __init__(self, quantum_efficiency=1.0, sample_rate_times_per_second=1.0, integration_time_seconds=0.1): self.quantum_efficiency = quantum_efficiency self.sample_rate_times_per_second = sample_rate_times_per_second self.integration_time_seconds = integration_time_seconds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, time_constant: float, sampling_time: float):\n self.alpha = sampling_time / (time_constant + sampling_time)\n self.state = None", "def __init__(self, timer=120, rate=1, percent=0):\n self.timer = timer\n self.rate = rate\n self.percent = percent", "def __init__(self):\n super(ASYMMETRIC, self).__init__(quant_type=Constants.QZ_ASYMMETRIC)", "def __init__(self, window=Amount(1, Time.MINUTES), denominator=Time.SECONDS, clock=time):\n if not isinstance(window, Amount) or not isinstance(window.unit(), Time):\n raise ValueError('Expect bandwidth window to be an Amount of Time!')\n if not isinstance(denominator, Time):\n raise ValueError('Expect bandwidth rate denominator to be a Time unit!')\n self._window = window.as_(denominator)\n self._samples = deque()\n self._aggregate = 0\n self._clock = clock", "def __init__(self,l,options):\n #### Setup options\n self.options = options\n # For execution\n self.shots = 1000 if options.get('shots') == None\\\n else options.get('shots')\n self.seed = options.get('seed')\n if self.seed != None:\n from qiskit.aqua import aqua_globals\n aqua_globals.random_seed = self.seed\n self.prnt = options.get('print')\n self.ancilla_measure = options.get('ancilla') if options.get('ancilla') != None else False\n\n self.ibmq = False\n if options.get('ibmq') == True:\n print('Running on real quantum computer')\n self.ibmq = True\n self.backend = options['backend']\n from qiskit.tools.monitor import job_monitor\n self.monitor = job_monitor\n from attributes import get_measurement_fitter\n self.meas_fitter = get_measurement_fitter(l,\n self.backend,\n None,\n self.shots)\n \n else:\n # For Backend\n if options.get('backend') == None:\n self.options['backend'] = 'qasm_simulator' \n self.backend = qk.Aer.get_backend(options['backend'])\n # For noise model, coupling map and basis gates\n self.noise_model, self.coupling_map, self.basis_gates = None,None,None\n self.meas_fitter = None\n if options.get('device') != None:\n device = QuantumComputer(options.get('device'))\n if options.get('noise_model') != None:\n self.noise_model = device.noise_model\n # Create error mitigation fitter\n if options.get('meas_fit') in [None,True]:\n from attributes import get_measurement_fitter\n self.meas_fitter = get_measurement_fitter(l,\n self.backend,\n device,\n self.shots)\n if options.get('coupling_map') != None:\n self.coupling_map = device.coupling_map\n if options.get('basis_gates') != None:\n self.basis_gates = device.basis_gates\n # Qubit layout, virtual to physical\n self.layout = options.get('layout')\n # Optimization level\n self.optimization_level= 1 if options.get('optimization_level')==None else options['optimization_level']\n\n # GPU accelerated\n if options.get('gpu'):\n from qiskit_qcgpu_provider import QCGPUProvider\n Provider = QCGPUProvider()\n self.backend = Provider.get_backend(options['backend'])", "def __init__(self, n_per_sample, title, unit):\n self.min_sample = 0xffffffffffffffff\n self.max_sample = 0\n self.n_per_sample = n_per_sample\n self.title = title\n self.unit = unit\n self.sum_ = 0\n self.count = 0\n self.sample_lock = threading.Lock()", "def __init__(self, multiplier=1e-1):\r\n self.multiplier = multiplier", "def __init__(self, update_method, rate):\n self._delay = 1.0 / rate\n self._last_time = 0\n self._update_method = update_method", "def __init__(self,tau = 1e-3):\n self._tau = tau \n pass", "def __init__(self):\n self.counts = [0] * 10\n self.values = [2000] * 10\n self.epsilon = 0.1", "def set_rate(self, rate = 1e4, count = 1000, 
clk_source = 'ao/SampleClock', finite = True):\n if finite:\n ctr_mode = mx.int32(mx.DAQmx_Val_FiniteSamps)\n else:\n ctr_mode = mx.int32(mx.DAQmx_Val_ContSamps)\n ctr_rate = mx.float64(rate) #override python type\n ctr_count = mx.uInt64(int(count))\n self._clock_source = clk_source\n \n self.stop() #make sure task not running, \n # CfgSampClkTiming ( const char source[], float64 rate, int32 activeEdge, \n # int32 sampleMode, uInt64 sampsPerChan );\n # default clock source is subsystem acquisition clock\n try: \n self.task.CfgSampClkTiming(clk_source, ctr_rate, mx.DAQmx_Val_Rising, ctr_mode, ctr_count) \n #exact rate depends on hardware timer properties, may be slightly different from requested rate\n ctr_rate.value = 0\n self.task.GetSampClkRate(mx.byref(ctr_rate));\n self._rate = ctr_rate.value\n self._count = count\n #self._mode = 'buffered'\n except mx.DAQError as err:\n self.error(err)\n self._rate = 0", "def __init__(self, time_series=None, ij=(0, 0), method=None, lb=0, ub=None,\r\n prefer_speed_over_memory=True, scale_by_freq=True):\r\n\r\n BaseAnalyzer.__init__(self, time_series)\r\n #Initialize variables from the time series\r\n self.ij = ij\r\n\r\n #Set the variables for spectral estimation (can also be entered by\r\n #user):\r\n if method is None:\r\n self.method = {'this_method': 'welch'}\r\n\r\n else:\r\n self.method = method\r\n\r\n if self.method['this_method'] != 'welch':\r\n e_s = \"For SparseCoherenceAnalyzer, \"\r\n e_s += \"spectral estimation method must be welch\"\r\n raise ValueError(e_s)\r\n\r\n self.method['Fs'] = self.method.get('Fs', self.input.sampling_rate)\r\n\r\n #Additional parameters for the coherency estimation:\r\n self.lb = lb\r\n self.ub = ub\r\n self.prefer_speed_over_memory = prefer_speed_over_memory\r\n self.scale_by_freq = scale_by_freq", "def __init__(self, frequency, time_start=500.0 * pq.ms,\n time_stop=2000.0 * pq.ms):\n super(SpikeFrequencyObjective, self).__init__(time_start, time_stop)\n if isinstance(frequency, neo.core.AnalogSignal):\n self.frequency = AnalysedSignal(frequency).spike_frequency()\n else:\n self.frequency = pq.Quantity(frequency, units='Hz')", "def __init__(self, data_rate: Union[int, float], processing_window: int = None):\n super().__init__()\n self.data_rate = data_rate\n self.processing_window = processing_window if processing_window else data_rate\n self.raw_data_buffer = []\n self.processed_data_buffer = []\n self._is_one = None", "def __init__(self, t):\n\t\tself.delay = math.ceil(t / config.time_resolution)", "def __init__(self):\n \n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToPlay'])\n \n self.rate = rospy.Rate(200) # Loop at 50 Hz", "def __init__(self,min_instances=30, drift_level=3.0):\n\n from math import sqrt\n self.min_instances = min_instances\n self.drift_level = float(drift_level)\n self.i = None\n self.pi = None\n self.si = None\n self.pi_min = None\n self.si_min = None\n self.sqrt=sqrt\n self.reset()", "def __init__(self, seed_time_series=None, target_time_series=None,\r\n method=None, lb=0, ub=None, prefer_speed_over_memory=True,\r\n scale_by_freq=True):\r\n\r\n self.seed = seed_time_series\r\n self.target = target_time_series\r\n\r\n # Check that the seed and the target have the same sampling rate:\r\n if self.seed.sampling_rate != self.target.sampling_rate:\r\n e_s = \"The sampling rate for the seed time-series and the target\"\r\n e_s += \" time-series need to be identical.\"\r\n raise ValueError(e_s)\r\n\r\n #Set the variables for spectral estimation (can also be entered by\r\n 
#user):\r\n if method is None:\r\n self.method = {'this_method': 'welch'}\r\n\r\n else:\r\n self.method = method\r\n\r\n if ('this_method' in self.method.keys() and\r\n self.method['this_method'] != 'welch'):\r\n e_s = \"For SparseCoherenceAnalyzer, \"\r\n e_s += \"spectral estimation method must be welch\"\r\n raise ValueError(e_s)\r\n\r\n #Additional parameters for the coherency estimation:\r\n self.lb = lb\r\n self.ub = ub\r\n self.prefer_speed_over_memory = prefer_speed_over_memory\r\n self.scale_by_freq = scale_by_freq", "def __init__(self, N=1024, fc=50., tmax=1., delay=None):\n fc = float(fc)\n tmax = float(tmax)\n dt = tmax / (N-1)\n df = 1. / tmax\n istart = -N//2 + 1\n nroll = -istart\n t = np.arange(istart, N // 2 + 1) * dt\n f = np.arange(istart, N // 2 + 1) * df\n # arrange f as positve, negative freqs\n f = np.roll(f, -nroll)\n\n # freq domain ricker\n ricker_f = 2 * f**2 / (np.sqrt(np.pi) * fc**3) * np.exp(-(f/fc)**2)\n\n if delay is None:\n delay = 1.5 / fc\n ricker_f = ricker_f * np.exp(-1j * 2 * np.pi * f * delay)\n\n # time domain ricker\n ricker_t = N * np.real(np.fft.ifft(ricker_f))\n\n\n amp = np.absolute(ricker_f)\n phase = np.unwrap(np.angle(ricker_f, False))\n\n # ricker_f[0] contains the zero frequency term,\n # ricker_f[1:N//2] contains the positive-frequency terms,\n # ricker_f[N//2 + 1:] contains the negative-frequency terms,\n # in increasing order starting from the most negative frequency\n self.delay = delay\n self.fc = fc\n self.dt = dt\n # arange t and ricker_t in the order of increasing time; the zero phase case contains negative time\n\n self.ricker_t = np.roll(ricker_t, nroll)\n self.f = f\n self.t = t\n self.df = df\n self.ricker_f = ricker_f\n self.amp = amp\n self.phase = phase\n self.nroll = nroll", "def __init__(self, **params):\n # Dimension of the true signal x\n self.N = params.get('N', 1024)\n\n # Dimension of the measurement vector y\n self.M = params.get('M', 256)\n\n # Number of timesteps\n self.T = params.get('T', 4)\n\n # Type of the random measurement matrix to generate\n # (1) : normalized Gaussian matrix\n self.A_type = params.get('A_type', 1)\n\n # Active support probability\n self.lambda_ = params.get('lambda_', 0.08) # high sparsity default\n\n # Amplitude mean\n self.zeta = params.get('zeta', 0)\n\n # Amplitude variance\n self.sigma2 = params.get('sigma2', 1)\n\n # Amplitude innovation rate\n self.alpha = params.get('alpha', 0.10)\n\n # Active-to-inactive transition probability\n self.p01 = params.get('p01', 0.10)\n\n # Desired signal-to-noise ratio, in dB\n self.desired_SNR = params.get('desired_SNR', 25)", "def __init__(self, time, rate, type='sine'):\n self.t_v = np.zeros(time*rate)\n self.heading = np.zeros(self.t_v.shape)\n self.commands = np.zeros(self.t_v.shape)\n self.derivative = np.zeros(self.t_v.shape)", "def __init__(self, time, numerator, denominator):\n self.time = time\n self.numerator = numerator\n self.denominator = denominator", "def __init__(self, max_temp, cool_rate, eval_limit, benchmark):\n super().__init__(size=1,eval_limit=eval_limit, benchmark=benchmark)\n self._max_temp = max_temp\n self._cool_rate = cool_rate\n self._temp = None\n self._step = 0", "def __init__(self):\n\n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToSleep'])\n \n self.rate = rospy.Rate(200) # Loop at 50 Hz", "def __init__(self, duration=1, max_per_interval=5):\n\n self.duration = duration\n self.max_per_interval = max_per_interval\n\n self.stopwatch = StopWatch()\n self.num_processed_this_interval = 0", "def __init__(self, 
data, t0=None, sampling_interval=None,\r\n sampling_rate=None, duration=None, time=None, time_unit='s',\r\n metadata=None):\r\n\r\n #If a UniformTime object was provided as input:\r\n if isinstance(time, UniformTime):\r\n c_fac = time._conversion_factor\r\n #If the user did not provide an alternative t0, get that from the\r\n #input:\r\n if t0 is None:\r\n t0 = time.t0\r\n #If the user did not provide an alternative sampling interval/rate:\r\n if sampling_interval is None and sampling_rate is None:\r\n sampling_interval = time.sampling_interval\r\n sampling_rate = time.sampling_rate\r\n #The duration can be read either from the length of the data, or\r\n #from the duration specified by the time-series:\r\n if duration is None:\r\n duration = time.duration\r\n length = time.shape[-1]\r\n #If changing the duration requires a change to the\r\n #sampling_rate, make sure that this was explicitely required by\r\n #the user - if the user did not explicitely set the\r\n #sampling_rate, or it is inconsistent, throw an error:\r\n data_len = np.array(data).shape[-1]\r\n\r\n if (length != data_len and\r\n sampling_rate != float(data_len * c_fac) / time.duration):\r\n e_s = \"Length of the data (%s) \" % str(len(data))\r\n e_s += \"specified sampling_rate (%s) \" % str(sampling_rate)\r\n e_s += \"do not match.\"\r\n raise ValueError(e_s)\r\n #If user does not provide a\r\n if time_unit is None:\r\n time_unit = time.time_unit\r\n\r\n else:\r\n ##If the input was not a UniformTime, we need to check that there\r\n ##is enough information in the input to generate the UniformTime\r\n ##array.\r\n\r\n #There are different valid combinations of inputs\r\n tspec = tuple(x is not None for x in\r\n [sampling_interval, sampling_rate, duration])\r\n\r\n tspec_arg_names = [\"sampling_interval\",\r\n \"sampling_rate\",\r\n \"duration\"]\r\n\r\n #The valid configurations\r\n valid_tspecs = [\r\n #interval, length:\r\n (True, False, False),\r\n #interval, duration:\r\n (True, False, True),\r\n #rate, length:\r\n (False, True, False),\r\n #rate, duration:\r\n (False, True, True),\r\n #length, duration:\r\n (False, False, True)\r\n ]\r\n\r\n if tspec not in valid_tspecs:\r\n raise ValueError(\"Invalid time specification. 
\\n\"\r\n \"You provided: %s\\n %s see docstring for more info.\" % (\r\n str_tspec(tspec, tspec_arg_names),\r\n str_valid_tspecs(valid_tspecs, tspec_arg_names)))\r\n\r\n # Make sure to grab the time unit from the inputs, if it is provided:\r\n if time_unit is None:\r\n # If you gave us a duration with time_unit attached\r\n if isinstance(duration, TimeInterface):\r\n time_unit = duration.time_unit\r\n # Otherwise, you might have given us a sampling_interval with a\r\n # time_unit attached:\r\n elif isinstance(sampling_interval, TimeInterface):\r\n time_unit = sampling_interval.time_unit\r\n\r\n # Calculate the sampling_interval or sampling_rate from each other and\r\n # assign t0, if it is not already assigned:\r\n if sampling_interval is None:\r\n if isinstance(sampling_rate, Frequency):\r\n c_f = time_unit_conversion[time_unit]\r\n sampling_interval = sampling_rate.to_period() / float(c_f)\r\n elif sampling_rate is None:\r\n data_len = np.asarray(data).shape[-1]\r\n sampling_interval = float(duration) / data_len\r\n sampling_rate = Frequency(1.0 / sampling_interval,\r\n time_unit=time_unit)\r\n else:\r\n c_f = time_unit_conversion[time_unit]\r\n sampling_rate = Frequency(sampling_rate, time_unit='s')\r\n sampling_interval = sampling_rate.to_period() / float(c_f)\r\n else:\r\n if sampling_rate is None: # Only if you didn't already 'inherit'\r\n # this property from another time object\r\n # above:\r\n if isinstance(sampling_interval, TimeInterface):\r\n c_f = time_unit_conversion[sampling_interval.time_unit]\r\n sampling_rate = Frequency(1.0 / (float(sampling_interval) /\r\n c_f),\r\n time_unit=sampling_interval.time_unit)\r\n else:\r\n sampling_rate = Frequency(1.0 / sampling_interval,\r\n time_unit=time_unit)\r\n\r\n #Calculate the duration, if that is not defined:\r\n if duration is None:\r\n duration = np.asarray(data).shape[-1] * sampling_interval\r\n\r\n if t0 is None:\r\n t0 = 0\r\n\r\n # Make sure to grab the time unit from the inputs, if it is provided:\r\n if time_unit is None:\r\n #If you gave us a duration with time_unit attached\r\n if isinstance(duration, TimeInterface):\r\n time_unit = duration.time_unit\r\n #Otherwise, you might have given us a sampling_interval with a\r\n #time_unit attached:\r\n elif isinstance(sampling_interval, TimeInterface):\r\n time_unit = sampling_interval.time_unit\r\n\r\n #Otherwise, you can still call the common constructor to get the real\r\n #object initialized, with time_unit set to None and that will generate\r\n #the object with time_unit set to 's':\r\n TimeSeriesBase.__init__(self, data, time_unit, metadata=metadata)\r\n\r\n self.time_unit = time_unit\r\n self.sampling_interval = TimeArray(sampling_interval,\r\n time_unit=self.time_unit)\r\n self.t0 = TimeArray(t0, time_unit=self.time_unit)\r\n self.sampling_rate = sampling_rate\r\n self.duration = TimeArray(duration, time_unit=self.time_unit)", "def __init__(self, rate):\n super(RandomWander, self).__init__()\n self.iteration = 0\n self.rate = rate\n self.speed = 0\n self.heading = 0", "def __init__(self, filename, rate):\n self._filename = filename\n self._raw_rate = rate\n self._rate = rospy.Rate(rate)\n self._start_time = rospy.get_time()\n self._done = False\n #self._limb_left = baxter_interface.Limb(\"left\")\n #self._limb_right = baxter_interface.Limb(\"right\")", "def timing( self, input, band, temp, nsubint, nsubfreq, jump, saveDir, saveFile, verbose, exciseRFI ):\n\n timingObject = Timing( temp, input, band, nsubint, nsubfreq, jump, saveDir, saveFile, verbose, exciseRFI )", 
"def __init__(self, power=0, time=None):\n self.power = power\n self.time = time" ]
[ "0.6873452", "0.6581009", "0.6519729", "0.6232511", "0.6232355", "0.6188222", "0.616987", "0.61652625", "0.6155164", "0.61493057", "0.6139855", "0.6118475", "0.610823", "0.6098916", "0.6088264", "0.60863924", "0.60642815", "0.6009342", "0.60072887", "0.60061246", "0.59945565", "0.5977856", "0.5976353", "0.59715027", "0.5968276", "0.59658873", "0.59590596", "0.5948084", "0.59440064", "0.5942263" ]
0.80746883
0
Creates new task window
def new_task(self, widget):
    my_task_window = taskwindow.TaskWindow(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_task(event):\n manager = event.workbench.get_plugin('exopy.tasks')\n dialog = BuilderView(manager=manager,\n parent=event.parameters.get('parent_ui'),\n future_parent=event.parameters.get('future_parent'))\n result = dialog.exec_()\n if result:\n return dialog.config.build_task()\n else:\n return None", "def open_create_task_dialog(self):\n color = self.app.theme_cls.primary_color\n\n create_btn = MDFlatButton(text=\"OK\", text_color=color)\n cancel_btn = MDFlatButton(text=\"CANCEL\", text_color=color)\n name_field = MDTextField(hint_text=\"Name\")\n\n dialog = MDDialog(title=\"Create a new Task\",\n type=\"custom\",\n content_cls=name_field,\n buttons=[cancel_btn, create_btn])\n dialog.auto_dismiss = False\n\n def create_task(*args):\n self.task_list.dismiss()\n dialog.dismiss()\n\n self.task_manager.save(name_field.text)\n\n create_btn.bind(on_press=create_task)\n cancel_btn.bind(on_press=lambda x: dialog.dismiss())\n\n dialog.open()", "def addMainWindow(self,appendToTask):\n self.appendToTask = appendToTask", "def createWindow():\n\n windowName = \"ObjectSpawner\"\n\n if cmds.window(windowName, query=True, exists=True):\n cmds.deleteUI(windowName)\n\n cmds.window(windowName)\n\n populateUI()\n enableEditorDrop()\n\n cmds.showWindow(windowName)", "def init_task(self):\n all_tasks = self.db.get_tasks()\n scroll_parent = Window\n uw = self.ids.upcoming_wrapper\n\n if not all_tasks:\n new_btn = NewButton()\n new_btn.size_hint = [None, None]\n new_btn.size = [\n scroll_parent.width / 1.9,\n scroll_parent.height - (0.45 * scroll_parent.height),\n ]\n new_btn.bind(on_release=self.add_new)\n uw.add_widget(new_btn)\n else:\n for t in all_tasks:\n task = Task()\n task.name = t[1]\n task.details = t[2]\n date, time = t[3].rsplit(\" \", 1)\n x = self.compare_date(date)\n if x == \"today\":\n task.tsk_clr = (0.7, 0.45, 0.1, 0.6)\n elif x == \"past\":\n task.tsk_clr = (0.7, 0, 0, 0.6)\n\n task.time = time\n task.date = date\n\n task.size_hint = [None, None]\n task.size = [\n scroll_parent.width / 1.9,\n scroll_parent.height - (0.45 * scroll_parent.height),\n ]\n\n uw.add_widget(task)", "def _create_example_window():\n return Window({\"warning\": False, \"state\": \"close\"})", "def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win", "def open_task_list(self):\n scrollable_task = ScrollView()\n scrollable_task.add_widget(self.get_task_list())\n\n new_task_btn = RightBottomFloatingButton(\n icon='plus',\n md_bg_color=self.app.theme_cls.accent_color,\n elevation_normal=8)\n new_task_btn.bind(on_press=lambda x: self.open_create_task_dialog())\n\n self.task_list_layout.add_widget(scrollable_task)\n self.task_list_layout.add_widget(new_task_btn)\n\n self.task_list.open()", "def createWindow(self):\r\n\t\t# give the window a title\r\n\t\tself.parent.title( 'Acrobat Data Acquisition')\r\n\t\t# set the style\r\n\t\tself.style = ttk.Style()\r\n\t\tself.style.theme_use('default')\r\n\t\tself.pack(fill= tk.BOTH, expand=1)", "def OnAddTask(self, event):\n task_dlg = AddEditTask(self)\n task_dlg.ShowModal()", "def create(self):\n\n cv2.namedWindow(winname=self.title, flags=self.style)", "def open_task(self, instance):\n self.task_manager.load(instance.text)\n\n # Build the task in editor\n for component in self.task_manager.task.graph.V:\n self.add_component(component)\n for tagged_link in self.task_manager.task.graph[component]:\n self.task_manager.add_editor_link(\n component,\n self.task_manager.task.graph.V[tagged_link.vertex_index],\n self.ids.edit_area,\n index=tagged_link.tag)\n 
self.task_list.dismiss()", "def _create_window(self):\n wc = win32gui.WNDCLASS()\n wc.lpfnWndProc = self._on_message\n wc.lpszClassName = self.__class__.__name__\n wc.hInstance = win32api.GetModuleHandle(None)\n class_atom = win32gui.RegisterClass(wc)\n return win32gui.CreateWindow(class_atom, self.__class__.__name__, 0, 0, 0, 0, 0, 0, 0, wc.hInstance, None)", "def create_window(session):\n def create_window():\n windows_before = session.handles\n name = session.execute_script(\"window.open()\")\n assert len(session.handles) == len(windows_before) + 1\n new_windows = list(set(session.handles) - set(windows_before))\n return new_windows.pop()\n return create_window", "def click_add():\n # TODO: 1. In frontend_script.py, create function \"create_window()\" that takes a Toplevel() as a parameter.\n # TODO: 2. In this file, implement the code below\n # new_window = Toplevel(root)\n # frontend_script.create_window(new_window)", "def _open_window(self):\r\n\t\t# Creating the window\r\n\t\tself._window = Window(self, Locations.RESTAL)", "def create(self, verbose=False):\r\n # delete the window if its handle exists\r\n if cmds.window(self.window, exists=True):\r\n cmds.deleteUI(self.window)\r\n # initialize the window as a pane for docking\r\n self.window = cmds.loadUI(uiFile=self.uiFile, verbose=verbose)\r\n #layoutWin = cmds.paneLayout(configuration='single')\r\n # create a dockControl and parent the control to layoutWin\r\n cmds.dockControl(allowedArea='all', area='right', floating=False, \r\n height=cmds.window(self.window, query=True, height=True), \r\n content=self.window, label='Docked Cone Pointer Window')\r\n cmds.showWindow(self.window)", "def see_tasks(self, widget):\n my_task_list = tasklistwindow.TaskListWindow(self.task_list)", "def showUI(cls):\r\n win = cls(uiFile)\r\n win.create()\r\n return win", "def createApp(self):\n app = self.app\n window = self.window\n window.show()\n app.exec()", "def create_main_window_with_browser():\n main_win = create_main_window()\n return main_win.add_browser_tab()", "def create_window(window_class):\n app_created = False\n app = QtCore.QCoreApplication.instance()\n if app is None:\n app = QtGui.QApplication(sys.argv)\n app_created = True\n app.references = set()\n window = window_class()\n app.references.add(window)\n window.show()\n if app_created:\n app.exec_()\n return window", "def create_task():", "def create_window(self, img, roi, name):\n\n self.window = SpinBalanceDialog()\n\n # call the user-implemented functionality\n self.window.main(img, roi)\n # show the window\n self.window.show()\n\n return self.window", "def open_main_window(self):\r\n track_terms_dic = ''\r\n sg.theme(self.look)\r\n\r\n layout = [[sg.Text('Welcome to tweeet monitor ')],\r\n [sg.Text('Please enter Details ')],\r\n [sg.Text('User Mail', size=(15, 1)), sg.InputText()],\r\n [sg.Text('Timout', size=(15, 1)), sg.InputText('', enable_events=True, key='-DIGITS-')],\r\n [sg.Text('')],\r\n [sg.Text('You can select an existing list or create a new one '),\r\n sg.Combo(self.files, default_value='Select Track Terms List ', key='-COMBO1-')],\r\n [sg.Text('')],\r\n [sg.Button('Select Exists List'), sg.Button('Create a New List')],\r\n [sg.Text('\\n')],\r\n [sg.Button('Start Monitor'), sg.Button('Exit')]\r\n ]\r\n\r\n window = sg.Window('Monitor tweeter', layout)\r\n # Event Loop\r\n while True:\r\n event, values = window.read()\r\n\r\n if event == sg.WIN_CLOSED:\r\n exit()\r\n elif event == 'Select Exists List' or event == 'Create a New List' or event == 'Start Monitor':\r\n 
user_mail = values[0]\r\n timeout = values['-DIGITS-']\r\n list_dic = values['-COMBO1-']\r\n\r\n if self.check(user_mail) == 'Invalid Email':\r\n self.info_popup_window('You Enter not valid mail ', 'Info', self.look)\r\n elif event == 'Select Exists List':\r\n if list_dic == 'Select Track Terms List ':\r\n self.info_popup_window('Track Terms List ', 'Info', self.look)\r\n else:\r\n file_name = self.path + self.bachslash + list_dic\r\n os.system(file_name)\r\n track_terms_dic = list_dic\r\n elif event == 'Create a New List':\r\n track_terms_dic = self.open_window()\r\n track_terms_dic = track_terms_dic + '.txt'\r\n elif event == 'Start Monitor':\r\n if track_terms_dic == '':\r\n self.info_popup_window('Please, Create new Dictionary or select one ', 'Info', self.look)\r\n elif track_terms_dic != '':\r\n file_name = self.path + self.bachslash + track_terms_dic\r\n my_file = open(file_name, \"r\")\r\n content = my_file.read()\r\n content = content.split(\"\\n\")\r\n content = self.cleanList(content)\r\n # print(content)\r\n my_file.close()\r\n now = datetime.now()\r\n date_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\r\n dict_list = {'User': user_mail,\r\n 'Timeout': timeout,\r\n 'Dictionary': list_dic,\r\n 'Create Date': date_time,\r\n 'track_terms_list': content\r\n }\r\n header = ['user_mail', 'Timeout', 'Dictionary', 'Create Date', 'list words']\r\n if os.path.isfile(self.file_track_terms_audit) == False:\r\n # check if the file exsist = if not: create file and print header to the file\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n write.writerow(header)\r\n write.writerows(self.values_list)\r\n file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n else:\r\n self.values_list = list(dict_list.values())\r\n # print ('self.values_list :****',self.values_list)\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n self.values_list = [self.values_list]\r\n write.writerows(self.values_list)\r\n file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n print('self.values_list:', self.values_list)\r\n\r\n window.close()\r\n\r\n print('track_terms_dic: ', track_terms_dic)\r\n print('dict_list:', dict_list)\r\n return (dict_list)\r\n\r\n # always check for closed window\r\n if event in (sg.WIN_CLOSED, 'Exit'):\r\n break\r\n\r\n if event == '-LIST-' and len(values['-LIST-']):\r\n sg.popup('Selected ', values['-LIST-'])\r\n\r\n if len(values['-DIGITS-']) and values['-DIGITS-'][-1] not in ('0123456789'):\r\n # delete last char from input\r\n window['-DIGITS-'].update(values['-DIGITS-'][:-1])\r\n\r\n window.close()", "def createNewPeaPodWindow(**kwargs):\n\tglobal currentPeaWindow\n\tnewWindow=dataWindow(window,\"Create Pea Pod\")\n\t#Update the global variable\n\tcurrentPeaWindow=newWindow\n\tnewWindow.functionToRun=initiateNewPeaPod\n\t#Create Section\n\tallPodNames=masterPod.currentMasterPod.peas.keys()\n\tpodName=dataSection(newWindow.contentArea,advancedEntry,\"Pod Name\",cannotContain=allPodNames)\n\tpodName.pack()\n\ttemplateOption=dataSection(newWindow.contentArea,advancedOptionMenu,\"Template\",\n\t\t\t\t\t\t\t values=podTemplate.templates.keys(),default=\"Login\",\n\t\t\t\t\t\t\t optionCommand=changePopupColour)\n\ttemplateOption.pack()\n\n\t#Add the sections\n\tnewWindow.addDataSection(podName)\n\tnewWindow.addDataSection(templateOption)\n\t#Ititate colour\n\tchangePopupColour(\"Login\")", "def 
createPrefWindow(self):\n if (not hasattr(self, \"pref_window\")):\n self.pref_window = PrefWindow(self, self.settings)\n self.pref_window.show()\n self.pref_window.button_apply.setEnabled(True)", "def create_new_tab(default_script_content=\"\"):\n pass", "def open_mwindow_timetable(self) -> None:\n self.mwindow_timetable.show()", "def new_task(self):\n print \"Create a new task.\"\n\n # Collect new task info from user\n description = raw_input(\"Enter task (140 characters max) > \")\n due_date = raw_input(\"Enter due date as 'year-mm-dd' (optional). > \")\n tags = raw_input(\n \"Enter tags for the task (comma separated) (optional). > \")\n tag_list = [tag.strip() for tag in tags.split(',')]\n try:\n new_task = doto.Task(self.user, description, due_date, tag_list)\n except (NameError, ValueError) as e:\n # On error, print and return.\n print \"Task not created. Error: \", e\n raw_input(\"Press Enter to continue.\")\n return\n self.current_collection.add(new_task)\n return" ]
[ "0.7113246", "0.70224667", "0.6907756", "0.68215317", "0.6519921", "0.6381383", "0.637192", "0.6319704", "0.62721485", "0.6223385", "0.61893445", "0.6168583", "0.61627823", "0.6148006", "0.6140917", "0.6093474", "0.5996884", "0.5986673", "0.5972583", "0.59595525", "0.5941826", "0.59259117", "0.5908", "0.59036815", "0.58875144", "0.5878807", "0.58696157", "0.5827885", "0.5790684", "0.57463086" ]
0.81975675
0
Shows a window with all the tasks and alarms
def see_tasks(self, widget):
    my_task_list = tasklistwindow.TaskListWindow(self.task_list)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dashboard_showall():\n tasks = Task.query.all()\n return render_template('home/taskshowall/dashboard_showall.html',\n tasks=tasks, title=\"Tasks\")", "def show_tasks():\n\n task = Task(connection=connection, cursor=cursor)\n\n all_tasks = task.get_all_tasks()\n\n context = {\n 'all_tasks': all_tasks\n }\n\n return render_template('pages/tables/tasks.html', **context)", "def open_mwindow_timetable(self) -> None:\n self.mwindow_timetable.show()", "def open_mwindow_agenda(self) -> None:\n self.mwindow_agenda.show()", "def show(self):\n i = 0\n print()\n for task in self.tasks:\n print(\"\\t\", i + 1, \". \", task.name, \"(\", task.priority, \")\")\n i += 1", "def show_tasks():\n top_level_tasks = query_with_results(\"select label, description from task where parent = ''\", [])\n for task in top_level_tasks:\n _show_task(task)", "def show_task1(self):\n self._show_task(self.controller.CURRENT)", "def open_main_window(self):\r\n track_terms_dic = ''\r\n sg.theme(self.look)\r\n\r\n layout = [[sg.Text('Welcome to tweeet monitor ')],\r\n [sg.Text('Please enter Details ')],\r\n [sg.Text('User Mail', size=(15, 1)), sg.InputText()],\r\n [sg.Text('Timout', size=(15, 1)), sg.InputText('', enable_events=True, key='-DIGITS-')],\r\n [sg.Text('')],\r\n [sg.Text('You can select an existing list or create a new one '),\r\n sg.Combo(self.files, default_value='Select Track Terms List ', key='-COMBO1-')],\r\n [sg.Text('')],\r\n [sg.Button('Select Exists List'), sg.Button('Create a New List')],\r\n [sg.Text('\\n')],\r\n [sg.Button('Start Monitor'), sg.Button('Exit')]\r\n ]\r\n\r\n window = sg.Window('Monitor tweeter', layout)\r\n # Event Loop\r\n while True:\r\n event, values = window.read()\r\n\r\n if event == sg.WIN_CLOSED:\r\n exit()\r\n elif event == 'Select Exists List' or event == 'Create a New List' or event == 'Start Monitor':\r\n user_mail = values[0]\r\n timeout = values['-DIGITS-']\r\n list_dic = values['-COMBO1-']\r\n\r\n if self.check(user_mail) == 'Invalid Email':\r\n self.info_popup_window('You Enter not valid mail ', 'Info', self.look)\r\n elif event == 'Select Exists List':\r\n if list_dic == 'Select Track Terms List ':\r\n self.info_popup_window('Track Terms List ', 'Info', self.look)\r\n else:\r\n file_name = self.path + self.bachslash + list_dic\r\n os.system(file_name)\r\n track_terms_dic = list_dic\r\n elif event == 'Create a New List':\r\n track_terms_dic = self.open_window()\r\n track_terms_dic = track_terms_dic + '.txt'\r\n elif event == 'Start Monitor':\r\n if track_terms_dic == '':\r\n self.info_popup_window('Please, Create new Dictionary or select one ', 'Info', self.look)\r\n elif track_terms_dic != '':\r\n file_name = self.path + self.bachslash + track_terms_dic\r\n my_file = open(file_name, \"r\")\r\n content = my_file.read()\r\n content = content.split(\"\\n\")\r\n content = self.cleanList(content)\r\n # print(content)\r\n my_file.close()\r\n now = datetime.now()\r\n date_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\r\n dict_list = {'User': user_mail,\r\n 'Timeout': timeout,\r\n 'Dictionary': list_dic,\r\n 'Create Date': date_time,\r\n 'track_terms_list': content\r\n }\r\n header = ['user_mail', 'Timeout', 'Dictionary', 'Create Date', 'list words']\r\n if os.path.isfile(self.file_track_terms_audit) == False:\r\n # check if the file exsist = if not: create file and print header to the file\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n write.writerow(header)\r\n write.writerows(self.values_list)\r\n 
file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n else:\r\n self.values_list = list(dict_list.values())\r\n # print ('self.values_list :****',self.values_list)\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n self.values_list = [self.values_list]\r\n write.writerows(self.values_list)\r\n file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n print('self.values_list:', self.values_list)\r\n\r\n window.close()\r\n\r\n print('track_terms_dic: ', track_terms_dic)\r\n print('dict_list:', dict_list)\r\n return (dict_list)\r\n\r\n # always check for closed window\r\n if event in (sg.WIN_CLOSED, 'Exit'):\r\n break\r\n\r\n if event == '-LIST-' and len(values['-LIST-']):\r\n sg.popup('Selected ', values['-LIST-'])\r\n\r\n if len(values['-DIGITS-']) and values['-DIGITS-'][-1] not in ('0123456789'):\r\n # delete last char from input\r\n window['-DIGITS-'].update(values['-DIGITS-'][:-1])\r\n\r\n window.close()", "def _show_task_list_panel(self):\n self.task_list_panel.show()\n self.task_list_panel.load_config()\n # set geometry\n _button_pos = self.task_frame.tasklist_button.pos()\n _button_pos = self.task_frame.mapTo(self, _button_pos)\n _button_height = self.task_frame.tasklist_button.height()\n _glo_pos = self.mapTo(tomaya.GetMayaMainWindowPoint(), _button_pos)\n self.task_list_panel.setGeometry(_glo_pos.x(), _glo_pos.y() + _button_height, self.task_frame.width(), tomaya.GetMayaMainWindowPoint().height()*1/2.0)", "def show_tasks(self):\n task_ids = [\n t and t['id'] for t in self.controller.selected_tasks\n ]\n\n if self._check_cluster():\n self.print_list(\n ('id', 'status'), self.controller.get_tasks(),\n lambda x: task_ids.index(x['id'])\n )", "def show_all_tasks(self):\n tasks = self.session.query(self.Table).order_by(self.Table.deadline).all()\n print('All tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}. {task.deadline.strftime(\"%d %b\")}')\n else:\n print('Nothing to do!')\n print()", "def show(self, window):\r\n\r\n return", "def list():\n manager = Actions()\n tasks_list = manager.get_tasks_list()\n console_utils.print_tree(manager, tasks_list)", "def showtask(id):\n\n tasks = Task.query.filter_by(id=id)\n return render_template('home/taskshowall/dashboard_showtask.html',tasks=tasks,title=\"tasks\")", "def show_weeks_tasks(self):\n for day in [datetime.today() + timedelta(days=i) for i in range(7)]:\n tasks = self.session.query(self.Table).filter(self.Table.deadline == day.strftime('%Y-%m-%d')).\\\n order_by(self.Table.deadline).all()\n print(f'{day.strftime(\"%A\")} {day.strftime(\"%d %b\")}:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. 
{task.task}')\n else:\n print('Nothing to do!')\n print()", "def list_tasks(self, toplevel=True, keys=None):\n\n message = {}\n # show all tasks by default\n if keys == None:\n keys = self.list_task_keys()\n\n for key in keys:\n try:\n last_run_instance = TaskInstance.objects.filter(task_key=key).exclude(completed=None).order_by('-completed').values_list('completed','task_key')[0]\n last_run = time.mktime(last_run_instance[0].timetuple())\n #no instances\n except (KeyError, IndexError):\n last_run = None\n\n # render the form if the task has one\n task = self.registry[key, None].tasks[key]\n if task.form:\n t = loader.get_template('task_parameter_form.html')\n c = Context ({'form':task.form()})\n rendered_form = t.render(c)\n else:\n rendered_form = None\n\n message[key] = {'description':task.description ,\n 'last_run':last_run,\n 'form':rendered_form}\n\n return message", "def __show_all(self):\n print(\"\\nEvents:\\n\")\n self.__show_all_events()\n print(\"\\nMetrics:\\n\")\n self.__show_all_metrics()", "def show(self,window):\n self.showFunctions(window)", "def open_notifications(self):\n self.android_device_driver.adb.exec_adb_cmd(\n \"shell cmd statusbar expand-notifications\").wait()", "def show_all(self):\n for UI_object in self.__UI_objects:\n UI_object.show()", "def window_tasks(self):\n if self._handle != win32gui.GetForegroundWindow():\n #print \"not in foreground\"\n self.restore_window()\n self.fix_ui()\n self.set_foreground()\n\n self._shell.AppActivate(self._handle)", "def run(self):\n while True:\n self.menu()\n if self.menu_choice == '1':\n self.show_today_tasks()\n elif self.menu_choice == '2':\n self.show_weeks_tasks()\n elif self.menu_choice == '3':\n self.show_all_tasks()\n elif self.menu_choice == '4':\n self.show_missed_tasks()\n elif self.menu_choice == '5':\n self.add_task()\n elif self.menu_choice == '6':\n self.delete_task()\n else:\n print('Bye!')\n break", "def show_messages(self):\n console.alert(\n \"Info\",\n \"If StaSh does not launch anymore after you changed the config, run the 'launch_stash.py' script with \\n'--no-cfgfile'.\",\n \"Ok\",\n hide_cancel_button=True,\n )\n while True:\n self.wait_modal()\n if not self.subview_open:\n break\n console.alert(\n \"Info\",\n \"Some changes may only be visible after restarting StaSh and/or Pythonista.\",\n \"Ok\",\n hide_cancel_button=True,\n )", "def show(self):\n self.present(orientations=ORIENTATIONS)\n # launch a background thread\n # we can not use ui.in_background here\n # because some dialogs would not open anymoe\n thr = threading.Thread(target=self.show_messages)\n thr.daemon = True\n thr.start()", "def init_task(self):\n all_tasks = self.db.get_tasks()\n scroll_parent = Window\n uw = self.ids.upcoming_wrapper\n\n if not all_tasks:\n new_btn = NewButton()\n new_btn.size_hint = [None, None]\n new_btn.size = [\n scroll_parent.width / 1.9,\n scroll_parent.height - (0.45 * scroll_parent.height),\n ]\n new_btn.bind(on_release=self.add_new)\n uw.add_widget(new_btn)\n else:\n for t in all_tasks:\n task = Task()\n task.name = t[1]\n task.details = t[2]\n date, time = t[3].rsplit(\" \", 1)\n x = self.compare_date(date)\n if x == \"today\":\n task.tsk_clr = (0.7, 0.45, 0.1, 0.6)\n elif x == \"past\":\n task.tsk_clr = (0.7, 0, 0, 0.6)\n\n task.time = time\n task.date = date\n\n task.size_hint = [None, None]\n task.size = [\n scroll_parent.width / 1.9,\n scroll_parent.height - (0.45 * scroll_parent.height),\n ]\n\n uw.add_widget(task)", "def new_task(self, widget):\n my_task_window = taskwindow.TaskWindow(self)", 
"def show_window(self):\n self.show()", "def __show_all_events(self):\n for event in self.events_list:\n self.__print_events_info(event)\n print()", "def homescreen(self):\r\n self.root.ids.items_box.clear_widgets()\r\n homescreen_information = Label(\r\n text='Welcome.\\n\\n\\nThis was designed by Charles to calculate the hours different\\n'\r\n 'workgroups work at CIPS and WPS.\\n\\nTo begin, press the Load Data button on'\r\n ' the \\nleft hand side of this window.', font_size=20, halign='center')\r\n self.root.ids.items_box.add_widget(homescreen_information)", "def addMainWindow(self,appendToTask):\n self.appendToTask = appendToTask" ]
[ "0.6601088", "0.6460697", "0.628572", "0.6235331", "0.61510676", "0.6129666", "0.6097378", "0.607427", "0.6070758", "0.6047063", "0.59771895", "0.59053344", "0.58603084", "0.58067805", "0.58024603", "0.57794356", "0.57792646", "0.5776163", "0.57633907", "0.5740602", "0.5713612", "0.567834", "0.5635513", "0.5630081", "0.5627097", "0.5603862", "0.558525", "0.55813694", "0.55716866", "0.5569356" ]
0.69399023
0
aic(timeSeries, ssc=0) > data, max_weight, max_weight_params
def aic(timeSeries, ssc=0):
    if np.min(timeSeries) <= 0:
        timeSeries = timeSeries + -np.min(timeSeries) + .01

    # create histogram to determine plot values
    # note that the original uses hist centers, this uses edges. It may matter
    counts, plotvals_edges = np.histogram(timeSeries, 50)
    plotvals = np.array([np.mean([plotvals_edges[i], plotvals_edges[i+1]]) for i in range(plotvals_edges.shape[0]-1)])

    distributions = ['normal', 'lognormal', 'exponential', 'pareto', 'boundedpl'] #no gamma currently
    #pdfs = [dict(name=dist) for dist in distributions]
    pdfs = defaultdict(dict)
    aicvals = defaultdict(dict)

    # calculate maximum likelihood for core distributions
    # calculate log likelihood value at maximum
    # find k (number of params)
    # generate probability density function using parameters
    kvals = dict()
    for dist in distributions:
        aicvals[dist]['mle'] = aicmle(timeSeries, dist)
        aicvals[dist]['nll'] = aiclike(timeSeries, aicvals[dist]['mle'], dist)
        kvals[dist] = len(aicvals[dist]['mle'])
        pdfs[dist]['vals'] = aicpdf(plotvals, dist, aicvals[dist]['mle'])

    # plot histogram and mle pdf
    # note: only creates the data to make a plot, does not actually generate it
    for dist in distributions:
        scaling = np.sum(counts) / np.sum(pdfs[dist]['vals'])
        aicvals[dist]['plots'] = {}
        aicvals[dist]['plots']['xvals'] = plotvals
        aicvals[dist]['plots']['datay'] = counts
        aicvals[dist]['plots']['aicy'] = pdfs[dist]['vals'] * scaling

    # check for small sample correction
    if timeSeries.shape[0] / np.max(kvals.values()) < 40:
        ssc = 1

    # calculate akaike information criteria
    for dist in distributions:
        aicvals[dist]['aic'] = 2 * aicvals[dist]['nll'] + 2 * kvals[dist]
        if ssc == 1:
            aicvals[dist]['aic'] = aicvals[dist]['aic'] + 2 * kvals[dist] * (kvals[dist] + 1) / (timeSeries.shape[0] - kvals[dist] - 1)

    # calculate AIC differences and akaike weights
    aicmin = np.min([aicvals[dist]['aic'] for dist in distributions])
    for dist in distributions:
        aicvals[dist]['aicdiff'] = aicvals[dist]['aic'] - aicmin
    aicsum = 0
    for dist in distributions:
        aicsum = aicsum + np.exp(-aicvals[dist]['aicdiff'] / 2)
    for dist in distributions:
        aicvals[dist]['weight'] = np.exp(-aicvals[dist]['aicdiff'] / 2) / aicsum

    max_weight_val = np.max([aicvals[dist]['weight'] for dist in distributions])
    max_weight = [key for key, value in aicvals.items() if value['weight'] == max_weight_val][0]
    max_weight_params = aicvals[max_weight]['mle']

    return aicvals, max_weight, max_weight_params
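For orientation, the Akaike-weight step at the end of aic can be reproduced on its own. The sketch below is illustrative only: the AIC values are invented, not taken from any record here, and it mirrors just the difference/weight arithmetic from the function above.

import numpy as np

# invented AIC values for three candidate distributions (illustration only)
aics = {'normal': 412.3, 'lognormal': 398.7, 'exponential': 405.1}

aicmin = min(aics.values())
aicdiff = {dist: val - aicmin for dist, val in aics.items()}                 # AIC differences
aicsum = sum(np.exp(-d / 2.0) for d in aicdiff.values())
weights = {dist: np.exp(-d / 2.0) / aicsum for dist, d in aicdiff.items()}   # Akaike weights

best = max(weights, key=weights.get)
print(best, round(float(weights[best]), 3))   # -> lognormal 0.96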
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aic_ms(distribution):\n print(\"TESTING: AIC model selection for %s distribution\" % distribution.upper())\n params = dist.DISTRIBUTIONS[distribution][dist.KEY_TEST_PARAMS]\n print(\" creating sample\")\n test_sample = dist.samples(distribution, params)\n print(\" calculating AIC for all distributions\")\n fit_results = {}\n aic = {}\n for d in dist.get():\n fit_results[d] = fit.fit_mle(d, test_sample)\n aic[d] = me.aic_measure(dist.log_likelihood(d, fit_results[d]['params'], test_sample, nonzero_only=True),\n len(fit_results[d]['params']))\n delta_aic = {d: aic[d]-min(aic.values()) for d in aic}\n weights = {d: float(exp(-delta_aic[d]/2)) for d in delta_aic}\n best_model = dist.get()[0]\n print(\" input parameters: %s\" % dist.get_params(params, distribution))\n for d in dist.get():\n if weights[d] > weights[best_model]:\n best_model = d\n weights[d] /= sum(weights.values())\n print(\" %s:\" % d.upper())\n print(\" %s\" % dist.get_params(fit_results[d]['params'], d))\n print(\" AIC = %.0f\" % aic[d])\n print(\" dAIC = %.0f\" % delta_aic[d])\n print(\" w = %r\" % weights[d])\n print(\" Most likely model: %s\" % best_model.upper())\n print_pmfs(test_sample, fit_results, 'TEST-AIC.CSV')", "def is_good_qualtiative_example(iaa_score, ann1_total, ann2_total):\n return iaa_score > .3 and iaa_score < 1 and ann1_total > 3 and ann2_total > 3", "def aiclike(timeSeries, params, distribution):\n if distribution == 'pareto':\n nloglval = -(timeSeries.shape[0] * np.log(params['mu']) + timeSeries.shape[0] * params['mu'] * np.log(params['xmin']) - (params['xmin']+1) * np.sum(np.log(timeSeries)))\n return nloglval\n \n elif distribution == 'lognormal':\n nloglval = np.sum(np.log(timeSeries * params['sigma'] * np.sqrt(2*np.pi)) + (np.log(timeSeries) - params['mu'])**2 / (2 * params['sigma']**2))\n return nloglval\n \n elif distribution == 'normal':\n nloglval = np.sum(np.log( params['sigma'] * np.sqrt(2*np.pi) ) + (timeSeries - params['mu'])**2 / (2 * params['sigma']**2))\n return nloglval\n \n elif distribution == 'exponential':\n nloglval = np.sum(params['lambda'] * timeSeries - np.log(params['lambda']))\n return nloglval\n \n elif distribution == 'boundedpl':\n nloglval = -len(timeSeries) * np.log( (params['mu'] - 1) / (np.min(timeSeries)**(1 - params['mu']) - np.max(timeSeries)**(1 - params['mu']))) + params['mu'] * np.sum(np.log(timeSeries))\n return nloglval", "def aic(self):\n if hasattr(self, '_aic'):\n return self._aic\n else:\n self._aic = 2 * self.k + 2 * self.neg_ll()\n return self._aic", "def adaptive_selection(p_values, data, alpha):\n HC_stat, max_idx_HC, HC_vec = HC_update(p_values, alpha)\n cscshm_stat, max_idx_cscshm, cscshm_vec = CsCsHM_update(p_values, alpha)\n thresh_HC = data[max_idx_HC]\n thresh_cscshm = data[max_idx_cscshm]\n nonnull_HC = np.where(data < thresh_HC, 0, data)\n nonnull_cscshm = np.where(data < thresh_cscshm, 0, data)\n return nonnull_HC, nonnull_cscshm", "def AIC(y,yhat,k):\r\n residuals = y-yhat\r\n sse = np.sum(residuals**2) #sum of squared errors\r\n N = len(y)\r\n Lhat = sse/N\r\n return(2*k + N*np.log(Lhat))", "def test_adf(self):\n\n dftest = adfuller(self.ts_df['y'], autolag='AIC')\n dfoutput = pd.Series(dftest[0:4],\n index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])\n for key, value in dftest[4].items():\n dfoutput['Critical Value (%s)' % key] = value\n print(dfoutput)\n if dftest[0] > dftest[4]['5%']:\n print(\n \"Test statistic greater than critical value at 5% --> series seems to be not stationary. 
\"\n \"Look at critical values at 1% and 10% too, ideally they also should be less than test statistic.\")\n else:\n print(\n \"Test statistic less than critical value at 5% --> series seems to be stationary. \"\n \"Look at critical values at 1% and 10% too, ideally they also should be greater than test statistic.\")", "def opt_optical_absorption(aref, asig, traw, awl, aoff, tcal, tbins, ta_arr,\n cpd_ts, cwl, T, PS, rwlngth=715.):\n # reset shapes of input arguments\n # using np.array ndmin=# seems faster than using np.atleast_#d\n aref = np.array(aref, ndmin=2)\n asig = np.array(asig, ndmin=2)\n traw = np.array(traw, ndmin=1)\n awl = np.around(np.array(awl, ndmin=2), decimals=1)\n aoff = np.array(aoff, ndmin=2)\n tcal = np.array(tcal, ndmin=1)\n tbins = np.array(tbins, ndmin=2)\n # note, np.atleast_3d appends the extra dimension;\n # np.array using ndmin prepends the extra dimension.\n ta_arr = np.array(ta_arr, ndmin=3)\n cpd_ts = np.array(cpd_ts, ndmin=2)\n cwl = np.array(cwl, ndmin=2)\n T = np.array(T, ndmin=1)\n PS = np.array(PS, ndmin=1)\n\n # size up inputs\n npackets = awl.shape[0]\n nwavelengths = awl.shape[1]\n # initialize output array\n apd_ts_s = np.zeros([npackets, nwavelengths])\n\n for ii in range(npackets):\n\n # calculate the internal instrument temperature [deg_C]\n tintrn = opt_internal_temp(traw[ii])\n\n # calculate the uncorrected optical absorption coefficient [m^-1]\n apd, _ = opt_pd_calc(aref[ii, :], asig[ii, :], aoff[ii, :], tintrn,\n tbins[ii, :], ta_arr[ii, :, :])\n\n # correct the optical absorption coefficient for temperature and salinity.\n apd_ts = opt_tempsal_corr('a', apd, awl[ii, :], tcal[ii], T[ii], PS[ii])\n\n # correct the optical absorption coefficient for scattering effects\n apd_ts_s_row = opt_scatter_corr(apd_ts, awl[ii, :], cpd_ts[ii, :], cwl[ii, :], rwlngth)\n apd_ts_s[ii, :] = apd_ts_s_row\n\n # return the temperature, salinity and scattering corrected optical\n # absorption coefficient OPTABSN_L2 [m^-1]\n return apd_ts_s", "def utility_sarimax_predict(series,nforecast=1,loga=False,plot_figures_flag=False,show_mdl_detail=False):\n \n from sklearn.metrics import r2_score, median_absolute_error, mean_absolute_error\n from sklearn.metrics import median_absolute_error, mean_squared_error, mean_squared_log_error\n from scipy.optimize import minimize\n import statsmodels.tsa.api as smt\n import statsmodels.api as sm\n import numpy as np\n import pandas as pd\n from tqdm import tqdm \n from itertools import product\n import warnings\n \n np.random.seed(25)\n \n ## SARIMA\n def mean_absolute_percentage_error(y_true, y_pred):\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n \n warnings.filterwarnings('ignore')\n \n def optimize_SARIMA(data_column,parameters_list):\n \"\"\"\n Return dataframe with parameters and corresponding AIC\n \n parameters_list - list with (ps,d, qs, Ps,D, Qs,s) tuples\n\n \"\"\" \n results = []\n best_aic = float('inf') \n \n #for param in tqdm(parameters_list):\n for param in parameters_list:\n try: model = sm.tsa.statespace.SARIMAX(data_column, order=(param[0], param[1],param[2]),\n seasonal_order=(param[3], param[4], param[5], param[5])).fit(disp=-1)\n # try: model = sm.tsa.statespace.SARIMAX(data_column, order=(param[0], d, param[1])).fit(disp=-1)\n except:\n #print(\"model not built with params\".format(param))\n pass\n continue \n aic = model.aic \n #Save best model, AIC and parameters\n if aic < best_aic:\n best_model = model\n best_aic = aic\n best_param = param\n results.append([param, model.aic]) \n 
result_table = pd.DataFrame(results)\n result_table.columns = ['parameters', 'aic']\n #Sort in ascending order, lower AIC is better\n result_table = result_table.sort_values(by='aic', ascending=True).reset_index(drop=True) \n return result_table\n \n #Set initial values and some bounds\n ps = range(0, 3)\n d = range(0,1)\n qs = range(0, 3)\n Ps = range(0, 3)\n D = range(0,1)\n Qs = range(0, 3)\n s = [3,5,7]\n #Create a list with all possible combinations of parameters\n parameters = product(ps,d, qs, Ps,D, Qs,s)\n parameters_list = list(parameters)\n len(parameters_list)\n if loga==True:\n series=series.apply(np.log1p)\n series_diff=series\n result_table = optimize_SARIMA(series_diff,parameters_list)\n \n #Set parameters that give the lowest AIC (Akaike Information Criteria) \n p, d, q, P, D, Q,s = result_table.parameters[0] \n best_model = sm.tsa.statespace.SARIMAX(series_diff, order=(p, d, q),seasonal_order=(P, D, Q, s)).fit(disp=-1)\n if show_mdl_detail==True:\n print(best_model.summary())\n if plot_figures_flag==True:\n best_model.plot_diagnostics(figsize=(15, 12)) \n #Forecast on n_steps forward\n forecast = best_model.forecast(nforecast)\n if loga==True:\n forecast=np.exp(forecast)-1 #log1p\n forecast=pd.Series(forecast.values,name='sarimax_model')\n lenn=series[:].shape[0]\n new_index=pd.Index(list(range(series[-1:].index.values[0]+1,series[-1:].index.values[0]+1+nforecast)))\n forecast.index=new_index\n return forecast", "def stop_on_low_ais_ess(trial_id, result):\n return result[\"ais_effective_sample_size\"] < 0.1", "def aic_c(self):\n if hasattr(self, '_aic_c'):\n return self._aic_c\n else:\n k = len(self.params)\n n = self.data['n'].sum()\n self._aic_c = self.aic() + (2*k**2 + 2*k)/(n - k - 1)\n return self._aic_c", "def optimize_SARIMA(data_column,parameters_list): \n results = []\n best_aic = float('inf') \n \n #for param in tqdm(parameters_list):\n for param in parameters_list:\n try: model = sm.tsa.statespace.SARIMAX(data_column, order=(param[0], param[1],param[2]),\n seasonal_order=(param[3], param[4], param[5], param[5])).fit(disp=-1)\n # try: model = sm.tsa.statespace.SARIMAX(data_column, order=(param[0], d, param[1])).fit(disp=-1)\n except:\n #print(\"model not built with params\".format(param))\n pass\n continue \n aic = model.aic \n #Save best model, AIC and parameters\n if aic < best_aic:\n best_model = model\n best_aic = aic\n best_param = param\n results.append([param, model.aic]) \n result_table = pd.DataFrame(results)\n result_table.columns = ['parameters', 'aic']\n #Sort in ascending order, lower AIC is better\n result_table = result_table.sort_values(by='aic', ascending=True).reset_index(drop=True) \n return result_table", "def test_am_threshold(Simulator, plt, seed, rng):\n d = 64\n vocab = Vocabulary(d, pointer_gen=rng)\n vocab.populate('A; B; C; D')\n\n d2 = int(d / 2)\n vocab2 = Vocabulary(d2, pointer_gen=rng)\n vocab2.populate('A; B; C; D')\n\n def input_func(t):\n return '0.49 * A' if t < 0.1 else '0.8 * B'\n\n with spa.Network('model', seed=seed) as m:\n m.am = ThresholdingAssocMem(\n threshold=0.5, input_vocab=vocab, output_vocab=vocab2,\n function=filtered_step_fn, mapping='by-key')\n m.stimulus = spa.Transcode(input_func, output_vocab=vocab)\n m.stimulus >> m.am\n\n in_p = nengo.Probe(m.am.input)\n out_p = nengo.Probe(m.am.output, synapse=0.03)\n\n with Simulator(m) as sim:\n sim.run(0.3)\n t = sim.trange()\n below_th = t < 0.1\n above_th = t > 0.25\n\n plt.subplot(2, 1, 1)\n plt.plot(t, similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n 
plt.subplot(2, 1, 2)\n plt.plot(t, similarity(sim.data[out_p], vocab2))\n plt.plot(t[above_th], np.ones(t.shape)[above_th] * 0.9, c='g', lw=2)\n plt.ylabel(\"Output\")\n\n assert np.mean(sim.data[out_p][below_th]) < 0.01\n assert_sp_close(t, sim.data[out_p], vocab2['B'], skip=0.25, duration=0.05)", "def ransac(data, hypothesis, metric, sample_size, num_iter, inlier_thresh):\n N,d = data.shape\n best_frac, best_hypothesis, best_mask = 0, None, None\n for i in range(num_iter):\n js = np.random.choice(N,size=sample_size,replace=False)\n hypothesis_elements = data[js,:]\n H = hypothesis(hypothesis_elements)\n badness = np.array([metric(row,H) for row in data])\n inlier_mask = (badness<inlier_thresh)\n inlier_frac = inlier_mask.mean()\n if inlier_frac>best_frac:\n best_frac, best_hypothesis, best_mask = inlier_frac,H,inlier_mask\n # print(H)\n # print(inlier_mask)\n return best_hypothesis, best_mask", "def akaike_info_criterion(log_likelihood, n_params, n_samples):\n # Correction in case of small number of observations\n if n_samples / float(n_params) >= 40.0:\n aic = 2.0 * (n_params - log_likelihood)\n else:\n aic = 2.0 * (n_params - log_likelihood) + 2.0 * n_params * (n_params + 1.0) / (\n n_samples - n_params - 1.0\n )\n return aic", "def test_conditional():\n # verify that conditioning increases the likelihood of getting a sample with the specified\n # categorical value", "def binarize(adata, copy=False):\n threshold, upper, lower = 1.0, 1.0, 0.0\n admatrix = adata.X\n admatrix = np.where(admatrix>threshold, upper, lower)\n if copy:\n adata2 = adata.copy()\n adata2.X = admatrix\n return(adata2)\n else:\n adata.X = admatrix", "def ks_test(timeseries):\r\n\r\n hour_ago = time() - 3600\r\n ten_minutes_ago = time() - 600\r\n reference = scipy.array([x[1] for x in timeseries if x[0] >= hour_ago and x[0] < ten_minutes_ago])\r\n probe = scipy.array([x[1] for x in timeseries if x[0] >= ten_minutes_ago])\r\n\r\n if reference.size < 20 or probe.size < 20:\r\n return False\r\n\r\n ks_d,ks_p_value = scipy.stats.ks_2samp(reference, probe)\r\n\r\n if ks_p_value < 0.05 and ks_d > 0.5:\r\n adf = sm.tsa.stattools.adfuller(reference, 10)\r\n if adf[1] < 0.05:\r\n return True\r\n\r\n return False", "def early_warnings_sensitivity_analysis(series,\n indicators=['var','ac'],\n winsizerange = [0.10, 0.8],\n incrwinsize = 0.10,\n smooth = \"Gaussian\",\n bandwidthrange = [0.05, 1.],\n spanrange = [0.05, 1.1],\n incrbandwidth = 0.2,\n incrspanrange = 0.1):\n\n results_kendal_tau = []\n for winsize in np.arange(winsizerange[0],winsizerange[1]+0.01,incrwinsize):\n\n winsize = round(winsize,3)\n if smooth == \"Gaussian\":\n\n for bw in np.arange(bandwidthrange[0], bandwidthrange[1]+0.01, incrbandwidth):\n\n bw = round(bw, 3)\n ews_dic_veg = ewstools.core.ews_compute(series.dropna(),\n roll_window=winsize,\n smooth=smooth,\n lag_times=[1, 2],\n ews=indicators,\n band_width=bw)\n\n result = ews_dic_veg['Kendall tau']\n result['smooth'] = bw\n result['winsize'] = winsize\n\n results_kendal_tau.append(result)\n\n\n elif smooth ==\"Lowess\":\n\n for span in np.arange(spanrange[0], spanrange[1]+0.01, incrspanrange):\n\n span = round(span,2)\n ews_dic_veg = ewstools.core.ews_compute(series.dropna(),\n roll_window=winsize,\n smooth=smooth,\n lag_times=[1, 2],\n ews=indicators,\n span=span)\n\n result = ews_dic_veg['Kendall tau']\n result['smooth'] = bw\n result['winsize'] = winsize\n\n results_kendal_tau.append(result)\n\n else:\n\n ews_dic_veg = ewstools.core.ews_compute(series.dropna(),\n roll_window=winsize,\n 
smooth='None',\n lag_times=[1, 2],\n ews=indicators)\n\n result = ews_dic_veg['Kendall tau']\n result['smooth'] = 0\n result['winsize'] = winsize\n\n results_kendal_tau.append(result)\n\n sensitivity_df = pd.concat(results_kendal_tau)\n\n return sensitivity_df", "def early_warnings_null_hypothesis(series,\n indicators=['var', 'ac'],\n roll_window=0.4,\n smooth='Lowess',\n span=0.1,\n band_width=0.2,\n lag_times=[1],\n n_simulations=1000):\n\n ews_dic = ewstools.core.ews_compute(series,\n roll_window=roll_window,\n smooth=smooth,\n span=span,\n band_width=band_width,\n ews=indicators,\n lag_times=lag_times)\n\n from statsmodels.tsa.arima_model import ARIMA\n from statsmodels.tsa.arima_process import ArmaProcess\n\n # Use the short_series EWS if smooth='None'. Otherwise use reiduals.\n eval_series = ews_dic['EWS metrics']['Residuals']\n\n # Fit ARMA model based on AIC\n aic_max = 10000\n\n for i in range(0, 2):\n for j in range(0, 2):\n\n model = ARIMA(eval_series, order=(i, j, 0))\n model_fit = model.fit()\n aic = model_fit.aic\n\n print(\"AR\", \"MA\", \"AIC\")\n print(i, j, aic)\n\n if aic < aic_max:\n aic_max = aic\n result = model_fit\n\n def compute_indicators(series):\n \"\"\"\n Rolling window indicators computation based on the ewstools.core.ews_compute function from\n ewstools\n \"\"\"\n\n df_ews = pd.DataFrame()\n # Compute the rolling window size (integer value)\n rw_size = int(np.floor(roll_window * series.shape[0]))\n\n # ------------ Compute temporal EWS---------------#\n\n # Compute standard deviation as a Series and add to the DataFrame\n if 'sd' in indicators:\n roll_sd = series.rolling(window=rw_size).std()\n df_ews['Standard deviation'] = roll_sd\n\n # Compute variance as a Series and add to the DataFrame\n if 'var' in indicators:\n roll_var = series.rolling(window=rw_size).var()\n df_ews['Variance'] = roll_var\n\n # Compute autocorrelation for each lag in lag_times and add to the DataFrame\n if 'ac' in indicators:\n for i in range(len(lag_times)):\n roll_ac = series.rolling(window=rw_size).apply(\n func=lambda x: pd.Series(x).autocorr(lag=lag_times[i]),\n raw=True)\n df_ews['Lag-' + str(lag_times[i]) + ' AC'] = roll_ac\n\n # Compute Coefficient of Variation (C.V) and add to the DataFrame\n if 'cv' in indicators:\n # mean of raw_series\n roll_mean = series.rolling(window=rw_size).mean()\n # standard deviation of residuals\n roll_std = series.rolling(window=rw_size).std()\n # coefficient of variation\n roll_cv = roll_std.divide(roll_mean)\n df_ews['Coefficient of variation'] = roll_cv\n\n # Compute skewness and add to the DataFrame\n if 'skew' in indicators:\n roll_skew = series.rolling(window=rw_size).skew()\n df_ews['Skewness'] = roll_skew\n\n # Compute Kurtosis and add to DataFrame\n if 'kurt' in indicators:\n roll_kurt = series.rolling(window=rw_size).kurt()\n df_ews['Kurtosis'] = roll_kurt\n\n # ------------Compute Kendall tau coefficients----------------#\n\n ''' In this section we compute the kendall correlation coefficients for each EWS\n with respect to time. Values close to one indicate high correlation (i.e. EWS\n increasing with time), values close to zero indicate no significant correlation,\n and values close to negative one indicate high negative correlation (i.e. 
EWS\n decreasing with time).'''\n\n # Put time values as their own series for correlation computation\n time_vals = pd.Series(df_ews.index, index=df_ews.index)\n\n # List of EWS that can be used for Kendall tau computation\n ktau_metrics = ['Variance', 'Standard deviation', 'Skewness', 'Kurtosis', 'Coefficient of variation', 'Smax',\n 'Smax/Var', 'Smax/Mean'] + ['Lag-' + str(i) + ' AC' for i in lag_times]\n # Find intersection with this list and EWS computed\n ews_list = df_ews.columns.values.tolist()\n ktau_metrics = list(set(ews_list) & set(ktau_metrics))\n\n # Find Kendall tau for each EWS and store in a DataFrame\n dic_ktau = {x: df_ews[x].corr(time_vals, method='kendall') for x in ktau_metrics} # temporary dictionary\n df_ktau = pd.DataFrame(dic_ktau, index=[0]) # DataFrame (easier for concatenation purposes)\n\n # -------------Organise final output and return--------------#\n\n # Ouptut a dictionary containing EWS DataFrame, power spectra DataFrame, and Kendall tau values\n output_dic = {'EWS metrics': df_ews, 'Kendall tau': df_ktau}\n\n return output_dic\n\n process = ArmaProcess.from_estimation(result)\n\n # run simulations on best fitted ARIMA process and get values\n kendall_tau = []\n for i in range(n_simulations):\n ts = process.generate_sample(len(eval_series))\n\n kendall_tau.append(compute_indicators(pd.Series(ts))['Kendall tau'])\n\n surrogates_kendall_tau_df = pd.concat(kendall_tau)\n surrogates_kendall_tau_df['true_data'] = False\n\n # get results for true data\n data_kendall_tau_df = compute_indicators(eval_series)['Kendall tau']\n data_kendall_tau_df['true_data'] = True\n\n # return dataframe with both surrogates and true data\n kendall_tau_df = pd.concat([data_kendall_tau_df,surrogates_kendall_tau_df])\n\n return kendall_tau_df", "def aic(self, tmin=None, tmax=None):\n noise = self.ml.noise(tmin=tmin, tmax=tmax)\n nparam = len(self.ml.parameters[self.ml.parameters.vary == True])\n aic = -2.0 * np.log(sum(noise ** 2.0)) + 2.0 * nparam\n return aic", "def test02a(self):\n a = np.arange(1, 11)\n b = bcolz.carray(a)\n wt = [v for v in a if v <= 5]\n cwt = [v for v in b.where(a <= 5)]\n # print \"numpy ->\", [v for v in a if v<=5]\n # print \"where ->\", [v for v in b.where(a<=5)]\n self.assertTrue(wt == cwt, \"where() does not work correctly\")", "def test10(self):\n a = np.arange(1, 11)\n b = bcolz.carray(a)\n bi = b.where(a <= 5)\n ai = (v for v in a if v <= 5)\n self.assertEqual([i for i in ai], [i for i in bi])\n self.assertEqual([i for i in ai], [i for i in bi])", "def A_weight(signal, fs):\n\n b, a = A_weighting(fs)\n return lfilter(b, a, signal)", "def info_criteria(indep,\n dep,\n models,\n add_aicc=False):\n num_data = len(indep)\n\n bic_calc = BicCalculator(bic_type=BIC_TYPE.STANDARD)\n bic_calc_bkpt = BicCalculator(bic_type=BIC_TYPE.HYBRID)\n\n #bic_calc_bkpt = bic_calc\n #####bic_calc_bkpt = BicCalculator(bic_type = BIC_TYPE.HOS)\n\n bics = []\n aics = []\n aiccs = []\n for model in models:\n\n if model in [Model.ONE_BKPT, Model.TWO_BKPT]:\n bic_calc_to_use = bic_calc_bkpt\n else:\n bic_calc_to_use = bic_calc\n\n estimator = model.estimator(num_end_to_skip=NUM_END_TO_SKIP,\n num_between_to_skip=NUM_BETWEEN_TO_SKIP)\n estimator.fit(indep, dep)\n\n loglikelihood = estimator.loglikelihood\n num_params = estimator.num_params\n\n bic = bic_calc_to_use.bic(num_params=num_params,\n loglikelihood=loglikelihood,\n num_data=num_data)\n aic = stats_util.aic(num_params=num_params,\n loglikelihood=loglikelihood)\n aicc = stats_util.aicc(num_params=num_params,\n 
loglikelihood=loglikelihood,\n num_data=num_data)\n\n bics.append(bic)\n aics.append(aic)\n aiccs.append(aicc)\n ic_df = pd.DataFrame({\"BIC\": bics, \"AIC\": aics}, index=models)\n if add_aicc:\n ic_df[\"AICC\"] = aiccs\n\n wts_df = ic_df.apply(stats_util.bma_weights, axis=0)\n wts_cols = [x + \" Model Wt\" for x in wts_df.columns]\n wts_df.columns = wts_cols\n both = pd.concat([ic_df, wts_df], join=\"outer\", axis=1)\n return both", "def test_stationarity(dataframe, time_window):\n timeseries = dataframe.iloc[:, 0]\n # Determing rolling statistics\n rolmean = timeseries.rolling(time_window).mean()\n rolstd = timeseries.rolling(time_window).std()\n\n # Plot rolling statistics:\n plt.figure(figsize=(18, 9))\n plt.plot(timeseries, color=\"blue\", label=\"Original\")\n plt.plot(rolmean, color=\"red\", label=\"Rolling Mean\")\n plt.plot(rolstd, color=\"black\", label=\"Rolling Std\")\n plt.legend(loc=\"best\")\n plt.title(\"Rolling Mean & Standard Deviation\")\n plt.show(block=False)\n\n # Perform Dickey-Fuller test:\n print(\"Results of Dickey-Fuller Test:\")\n dftest = adfuller(timeseries, autolag=\"AIC\")\n dfoutput = pd.Series(\n dftest[0:4],\n index=[\n \"Test Statistic\",\n \"p-value\",\n \"#Lags Used\",\n \"Number of Observations Used\",\n ],\n )\n for key, value in dftest[4].items():\n dfoutput[\"Critical Value (%s)\" % key] = value\n print(dfoutput)", "def activation_channels_apoz(activation):\n if activation.ndim == 4:\n featuremap_apoz_mat = (np.abs(activation) > 0).sum(axis=(2, 3)) / (activation.shape[2] * activation.shape[3])\n elif activation.ndim == 2:\n featuremap_apoz_mat = (np.abs(activation) > 0).sum(axis=1) / activation.shape[1] # batch x 1\n else:\n raise ValueError(\"activation_channels_apoz: Unsupported shape: \".format(activation.shape))\n return 100 - featuremap_apoz_mat.mean(axis=0)*100", "def MyFilter(data, window_width=10, beta=2.0, draw_graph=False):\n\n #read data and change the format\n if 'time' in data.columns:\n date_list = []\n for i in data.index:\n date_parse = parse(str(data.ix[i].time))\n date_list.append(date_parse)\n data['date'] = date_list\n data_use = data\n data_use.index = data_use['date'].tolist()\n data_use = data_use.drop(['date','time'], axis=1)\n data_use.index.name = 'time'\n else:\n data_use = data\n #design filter, use the kaiser window here\n window = signal.kaiser(window_width, beta=beta)\n data_use['close_filtered'] = signal.convolve(data_use['close'], window, mode='same') / sum(window)\n data_use['high_frequency'] = data_use['close'] - data_use['close_filtered']\n\n #delete the distortion datas after filtered\n if window_width % 2 == 0:\n data_changed = data_use[window_width/2: -(window_width/2 - 1)]\n else:\n data_changed = data_use[(window_width-1)/2: -(window_width-1)/2]\n\n #draw graph\n if (draw_graph == True) :\n fig = plt.figure()\n ax1 = plt.subplot2grid((3,1), (0,0), rowspan=2)\n data_changed.loc[:,'close'].plot(style='r', label='original')\n data_changed.loc[:,'close_filtered'].plot(style='k', label='filtered')\n plt.title('Kaiser window_width = %d , const = %d' % (window_width, beta))\n plt.legend(loc='best')\n\n ax2 = plt.subplot2grid((3,1), (2,0))\n data_changed.loc[:,'high_frequency'].plot(label='high_frequency')\n ax2.set_ylim([-150, 150])\n plt.title('High Frequency')\n plt.legend(loc='best')\n plt.show()\n # print data_use\n # print data_changed\n data_out = data_changed['close_filtered']\n return np.array(data_out.tolist())", "def check_stationarity(time_series, window, figsize=(10,6)): \n # Calculating rolling mean 
and standard deviation\n rolling_mn = time_series.rolling(window).mean()\n rolling_std = time_series.rolling(window).std()\n \n plt.figure(figsize=figsize)\n plt.plot(time_series, color = 'blue',label = 'Original TS')\n plt.plot(rolling_mn, color = 'red', label = 'Rolling Mean')\n plt.plot(rolling_std, color = 'black', label = 'Rolling St.Dev.')\n plt.legend(loc = 'best')\n plt.grid(True, color = 'lightgrey')\n plt.title('Rolling Mean & Standard Deviation of the Trade Value of Vaccines', fontsize = 10)\n \n # Dickey-Fuller test:\n print('Results of Dickey-Fuller Test:')\n fuller_test = adfuller(time_series, autolag = 'AIC')\n results_ts = pd.Series(fuller_test[0:4], index = ['Test Statistic','P-value','#Lags Used','Number of Observations Used'])\n for key,value in fuller_test[4].items():\n results_ts['Critical Value (%s)'%key] = value\n print(results_ts)", "def test08(self):\n a = np.arange(1, 11)\n b = bcolz.carray(a)\n ul = [v for v in a if v <= 5]\n u = b.where(a <= 5)\n wl = [v for v in a if v <= 6]\n w = b.where(a <= 6)\n self.assertEqual(ul, list(u))\n self.assertEqual(wl, list(w))" ]
[ "0.52783775", "0.5277361", "0.51554257", "0.5079259", "0.5041149", "0.5019891", "0.49585706", "0.49330664", "0.49314007", "0.4872369", "0.4855907", "0.48519504", "0.48410013", "0.4838592", "0.47834566", "0.47363517", "0.47308505", "0.47119272", "0.47084534", "0.47058246", "0.46982104", "0.46889034", "0.46841618", "0.46496356", "0.46467048", "0.46394926", "0.46243754", "0.46002617", "0.45861483", "0.45795107" ]
0.5706766
0
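A minimal, self-contained sketch of the Kendall-tau trend test used in the record above: a rolling early-warning indicator (here, variance) is correlated against time with pandas. The synthetic series, window size, and random seed are illustrative assumptions.

import numpy as np
import pandas as pd

# Illustrative series: noise whose scale slowly grows, so rolling variance trends upward.
rng = np.random.default_rng(0)
values = rng.normal(scale=np.linspace(1.0, 3.0, 500))
series = pd.Series(values)

# Rolling variance serves as the early-warning indicator.
rolling_var = series.rolling(window=50).var().dropna()

# Kendall tau between the indicator and time quantifies its monotonic trend.
time_vals = pd.Series(rolling_var.index, index=rolling_var.index)
ktau = rolling_var.corr(time_vals, method="kendall")
print(f"Kendall tau of rolling variance vs. time: {ktau:.3f}")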
Show line of asterisks to demarcate bounds of heap
def demarcate_heap(hgt=self.level, cell_wid=minimum_cell): # Number of nodes on bottom is 2^hgt max_nodes = int(np.power(2, hgt)) print (''.center(cell_wid * max_nodes, '*'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_heap(self):\n for i in range(1, (self.size//2)+1): \n print(\" PARENT : \"+ str(self.Heap[i])+\" LEFT CHILD : \"+ \n str(self.Heap[2 * i])+\" RIGHT CHILD : \"+\n str(self.Heap[2 * i + 1]))", "def min_heap(self): \n \n for pos in range(self.size//2, 0, -1): \n self.min_heapify(pos)", "def print_heap(L):\n accum = \"\"\n powToBeat = -1\n for index, item in enumerate(L):\n log = math.log(index + 1, 2)\n if int(log) > powToBeat:\n #print accum\n accum += \"\\n\"\n #powToBeat = int(log)\n powToBeat += 1\n else:\n pass\n #accum += \" \" + str(item)\n accum += \" \" + str(item)\n return accum", "def __repr__(self):\n return \"heap:[\" + ','.join(map(str, self.ar[:self.n])) + \"]\"", "def for_X():\r\n for row in range(7):\r\n for col in range(7):\r\n if row-col==0 or row+col==6:\r\n print(\"*\",end=\" \")\r\n else:\r\n print(\" \",end=\" \")\r\n print()", "def print_heap(self):\n print self.queue[:self.size:]", "def Print(self):\n for i in range(1, (self.size // 2) + 1):\n print(f\" PARENT : {str(self.heap[i])}\"\n f\" LEFT CHILD : {str(self.heap[2 * i])}\"\n f\" RIGHT CHILD : {str(self.heap[2 * i + 1])}\")", "def Print(self):\n for i in range(1, (self.size // 2) + 1):\n print(f\" PARENT : {str(self.heap[i])}\"\n f\" LEFT CHILD : {str(self.heap[2 * i])}\"\n f\" RIGHT CHILD : {str(self.heap[2 * i + 1])}\")", "def print_mines(self):\n\t\tfor y in range(self.height):\n\t\t\tfor x in range(self.width):\n\t\t\t\tprint(\"#\" if self.mines[x][y] else \".\", end=\"\")\n\t\t\tprint(\"\")", "def print_stars():\n for i in range(2):\n for j in range(35):\n print(\"*\", end = '')\n print('')", "def for_P():\r\n for row in range(7):\r\n for col in range(4):\r\n if col==0 or row in (0,3) and col!=3 or col==3 and row in(1,2):\r\n print(\"*\",end=\" \")\r\n else:\r\n print(\" \",end=\" \")\r\n print()", "def heappop(heap):\n pass", "def minHeap(self):\n for pos in range(self.size // 2, 0, -1):\n self.minHeapify(pos)", "def belt():\r\n\tfor i in range(SIZE*2+2):\r\n\t\tif i == 0 or i == SIZE*2+1:\r\n\t\t\tprint('+', end='')\r\n\t\telse:\r\n\t\t\tprint('=', end='')\r\n\tprint('')", "def annotate_major_heap(self):\n # TODO: we could provide a fallback path by manually taking the proper bytes as\n # the ml_heap command does\n if heap_chunk_head_p is None:\n self.set_inaccurate(\"major heap info\")\n return\n\n heap_chunk_ptr = get_value_safe(\"caml_heap_start\", heap_chunk_head_p)\n try:\n while heap_chunk_ptr is not None and heap_chunk_ptr != 0:\n heap_chunk_head_ptr = heap_chunk_ptr - 1\n heap_chunk_head = heap_chunk_head_ptr.dereference()\n\n block = heap_chunk_head[\"block\"]\n size = heap_chunk_head[\"size\"]\n\n memrange = self.get_range(heap_chunk_head_ptr)\n if memrange is not None:\n self.annotate_split_range(heap_chunk_ptr.cast(size_t), size, MemoryType.MajorHeap, \"Major heap\")\n else:\n new_range = MemoryRange(heap_chunk_ptr.cast(size_t), size, \"gdb\", \"Major Heap\", MemoryType.MajorHeap)\n assert(false) # This shouldn't happen\n self.tentative_add_range(new_range)\n\n heap_chunk_ptr = heap_chunk_head[\"next\"].cast(heap_chunk_head_p)\n except gdb.MemoryError:\n print(\"OCaml major heap linked list is corrupt: last entry = 0x%08X\" % (int(heap_chunk_ptr.cast(size_t))))\n\n gray_vals = get_value_safe(\"gray_vals\", size_t)\n gray_vals_cur = get_value_safe(\"gray_vals_cur\", size_t)\n gray_vals_size = get_value_safe(\"gray_vals_size\", size_t)\n gray_vals_end = get_value_safe(\"gray_vals_end\", size_t)\n if gray_vals is not None and gray_vals_size is not None:\n 
self.annotate_split_range(gray_vals, gray_vals_size, MemoryType.GC_Metadata, \"major GC's gray values\")\n if gray_vals_cur is not None and gray_vals_end is not None:\n self.annotate_split_range(gray_vals_cur, gray_vals_end - gray_vals_cur, MemoryType.GC_Metadata, \"major GC's current gray values\")", "def __str__(self) -> str:\n return 'HEAP ' + str(self.heap)", "def __str__(self) -> str:\n return 'HEAP ' + str(self.heap)", "def __str__(self):\n\n string = \"[\"\n for i in range(1, self.i , 1):\n try:\n string += str(self.heap[i]) + \"\\n\"\n except:\n string += \"Nan \"\n return string + \"]\"", "def starbox(width, height):\n print(\"*\" * width) # print top edge of the box\n # print sides of the box\n for _ in range(height - 2):\n print(\"*\" + \" \" * (width - 2) + \"*\")\n print(\"*\" * width) # print bottom edge of the box", "def print_grid(grid, score):\n print(\"\")\n print(score)\n wall = \"+------\"*len(grid[0])+\"+\"\n print(wall)\n for row in grid:\n meat = \"|\".join(\"{:^6}\".format(val) for val in row)\n print(\"|{}|\".format(meat))\n print(wall)", "def tower_of_hanoi_stack(n, beg, aux, end):", "def annotate_minor_heap(self):\n minor_start = get_value_safe(\"caml_young_base\", size_t)\n minor_end = get_value_safe(\"caml_young_end\", size_t)\n if minor_start is None or minor_end is None:\n return\n minor_size = minor_end - minor_start\n\n memrange = self.get_range(minor_start)\n if memrange is not None:\n self.annotate_split_range(minor_start, minor_size, MemoryType.MinorHeap, \"Minor heap\")\n else:\n new_range = MemoryRange(minor_start, minor_size, \"gdb\", \"Minor Heap\", MemoryType.MinorHeap)\n self.set_inaccurate(\"minor heap memory map info\")\n bisect.insort(self.ranges, new_range)", "def for_L():\r\n\r\n for row in range(6):\r\n for col in range(4):\r\n if col==0 or row==5:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n print()", "def __init__(self, parent, populator):\n ListHeap.__init__(self, parent, populator, 6)\n self.propagate(0)\n self.config(width=10, height=100)", "def print_grid(grid):\r\n print(\"+\",'-'*len(grid[0]*5),'+',sep='')# top line of box\r\n for i in range(len(grid)):\r\n grid_str = ''\r\n for j in range(len(grid[i])):\r\n if grid[i][j] == 0:\r\n grid_str += \"{:<5}\".format(' ')\r\n else:\r\n grid_str += \"{:<5}\".format(grid[i][j])#append a 5-width column\r\n print('|',grid_str,'|',sep='')\r\n print(\"+\",'-'*len(grid[0]*5),'+',sep='')# bottom line of box\r", "def for_reverse_triange():\r\n\r\n for row in range(6,0,-1):\r\n print(' '*(6-row), '* '*row)", "def __init__(self):\n self.max_heap = [] # to contain left smaller half, or + 1\n self.min_heap = [] # to contain right bigger half", "def a_star(start, end, board):\n board_n = board\n closed_set = deque()\n open_set = deque()\n open_set.append(start)\n\n path = list()\n\n while open_set:\n lowest_f_index = 0\n for i, node in enumerate(open_set):\n if open_set[i].f < open_set[lowest_f_index].f:\n lowest_f_index = i\n # Adds an additional check in case the f values are similar. 
Then we compare the g score instead\n # and find the lowest\n if open_set[i].f == open_set[lowest_f_index].f:\n if open_set[i].g < open_set[lowest_f_index].g:\n lowest_f_index = i\n\n current_node = open_set[lowest_f_index]\n\n if current_node == end:\n tmp = current_node\n path.append(tmp)\n while tmp.previous:\n path.append(tmp.previous)\n tmp = tmp.previous\n for elem in path[1:-1]: \n elem.symbol = '▪'\n draw_4k(board_n, wait = True)\n\n open_set.remove(current_node)\n closed_set.append(current_node)\n\n neighbors = current_node.neighbors\n for nb in neighbors:\n if nb in closed_set: #Doesnt check walls here since there is no walls\n continue\n \n tmp_g = current_node.g + nb.cost # Adds the cost of the neighbor cell to the tentative g score instead of just 1\n\n if nb not in open_set:\n open_set.append(nb)\n \n elif tmp_g >= nb.g:\n continue\n\n nb.previous = current_node \n nb.g = tmp_g \n nb.h = calculate_manhattan(nb, end)\n nb.f = nb.g + nb.h", "def prints(self):\r\n\r\n for i in range(len(self.heap_array)):\r\n print(self.heap_array[i])", "def show( self):\n def symbol( i):\n return i<0 and (i==-2 and ' ' or '0') or chr(ord('a') + i)\n \n X, Y = np.max( self.board.positions, 0)\n # -2 to indicate outside board.\n display = np.zeros( (X+1,Y+1), dtype=int) - 2 \n for x, y in self.board.positions:\n display[x, y] = -1 # -1 to indicate unoccupied\n for p, i in self.occupation.items():\n x, y = self.board.positions[p]\n display[x, y] = i\n for x in xrange(X+1):\n s = ''.join( [ symbol( display[x, y]) for y in xrange(Y+1) ])\n print s" ]
[ "0.6224625", "0.6115098", "0.60877067", "0.6073852", "0.58567894", "0.5810459", "0.57655525", "0.57655525", "0.5722967", "0.5565458", "0.55614096", "0.55261725", "0.55241466", "0.550935", "0.5506681", "0.54333556", "0.54333556", "0.54319435", "0.5421225", "0.54208", "0.5418962", "0.54035205", "0.53875935", "0.5385004", "0.5374566", "0.5344161", "0.5337163", "0.5331037", "0.532872", "0.5325107" ]
0.6630077
0
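A minimal sketch of the asterisk-banner idea from the record above, written as a standalone function rather than a heap method; the level argument and cell width are illustrative assumptions.

def demarcate(level, cell_width=4):
    # The bottom level of a binary heap of height `level` holds 2**level nodes,
    # so the banner spans cell_width characters per bottom-level slot.
    max_nodes = 2 ** level
    print("".center(cell_width * max_nodes, "*"))

demarcate(3)  # prints a line of 32 asterisks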
Swap values at index_0 and index_1
def swap(self, index_0, index_1): value_0 = self.get_value_at(index_0) value_1 = self.get_value_at(index_1) self.set_value_at(index_0, value_1) self.set_value_at(index_1, value_0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def swap(A, index1, index2):\r\n \r\n temp = A[index1]\r\n A[index1] = A[index2]\r\n A[index2] = temp", "def swap_numbers(numbers, index1, index2):\n temp = numbers[index1]\n numbers[index1] = numbers[index2]\n numbers[index2] = temp", "def __swap(self, index_1, index_2):\n temp = self._lits[index_1]\n self._lits[index_1] = self._lits[index_2]\n self._lits[index_2] = temp", "def swap(lst: list, index_1: int, index_2: int) -> None:\n lst[index_1], lst[index_2] = lst[index_2], lst[index_1]", "def swap(self, index_1, index_2):\n temp = self.store[index_1]\n self.store[index_1] = self.store[index_2]\n self.store[index_2] = temp", "def swap(self, index_a:int, index_b:int):\n if not index_a == index_b:\n self.list[index_a], self.list[index_b] = self.list[index_b], self.list[index_a]", "def swap(in_list: List, index1: int, index2: int) -> List:\n\n in_list[index1], in_list[index2] = in_list[index2], in_list[index1] \n\n return in_list", "def __elementSwap(self,\n index1: int,\n index2: int):\n self.__ordered_holder[index1], self.__ordered_holder[index2] = self.__ordered_holder[index2], self.__ordered_holder[index1]", "def swap_values(self, index1, index2):\n self.heap[index1], self.heap[index2] = self.heap[index2], self.heap[index1]", "def _swap(self, i, j):\n self._data[i], self._data[j] = self._data[j], self._data[i]", "def _swap(self, i, j):\n self._data[i], self._data[j] = self._data[j], self._data[i]", "def _swap(self, i, j):\r\n self._data[i], self._data[j] = self._data[j], self._data[i]", "def swap(t, i, j):\n t[i], t[j] = t[j], t[i]", "def _swap(self, i, j):\n self._data[i], self._data[j] = self._data[j], self._data[i]", "def __swap(self, index1, index2):\n self.heap[index1], self.heap[index2] = self.heap[index2], self.heap[index1]", "def swap(values: list, i = int, j = int) -> None:\n \n temp: int = values[i]\n values[i] = values[j]\n values[j] = temp", "def swap_values_at_indexes(todo_list, first, second):\n try:\n temp = todo_list[first]\n todo_list[first] = todo_list[second]\n todo_list[second] = temp\n except first:\n print(\"issue with first\")\n except second:\n print(\"issue with second\")", "def swap(deck, firstIndex, secondIndex):\n deck[firstIndex], deck[secondIndex] = deck[secondIndex], deck[firstIndex]\n return deck", "def swap(self,i,j):\r\n self._data[i], self._data[j] = self._data[j], self._data[i]", "def swap(i: int, j: int, data: List[int]) -> None:\n temp = data[i]\n data[i] = data[j]\n data[j] = temp", "def swap(ix, jx, ax, ay):\n tempx, tempy = ax[ix], ay[ix]\n ax[ix] = ax[jx]\n ay[ix] = ay[jx]\n ax[jx] = tempx\n ay[jx] = tempy", "def listSwapElement(lst, indexa, indexb):\n temp = lst[indexa]\n lst[indexa] = lst[indexb]\n lst[indexb] = temp", "def jmatswap(ind: int):\n return _jmswap[ind - 1]", "def swap_inarray(array, idx1, idx2):\n if idx1 != idx2:\n temp = array[idx1]\n array[idx1] = array[idx2]\n array[idx2] = temp\n return array", "def swap(arr, i, j):\n arr[i], arr[j] = arr[j], arr[i]", "def swap(self, i, j):\r\n self._data[i], self._data[j] = self._data[j], self._data[i]", "def swap(ss_list, sorted_index, new_index):\n temp_int = ss_list[sorted_index]\n ss_list[sorted_index] = ss_list[new_index]\n ss_list[new_index] = temp_int", "def swap(self, i: int, j: int) -> None:\n self.data[i], self.data[j] = self.data[j], self.data[i]", "def swap(theList, i, j):\n\n temp = theList[i]\n theList[i] = theList[j]\n theList[j] = temp", "def _swap_tasks(self, index_one: int, index_two: int) -> None:\n self.queue[index_one], self.queue[index_two] = self.queue[index_two], 
self.queue[index_one]" ]
[ "0.8045885", "0.80415386", "0.7976427", "0.7946715", "0.77399236", "0.7737298", "0.7729477", "0.76621556", "0.7614902", "0.7564756", "0.7564756", "0.737364", "0.7371236", "0.73423225", "0.727941", "0.7278792", "0.72698545", "0.71813565", "0.716901", "0.7142264", "0.7134006", "0.7117594", "0.70894575", "0.7083501", "0.70793307", "0.7047735", "0.7002308", "0.69914424", "0.6976514", "0.6954668" ]
0.82595736
0
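A minimal sketch of the same index swap on a plain Python list; tuple unpacking replaces the explicit temporaries used in the record above, and the sample data is an illustrative assumption.

def swap(values, i, j):
    # Tuple unpacking swaps the two slots in place without a temporary variable.
    values[i], values[j] = values[j], values[i]

data = [10, 20, 30, 40]
swap(data, 0, 3)
print(data)  # [40, 20, 30, 10]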
use dictionary to store the difference (target - nums[i]).
def twoSum(self, nums: List[int], target: int) -> List[int]: diffRec = {} for i, v in enumerate(nums): if v in diffRec: return [diffRec[v], i] else: diffRec[target - v] = i return -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def twoSum(self, nums: List[int], target: int) -> List[int]:\n d = {}\n for i, n in enumerate(nums):\n d[n]=i\n \n for i, n in enumerate(nums):\n m = target - n\n if m in d and d[m] != i:\n return [i,d[m]]\n return []", "def __twoSum(self, numbers, target):\n dic = {}\n for i, value in enumerate(numbers):\n complement = target - value\n if complement in dic:\n return [dic[complement], i]\n else:\n # index the new value\n dic[value] = i", "def solution_two(nums, target):\n myHash = {}\n for index, item in enumerate(nums):\n if target - item in nums:\n myHash[index] = item\n\n return list(myHash.keys())", "def twoSum(self, nums: List[int], target: int) -> List[int]:\n \n # Given nums=[2,7,11,15],target=9\n \n d={}\n for i in range(len(nums)):\n x = target-nums[i]\n if x in d:\n return [d[x],i]\n\n d[nums[i]]=i\n\n return []", "def two_sum(self, nums: List[int], target: int) -> List[int]:\n found = {}\n\n for idx, value in enumerate(nums):\n rest = target - nums[idx]\n if rest in found:\n return [idx, found[rest]]\n else:\n found[value] = idx", "def twoSumFaster(nums, target):\n my_hash = {}\n for a in range(0,len(nums)):\n my_hash[nums[a]] = a\n\n for indice_a in range(0,len(nums) - 1):\n diff = target - nums[indice_a]\n if diff in my_hash:\n for indice_b in range(indice_a + 1, len(nums)):\n if nums[indice_b] == diff:\n return [indice_a, indice_b]", "def twoSum(self, nums, target):\n\n seen = {}\n for position, num in enumerate(nums):\n remaining = target - num\n if remaining in seen:\n return [seen[remaining], position]\n seen[num] = position\n return []", "def twoSum(self, nums: List[int], target: int) -> List[int]:\n dic = {target-n : i for i, n in enumerate(nums)}\n return next(([i, dic[n]] for i, n in enumerate(nums) if n in dic and i != dic[n]), [0, 0])", "def twoSum(self, nums: List[int], target: int) -> List[int]:\n # res = []\n # for i in range(len(nums)):\n # for j in range(len(nums)):\n # if i == j:\n # continue\n\n # if nums[i] + nums[j] == target:\n # res.append(i)\n # res.append(j)\n # return res\n \n memo = {}\n for index, num in enumerate(nums):\n #if target - num is already in memo, then return current index and the num's index\n n = target - num\n\n if num not in memo:\n memo[n] = index\n \n else:\n return [memo[num], index]", "def twoSum(self, nums: List[int], target: int) -> List[int]:\n #computational complexity: O(N) since we iterate through n elements only once\n #space comlexity: O(N) since we need to store n elements in the array\n\n #(0) use hashtable to store the number and its index.\n hashTable={}\n #(1) iterating through all the itmes\n for i in range(len(nums)):\n complement = target - nums[i]\n #(2) check if the complement is in the hashTable. 
If not, put it into the hashtable\n if complement in hashTable:\n return [hashTable[complement],i]\n else:\n hashTable[nums[i]] = i", "def twoSum(self, nums: List[int], target: int) -> List[int]:\n # Use a dict to record visited numbers\n d = {}\n for i, n in enumerate(nums):\n m = target - n\n if m in d:\n return [d[m], i]\n else:\n d[n] = i", "def two_sum(self, nums, target):\n\n # idea: for each num, check if it complements a previously seen one\n # (keeping track of them in a dictionary)\n seek = {}\n\n for ind, element in enumerate(nums):\n if element in seek:\n return [seek[element], ind]\n else:\n seek[target - element] = ind\n\n return []", "def diffs(current, target):\r\n \r\n additions = [val for val in target if val not in current]\r\n deletions = [val for val in current if val not in target]\r\n\r\n return additions, deletions", "def two_sum(numbers, target):\n already_seen = defaultdict(deque)\n for idx, number in enumerate(numbers):\n complement = target - number\n if complement in already_seen:\n complement_idx = already_seen[complement].popleft()\n return [complement_idx, idx]\n already_seen[number].append(idx)\n raise ValueError('No solution found')", "def subtract(self, other):\n for val, freq in other.items():\n self.incr(val, -freq)", "def getdifference(triplet_old,triplet_new):\r\n for i in range(0,3):\r\n if (triplet_new[i]!=triplet_old[i]):\r\n \r\n return (triplet_new[i],triplet_old[i],i)", "def subtraction_of(number_list):", "def intersect_counter(nums1: List[int], nums2: List[int]) -> List[int]:\n c = dict(Counter(nums1))\n\n res = []\n for e in nums2:\n if e in c and c[e] > 0:\n c[e] -= 1\n res.append(e)\n\n return res", "def dict_subtract(a, b):\n return {k: a[k] for k in set(a) - set(b)}", "def get_gold_pred_idx_dict(self, y_true, y_pred):\n gold_pred_idx_dict = defaultdict(lambda: defaultdict(list))\n gold_pred_ct_dict = defaultdict(lambda: defaultdict(int)) \n\n for gold_idx in range(3,self.nerTags.size):\n gold_filter = (y_true == gold_idx).astype(\"int\") # 1/0 all rows with that gold_idx\n for pred_idx in range(3,self.nerTags.size):\n pred_filter = (y_pred == pred_idx).astype(\"int\") # 1/0 all rows with that ner_idx\n match_ner_idx = np.nonzero(np.all([gold_filter, pred_filter],axis=0).astype(\"int\"))[0]\n gold_pred_idx_dict[gold_idx][pred_idx] = match_ner_idx \n gold_pred_ct_dict[gold_idx][pred_idx] = match_ner_idx.shape[0] \n\n return gold_pred_idx_dict, gold_pred_ct_dict", "def get_gold_idx_dict(self, y_true, y_pred):\n gold_idx_dict = defaultdict(list)\n for gold in self.gold_pred_idx_dict:\n gold_idx_dict[gold] = np.hstack(self.gold_pred_idx_dict[gold].values())\n \n return gold_idx_dict", "def subtract(d1, d2):\n res = {}\n for key in d1:\n if key not in d2:\n res[key] = None\n return res", "def reverse_difference():", "def subtract(d1, d2):\n res = {}\n \n for key in d1:\n if key not in d2:\n res[key]=None\n\n return res", "def explore(self, nums, left, right, target):\n diff = sys.maxsize\n\n while left < right:\n cur_sum = nums[left] + nums[right]\n if cur_sum == target:\n return 0\n \n if abs(target - cur_sum) < abs(diff):\n diff = target - cur_sum\n if cur_sum < target:\n left += 1\n else:\n right -= 1\n return diff", "def _dif_(x, y):\n _check_(x, y)\n return [i - j for (i, j) in zip(x, y)]", "def _create_diff_state(self, cur_state):\n diffState = {}\n for key in list(self.EndState.keys()):\n curCnt = 0\n try:\n curCnt = cur_state.Deck[key]\n except KeyError:\n pass\n\n diff = self.EndState[key] - curCnt\n if diff > 0:\n diffState[key] = 
CardCosts[key]\n\n return diffState", "def _measure(d, sources, target, niter=25, bound=None):\n uniques = {}\n for source in sources:\n others = list(sources)\n others.remove(source)\n others = list(flatten(others))\n uniques[source] = two_way_skar(d, [source, target], others)\n return uniques", "def find_pair(numbers, target_sum):\n for num in numbers:\n partner_num = target_sum - num\n if partner_num in numbers:\n return num * partner_num", "def singleNumber2(self, nums):\n hash_table={}\n \n for i in nums:\n try:\n hash_table.pop(i)\n except:\n hash_table[i] = 1\n \n return hash_table.popitem()[0]" ]
[ "0.6771178", "0.6764401", "0.6693631", "0.65968764", "0.6315861", "0.62800086", "0.62199515", "0.62003785", "0.61241025", "0.6120513", "0.61066025", "0.5899261", "0.56001204", "0.55316275", "0.5446655", "0.5416312", "0.5380408", "0.5378301", "0.537053", "0.5293179", "0.52861613", "0.52744466", "0.5259028", "0.5242472", "0.5236648", "0.5190447", "0.51706314", "0.51618874", "0.5152051", "0.5115063" ]
0.6832244
0
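A minimal standalone sketch of the dictionary-of-differences two-sum technique from the record above; the function name and the sample input are illustrative assumptions.

def two_sum(nums, target):
    # Map each needed complement (target - value) to the index where it was seen.
    needed = {}
    for i, value in enumerate(nums):
        if value in needed:            # this value completes an earlier element
            return [needed[value], i]
        needed[target - value] = i     # remember what would complete this value
    return []

print(two_sum([2, 7, 11, 15], 9))  # [0, 1]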
Checks if the user can afford the item, deducts the item cost from self.resources, and adds the item (or the purchase's effects) to the user. Returns True if purchased, False if not. Default usage is to pass an item name from purchase.py. If a Balance object "balance" is given INSTEAD of "item", it is used directly.
def purchase(self, item=None, balance=None): if item!= None: cost = purchases.getCost(item) if self.affords(cost): self.payFor(cost) # TODO: actually do whatever was purchased # self.applyItem(item) return True else: return False elif balance!= None: if self.affords(balance): self.payFor(balance) return True else: return False else: raise ValueError('item or balance object must be given!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def userCanAffordItemObj(self, user : bbUser.bbUser, item : bbItem.bbItem) -> bool:\n return user.credits >= item.getValue()", "def can_afford(self, item_name):\n item = self.get(item_name)\n for resource in RESOURCES:\n if item.cost.get(resource, 0) > self.game.resources.get(resource):\n return False\n\n return True", "def userBuyWeaponObj(self, user : bbUser.bbUser, requestedWeapon : bbWeapon.bbWeapon):\n if self.userCanAffordItemObj(user, requestedWeapon):\n self.weaponsStock.removeItem(requestedWeapon)\n user.credits -= requestedWeapon.getValue()\n user.inactiveShips.addItem(requestedWeapon)\n else:\n raise RuntimeError(\"user \" + str(user.id) + \" attempted to buy weapon \" + requestedWeapon.name + \" but can't afford it: \" + str(user.credits) + \" < \" + str(requestedWeapon.getValue()))", "def purchase(self, item_type):", "def donate(self):\n\n # Get item\n import converter\n self.hero.inventory_menu()\n item = prompt(\"Select a weapon, shield or armor to donate. Or \\\npress enter to exit. \").lower()\n item = converter.convert(item)\n\n # If item is a weapon, shield or armor, accept the donation\n if isinstance(item, items.Weapon) or isinstance(item, items.Shield) or isinstance(item, items.Armor):\n if item in self.hero.inventory:\n self.donations.append(item)\n self.hero.drop(item)\n self.sort_donations()\n prompt(\"\\\"Thank you for your donation.\\\"\")\n else:\n prompt(\"You don't have one!\")\n\n # If item is a real item but is not in the above classes, do not accept.\n elif item != False:\n prompt(\"That type of item is not needed.\")", "def userBuyTurretObj(self, user : bbUser.bbUser, requestedTurret : bbTurret.bbTurret):\n if self.userCanAffordItemObj(user, requestedTurret):\n self.turretsStock.removeItem(requestedTurret)\n user.credits -= requestedTurret.getValue()\n user.inactiveShips.addItem(requestedTurret)\n else:\n raise RuntimeError(\"user \" + str(user.id) + \" attempted to buy turret \" + requestedTurret.name + \" but can't afford it: \" + str(user.credits) + \" < \" + str(requestedTurret.getValue()))", "def is_available(self, item_name):\n item = self.get(item_name)\n if not item:\n return False\n for resource in RESOURCES:\n if self.game.resources.get(resource) < item.prerequisites.get(resource, 0):\n return False\n for required_item in item.prerequisites[\"items\"]:\n if not self.game.has_item(required_item):\n return False\n for research in item.prerequisites[\"research\"]:\n if research not in self.game.state.research_completed:\n return False\n for trigger in item.prerequisites[\"triggers\"]:\n if trigger not in self.game.state.triggers:\n return False\n return True", "def buy_item(self, item):\n if self.amount < item.price:\n custom_log(\"Insufficient amount. Insert more coins.\", MSG_ERROR)\n else:\n self.amount = round((self.amount - item.price), 2)\n item._buy()\n custom_log(f\"You bought - {item.name}, remaining cash - €{self.amount}\", MSG_DEBUG)", "def has_item(item: Item):\n async def _wrapper(ctx):\n if not (res := 0 < await ctx.db.get(\"items\", ctx.author, item.id)):\n name = (f\"an \" if any(item.name.startswith(vowel) for vowel in \"aeiou\") else \"a \") + f\"**{item}**\"\n await ctx.send(f\"You need to own {name} in order to use this command.\" + (\n f\" You can go buy one from the shop! 
(`{ctx.clean_prefix}shop`)\" if item.buyable else \"\"\n ))\n return res\n\n return discord.ext.commands.check(_wrapper)", "def userCanAffordWeaponIndex(self, user : bbUser.bbUser, index : int) -> bool:\n return self.userCanAffordItemObj(user, self.weaponsStock[index].item)", "def _apply_item(self, item: Item) -> bool:\n return False", "def userBuyModuleObj(self, user : bbUser.bbUser, requestedModule : bbModule.bbModule):\n if self.userCanAffordItemObj(user, requestedModule):\n self.modulesStock.removeItem(requestedModule)\n user.credits -= requestedModule.getValue()\n user.inactiveShips.addItem(requestedModule)\n else:\n raise RuntimeError(\"user \" + str(user.id) + \" attempted to buy module \" + requestedModule.name + \" but can't afford it: \" + str(user.credits) + \" < \" + str(requestedModule.getValue()))", "def do_use(self, arg):\r\n itemToUse = arg.lower()\r\n \r\n if itemToUse == '':\r\n print('Use what? Type \"inv\" to see the items in your invetory.')\r\n return\r\n \r\n cantUse = False\r\n \r\n #look up the item the player describes\r\n invDescWords = getAllDescWords(inventory)\r\n \r\n if itemToUse not in invDescWords:\r\n print('You do not have that item to use it')\r\n return\r\n \r\n for item in getAllItemsMatchingDesc(itemToUse, inventory):\r\n if worldItems[item].get(USEABLE, True) == False:\r\n cantUse = True\r\n continue\r\n print('%s' % (worldItems[item][USEDESCTRUE]))\r\n #print('You use %s' % (worldItems[item][SHORTDESC]))\r\n #inventory.remove(item) \r\n return\r\n \r\n if cantUse:\r\n print('You cannot use \"%s\".' % (itemToUse))\r\n else:\r\n print('You do not have that item to use.')", "def userBuyShipObj(self, user : bbUser.bbUser, requestedShip : bbShip.bbShip):\n if self.userCanAffordItemObj(user, requestedShip):\n self.shipsStock.removeItem(requestedShip)\n user.credits -= requestedShip.getValue()\n user.inactiveShips.addItem(requestedShip)\n else:\n raise RuntimeError(\"user \" + str(user.id) + \" attempted to buy ship \" + requestedShip.name + \" but can't afford it: \" + str(user.credits) + \" < \" + str(requestedShip.getValue()))", "def apply_effects(self, item_name):\n item = self.get(item_name)\n\n # Enable commands\n for command in item.effects.get(\"enable_commands\", []):\n if command not in self.game.state.commands_enabled:\n self.game.alert(\"You unlocked the `{}` command\", command)\n self.game.state.commands_enabled.append(command)\n\n # Enable resouces\n for resources in item.effects.get(\"enable_resources\", []):\n if resources not in self.game.state.resources_enabled:\n self.game.alert(\"You can now mine *{}*.\", resources)\n self.game.state.resources_enabled.append(resources)\n\n # Enable items\n for item_name in item.effects.get(\"enable_items\", []):\n if item_name not in self.game.state.tools_enabled:\n self.game.alert(\"You can now craft ${}$.\", item_name)\n self.game.state.tools_enabled.append(item_name)\n\n # Enable research\n for research in item.effects.get(\"enable_research\", []):\n if research not in self.game.state.research_enabled:\n self.game.alert(\"You can now research @{}@.\", research)\n self.game.state.research_enabled.append(research)\n\n # Trigger flags\n for trigger in item.effects.get(\"triggers\", []):\n if trigger not in self.game.state.triggers:\n self.game.state.triggers.append(trigger)\n\n # Grant resources\n for resource in RESOURCES:\n if resource in item.effects:\n value = to_float(item.effects[resource])\n self.game.resources.add(resource, value)\n if value > 0:\n self.game.alert(\"You found *{} {}*.\", value, 
resource)\n else:\n self.game.alert(\"You lost *{} {}*.\", -value, resource)\n\n # Change mining difficulty\n for resource in RESOURCES:\n change = item.effects.get(f\"{resource}_mining_difficulty\", None)\n if change:\n change = to_float(change)\n self.game.mining_difficulty.multiply(resource, 1 - change)\n self.game.alert(\n \"*{}* mining difficulty reduced by {:.0%}.\", resource, change\n )\n\n # Trigger events\n self.game.events.trigger(*item.effects.get(\"events\", []))", "def _is_item_allowed(resource, item, resourcesalloweddict, resourcesuseddict):\n\n if item in resourcesalloweddict[resource]:\n # this is semi nonsensical, but allows us to indicate which ports are used\n # through get_resource_information()\n resourcesuseddict[resource].add(item)\n return True\n\n else:\n return False", "def buy_shoppingitem(self, user_id, shoppinglist_id, item_id):\n item = self.get_shoppingitem(user_id, shoppinglist_id, item_id)\n if not item['bought']:\n item['bought'] = True", "def purchase_item(self):\r\n self.purchased_callback()\r\n self.status = 'purchased'\r\n self.fulfilled_time = datetime.now(pytz.utc)\r\n self.save()", "def buy_item(self, item_name, cost, additional_cps):\n if self._current_cookies >= cost:\n self._current_cookies -= cost\n self._current_cps += additional_cps\n self._history.append((self._current_time, item_name,\n cost, self._total_cookies_produced))", "def _apply_item(self, item: Item) -> bool:\n if self.locked:\n self.__locked = item.item_type != self.__key\n return not self.locked", "def qualifies(self, weapon):\n return True", "def buy_item(self, item_name, cost, additional_cps):\n if self.get_cookies() < cost:\n return\n else:\n self._current_cookies -= cost\n self._current_cps += additional_cps\n self._history.append((self.get_time(), item_name, cost, self._total_cookies))", "def buy_item(self, item_name, cost, additional_cps):\n if self._current_cookies >= cost:\n self._current_cookies -= cost\n self._cps += additional_cps\n self._history.append((self._time, item_name, cost, self._total_cookies))\n else:\n pass", "def is_satisfied(self, item: Product) -> bool:\n return self.satisfied(item)", "def execute_use(item_id):\r\n if (item_id in inventory.keys()) or (item_id in current_room[\"items\"].keys()):\r\n if items[item_id][\"use\"] != False:\r\n items[item_id][\"use\"]()\r\n global valid_move\r\n valid_move = True\r\n else:\r\n wrap_print(\"You cannot use that.\")\r\n else:\r\n wrap_print(\"You can't see that in the room.\")", "def buy_item(self, item_name, cost, additional_cps):\n if cost <= self._current_cookies:\n self._current_cookies -= cost\n self._current_cps += additional_cps\n self._history.append((self._current_time_sec, item_name, cost, self._total_cookies))", "def on_use(self):\n assert self.can_use, 'Used an unuseable item!'", "async def _vis_buy(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n number, item = ch.parse_number_and_name(args)\n if item:\n await ctx.send(vis_helpers.shop_buy(ctx.user_object, item, number))", "async def balance(self, ctx, name=None):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n user: User = ctx.user_object\n item = Item.objects.get(name=\"coins\")\n\n if name is None:\n amount = '{:,}'.format(user.get_item_by_item(COINS).amount)\n name = get_display_name(ctx.author)\n await ctx.send(f'{name} has {amount} coins')\n elif name == 'universe':\n await ctx.send('As all things should be.')\n else:\n user = User.objects.filter(Q(name__icontains=name) | Q(nick__icontains=name))\n if not 
user:\n await ctx.send(f'Name {name} not found in server.')\n elif len(user) > 1:\n await ctx.send(f'Input {name} can refer to multiple people.')#({members})')\n else:\n user = user[0]\n amount = '{:,}'.format(user.get_item_by_item(COINS).amount)\n await ctx.send(f'{user.plain_name} has {amount} coins')", "def tier_trading(self, item):\r\n\r\n # Initial tier is the item the user has going into the store and final\r\n # tier is the item the user has when leaving the store\r\n initial_tier = self.item.tier\r\n final_tier = item.tier\r\n\r\n # Not allowing items that are too large to be carried\r\n if item.size is False:\r\n self.add = False\r\n print(\"The\", item.name_i, \"is too big to carry around the mall.\" +\r\n \"\\nPlease select a different item.\\n\\nAfter you have\" +\r\n \" checked all items, if no item of the proper tier\" +\r\n \" exists\\nplease type [4] to leave the store.\")\r\n\r\n # Standard jumping of tier trading and checking to make sure the final\r\n # tier is one tier higher than the initial tier\r\n elif final_tier == initial_tier + 1:\r\n self.add = True\r\n\r\n # Jumping exceptions; if the initial item is earrings, that can jump\r\n # to purse, and if the initial item is iPod_Shuffle, that can jump\r\n # to Air_Jordan_Space_Jam_11\r\n elif self.item.name_i == 'Earrings' and item.name_i == 'Purse':\r\n self.add = True\r\n print(\"You have hit a jumping exception and get to skip a tier!\")\r\n\r\n elif (self.item.name_i == 'iPod_Shuffle' and\r\n item.name_i == 'Air_Jordan_Space_Jam_11'):\r\n self.add = True\r\n print(\"You have hit a jumping exception and get to skip a tier!\")\r\n\r\n # If the tier is not acceptable we have to set self.add back to False\r\n else:\r\n self.add = False\r\n print(\"You are not allowed to select items in that tier.\"\r\n \"\\n\\nPlease pick another item one tier higher than your\" +\r\n \" current tier.\\n\\nAfter you have checked all items,\" +\r\n \" if no item of the proper tier exists,\\nplease type [4]\" +\r\n \" to leave the store.\")" ]
[ "0.69444394", "0.6761436", "0.6278624", "0.62341774", "0.6051828", "0.5915107", "0.5884033", "0.5883767", "0.5861438", "0.58475995", "0.5811648", "0.5739377", "0.56401324", "0.560546", "0.5601953", "0.5595759", "0.55743337", "0.5556562", "0.5518422", "0.55150044", "0.5479447", "0.5479012", "0.5467133", "0.5442926", "0.5430374", "0.54169637", "0.53977585", "0.53635144", "0.5362871", "0.5350916" ]
0.7959187
0
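A minimal sketch of the afford-then-deduct pattern described in the record above, reduced to a bare wallet holding credits; the class, method names, and prices are illustrative assumptions rather than the purchase.py API.

class Wallet:
    def __init__(self, credits):
        self.credits = credits

    def affords(self, cost):
        # A purchase is allowed only when the full cost is covered.
        return self.credits >= cost

    def purchase(self, cost):
        # Deduct the cost and report success; leave the balance untouched on failure.
        if not self.affords(cost):
            return False
        self.credits -= cost
        return True

wallet = Wallet(100)
print(wallet.purchase(60))  # True  -> 40 credits remain
print(wallet.purchase(60))  # False -> cannot afford a second one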
Returns the date reformatted into Python datetime format.
def get_python_date(self): return dateutil.parser.parse(self.iso_date)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modis_to_from_pydatetime(date):\n \n if isinstance(date, (str, unicode)): \n return dt.datetime.strptime(date[1:], '%Y%j').date()\n return dt.datetime.strftime(date, 'A%Y%j')", "def _to_date(self, x):\n if isinstance(x, datetime.datetime):\n return x.date()\n return x", "def todate(self):\n return self._date", "def date_to_python(self, value):\r\n # this throws away fractions of a second\r\n return datetime(*strptime(value[:-5], \"%Y-%m-%dT%H:%M:%S\")[0:6])", "def to_datetime(self,date):\n\n dt = datetime.datetime(date.year,date.month,date.day)\n return timezone.make_aware(dt, timezone.get_default_timezone())", "def todate(self):\n return date(self.year, self.month, self.day)", "def convert(date):\n converted_date = datetime.datetime.strptime(date, \n \"%Y-%m-%d\").date()\n return converted_date", "def get_date(date):\n return date", "def get_formatted_date(self, date):\n\n formatted_date = date\n\n possible_datetime_formats = [\n \"%Y-%m-%dT%H:%M:%S%z\", # \"2021-10-19T16:46:02Z\"\n \"%a, %d %b %Y %H:%M:%S %z\", # \"Tue, 19 Oct 2021 21:00:13 +0300\"\n \"%a, %d %b %Y %H:%M:%S %Z\", # \"Tue, 19 Oct 2021 18:54:00 GMT\"\n \"%a, %d %b %Y %H:%M:%S\", # \"Tue, 19 Oct 2021 18:54:00\"\n ]\n\n for format in possible_datetime_formats:\n try:\n formatted_date = datetime.strptime(date, format).strftime(\"%Y%m%d\")\n except:\n pass\n return formatted_date", "def convert_date_time(self, dt):\n return datetime.fromtimestamp(dt).strftime(\"%Y-%m-%d\")", "def convert_date(raw_date):\n if raw_date:\n date = datetime.strptime(raw_date, \"%Y-%m-%d\")\n return date.strftime(\"%m/%d/%YZ\")", "def _date_to_datetime(value):\r\n assert isinstance(value, datetime.date)\r\n return datetime.datetime(value.year, value.month, value.day)", "def get_date(date_time):\n year = str(date_time.year)\n month = str(date_time.month)\n day = str(date_time.day)\n\n # Formata\n year = format_value(year)\n month = format_value(month)\n day = format_value(day)\n\n return year + '-' + month + '-' + day", "def convert_str2date(date):\n import datetime\n date = str(date)\n year = int(date[0:4])\n month = int(date[4:6])\n day = int(date[6:8])\n return datetime.datetime(year,month,day)", "def get_date(self, datetime):\n return datetime.date()", "def convert_date(self, date=None):\n if date is not None:\n format_str = '%d/%m/%Y'\n converted_date = datetime.strptime(date, format_str)\n return converted_date.date()", "def str2date(date):\n return datetime.datetime.strptime(date, \"%m/%d/%Y\").date()", "def numeric_date_recover(self):\n \n sp_time_zone, current_datetime = self.setup_datetime() \n converter2sptimezone = current_datetime.astimezone(sp_time_zone)\n \n return converter2sptimezone.strftime('%d-%m-%Y')", "def trost2date(trost_date):\n year, month, day = (int(val) for val in trost_date.split('-'))\n return datetime.date(year, month, day)", "def convert_datetime_to_date(datetime_obj: datetime) -> date:\n assert isinstance(datetime_obj, datetime), \"Not a datetime object.\"\n return datetime_obj.date()", "def _get_datetime(date):\n return datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.000%z')", "def get_date(self):\n return datetime.date(\n int(self.kwargs['year']),\n int(self.kwargs['month']),\n int(self.kwargs['day'])\n )", "def date_to_python_date(date):\n try:\n ret_date = datetime.datetime.strptime(date, \"%d/%m/%y\")\n except ValueError:\n #another format -- a year can also have four digits\n ret_date = datetime.datetime.strptime(date, \"%d/%m/%Y\")\n return ret_date", "def pretty_date(self, date):\r\n return 
time.strftime(\"%a, %b %d, %Y\", time.strptime(date,\"%Y%m%d\"))", "def get_date(self):\n return self.date.strftime(\"%a %x\")", "def date(self):\n return date(self._year, self._month, self._day)", "def parseDate(self, date):\n\n temp = dateparser.parse(date)\n temp_date = temp.strftime(\"%Y-%m-%d\")\n\n return temp_date", "def getDatetime(self, date):\n dt = datetime.datetime.strptime(date, \"%Y-%m-%d@%H:%M\")\n return dt", "def txfDate(date):\n return date.strftime('%m/%d/%Y')", "def datetime(self):\n year = self._year\n # Check if the first word of the date (the month) is either january or\n # february, and increase the year by 1.\n if self._date.split(' ')[0].lower() in ['january', 'february']:\n year = int(year) + 1\n date_string = '%s %s %s' % (self._day,\n self._date,\n year)\n return datetime.strptime(date_string, '%a %B %d %Y')" ]
[ "0.71629053", "0.6894753", "0.68495804", "0.6835706", "0.68110466", "0.6798551", "0.678441", "0.6690061", "0.6632137", "0.6628195", "0.66245985", "0.66197765", "0.6619198", "0.660381", "0.6570892", "0.65689874", "0.653436", "0.6477465", "0.64709014", "0.6451577", "0.6413728", "0.64090633", "0.6406376", "0.64018065", "0.63957185", "0.6390311", "0.63800454", "0.6378655", "0.63571095", "0.63373744" ]
0.70473415
1
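A minimal sketch of parsing an ISO-8601 timestamp string into a Python datetime with dateutil, as the record above does; the sample timestamp is an illustrative assumption.

import dateutil.parser

iso_date = "2021-10-19T16:46:02Z"            # illustrative input
parsed = dateutil.parser.parse(iso_date)     # dateutil handles the trailing 'Z' (UTC)
print(parsed.isoformat())                    # 2021-10-19T16:46:02+00:00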
Renders a page for a particular compound.
def CompoundPage(request): form = compound_form.CompoundForm(request.GET) if not form.is_valid(): logging.error(form.errors) raise Http404 # Compute the delta G estimate. kegg_id = form.cleaned_compoundId compound = models.Compound.objects.get(kegg_id=kegg_id) compound.StashTransformedSpeciesEnergies(form.cleaned_ph, form.cleaned_pmg, form.cleaned_ionic_strength) delta_g_estimate = compound.DeltaG( pH=form.cleaned_ph, pMg=form.cleaned_pmg, ionic_strength=form.cleaned_ionic_strength) template_data = {'is_superuser': django_utils.IsSuperUser(request), 'compound': compound, 'ph': form.cleaned_ph, 'pmg': form.cleaned_pmg, 'ionic_strength': form.cleaned_ionic_strength, 'delta_g_estimate': delta_g_estimate, 'no_dg_explanation': compound.no_dg_explanation, 'kegg_link': compound.GetKeggLink()} return render_to_response('compound_page.html', template_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main_page():\n pages=get_accounts()\n return render_template('disp.html',pages=pages)", "def renderPage():\n return render_template(\"index.html\")", "def main_page():\n return render_template(\"main_page.html\")", "def homepage():\n\n pagesClassIDs = {\n \"index\": {\n \"bannertitle\": [],\n \"subtitle\": [],\n \"firstText\": [],\n \"secondText\": []\n }\n }\n for key in pagesClassIDs[\"index\"].keys():\n pagesClassIDs[\"index\"][key].append(\n str(\n pageTexts.query.filter_by(pageID=key,\n htmlName=\"index\").first()))\n\n experts = Expert.query.all()\n data = {\n \"modules\": [],\n \"discipline\": [],\n \"subdivision\": [],\n \"publications\": [],\n \"People\": []\n }\n for expert in experts:\n if not expert.is_admin and str(expert.accepted) == \"Yes\":\n data[\"modules\"].append(str(expert.affiliation))\n data[\"discipline\"].append(str(expert.discipline))\n data[\"subdivision\"].append(str(expert.specialization))\n data[\"publications\"].append(0)\n if str(expert.core_exp) == \"Yes\":\n data[\"People\"].append(\n str(expert.title) + \" \" + str(expert.first_name) + \" \" +\n str(expert.last_name) + \"(T)\")\n else:\n data[\"People\"].append(\n str(expert.title) + \" \" + str(expert.first_name) + \" \" +\n str(expert.last_name) + \"(N)\")\n\n crdata = pd.DataFrame(data).groupby(\n ['modules', 'discipline', 'subdivision'], as_index=False).agg({\n 'publications':\n 'sum',\n 'People':\n lambda x: ','.join(x)\n })\n circleData = createJSON(crdata)\n\n return render_template(\n 'home/index.html',\n pageDicts=pagesClassIDs,\n bannertitle=\"Introduction to Research\",\n subtitle=\"Interactive Causal Mapping\",\n title=\"Home\",\n circleData=circleData)", "def scoringpage (request):\n # Define views here\n context = {}\n return render(request, 'scoringPage.html', context=context)", "def main():\n page = get_page_arg()\n\n items = load_data(page)\n total_pages = get_total_pages()\n context = {\"current_page\": page,\n \"total_pages\": total_pages,\n \"items\": items}\n\n return render_template('index.html', **context)", "def page_main():\n \n cur_page = request.form['page']\n session['page'] = utils.id_to_page(cur_page,g.graph)\n return render_template('do_action.html')", "def main_page(self):\n return render_template(\"index.html\", traders_count=len(self.market.traders),\n current_iteration=self.market.current_iteration, traders=self.market.traders,\n buy_orders=self.market.buy_orders, sell_orders=self.market.sell_orders,\n current_stock_price=self.market.stock.price_history[-1])", "def process_book():\n return render_template('process_book.html')", "def create_page(self):", "def page(request, pagenum):\n context = Paginator().filter(Book.objects.all(), pagenum)\n return render(request, 'books/bookListPage.html', context)", "def render(self):\n navbar = self.render_navbar()\n postcards = self.render_postcards()\n sm_metadata = self.render_social_media_metadata()\n\n return PAGE_TEMPLATE % (sm_metadata, navbar, postcards)", "def main_page():\n return render_template(\"index.html\")", "def index(self):\n return render(\"/derived/rock/index.mako\")", "def prototype_page1():\n return render_template('Prototype1.html')", "def plot_page( stat_type ) :\r\n logger.debug( f\"stat_type={stat_type}\" )\r\n param = request.args[\"param\"]\r\n\r\n script = server_document( url=f'http://localhost:5006/{stat_type_2_plot_route[stat_type]}',\r\n arguments={'param' : param ,\r\n 'stat_type' : stat_type ,\r\n 'session_id' : session[ session_info.session_id_key ] } )\r\n\r\n return 
render_template('plot_page.html',\r\n script=script ,\r\n param=param ,\r\n stat_type=param_stats.StatTypes[stat_type].value )", "def data_page():\n\n return render_template('Data_Page.html')", "def get(self, request ):\n return render(request, \"main_display_cards.html\")", "def render_all(pages):\n for page in pages:\n render_template(page['template'], page['output'], page['values'])", "def pagemainred():\n return render_template('do_action.html')", "def template1(self):\n self.indirectobject(1, 0, \"<<\\n /Type /Catalog\\n /Outlines 2 0 R\\n /Pages 3 0 R\\n>>\")\n self.indirectobject(2, 0, \"<<\\n /Type /Outlines\\n /Count 0\\n>>\")\n self.indirectobject(3, 0, \"<<\\n /Type /Pages\\n /Kids [4 0 R]\\n /Count 1\\n>>\")\n self.indirectobject(4, 0, \"<<\\n /Type /Page\\n /Parent 3 0 R\\n /MediaBox [0 0 612 792]\\n /Contents 5 0 R\\n /Resources <<\\n /ProcSet [/PDF /Text]\\n /Font << /F1 6 0 R >>\\n >>\\n>>\")\n self.indirectobject(6, 0, \"<<\\n /Type /Font\\n /Subtype /Type1\\n /Name /F1\\n /BaseFont /Helvetica\\n /Encoding /MacRomanEncoding\\n>>\")", "def contract(docid):\n return render_template('doc.html', docid=docid)", "def index_page():\n\n return render_template(\"index.html\")", "def index_page():\n\n return render_template(\"index.html\")", "def index_page():\n\n return render_template(\"index.html\")", "def index(self, **args):\n if not self.isConfigured:\n self.configure()\n s = self.override()\n if not s:\n self.pageConfig['timeStamp'] = time.strftime('%a %b %d %X %Z %Y')\n contents = self.content(**args) # Make sure contents is run first (so it\n # can change any pageConfig entries if desired\n s = startPage % self.pageConfig\n s = s + htmlDiv('header', self.header(**args),\n keepEmptyDiv=self.pageConfig['keepEmptyHeader'])\n s = s + htmlDiv('navigation', self.navigation(**args),\n keepEmptyDiv=self.pageConfig['keepEmptyNavigation'])\n s = s + htmlDiv('content', contents,\n keepEmptyDiv=self.pageConfig['keepEmptyContent'])\n s = s + htmlDiv('footer', self.footer(**args),\n keepEmptyDiv=self.pageConfig['keepEmptyFooter'])\n s = s + endPage\n return s", "def display():\n\n #still needs some cleanup on imagry and what the site is about. 
\n\n return render_template(\"index.html\")", "def read_page(request, slug):\n pg = get_object_or_404(Page, slug=slug)\n return render(request, 'scrib/templates/read_page.html', {'pg': pg})", "def create_species_cb_page(outfile: TextIO, do_print: bool, species: TMB_Classes.SpeciesClass,\n measurement_data: TMB_Classes.SpeciesMeasurements, refdict: dict) -> Tuple[float, float]:\n if do_print:\n start_page_division(outfile, \"base_page\")\n else:\n common_html_header(outfile, species.binomial() + \" Carapace Breadth\", indexpath=\"../\")\n outfile.write(\" <header id=\\\"\" + species.species + \"_cb.html\\\">\\n\")\n outfile.write(\" <h1 class=\\\"nobookmark\\\"><em class=\\\"species\\\">\" + species.binomial() +\n \"</em> Carapace Breadth</h1>\\n\")\n\n if not do_print:\n outfile.write(\" <nav>\\n\")\n outfile.write(\" <ul>\\n\")\n outfile.write(\" <li><a href=\\\"\" + rel_link_prefix(do_print, \"../\") + \"u_\" + species.species +\n \".html\\\">\" + fetch_fa_glyph(\"info\") + \"Species page</a></li>\\n\")\n outfile.write(\" <li><a href=\\\"index.html\\\">\" + fetch_fa_glyph(\"measure\") +\n \"Measurement Guide</a></li>\\n\")\n outfile.write(\" </ul>\\n\")\n outfile.write(\" </nav>\\n\")\n outfile.write(\" </header>\\n\")\n\n cdat = TMB_Measurements.combine_measurement_data(measurement_data.all)\n mdat = TMB_Measurements.combine_measurement_data(measurement_data.male)\n fdat = TMB_Measurements.combine_measurement_data(measurement_data.female)\n filename = WEBOUT_PATH + \"sizes/\" + species.species + \"_cb.png\"\n TMB_Measurements.plot_measurement_data(measurement_data, cdat, mdat, fdat, filename)\n\n mean = numpy.mean(cdat)\n std = numpy.std(cdat)\n\n outfile.write(\" <h2>Summary</h2>\\n\")\n slabel = size_label(mean + 1.96*std)\n outfile.write(\" <p>{} Carapace Breadth: {:0.1f} mm ± {:0.2f} (sd), 95% range: \"\n \"{:0.1f}&ndash;{:0.1f} mm</p>\\n\".format(slabel, mean, std, max(mean - 1.96*std, 0), mean + 1.96*std))\n\n outfile.write(\" <figure class=\\\"sizeimg\\\">\\n\")\n outfile.write(\" <img src=\\\"{0}_cb.png\\\" alt=\\\"size data for {1}\\\" \"\n \"title=\\\"size data for {1}\\\"/>\\n\".format(species.species, species.binomial()))\n outfile.write(\" </figure>\\n\")\n\n with open(WEBOUT_PATH + \"sizes/\" + species.species + \"_cb.txt\", \"w\") as datfile:\n outfile.write(\" <h2>Data</h2>\\n\")\n outfile.write(\" <p>All measurements are in millimeters (mm). 
<a href=\\\"\" + species.species + \"_cb.txt\\\">\" +\n fetch_fa_glyph(\"file download\") + \" Download Data</a></p>\")\n if \"individual\" in measurement_data.all:\n outfile.write(\" <h3>Individuals</h3>\\n\")\n outfile.write(\"<table class=\\\"size_data_table\\\">\\n\")\n outfile.write(\"<tr><th>Reference</th><th>Sex</th><th>Width</th><th>Notes</th></tr>\\n\")\n datfile.write(\"Individuals\\n\")\n datfile.write(\"Reference\\tSex\\tWidth\\tNotes\\n\")\n idata = measurement_data.all[\"individual\"]\n for d in idata:\n rstr = format_reference_cite(refdict[d.ref], do_print, AUTHOR_PAREN, \"../\")\n outfile.write(\"<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td></tr>\\n\".format(rstr, d.sex, d.value,\n d.notes))\n datfile.write(\"{}\\t{}\\t{}\\t{}\\n\".format(d.ref, d.sex, d.value, d.notes))\n outfile.write(\"</table>\\n\")\n datfile.write(\"\\n\")\n\n if \"range\" in measurement_data.all:\n outfile.write(\" <h3>Ranges</h3>\\n\")\n outfile.write(\"<table class=\\\"size_data_table\\\">\\n\")\n outfile.write(\"<tr><th>Reference</th><th>Sex</th><th>n</th><th>Min</th><th>Max</th><th>Notes</th></tr>\\n\")\n datfile.write(\"Ranges\\n\")\n datfile.write(\"Reference\\tSex\\tn\\tMin\\tMax\\tNotes\\n\")\n idata = measurement_data.all[\"range\"]\n for d in idata:\n rstr = format_reference_cite(refdict[d.ref], do_print, AUTHOR_PAREN, \"../\")\n outfile.write(\"<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td>\"\n \"<td>{}</td><td>{}</td></tr>\\n\".format(rstr, d.sex, d.n, d.value.min_val,\n d.value.max_val, d.notes))\n datfile.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(d.ref, d.sex, d.n, d.value.min_val, d.value.max_val,\n d.notes))\n outfile.write(\"</table>\\n\")\n datfile.write(\"\\n\")\n\n if \"mean\" in measurement_data.all:\n outfile.write(\" <h3>Means</h3>\\n\")\n outfile.write(\"<table class=\\\"size_data_table\\\">\\n\")\n outfile.write(\"<tr><th>Reference</th><th>Sex</th><th>n</th><th>Mean</th><th>Notes</th></tr>\\n\")\n datfile.write(\"Means\\n\")\n datfile.write(\"Reference\\tSex\\tn\\tMean\\tNotes\\n\")\n idata = measurement_data.all[\"mean\"]\n for d in idata:\n rstr = format_reference_cite(refdict[d.ref], do_print, AUTHOR_PAREN, \"../\")\n outfile.write(\"<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td>\"\n \"<td>{}</td></tr>\\n\".format(rstr, d.sex, d.n, d.value.mean, d.notes))\n datfile.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(d.ref, d.sex, d.n, d.value.mean, d.notes))\n outfile.write(\"</table>\\n\")\n datfile.write(\"\\n\")\n\n if \"mean/sd\" in measurement_data.all:\n outfile.write(\" <h3>Means w/Standard Deviation</h3>\\n\")\n outfile.write(\"<table class=\\\"size_data_table\\\">\\n\")\n outfile.write(\"<tr><th>Reference</th><th>Sex</th><th>n</th><th>Mean</th><th>SD</th><th>Notes</th></tr>\\n\")\n datfile.write(\"Means w/Standard Deviation\\n\")\n datfile.write(\"Reference\\tSex\\tn\\tMean\\tSD\\tNotes\\n\")\n idata = measurement_data.all[\"mean/sd\"]\n for d in idata:\n rstr = format_reference_cite(refdict[d.ref], do_print, AUTHOR_PAREN, \"../\")\n outfile.write(\"<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td>\"\n \"<td>{}</td></tr>\\n\".format(rstr, d.sex, d.n, d.value.mean, d.value.sd, d.notes))\n datfile.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(d.ref, d.sex, d.n, d.value.mean, d.value.sd, d.notes))\n outfile.write(\"</table>\\n\")\n datfile.write(\"\\n\")\n\n if \"mean/se\" in measurement_data.all:\n outfile.write(\" <h3>Means w/Standard Error</h3>\\n\")\n outfile.write(\"<table class=\\\"size_data_table\\\">\\n\")\n 
outfile.write(\"<tr><th>Reference</th><th>Sex</th><th>n</th><th>Mean</th><th>SE</th><th>Notes</th></tr>\\n\")\n datfile.write(\"Means w/Standard Error\\n\")\n datfile.write(\"Reference\\tSex\\tn\\tMean\\tSE\\tNotes\\n\")\n idata = measurement_data.all[\"mean/se\"]\n for d in idata:\n rstr = format_reference_cite(refdict[d.ref], do_print, AUTHOR_PAREN, \"../\")\n outfile.write(\"<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td>\"\n \"<td>{}</td></tr>\\n\".format(rstr, d.sex, d.n, d.value.mean, d.value.se, d.notes))\n datfile.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(d.ref, d.sex, d.n, d.value.mean, d.value.se, d.notes))\n outfile.write(\"</table>\\n\")\n datfile.write(\"\\n\")\n\n if \"mean/sd/min/max\" in measurement_data.all:\n outfile.write(\" <h3>Means w/Range and Standard Deviation</h3>\\n\")\n outfile.write(\"<table class=\\\"size_data_table\\\">\\n\")\n outfile.write(\"<tr><th>Reference</th><th>Sex</th><th>n</th><th>Mean</th><th>SD</th><th>Min</th>\"\n \"<th>Max</th><th>Notes</th></tr>\\n\")\n datfile.write(\"Means w/Range and Standard Deviation\\n\")\n datfile.write(\"Reference\\tSex\\tn\\tMean\\tSD\\tMin\\tMax\\tNotes\\n\")\n idata = measurement_data.all[\"mean/sd/min/max\"]\n for d in idata:\n rstr = format_reference_cite(refdict[d.ref], do_print, AUTHOR_PAREN, \"../\")\n outfile.write(\"<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td>\"\n \"<td>{}</td></tr>\\n\".format(rstr, d.sex, d.n, d.value.mean, d.value.sd,\n d.value.min_val, d.value.max_val, d.notes))\n outfile.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(d.ref, d.sex, d.n, d.value.mean, d.value.sd,\n d.value.min_val, d.value.max_val, d.notes))\n outfile.write(\"</table>\\n\")\n datfile.write(\"\\n\")\n\n if \"classcount\" in measurement_data.all:\n outfile.write(\" <h3>Histogram Counts</h3>\\n\")\n outfile.write(\"<table class=\\\"size_data_table\\\">\\n\")\n outfile.write(\"<tr><th>Reference</th><th>Set</th><th>Sex</th><th>n</th><th>Min</th><th>Max</th>\"\n \"<th>Notes</th></tr>\\n\")\n datfile.write(\"Histogram Counts\\n\")\n datfile.write(\"Reference\\tSet\\tSex\\tn\\tMin\\tMax\\tNotes\\n\")\n idata = measurement_data.all[\"classcount\"]\n classes = set()\n for d in idata:\n classes.add(d.class_id)\n for c in classes:\n current_class = []\n for d in idata:\n if c == d.class_id:\n current_class.append(d)\n for d in current_class:\n rstr = format_reference_cite(refdict[d.ref], do_print, AUTHOR_PAREN, \"../\")\n outfile.write(\"<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td>\"\n \"<td>{}</td></tr>\\n\".format(rstr, c, d.sex, d.n, d.value.min_val, d.value.max_val,\n d.notes))\n datfile.write(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(d.ref, c, d.sex, d.n, d.value.min_val,\n d.value.max_val, d.notes))\n outfile.write(\"<tr><td colspan=7>&nbsp;</td></tr>\\n\")\n datfile.write(\"\\n\")\n outfile.write(\"</table>\\n\")\n\n if do_print:\n end_page_division(outfile)\n else:\n common_html_footer(outfile)\n\n return float(mean), float(std)", "def index_page():\n \n return render_template(\"index.html\")" ]
[ "0.59548026", "0.57890856", "0.57370317", "0.5636853", "0.55066913", "0.54987985", "0.54826313", "0.54345924", "0.542592", "0.53984725", "0.53741604", "0.537301", "0.5337757", "0.5306245", "0.5297516", "0.5291385", "0.5291248", "0.5286969", "0.5282097", "0.5271443", "0.52632856", "0.5255599", "0.52432287", "0.52432287", "0.52432287", "0.52131003", "0.51809216", "0.51636577", "0.5163499", "0.5155086" ]
0.7018998
0
Run SugarPy on a given .mzML file based on identified peptides from an evidences.csv. Translated Ursgal parameters are passed to the SugarPy main function.
def _execute(self): self.time_point(tag="execution") main = self.import_engine_as_python_function() output_file = os.path.join( self.params["output_dir_path"], self.params["output_file"] ) input_file = os.path.join( self.params["input_dir_path"], self.params["input_file"] ) translations = self.params['translations']['_grouped_by_translated_key'] pyqms_params = { "PERCENTILE_FORMAT_STRING": None, "M_SCORE_THRESHOLD": None, "ELEMENT_MIN_ABUNDANCE": None, "MIN_REL_PEAK_INTENSITY_FOR_MATCHING": None, "REQUIRED_PERCENTILE_PEAK_OVERLAP": None, "MINIMUM_NUMBER_OF_MATCHED_ISOTOPOLOGUES": None, "INTENSITY_TRANSFORMATION_FACTOR": None, "UPPER_MZ_LIMIT": None, "LOWER_MZ_LIMIT": None, "MZ_TRANSFORMATION_FACTOR": None, "REL_MZ_RANGE": None, "REL_I_RANGE": None, "INTERNAL_PRECISION": None, "MAX_MOLECULES_PER_MATCH_BIN": None, "SILAC_AAS_LOCKED_IN_EXPERIMENT": None, "BUILD_RESULT_INDEX": None, "MACHINE_OFFSET_IN_PPM": None, "FIXED_LABEL_ISOTOPE_ENRICHMENT_LEVELS": None, "MZ_SCORE_PERCENTILE": None, } sugarpy_params = {} sugarpy_params["charges"] = list( range( self.params["translations"]["precursor_min_charge"], self.params["translations"]["precursor_max_charge"] + 1, ) ) for translated_key, translation_dict in translations.items(): if translated_key == "REL_MZ_RANGE": if self.params["translations"]["ms_level"] == 1: print( """ [ WARNING ] precursor_mass_tolerance_plus and precursor_mass_tolerance_minus [ WARNING ] need to be combined for SugarPy (use of symmetric tolerance window). [ WARNING ] The arithmetic mean is used. """ ) pyqms_params["REL_MZ_RANGE"] = ( float( self.params["translations"]["precursor_mass_tolerance_plus"] ) + float( self.params["translations"][ "precursor_mass_tolerance_minus" ] ) ) / 2.0 if ( self.params["translations"]["precursor_mass_tolerance_unit"] == "da" ): pyqms_params[ "REL_MZ_RANGE" ] = ursgal.ucore.convert_dalton_to_ppm( pyqms_params["REL_MZ_RANGE"], base_mz=self.params["translations"]["base_mz"], ) else: pyqms_params["REL_MZ_RANGE"] = self.params["translations"][ "frag_mass_tolerance" ] if self.params["translations"]["frag_mass_tolerance_unit"] == "da": pyqms_params[ "REL_MZ_RANGE" ] = ursgal.ucore.convert_dalton_to_ppm( pyqms_params["REL_MZ_RANGE"], base_mz=self.params["translations"]["base_mz"], ) pyqms_params["REL_MZ_RANGE"] = pyqms_params["REL_MZ_RANGE"] * 1e-6 elif translated_key in pyqms_params.keys(): pyqms_params[translated_key] = list(translation_dict.values())[0] elif "charge" in translated_key: continue elif translated_key == "mzml_file": sugarpy_params[translated_key] = list(translation_dict.values())[0][0] elif len(translation_dict) == 1: sugarpy_params[translated_key] = list(translation_dict.values())[0] else: print( "The translatd key ", translated_key, " maps on more than one ukey, but no special rules have been defined", ) print(translation_dict) sys.exit(1) sugarpy_params["pyqms_params"] = pyqms_params sugarpy_params["ident_file"] = input_file sugarpy_params["output_file"] = output_file sugarpy_params["force"] = True out = main(**sugarpy_params) self.print_execution_time(tag="execution") return out
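A minimal, hedged sketch (not part of the wrapper above and not Ursgal/SugarPy API): it only illustrates how _execute collapses the asymmetric precursor tolerance into the single symmetric REL_MZ_RANGE that pyqms expects. The standard Dalton-to-ppm conversion is used here as an assumption in place of the wrapper's call to ursgal.ucore.convert_dalton_to_ppm.

def symmetric_rel_mz_range(tol_plus, tol_minus, unit, base_mz):
    # arithmetic mean of the +/- tolerances (the wrapper warns that both
    # values are combined into one symmetric window)
    tolerance = (float(tol_plus) + float(tol_minus)) / 2.0
    if unit.lower() == "da":
        # assumed standard conversion of an absolute Da window to ppm at base_mz;
        # the real wrapper delegates this to ursgal.ucore.convert_dalton_to_ppm
        tolerance = tolerance / float(base_mz) * 1e6
    return tolerance * 1e-6  # pyqms REL_MZ_RANGE is a relative (fractional) window

# e.g. +/- 5 ppm -> 5e-06; +/- 0.01 Da at base m/z 1000 -> 1e-05
print(symmetric_rel_mz_range(5, 5, "ppm", 1000.0))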
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n langs = []\n\n with open(\"sql/07_populate.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in data folder\\n\\n\")\n\n langs = write_lang_city(sql)\n write_groups_diets(sql, langs)\n\n with open(\"sql/10_populate_test_data.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in moc_data folder\\n\\n\")\n write_test_data(sql)\n # This command sets postgis coordinates based on latitude and longitude\n sql.write(\"UPDATE restaurant SET geo_location = ST_POINT(latitude, longitude);\\n\")\n sql.close()", "def main(args):\n ## Starting time\n start_time = datetime.now()\n ## Reading all elements and converting to python dictionary\n param_dict = vars(args)\n ## Checking for correct input\n param_vals_test(param_dict)\n #\n # Creating instance of `ReadML` with the input parameters\n param_dict['ml_args'] = ReadML(**param_dict)\n ## Program message\n prog_msg = param_dict['Prog_msg']\n # Adding additional parameters\n param_dict = add_to_dict(param_dict)\n ##\n ## Creating Folder Structure\n # proj_dict = cwpaths.cookiecutter_paths(__file__)\n proj_dict = param_dict['ml_args'].proj_dict\n proj_dict = directory_skeleton(param_dict, proj_dict)\n ##\n ## Printing out project variables\n print('\\n'+50*'='+'\\n')\n for key, key_val in sorted(param_dict.items()):\n if key != 'Prog_msg':\n print('{0} `{1}`: {2}'.format(prog_msg, key, key_val))\n print('\\n'+50*'='+'\\n')\n ##\n ## Feature keys\n param_dict['feat_cols_dict'] = param_dict['ml_args'].feat_cols_names_dict(\n return_all=True)\n ##\n ## Reading in the main catalogue\n catl_pd = catl_file_read_clean(param_dict, proj_dict)\n ###\n ### ------ Figures ------ ###\n ##\n ## Comparison of estimated group masses via HAM and Dynamical Masses\n frac_diff_model(param_dict, proj_dict, plot_opt=param_dict['plot_opt'])\n #\n # Covariance Matrix\n covariance_plot(catl_pd, param_dict, proj_dict)\n #\n # Traditional methods for estimating masses\n # pred_masses_halo_mass(param_dict, proj_dict)\n #\n # Fractional Difference plots vs True mass of galaxy GROUPS\n # frac_diff_groups_model(param_dict, proj_dict,\n # plot_opt=param_dict['plot_opt'])\n ##\n ## End time for running the catalogues\n end_time = datetime.now()\n total_time = end_time - start_time\n print('{0} Total Time taken (Create): {1}'.format(prog_msg, total_time))", "def run(pars, #parameter files\n #directory of scenario files\n scen_dir = r'C:\\LS\\03_TOOLS\\_git\\COVID_01\\scenarios',\n \n #map to scenario files\n scen_d = {\n 'NoNPI':'NPI_Scenario1_None.R',\n 'BI1918':'NPI_Scenario2_Bootsma_1918Influenza.R',\n 'SouthKorea':'NPI_Scenario3_SouthKorea.R',\n 'Reduced':'NPI_Scenario4_ReducedGamma.R', \n }\n ):\n \n \n \n #===========================================================================\n # precheck \n #===========================================================================\n assert len(pars)==4, 'unexpected inputs count'\n print('pars: \\n%s'%pars)\n \n #check the R Environment variables\n assert 'R_USER' in os.environ\n assert 'R_HOME' in os.environ\n \n #print('R_USER=%s \\nR_HOME=%s'%(os.getenv('R_USER'), os.getenv('R_HOME')))\n\n \n \n \n \n #===========================================================================\n # setup\n #===========================================================================\n s = setup.Setup(setup_name = 'mid_utah_'+pars[2],\n spatial_setup = WestCoastSpatialSetup(),\n nsim = int(pars[1]),\n ti = datetime.date(2020, 3, 6),\n tf = datetime.date(2020, 10, 
1),\n interactive = False,\n write_csv = True,\n dt = 1/4)\n \n #===========================================================================\n # set the scenario parmaters\n #===========================================================================\n\n \n \n assert pars[2] in scen_d, 'unrecognized scenario: %s'%pars[2]\n \n rfp = os.path.join(scen_dir, scen_d[pars[2]])\n assert os.path.exists(rfp)\n \n s.script_npi = rfp\n \n print('set script_npi=%s'%s.script_npi)\n\n #===========================================================================\n # execute\n #===========================================================================\n\n print()\n print()\n print(f\">>> Starting {s.nsim} model runs on {pars[3]} processes\")\n print(f\">>> Setup *** {s.setup_name} *** from {s.ti} to {s.tf} !\")\n print(f\">>> writing to folder : {s.datadir}{s.setup_name}\")\n print()\n print()\n \n tic = time.time()\n \n res_l = seir.run_parallel(s, int(pars[3]))\n print(f\">>> Runs done in {time.time()-tic} seconds...\")", "def main_execute(vars):\n\n # Unpack necessary variables\n # output_directory is the root output folder for the run\n output_directory = vars[\"output_directory\"]\n\n # This will run operations which will:\n # 1) generate new ligands\n # 2) optionally filter ligands\n # 3) optionally convert from 1D smiles to 3D (mol2/PDB)\n\n sys.stdout.flush()\n\n\n smile_file_new_gen, new_gen_ligands_list = operations.populate_generation(vars)\n sys.stdout.flush()\n\n if new_gen_ligands_list is None:\n raise ValueError(\"Population failed to make enough mutants... \\\n Errors could include not enough diversity, too few seeds to the generation, \\\n number_of_mutants is too high, \\\n or all of the seed lack functional groups for performing reactions.\")\n\n sys.stdout.flush()", "def run_script(input_dir, output_dir):\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step 1. load dataset \"\"\"\n print(\"loading data ......\")\n print(\"+++++++Read the surface shape data+++++++\")\n shape_file_name = input_dir + \"aligned_shapes.mat\"\n mat = loadmat(shape_file_name)\n y_design = mat['aligned_shape']\n n, l, m = y_design.shape\n print(\"The dimension of shape matrix is \" + str(y_design.shape))\n print(\"+++++++Read the sphere coordinate data+++++++\")\n template_file_name = input_dir + \"template.mat\"\n mat = loadmat(template_file_name)\n coord_mat = mat['template']\n # d = coord_mat.shape[1]\n print(\"+++++++Read the design matrix+++++++\")\n design_data_file_name = input_dir + \"design_data.txt\"\n design_data = np.loadtxt(design_data_file_name)\n # read the covariate type\n var_type_file_name = input_dir + \"var_type.txt\"\n var_type = np.loadtxt(var_type_file_name)\n print(\"+++++++Construct the design matrix: normalization+++++++\")\n x_design = read_x(design_data, var_type)\n p = x_design.shape[1]\n print(\"The dimension of design matrix is \" + str(x_design.shape))\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step 2. Statistical analysis: including (1) smoothing and (2) hypothesis testing\"\"\"\n gpvals, lpvals_fdr, clu_pvals, efit_beta, efity_design, efit_eta = mfsda.run_stats(y_design, coord_mat, design_data, var_type)\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step3. 
Save all the results\"\"\"\n gpvals_file_name = output_dir + \"global_pvalue.txt\"\n np.savetxt(gpvals_file_name, gpvals)\n lpvals_fdr_file_name = output_dir + \"local_pvalue_fdr.txt\"\n np.savetxt(lpvals_fdr_file_name, lpvals_fdr)\n clu_pvals_file_name = output_dir + \"cluster_pvalue.txt\"\n np.savetxt(clu_pvals_file_name, clu_pvals)", "def main():\n\n args = docopt.docopt(__doc__, version='0.0.1')\n\n # Initialize expyriment & wait its message to show\n initialize.init_arguments(args)\n exp = initialize.init_expyriment(args)\n\n # Useful shortcuts throughout the file\n kb = expyriment.io.Keyboard()\n\n # If we need to calibrate, then do so and terminate.\n if args[\"calibrate\"]:\n calibration(exp, args)\n expyriment.control.end('Merci !', 2000)\n return 0\n\n # Hash table for fast retrieval when presenting: reading from disk is slow!\n hash_table = dict()\n\n # Now let's read the csv file line by line and populate the events.\n # PriorityQueue sort on insertion based on the first element of the\n # inserted tuple: this means your csv file can have random order, or that\n # you can take input from several csv files\n events = queue.PriorityQueue()\n for csv_file in args[\"<file>\"]:\n # Save the path to the CSV file\n exp.add_experiment_info(csv_file)\n\n # Create the path to the stimuli\n bp = args[\"--stim-dir\"]\n\n # Open the csv file and read its rows.\n # ATTENTION : Encoding is platform dependant. See the open() manual\n for row in csv.reader(open(csv_file), delimiter='\\t'):\n # Destruct a row into its parts, they will be of type str\n onset, stype, f, *meta = row\n\n # If this is the first encounter of this stimuli then preload it\n if (stype, f) not in hash_table:\n hash_table[stype, f] = load_stimuli(stype, f, bp, args)\n hash_table[stype, f].preload()\n\n # Then push relevant events based on the type\n events.put((int(onset), stype, f, (stype, f), meta))\n\n expyriment.control.start(skip_ready_screen=True,\n subject_id=args[\"--subject-id\"])\n\n good = expyriment.stimuli.Audio(bp + \"/correct.wav\")\n bad = expyriment.stimuli.Audio(bp + \"/incorrect.wav\")\n good.preload()\n bad.preload()\n\n show_text(\"Waiting for scanner trigger\", args).present()\n kb.wait_char('t')\n\n # Start the experiment clock and loop through the events\n clock = expyriment.misc.Clock()\n last_right_pos = -1\n has_played = False\n while not events.empty():\n onset, stype, id, (stype, f), *meta = events.get()\n\n # If it's still too early, then wait for the onset but log keypresses\n while clock.time < (onset - 1):\n k = kb.check()\n if k is not None:\n exp.data.add([clock.time, \"keypressed\", k])\n if (not has_played) and (stype == \"oddity\" or stype == \"oddity-faces\"):\n has_played = True\n if k == 114:\n if last_right_pos in [0, 1, 5]:\n good.present()\n elif last_right_pos in [2, 3, 4]:\n bad.present()\n elif k == 108:\n if last_right_pos in [2, 3, 4]:\n good.present()\n elif last_right_pos in [0, 1, 5]:\n bad.present()\n\n # When time has come, present the stimuli and log that you just did so\n reported_time = hash_table[stype, f].present()\n if (stype == \"oddity\" or stype == \"oddity-faces\"):\n last_right_pos = int(meta[0][0])\n has_played = False\n exp.data.add(list([clock.time, stype, id, onset, reported_time] + meta[0]))\n\n # Now the experiment is done, terminate the exp\n expyriment.control.end('Merci !', 2000)\n return 0", "def main(mzml_file):\n run = pymzml.run.Reader(mzml_file)\n print(\n \"\"\"\nSummary for mzML file:\n {file_name}\nRun was measured on {start_time} using obo 
version {obo_version}\nFile contains {spectrum_count} spectra\n \"\"\".format(\n **run.info\n )\n )", "def main(um_file, ptl_file, wl_min_r=0.08, wl_max_r=50.0, wl_n_bins=22, verbose=True):\n # Read in the UM mock catalog\n um_mock = Table(np.load(um_file))\n if verbose:\n print(\"# Load in UM mock catalog: {}\".format(um_file))\n print(\"# Dealing with {} galaxies\".format(len(um_mock)))\n # Read in the particle table\n sim_particles = Table(np.load(ptl_file))\n if verbose:\n print(\"# Load in particle table: {}\".format(ptl_file))\n print(\"# Dealing with {} particles\".format(len(sim_particles)))\n\n # Output file name\n um_pre, _ = os.path.splitext(um_file)\n ptl_pre, _ = os.path.splitext(ptl_file)\n n_ptl = ptl_pre.split('_')[-1]\n precompute_out = \"{}_{}_r_{:4.2f}_{:4.1f}_{:2d}bins.npy\".format(\n um_pre, n_ptl, wl_min_r, wl_max_r, wl_n_bins\n )\n if verbose:\n print(\"# Output file name : {}\".format(precompute_out))\n\n # Run precompute\n if 'smdpl' in ptl_file:\n mass_encl = vagc.precompute_wl_smdpl(\n um_mock, sim_particles, wl_min_r=wl_min_r, wl_max_r=wl_max_r,\n wl_n_bins=wl_n_bins)\n elif 'mdpl2' in ptl_file:\n mass_encl = vagc.precompute_wl_mdpl2(\n um_mock, sim_particles, wl_min_r=wl_min_r, wl_max_r=wl_max_r,\n wl_n_bins=wl_n_bins)\n else:\n raise NameError(\"# Wrong simulation: [smdpl/mdpl2]\")\n\n np.save(precompute_out, mass_encl)", "def batch_RMSE_sample_pts(src_dir, overwrite, run_pairs_f, dryrun):\n \n def submit_job(src_dir, pair_dir):\n # Get DEMs in date order\n dem_files = get_dems(os.path.join(src_dir, pair_dir)) \n if len(dem_files) == 2:\n dem1, dem2 = dem_files[0], dem_files[1]\n \n # Build cmd\n cmd = 'qsub -v p1=\"{}\",p2=\"{}\",p3=\"{}\" ~/scratch/code/coreg/qsub_RMSE_sample_pts.sh'.format(dem1, dem2, method)\n \n if dryrun:\n print(cmd)\n else:\n p = subprocess.Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)\n output = p.stdout.read()\n print(output)\n else:\n logging.info(pair_dir)\n logging.info('Incorrect number of DEM files found: {}'.format(len(dem_files)))\n pass\n \n \n data_dir, method = os.path.split(src_dir)\n \n # List subdirectory names\n pairs = os.listdir(src_dir)\n pairs = [x for x in pairs if os.path.isdir(os.path.join(src_dir, x))]\n if run_pairs_f:\n pairs = constrict_pairs(run_pairs_f, pairs)\n\n for pair_dir in pairs:\n print(pair_dir)\n \n files = os.listdir(os.path.join(src_dir, pair_dir))\n rmse_match = [x for x in files if 'rmse.txt' in x]\n# print('RMSE match files found: {}'.format(len(rmse_match)))\n \n if len(rmse_match) == 0:\n submit_job(src_dir, pair_dir)\n elif overwrite:\n submit_job(src_dir, pair_dir)\n else:\n pass", "def main():\n\tparser = argparse.ArgumentParser(description=\"Estimate the efferents modulation induced by EES and afferent input together\")\n\tparser.add_argument(\"eesFrequency\", help=\"ees frequency\", type=float, choices=[gt.Range(0,1000)])\n\tparser.add_argument(\"eesAmplitude\", help=\"ees amplitude (0-600] or %%Ia_II_Mn\")\n\tparser.add_argument(\"species\", help=\"simulated species\", choices=[\"rat\",\"human\"])\n\tparser.add_argument(\"inputFile\", help=\"neural network structure file (e.g. 
fsSFrFfMnArtModHuman.txt)\")\n\tparser.add_argument(\"name\", help=\"name to add at the output files\")\n\tparser.add_argument(\"--mnReal\", help=\"Real Mn flag, IntFire Mn otherwise\",action=\"store_true\")\n\tparser.add_argument(\"--simTime\", help=\"simulation time\", type=int, default=1000)\n\tparser.add_argument(\"--burstingEes\", help=\"flag to use burst stimulation\", action=\"store_true\")\n\tparser.add_argument(\"--nPulsesPerBurst\", help=\"number of pulses per burst\", type=int, default=5)\n\tparser.add_argument(\"--burstsFrequency\", help=\"stimulation frequency within bursts\",type=float, default=600, choices=[gt.Range(0,1000)])\n\tparser.add_argument(\"--seed\", help=\"positive seed used to initialize random number generators (default = time.time())\", type=int, choices=[gt.Range(0,999999)])\n\targs = parser.parse_args()\n\n\tif args.seed is not None: sh.save_seed(args.seed)\n\telse: sh.save_seed(int(time.time()))\n\n\t# Import simulation specific modules\n\tfrom simulations import ForSimSpinalModulation\n\tfrom NeuralNetwork import NeuralNetwork\n\tfrom EES import EES\n\tfrom BurstingEES import BurstingEES\n\tfrom NetworkStimulation import NetworkStimulation\n\n\t# Initialze variables...\n\tif args.eesAmplitude[0]==\"%\": eesAmplitude = [float(x) for x in args.eesAmplitude[1:].split(\"_\")]\n\telse: eesAmplitude = float(args.eesAmplitude)\n\tname = args.name+\"_amp_\"+args.eesAmplitude+\"_freq_\"+str(args.eesFrequency)\n\tpc = h.ParallelContext()\n\tnn=NeuralNetwork(pc,args.inputFile)\n\tif not args.burstingEes: ees = EES(pc,nn,eesAmplitude,args.eesFrequency,pulsesNumber=100000,species=args.species)\n\telse: ees = BurstingEES(pc,nn,eesAmplitude,args.eesFrequency,args.burstsFrequency,args.nPulsesPerBurst,species=args.species)\n\tees.get_amplitude(True)\n\tprint \"The stimulation frequency is: \",args.eesFrequency,\" Hz\"\n\tafferentsInput = None\n\n\tcellsToRecord = {}\n\tcellsToRecord['Iaf'] = nn.cells['SOL']['Iaf']\n\tcellsToRecord['MnS']=nn.cells['SOL']['MnS']\n\t# cellsToRecord['MnFf']=nn.cells['SOL']['MnFf']\n\t# cellsToRecord['MnFr']=nn.cells['SOL']['MnFr']\n\t# modelTypes = {\"MnS\":\"artificial\",\"MnFr\":\"artificial\",\"MnFf\":\"artificial\",\"Iaf\":\"artificial\"}\n\tmodelTypes = {\"MnS\":\"artificial\",\"Iaf\":\"artificial\"}\n\tsimulation = ForSimSpinalModulation(pc,nn,cellsToRecord,modelTypes, afferentsInput, None, None, args.simTime)\n\tsimulation.set_results_folder(\"../../results/AffEffModSweap/\")\n\tsimulation.run()\n\tsimulation.raster_plot(name,False)\n\tcomm.Barrier()\n\n\tsimulation.save_results(name)", "def main():\n # Initialize key variables\n alldata = [[\n 'Meet', 'City', 'Country', 'Course', 'Event ID', 'Distance', 'Stroke',\n 'Round', 'Gender', 'Firstname', 'Lastname', 'Birthyear', 'Height cm',\n 'Weight Kg', 'BMI', 'Speed / Kg', 'Speed m/s', 'Time']]\n finadata = []\n olympicdata = []\n ts_start = int(time.time())\n\n # Get filename\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-l', '--lenex_directory',\n help='Name of directory with LENEX XML files.',\n type=str, required=True)\n parser.add_argument(\n '-o', '--olympic_directory',\n help='Name of directory with Olympic XLSX files.',\n type=str, required=True)\n parser.add_argument(\n '-p', '--profile_directory',\n help='Name of directory with athlete profiles.',\n type=str, required=True)\n parser.add_argument(\n '-d', '--database_file',\n help='Name of database file.',\n type=str, required=True)\n args = parser.parse_args()\n lenex_directory = args.lenex_directory\n 
profile_directory = args.profile_directory\n database_file = args.database_file\n olympic_directory = args.olympic_directory\n\n # Get the profiles\n profiles = _read_profiles(profile_directory)\n\n # Process Fina data\n finadata = _lenex(lenex_directory, profiles)\n\n # Process Olympic data\n olympicdata = _olympic(olympic_directory, profiles)\n\n # Get all data\n alldata.extend(finadata)\n alldata.extend(olympicdata)\n\n # Create output file\n with open(database_file, 'w') as f_handle:\n writer = csv.writer(f_handle, delimiter='|')\n writer.writerows(alldata)\n\n # Print status\n print('Swimmer event results created: {}'.format(len(alldata) - 1))\n print('Duration: {}'.format(int(time.time() - ts_start)))", "def main() -> None:\n args = _get_arguments()\n\n file_level_logging = logging.DEBUG if args.log_to_file else None\n setup_logger(logging.INFO, file_level_logging)\n\n if not os.path.exists(args.smiles):\n mol = Molecule(smiles=args.smiles)\n if mol.rd_mol is None:\n logger().error(\n f\"The --smiles argument ({args.smiles})\"\n \" does not point to an existing file or is a valid RDKit SMILES.\"\n \" Cannot start retrosynthesis planning.\"\n )\n return\n\n if args.nproc:\n _multiprocess_smiles(args)\n return\n\n multi_smiles = os.path.exists(args.smiles)\n\n finder = AiZynthFinder(configfile=args.config)\n _select_stocks(finder, args)\n post_processing = _load_postprocessing_jobs(args.post_processing)\n finder.expansion_policy.select(args.policy or finder.expansion_policy.items[0])\n if args.filter:\n finder.filter_policy.select(args.filter)\n else:\n finder.filter_policy.select_all()\n\n params = [\n args.smiles,\n finder,\n args.output,\n args.cluster,\n args.route_distance_model,\n post_processing,\n args.checkpoint,\n ]\n if multi_smiles:\n _process_multi_smiles(*params)\n else:\n params = params[:-1]\n _process_single_smiles(*params)", "def __main__():\n try:\n gff_file = sys.argv[1]\n mat_file = sys.argv[2]\n except:\n print __doc__\n sys.exit(-1)\n\n genes, transcripts, exons, utr3, utr5, cds = GFFParse(gff_file) \n gene_models = CreateGeneModels(genes, transcripts, exons, utr3, utr5, cds)\n # TODO Write to matlab/octave struct instead of cell arrays.\n sio.savemat(mat_file, \n mdict=dict(genes=gene_models), \n format='5', \n oned_as='row')", "def main():\n\n\t# Script arguments... 
\n\t\"\"\" If running as standalone, hardcode theWorkspace and inFile \"\"\"\n\ttheWorkspace = arcpy.GetParameterAsText(0)\n\tif not theWorkspace:\n\t\ttheWorkspace = r\"d:\\_dataTest\"\n\ttheWorkspace = r\"d:\\_dataTest\"\n\tarcpy.env.workspace = theWorkspace\n\tarcpy.env.overwriteOutput = True\n\toutWorkspace = os.path.join(theWorkspace, \"_repair\")\n\n\tinFile = arcpy.GetParameterAsText(1)\n\tif not inFile:\n\t\tinFile = \"updateMultipleSourcePaths.csv\"\n\t#inFile = \"FixSource4.csv\"\n\t#inFile = os.path.join(theWorkspace, inFile) + \".csv\"\n\t# opens the infile.csv, read only; then creates tuple of inFile\n\t#f = open(inFile, \"r\") \n\t#update_list = [tuple(line.strip().split(\",\") for line in f)]\n\n\n\tmxd = None\n\toutMXDName = \"none\"\n\tnewPath = []\n\t# makes sure the .csv file exists\n\tif arcpy.Exists(inFile):\n\t\tmyMsgs (\"Repair source list: \" + inFile)\n\t\t# walks thru the workspace to create list of files \n\t\tfor root, dirs, files in os.walk(theWorkspace): \n\t\t\tif root == outWorkspace:\n\t\t\t\tprint(\"heh now\")\n\t\t\t\tpass\n\t\t\t# creates list of .mxd's and works thru them\n\t\t\tmxdList = arcpy.ListFiles(\"*.mxd\")\n\t\t\tfor fileName in mxdList:\n\t\t\t\tfullPath = os.path.join(root, fileName) \n\t\t\t\tmxd = arcpy.mapping.MapDocument(fullPath)\n\t\t\t\tmyMsgs (\"*** Processing mxd: \" + fullPath)\n\t\t\t\t#mxd.findAndReplaceWorkspacePaths(\"v:\\\\\", \"\\\\\\\\dfg.alaska.local\\\\gis\\\\Anchorage\\\\gisshare\\\\\", validate=False)\n\t\t\t\t#mxd.findAndReplaceWorkspacePaths(\"t:\\\\\", \"\\\\\\\\dfg.alaska.local\\\\gis\\\\Anchorage\\\\GISStaff\\\\\", validate=False)\n\t\t\t\t#mxd.findAndReplaceWorkspacePaths(\"u:\\\\\", \"\\\\\\\\dfg.alaska.local\\\\gis\\\\Anchorage\\\\GISStaff\\\\\", validate=False)\n\t\t\t\t# New output mxd....\n\t\t\t\tbasename, extension = os.path.splitext(fileName)\n\t\t\t\toutMXDName = os.path.join(outWorkspace, (str(basename) + \"_fix.mxd\"))\n\t\t\t\t# create list of the tables since they are handle differently\n\t\t\t\ttheTables = arcpy.mapping.ListTableViews(mxd)\n\t\t\t\t# Loops thru layers, checks for broken links and tries to repai\n\t\t\t\tlyrList = arcpy.mapping.ListLayers(mxd)\n\t\t\t\tfor lyr in lyrList:\n\t\t\t\t\tif lyr.isBroken:\n\t\t\t\t\t\tif lyr.isGroupLayer or (\"Events\" in lyr.name):\n\t\t\t\t\t\t\tprint(\"...skipping group or event\")\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t#print(lyr.isServiceLayer)\n\t\t\t\t\t\tif lyr.isServiceLayer:\n\t\t\t\t\t\t\tif lyr.supports(\"SERVICEPROPERTIES\"):\n\t\t\t\t\t\t\t\tcnt = 0\n\t\t\t\t\t\t\t\tfor i, j in lyr.serviceProperties.iteritems():\n\t\t\t\t\t\t\t\t\tif cnt == 2:\n\t\t\t\t\t\t\t\t\t\tdataSource = str(j)\n\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tcnt += 1 \n\t\t\t\t\t\t\t\tprint(\"sees this as service....using findAndReplsWorkspacePath\")\n\t\t\t\t\t\t\t\tnewPath = findUpdatePath(inFile, dataSource)\n\t\t\t\t\t\t\t\tlyr.findAndReplaceWorkspacePath(lyr.dataSource, newPath, False)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprint(\"--> a service layer but no SERVICE PROPOERTIES\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(lyr.dataSource)\n\t\t\t\t\t\t\tnewPath = findUpdatePath(inFile, lyr.dataSource)\n\t\t\t\t\t\t\tnewDSPath, newDSName = os.path.split(newPath[0])\n\t\t\t\t\t\t\tprint(\"..newDSPAth \" + newDSPath)\n\t\t\t\t\t\t\tprint(\"..newDSName \" + newDSName)\n\t\t\t\t\t\t\tsameType = newPath[1]\n\t\t\t\t\t\t\tprint(\" same type? 
\" + str(sameType))\n\t\t\t\t\t\t\tcvrList = [r\"\\arc\", r\"\\polygon\", r\"\\region\", r\"\\point\", r\"\\tic\" ]\n\t\t\t\t\t\t\t#print newDSPath\n\t\t\t\t\t\t\tif newPath == \"no match\":\n\t\t\t\t\t\t\t\tprint(\"...no match to: \" + lyr.dataSource)\n\t\t\t\t\t\t\t\tnewPath[0] = \"not found\"\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\telif lyr.supports(\"dataSource\") and lyr.supports(\"datasetName\"):\n\t\t\t\t\t\t\t\tif lyr in theTables:\n\t\t\t\t\t\t\t\t\tprint(\"thinks its a table....using findAndReplsWorkspacePath\")\n\t\t\t\t\t\t\t\t\tlyr.findAndReplaceWorkspacePath(lyr.dataSource, newPath, False) \n\t\t\t\t\t\t\t\telif lyr.isRasterLayer:\n\t\t\t\t\t\t\t\t\tprint(\"thinks its a raster....using findAndReplsWorkspacePath\")\n\t\t\t\t\t\t\t\t\t#lyr.replaceDataSource(newPath, \"RASTER_WORKSPACE\", lyr.datasetName, False)\n\t\t\t\t\t\t\t\t\tlyr.findAndReplaceWorkspacePath(lyr.dataSource, newPath, False)\n\t\t\t\t\t\t\t\telif lyr.supports(\"dataSource\") and lyr.supports(\"datasetName\"):\n\t\t\t\t\t\t\t\t\tif not sameType and newPath[1] == \"gdb\":\n\t\t\t\t\t\t\t\t\t\tprint(\"..................moving to fgdb\")\n\t\t\t\t\t\t\t\t\t\tlyr.replaceDataSource(newDSPath, \"FILEGDB_WORKSPACE\", newDSName, False) \n\t\t\t\t\t\t\t\t\telif r\".shp\" in lyr.dataSource:\n\t\t\t\t\t\t\t\t\t\tprint(\"thinks its a shape\")\n\t\t\t\t\t\t\t\t\t\tlyr.replaceDataSource(newDSPath, \"SHAPEFILE_WORKSPACE\", lyr.datasetName, False)\n\t\t\t\t\t\t\t\t\telif r\".sde\" in lyr.dataSource:\n\t\t\t\t\t\t\t\t\t\tprint(\"thinks its a sde\")\n\t\t\t\t\t\t\t\t\t\tlyr.replaceDataSource(newDSPath, \"SDE_Workspace\", lyr.datasetName, False)\n\t\t\t\t\t\t\t\t\telif r\".mdb\" in lyr.dataSource:\n\t\t\t\t\t\t\t\t\t\tprint(\"thinks its a pgdb\")\n\t\t\t\t\t\t\t\t\t\tlyr.replaceDataSource(newDSPath, \"ACCESS_WORKSPACE\", lyr.datasetName, False)\n\t\t\t\t\t\t\t\t\telif r\".gdb\" in lyr.dataSource:\n\t\t\t\t\t\t\t\t\t\tprint(\"thinks its a fgdb\")\n\n\t\t\t\t\t\t\t\t\t\tlyr.replaceDataSource(newDSPath, \"FILEGDB_WORKSPACE\", lyr.datasetName, False)\n\t\t\t\t\t\t\t\t\telif sameType:\n\t\t\t\t\t\t\t\t\t\tfor cvr in cvrList:\n\t\t\t\t\t\t\t\t\t\t\tif cvr in lyr.dataSource:\n\t\t\t\t\t\t\t\t\t\t\t\tprint(\"to WS sametype is True\")\n\t\t\t\t\t\t\t\t\t\t\t\tlyr.replaceDataSource(newDSPath, \"ARCINFO_WORKSPACE\", newDSName, False)\n\t\t\t\t\t\t\t\t\telif not sameType:\n\t\t\t\t\t\t\t\t\t\tfor cvr in cvrList:\n\n\t\t\t\t\t\t\t\t\t\t\tlyr.replaceDataSource(newDSPath, \"FILEGDB_WORKSPACE\", newDSName, False)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\"\"\"else:\n newPath[0] = \"not found\" \"\"\"\n\t\t\t\t\t\t\tprint(\" **** the new data source: \" + newPath[0])\n\t\t\t\t\t\t\tprint(\"\")\n\n\t\t\t\tprint(outMXDName)\n\t\t\t\t#mxd.saveACopy(outMXDName, '10.1')\n\t\t\tif arcpy.Exists(outMXDName):\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\toutMXDName.save()\n\t\t\t\telse:\n mxd.saveACopy(outMXDName, '10.1')\n\t\t\t\tdel mxd\n\telse:\n\t\tmyMsgs (\"Repair source list: \" + inFile + \" does not exit.\")\n\n\tmyMsgs('!!! Success !!! 
')", "def run_pyfuse(self, pyfuse):\n# get pyfuse model instance\n# if not isinstance(pyfuse, pyfuse_Model):\n# raise Exception('Ass a pyfuse model instance to perform \\\n# sensitivity')\n\n try:\n self.pars\n except:\n raise PystanInputError(\"Use the modpar class when working \"\n \"with the pyfuse model environment\")\n\n #run model, all outputs saved in hdf5\n for run in range(self.parset2run.shape[0]):\n #create pardict for pyfuse input from self._parmap\n par2pyfuse = {}\n for i in range(self._ndim):\n par2pyfuse[self._parmap[i]] = self.parset2run[run, i]\n #run the pyfuse model with new pars\n print(self._methodname, 'Run'+str(run))\n print('Simulation %d of %d is \\\n running...' % (run + 1, self.parset2run.shape[0]))\n pyfuse.run(new_pars = par2pyfuse,\n run_id = self._methodname + 'Run' + str(run))\n\n print('All simulations are performed and saved in hdf5. You can now \\\n transform the output data to an evaluation criterion.')", "def execute(self, parameters, messages):\n \n #CHECK LICENSING\n #Advanced License\n status = arcpy.SetProduct('arcInfo')\n if status == 'CheckedOut':\n pass\n if status == 'AlreadyInitialized':\n pass\n if status == 'NotLicensed':\n arcpy.ExcecuteError(\"ERROR: ArcGIS Advanced licence is required to run this tool.\")\n if status == 'Failed':\n arcpy.ExcecuteError(\"ERROR: ArcGIS Advanced licence is required to run this tool.\")\n \n #Extensions\n if arcpy.CheckExtension(\"Spatial\") == \"Available\":\n arcpy.CheckOutExtension(\"Spatial\")\n else:\n arcpy.ExcecuteError(\"ERROR: The Spatial Analyst extension is required to run this tool.\")\n \n arcpy.env.overwriteOutput = True\n \n rapid_out_folder = parameters[0].valueAsText\n Drainage_Lines = parameters[1].valueAsText\n Stream_ID_DrainageLine = parameters[2].valueAsText\n Next_Down_ID = parameters[3].valueAsText\n length_field_DrainageLine = parameters[4].valueAsText\n Slope_field_DrainageLine = parameters[5].valueAsText\n Catchment_Features = parameters[6].valueAsText\n Stream_ID_Catchments = parameters[7].valueAsText\n Input_Reservoirs = parameters[8].valueAsText\n \n \n script_directory = os.path.dirname(__file__)\n arcpy.ImportToolbox(os.path.join(os.path.dirname(script_directory), \"RAPID Tools.pyt\"))\n \n #Create Network Connecitivty File\n out_network_connectivity_file = os.path.join(rapid_out_folder, \"rapid_connect.csv\")\n arcpy.CreateNetworkConnectivityFile_RAPIDTools(Drainage_Lines, \n Stream_ID_DrainageLine, \n Next_Down_ID,\n out_network_connectivity_file)\n # Create subset file\n out_subset_file = os.path.join(rapid_out_folder, \"riv_bas_id.csv\") \n arcpy.CreateSubsetFile_RAPIDTools(Drainage_Lines, Stream_ID_DrainageLine, out_subset_file)\n \n \n #Create Muksingum Parameters\n # Process: Muksingum k\n out_muskingum_kfac_file = os.path.join(rapid_out_folder, \"kfac.csv\")\n arcpy.CreateMuskingumKfacFile_RAPIDTools(in_drainage_line_features=Drainage_Lines, \n stream_ID=Stream_ID_DrainageLine, \n length=length_field_DrainageLine, \n slope=Slope_field_DrainageLine, \n co=1000.0/3600.0, \n in_formula=\"Eta*River Length/Sqrt(River Slope) [0.05, 0.95]\", \n in_network_connectivity_file=out_network_connectivity_file,\n out_muskingum_kfac_file=out_muskingum_kfac_file)\n \n out_muskingum_k_file = os.path.join(rapid_out_folder, \"k.csv\")\n arcpy.CreateMuskingumKFile_RAPIDTools(0.35, \n out_muskingum_kfac_file, \n out_muskingum_k_file)\n \n # Process: Muskingum x \n out_muskingum_x_file = os.path.join(rapid_out_folder, \"x.csv\")\n arcpy.CreateMuskingumXField_RAPIDTools(Drainage_Lines, 
Stream_ID_DrainageLine,\"0.3\", Input_Reservoirs)\n arcpy.CreateMuskingumXFile_RAPIDTools(Drainage_Lines, Stream_ID_DrainageLine, out_muskingum_x_file)\n\n if Catchment_Features:\n lsm_grid_directory = os.path.join(script_directory, \"lsm_grids\")\n \n # Create ECMWF Low Res Weight Table\n low_resolution_ecmwf_grid = os.path.join(lsm_grid_directory, \"runoff_ecmwf_tco639_grid.nc\")\n low_resolution_weight_table = os.path.join(rapid_out_folder, \"weight_ecmwf_tco639.csv\") \n arcpy.CreateWeightTableFromECMWFRunoff_RAPIDTools(low_resolution_ecmwf_grid,\n out_network_connectivity_file,\n Catchment_Features,\n Stream_ID_Catchments,\n low_resolution_weight_table) \n\n # Create ECMWF High Res Weight Table\n high_resolution_ecmwf_grid = os.path.join(lsm_grid_directory, \"runoff_ecmwf_t1279_grid.nc\")\n high_resolution_weight_table = os.path.join(rapid_out_folder, \"weight_ecmwf_t1279.csv\") \n arcpy.CreateWeightTableFromECMWFRunoff_RAPIDTools(high_resolution_ecmwf_grid,\n out_network_connectivity_file,\n Catchment_Features,\n Stream_ID_Catchments,\n high_resolution_weight_table) \n \n # Create ERA Interim Weight Table\n era_interim_ecmwf_grid = os.path.join(lsm_grid_directory, \"runoff_era_t511_grid.nc\")\n era_interim_weight_table = os.path.join(rapid_out_folder, \"weight_era_t511.csv\") \n arcpy.CreateWeightTableFromECMWFRunoff_RAPIDTools(era_interim_ecmwf_grid,\n out_network_connectivity_file,\n Catchment_Features,\n Stream_ID_Catchments,\n era_interim_weight_table) \n\n # Flowline to point\n out_point_file = os.path.join(rapid_out_folder, \"comid_lat_lon_z.csv\")\n arcpy.FlowlineToPoint_RAPIDTools(Drainage_Lines, out_point_file)\n\n return", "def main():\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n parser.add_argument('-V', '--version', action='version', version=VERSION)\n\n\n file_group = parser.add_argument_group('Input Files')\n file_group.add_argument('-f', dest='traj', required=True, type=str,\n help='trajectory file (XTC/TRR/GRO/PDB ...)')\n file_group.add_argument('-s', dest='tpr', required=True, type=str,\n help='tpr file (TPR)')\n file_group.add_argument('-o', dest='outpath', type=str,\n help='name of the mapped trajectory (XTC/GRO)')\n file_group.add_argument('-m', dest='map_file', type=str,\n help='.mapping file or path to directory of .map files')\n\n mapping_group = parser.add_argument_group('Mapping Options')\n mapping_group.add_argument('-mode', dest='mode', required=False, type=str,\n help='COG or COM mapping', default='COG')\n mapping_group.add_argument('-pbc', action='store_true', required=False, dest='pbc_complete',\n help='complete pbc with MDAnalysis; this is slow!')\n mapping_group.add_argument('-mols', dest='mol_names', required=True, type=str, nargs='+',\n help='names of molecules to consider when mapping as in the [moleculetypes] directive')\n mapping_group.add_argument('-add_H', dest='h_association', nargs='+', type=lambda s: s.split(':'),\n default=[],\n help='atom-types corresponding to CH3, CH2, CH1 for aliphatic groups and CH2d for double bonds.')\n args = parser.parse_args()\n\n print(\"INFO - Loading universe\")\n # load trajectory\n init_universe = UniverseHandler(args.mol_names,\n args.tpr,\n args.traj,\n in_memory=True)\n if args.pbc_complete:\n print(\"INFO - PBC completing trajectory\")\n init_universe.pbc_complete()\n\n if args.h_association:\n print(\"INFO - Adding Hydrogen to united-atoms\")\n treated_atoms = init_universe.shift_united_atom_carbons(dict(args.h_association))\n else:\n 
treated_atoms = np.array([])\n\n print(\"INFO - Loading mapping files\")\n #determine if we have a single .mapping file or a directory of .map files\n map_path = pathlib.Path(args.map_file)\n if map_path.is_file() == True:\n with open(args.map_file, \"r\") as _file:\n lines = _file.readlines()\n elif map_path.is_dir() == True:\n l = []\n for i in map_path.glob('*.map'):\n with open(i, \"r\") as _file:\n l.append(_file.readlines())\n if len(l) > 0:\n lines = [item for sublist in l for item in sublist]\n else:\n msg = (\"Couldn't find any .map files in the directory given.\"\n \"Please check the -m argument!\")\n raise IOError(msg)\n else:\n msg = (\"\\nCannot determine if you have given me a single .mapping file\\n\"\n \"or a directory of .map files. Please check!\\n\")\n raise IOError(msg)\n\n mappings = read_mapping(lines)[0]\n\n print(\"INFO - Mapping universe - indices\")\n # first mapp the atom indices\n mapped_atoms, bead_idxs = forward_map_indices(init_universe,\n mappings)\n n_frames = len(init_universe.trajectory)\n\n print(\"INFO - Mapping universe - positions\")\n mapped_atoms = numba.typed.List(mapped_atoms)\n bead_idxs = numba.typed.List(bead_idxs)\n # extract the position array from universe\n # if it's not a trajectory we have to emulate\n # a single frame\n path = pathlib.Path(args.traj)\n file_extension = path.suffix.casefold()[1:]\n if file_extension in [\"xtc\", \"trr\"]:\n positions = init_universe.trajectory.coordinate_array\n else:\n positions = init_universe.atoms.positions\n positions = positions.reshape(1, -1, 3)\n\n mapped_trajectory = forward_map_positions(mapped_atoms,\n bead_idxs,\n positions,\n n_frames,\n args.mode,\n treated_atoms)\n\n print(\"INFO - Mapping universe - building pos-array\")\n cg_universe = create_new_universe(init_universe, mapped_trajectory, mappings)\n\n # write coordinate\n print(\"INFO - Writing CG trajectory\")\n if args.traj:\n path = pathlib.Path(args.traj)\n file_extension = path.suffix.casefold()[1:]\n else:\n file_extension = \"xtc\"\n\n if file_extension in [\"xtc\", \"trr\"]:\n cg_beads = cg_universe.atoms\n with mda.Writer(args.outpath,\n multiframe=True,\n n_atoms=len(cg_universe.atoms)) as mapped:\n for time_step in cg_universe.trajectory:\n mapped.write(cg_beads)\n else:\n cg_universe.atoms.positions = cg_universe.trajectory.coordinate_array[0]\n cg_beads = cg_universe.atoms\n cg_universe.atoms.dimensions = init_universe.atoms.dimensions\n with mda.Writer(args.outpath, n_atoms=len(cg_universe.atoms)) as mapped:\n mapped.write(cg_beads)", "def evol_run(self, pkg_setup):\n\n params = specs.ODict()\n params[(\"tune\", \"soma\", \"Ra\")] = [100.0 * 0.5, 100 * 1.5]\n\n amps = [0.0, 0.65] # amplitudes\n times = [100, 200] # start times\n dur = 50 # ms\n targetRates = [0.0, 81.0]\n\n # initial cfg set up\n initCfg = {} # specs.ODict()\n initCfg[\"duration\"] = 200 * len(amps)\n initCfg[(\"hParams\", \"celsius\")] = 37\n\n initCfg[\"savePickle\"] = True\n initCfg[\"saveJson\"] = False\n initCfg[\"saveDataInclude\"] = [\"simConfig\", \"netParams\", \"net\", \"simData\"]\n\n initCfg[(\"IClamp1\", \"pop\")] = \"ITS4\"\n initCfg[(\"IClamp1\", \"amp\")] = amps\n initCfg[(\"IClamp1\", \"start\")] = times\n initCfg[(\"IClamp1\", \"dur\")] = 100\n\n initCfg[(\"analysis\", \"plotfI\", \"amps\")] = amps\n initCfg[(\"analysis\", \"plotfI\", \"times\")] = times\n initCfg[(\"analysis\", \"plotfI\", \"dur\")] = dur\n initCfg[(\"analysis\", \"plotfI\", \"targetRates\")] = targetRates\n\n for k, v in params.items():\n initCfg[k] = v[0] # initialize 
params in cfg so they can be modified\n\n # fitness function\n fitnessFuncArgs = {}\n fitnessFuncArgs[\"targetRates\"] = targetRates\n\n def fitnessFunc(simData, **kwargs):\n targetRates = kwargs[\"targetRates\"]\n\n diffRates = [abs(x - t) for x, t in zip(simData[\"fI\"], targetRates)]\n fitness = np.mean(diffRates)\n\n print(\" Candidate rates: \", simData[\"fI\"])\n print(\" Target rates: \", targetRates)\n print(\" Difference: \", diffRates)\n\n return fitness\n\n # create Batch object with paramaters to modify, and specifying files to use\n b = Batch(cfgFile='src/cfg.py', netParamsFile='src/netParams.py', params=params, initCfg=initCfg)\n\n # Set output folder, grid method (all param combinations), and run configuration\n b.batchLabel = \"ITS4_evol\"\n b.saveFolder = \"/tmp/\" + b.batchLabel\n b.method = \"evol\"\n b.seed = 0\n b.runCfg = {\"type\": \"mpi_bulletin\", \"script\": \"init.py\"}\n b.evolCfg = {\n \"evolAlgorithm\": \"custom\",\n \"fitnessFunc\": fitnessFunc, # fitness expression (should read simData)\n \"fitnessFuncArgs\": fitnessFuncArgs,\n \"pop_size\": 2,\n \"num_elites\": 1, # keep this number of parents for next generation if they are fitter than children\n \"mutation_rate\": 0.4,\n \"crossover\": 0.5,\n \"maximize\": False, # maximize fitness function?\n \"max_generations\": 1,\n \"time_sleep\": 0.25, # wait this time before checking again if sim is completed (for each generation)\n \"maxiter_wait\": 20, # max number of times to check if sim is completed (for each generation)\n \"defaultFitness\": 1000, # set fitness value in case simulation time is over\n }\n # Run batch simulations\n b.run()", "def makexmlfunc(healpix,ra,dec,week1,week2,distance):\n\t\n\tif week1!=week2:\n\t\tidentity=\"%06d_%d_%d_w%03d_w%03d\" %(healpix,ra,dec,week1,week2)\n\t\tltcube=\"%s/lat_ltcube_weekly_w%03d_w%03d_p203_v001.fits\" %(cfg.home,week1,week2)\n\t\tspacecraft=\"%s/w%03d_w%03d_newspacecraft.fits\" %(cfg.ispace,week1,week2)\n\telse:\n\t\tidentity=\"%06d_%d_%d_w%03d\" %(healpix,ra,dec,week1)\n\t\tltcube=\"%s/lat_spacecraft_weekly_w%03d_p203_v001_ltcube.fits\" %(cfg.home,week1)\n\t\tspacecraft=\"%s/lat_spacecraft_weekly_w%03d_p202_v001.fits \" %(cfg.ispace,week1)\n\n\tregion_filtered=\"%s_region_filtered_gti.fits\" %(identity)\n\tfermisources=\"%s_fermisources_model.xml\" %(identity)\n\tinputmodel=\"%s_input_model.xml\" %(identity)\n\tfermis=\"%s_fermis.xml\" %identity\n\tresponse=\"P7REP_SOURCE_V15\"\n\tmakexmllog=\"%s_output_makexml.log\" %identity\n\tglobal extendedsource\n\tglobal numberofextendedsources\n\textendedlog=\"%s_number_of_extendedsources.log\" %identity\n\tExtendedList=\"ExtendedList.txt\"\n\tOthersList=\"OthersList.txt\"\n\n\t\n\twith open (makexmllog,'r') as outputFile: #opens the makexmllog file from makesyfunc. 
This document contains info about the extended sources.\n\t\t\n\t\tfor line in outputFile:\n\t\t\t\n\t\t\twith open (makexmllog,'r') as File:\n\t\t\t\tif line.startswith('Added')==True:\n\t\t\t\t\ta,b=line.split('and ')\t\n\t\t\t\t\tb1,b2,b3=b.split(' ')\n\t\t\t\t\n\t\t\t\t\tnumberofextendedsources=int(b1) #b1 is the number of extended sources\n\toutputFile.close()\n\toutputFile=open(inputmodel, 'w')\n\tprint numberofextendedsources\n\n\tif numberofextendedsources==1: #if there is an extended source\n\t\twith open (makexmllog,'r') as outputFile:\n\t\t\n\t\t\tfor line in outputFile:\n\t\t\t\n\t\t\t\twith open (makexmllog,'r') as File:\n\t\t\t\t\tif line.startswith('Extended')==True:\n\t\t\t\t\t\tprint line\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tc,d=line.split(' in')\n\t\t\t\t\t\n\t\t\t\t\t\tc1,c2,c3,c4=c.split(' ')\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\textendedsource=str(c3) #extracts the name of the extended source from makexmllog\n\t\n\n\t\t\n\n\n\t\toutputFile.close()\t\n\n\n\t\n\n\t\twith open(\"%s\" %fermisources) as thefile: #opens the xml file that was created from makesyfunc\n\t\t\tfor line in thefile:\n\t\t\t\tif line.startswith('\t<spatialModel file=\"%s.fits\"' %(extendedsource))==True:\n\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\tspecial=str.replace(line,'%s.fits'%extendedsource,'%s/%s.fits' %(cfg.homesy,extendedsource)) \n\t\t\t\t\tprint special #replace with the correct path to the extendedsource(Templates folder)\n\t\t\t\n\t\t\t\t\tspecial1=str.replace(special,'type=\"SpatialMap\"','type=\"SpatialMap\" map_based_integral=\"true\"')\n\t\t\t\t\tprint special1 #instruction from fermi tutorial, you must add map_based...\n\t\t\t\t\toutputFile=open(fermis, 'w') #write to fermis, the original xml with the right path to the extended source\n\t\t\t\t\twith open(\"%s\" %fermisources,'r') as infile:\n\t\t\t\t\t\tfor line in infile:\n\t\t\t\t\t\t\tif line.startswith('\t<spatialModel file=\"%s.fits\"' %(extendedsource))==False:\n\t\t\t\t\t\t\t\toutputFile.write(line)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\toutputFile.write(special1)\n\t\t\t\t\toutputFile.close()\n\t\t\t\t\t\t\t\t\t\n\n\n\t\t\t\n\t\toutputFile=open(inputmodel, 'w') #final xml file. 
contains the right path and the source info of \"your\" source.\n\t\twith open(fermis,'r') as infile:\n\t\t\tfor line in infile:\n\t\t\t\tif line.startswith('</source_library>')==False:\n\t\t\t\t\toutputFile.write(line)\n\t\t\t\t\t\t\t\n\t\toutputFile.write('\\n\\\n\t\t\t<!-- My sources -->\\n\\\n\t\t\t<source name=\"%f_%f\" type=\"PointSource\">\\n\\\n\t\t\t<spectrum type=\"PowerLaw\">\\n\\\n\t\t\t<parameter free=\"1\" max=\"1000.0\" min=\"0.001\" name=\"Prefactor\" scale=\"1e-09\" value=\"10\"/>\\n\\\n\t\t\t<parameter free=\"1\" max=\"-1.0\" min=\"-5.0\" name=\"Index\" scale=\"1.0\" value=\"-2.1\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"2000.0\" min=\"30.0\" name=\"Scale\" scale=\"1.0\" value=\"100.0\"/>\\n\\\n\t\t\t</spectrum>\\n\\\n\t\t\t<spatialModel type=\"SkyDirFunction\">\\n\\\n\t\t\t<parameter free=\"0\" max=\"360\" min=\"-360\" name=\"RA\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"90\" min=\"-90\" name=\"DEC\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t</spatialModel>\\n\\\n\t\t\t</source>\\n\\\n\t\t\t</source_library>\\n' % (ra,dec,ra,dec))\n\n\t\t\t\t\n\n\t\toutputFile.close()\n\t\n\t\twith open(\"%s_diffrsp.log\" % (identity), 'w') as outsyputFile: #run diffrsp if you have an extended source.\n\t\t\tsubprocess.call(['%s' %(cfg.pythoncommand),'gtdiffrsp.py', '%s' %(region_filtered),'%s' %(spacecraft), '%s' %inputmodel, '%s' %(response),'%s' %identity ],stdout=outsyputFile)\n\t\t\t\n\t\twith open(ExtendedList,\"a+\") as outsyFile:\n\t\t\toutsyFile.write(\"%d %f %f %d %d %f\\n\" %(healpix,ra,dec,week1,week2,distance))\n\t\t\t\t\t\n\tif numberofextendedsources==0: #if there is no extended source\n\t\toutputFile=open('%s' %(inputmodel), 'w') #write to inputmodel, \"your\" source\n\t\twith open('%s' %(fermisources),'r') as infile:\n\t\t\tfor line in infile:\n\t\t\t\tif line.startswith('</source_library>')==False:\n\t\t\t\t\toutputFile.write(line)\n\t\t\t\t\t\n\t\t\t\n\n\t\toutputFile.write('\\n\\\n\t\t\t<!-- My sources -->\\n\\\n\t\t\t<source name=\"%f_%f\" type=\"PointSource\">\\n\\\n\t\t\t<spectrum type=\"PowerLaw\">\\n\\\n\t\t\t<parameter free=\"1\" max=\"1000.0\" min=\"0.001\" name=\"Prefactor\" scale=\"1e-09\" value=\"10\"/>\\n\\\n\t\t\t<parameter free=\"1\" max=\"-1.0\" min=\"-5.0\" name=\"Index\" scale=\"1.0\" value=\"-2.1\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"2000.0\" min=\"30.0\" name=\"Scale\" scale=\"1.0\" value=\"100.0\"/>\\n\\\n\t\t\t</spectrum>\\n\\\n\t\t\t<spatialModel type=\"SkyDirFunction\">\\n\\\n\t\t\t<parameter free=\"0\" max=\"360\" min=\"-360\" name=\"RA\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"90\" min=\"-90\" name=\"DEC\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t</spatialModel>\\n\\\n\t\t\t</source>\\n\\\n\t\t\t</source_library>\\n' % (ra,dec,ra,dec))\n\n\t\toutputFile.close()\n\tif numberofextendedsources>1:\n\t\twith open(OthersList,\"a+\") as outsyFile:\n\t\t\toutsyFile.write(\"%d %f %f %d %d %f\\n\" %(healpix,ra,dec,week1,week2,distance))\n\t\n\tif numberofextendedsources==1:\n\t\toutsyputFile=open(extendedlog,'w') #write the number of extended sources and name in a file\n\t\toutsyputFile.write(\"%s\\n\\\n \t%s\"%(numberofextendedsources,extendedsource))\n\t\toutsyputFile.close()\n\n\tif numberofextendedsources !=1:\n\t\toutsyputFile=open(extendedlog,'w') #write the number of extended sources and name in a file\n\t\toutsyputFile.write(\"%s\" %(numberofextendedsources))\n\t\toutsyputFile.close()", "def prepare_runs(args):\n output_directory = _prepare_output_dir(args.output_directory)\n 
z_score_dir = args.z_score_dir\n region_list = args.region_list \n if args.region_list is None:\n try:\n flanking_region = int(args.flanking_region)\n except ValueError:\n logging.error('Flanking region argument needs to be an integer')\n sys.exit(COMMAND_LINE_ERROR)\n build = args.build\n bed_directory = args.bed_directory\n # Create the SNPList\n try:\n min_maf = float(args.maf)\n except:\n logging.error(\"Min Maf -m or --min-maf needs to be an floating point number\")\n sys.exit(COMMAND_LINE_ERROR)\n if args.region_list is not None:\n region_list = {}\n snp_list = []\n with open(args.region_list) as input_file:\n # When using no flaking region SNP must be valid, but it doesn't actually matter what it is, need to ensure that is actually the case.\n for i, line in enumerate(input_file):\n rsid = str(i)+ \"_\" + ''.join(line.strip().split(\"\\t\"))\n chromosome = line.strip().split(\":\")[0] \n snp = Snp(chromosome,\"1\",rsid)\n snp_list.append(snp)\n region_list[snp.rsid] = line.strip()\n else:\n snp_list = SnpList(args.snp_list, build)\n logging.info(snp_list)\n # Locus to process\n # population_to_extract_vcf\n if not args.annotation_only:\n no_flanking = args.flanking_units\n if no_flanking:\n raise NotImplementedError(\"Using a number of flanking SNPs instead of a region is not supported\")\n populations= args.populations.split(',')\n logging.info(\"Populations to process: {0}\".format(populations))\n loci = []\n gemini_databases = []\n output_vcfs = []\n for snp in snp_list:\n logging.info('Preparing output files for SNP {0}'.format(snp.rsid))\n locus = snp.rsid\n loci.append(locus)\n logging.info(\"Obtaining VCF file from the 1000 genomes project\")\n if region_list is not None:\n vcf = get_vcf_file(snp, string_region=region_list[locus])\n else: \n vcf = get_vcf_file(snp, flanking_region=flanking_region)\n for population in populations:\n tmp_vcf = extract_population_from_1000_genomes(vcf=vcf, super_population=population)\n z_score_file = get_relevant_zscore(snp.chrom, population, z_score_dir)\n pos_list_zscore = create_pos_hash_table(z_score_file)\n output_vcf = generate_zscore_and_vcf_output(output_directory=output_directory, zscore_hash=pos_list_zscore, vcf=tmp_vcf, locus=locus,population=population, multiply_rsquare=args.multiply_rsquare)\n if bed_directory is None:\n logging.info(\"Creating gemini database\")\n # TODO: Fix broxen gemini referenec\n gemini_databases.append(create_gemini_database(vcf=output_vcf))\n vcf_to_plink(locus, output_directory=output_directory, vcf=output_vcf, population=population)\n plink_to_ld_matrix(locus, output_directory=output_directory, population=population)\n logging.info(\"Generate transancestrals matrices\")\n generate_transancestral_output(loci, populations, output_directory)\n if bed_directory is None:\n logging.info(\"Generating annotation matrices to be used with Paintor\")\n logging.info(gemini_databases)\n generate_and_write_encode_annotations(databases=gemini_databases, output_directory=output_directory, loci=snp_list)\n else:\n logging.info(\"Annotation using bed files\")\n generate_bed_file_annotations(loci=loci, bed_directory=bed_directory, output_directory=output_directory) \n # So finally we need to fix the LD matrices for inputting into PAINTOR. 
\n\n with open(os.path.join(output_directory, 'input.files'), 'w') as out_f:\n for snp in snp_list:\n out_f.write(snp.rsid +'\\n')\n # Remove .tbi files\n for file in os.listdir('.'):\n if fnmatch.fnmatch(file, '*.tbi'):\n try:\n os.remove(file)\n except OSError:\n logging.warning(\"Could not remove a .tbi file from the 1000 genomes tabix run\")\n else: \n loci = []\n for snp in snp_list:\n loci.append(snp.rsid)\n if bed_directory is not None:\n logging.info(\"Annotation using bed files\")\n generate_bed_file_annotations(loci=loci, bed_directory=bed_directory, output_directory=output_directory) \n logging.info(\"Finemapping file preparation complete\")", "def run_psea(fname):\n ...", "def runSurvey():\n fieldFile = globals()['settings']['fieldFile']\n # Number of particles to launch\n numParticles = globals()['settings']['numParticles']\n # Radius of spherical simulation boundary used for launching and exiting\n rLim = globals()['settings']['rLim']\n # Particle stepping method\n steppingMethod = globals()['settings']['steppingMethod']\n # Coarseness of output grid that counts particle fluxes in simulation volume\n fluxGridCoarseness = globals()['settings']['fluxGridCoarseness']\n \n # B field in R and Z\n r, z, BR = fieldGrid('fields/Brs_' + fieldFile)\n _, _, BZ = fieldGrid('fields/Bzs_' + fieldFile)\n _, _, habitatBR = fieldGrid('fields/Brs_habitat_' + fieldFile)\n _, _, habitatBZ = fieldGrid('fields/Bzs_habitat_' + fieldFile)\n r = r[:-1]\n z = z[:-1]\n BR = BR[:-1,:-1] # I MAY CAUSE A BUG IN THE FUTURE\n BZ = BZ[:-1,:-1]\n habitatMax = np.max((habitatBR**2+habitatBZ**2)**.5)\n habitatPrescription = 30\n BR += habitatBR*habitatPrescription/habitatMax\n BZ += habitatBZ*habitatPrescription/habitatMax\n print('Habitat prescription (T):', habitatPrescription)\n Bmagnitude = (BR**2+BZ**2)**.5\n\n qms, vs = qmAndVelocitySpectrum(numParticles)\n if globals()['settings']['qmPrescribed']:\n qms = np.ones(numParticles)*globals()['settings']['qmPrescribed']\n if globals()['settings']['v0Prescribed']:\n vs = np.ones(numParticles)*globals()['settings']['v0Prescribed']\n\n startingPoints = [randomPointOnSphere(rLim) for _ in range(numParticles)]\n directions = [randomDirectionCos(-sp) for sp in startingPoints]\n\n # Simulate without magnetic field\n start = time.time()\n rReduced, zReduced, gridOff, _, habitatCrossingsOff, GDTcrossingsOff, gridOffUnscaled, _ = monteCarloRun(startingPoints, qms, vs, directions, BR, BZ, r, z, rLim, fluxGridCoarseness, 0)\n print('Time elapsed (s):', int(time.time()-start))\n \n # Simulate with magnetic field\n start = time.time()\n _, _, gridOn, trappedOn, habitatCrossingsOn, GDTcrossingsOn, gridOnUnscaled, trappedOnUnscaled = monteCarloRun(startingPoints, qms, vs, directions, BR, BZ, r, z, rLim, fluxGridCoarseness, steppingMethod)\n print('Time elapsed (s):', int(time.time()-start))\n # np.save('cache/{}particles_accel.npy'.format(numParticles), [rReduced, zReduced, gridOn])\n try:\n print('---\\nGDT crossing change: {}%'.format(round(100*(GDTcrossingsOn-GDTcrossingsOff)/GDTcrossingsOff, 3)))\n print('Habitat crossing change: {}%\\n---'.format(round(100*(habitatCrossingsOn-habitatCrossingsOff)/habitatCrossingsOff, 3)))\n except Exception as e:\n print(e)\n \n # plotDiff(r, z, Bmagnitude, gridOn, gridOff)\n plot6panel(r, z, rReduced, zReduced, Bmagnitude, gridOn, gridOff, trappedOn)", "def main():\n # Model setup\n source = np.array([1500, 8, 10, 5]) # assume source concentration and 3D coordinates\n u, pg_stability = 2, 'F' # setup environment\n sample_path = 
r\"data/ObservedData.csv\"\n # Build model object\n func = GaussianPlumeEAAI(lower=(10, -500, -500, 0), upper=(5000, 500, 500, 10), u=u,\n pg_stability=pg_stability, sample_path=sample_path)\n # Generate sample observed data\n func.generate_observed_data(source[0], source[1], source[2], source[3])\n\n # Reverse search source use observed data and PSO (assume unknown the source)\n pso_search_with_recommended_param(func)\n pso_search_with_optimized_param(func)", "def main() -> None:\n # The first thing to do is get the lines of the PyFlex file we are given.\n parser = Parser(filename=sys.argv[1])\n parsed_data = parser.ParseFile()\n\n # Upon retrieving the Parsed Data, assign the parsed data to the\n # Symbol Table.\n SymbolTable.RULESET = parsed_data['ruleset']\n SymbolTable.INSTRUCTIONS = parsed_data['instructions']\n SymbolTable.CODE = parsed_data['code']\n # SymbolTable.PrintTable()\n\n # Using the Generator backend, we can build the generated script\n generator = Generator()\n generator.GenerateNewScript()\n\n autopep8.fix_file(filename=generator.file_main)\n\n print(\"Generated Script can be found in {}\".format(generator.file_main))", "def main(command_line=True, **kwargs):\n # initialize some variables\n mag_file = ''\n meas_file=\"magic_measurements.txt\"\n user=\"\"\n specnum = 0\n samp_con = '1'\n labfield = 0\n er_location_name = ''\n codelist = None\n\n # get command line args\n if command_line:\n args=sys.argv\n if \"-h\" in args:\n print(main.__doc__)\n return False\n if \"-usr\" in args:\n ind=args.index(\"-usr\")\n user=args[ind+1]\n else:\n user=\"\"\n if '-F' in args:\n ind=args.index(\"-F\")\n meas_file=args[ind+1]\n if '-f' in args:\n ind=args.index(\"-f\")\n magfile=args[ind+1]\n print(\"got magfile:\", magfile)\n if \"-dc\" in args:\n ind=args.index(\"-dc\")\n labfield=float(args[ind+1])*1e-6\n phi=float(args[ind+2])\n theta=float(args[ind+3])\n if \"-ac\" in args:\n ind=args.index(\"-ac\")\n peakfield=float(args[ind+1])*1e-3\n if \"-spc\" in args:\n ind=args.index(\"-spc\")\n specnum=int(args[ind+1])\n if \"-loc\" in args:\n ind=args.index(\"-loc\")\n er_location_name=args[ind+1]\n if \"-ncn\" in args:\n ind=args.index(\"-ncn\")\n samp_con=sys.argv[ind+1]\n if '-LP' in args:\n ind=args.index(\"-LP\")\n codelist=args[ind+1]\n\n\n\n # lab process:\n\n # unpack key-word args if used as module\n if not command_line:\n user = kwargs.get('user', '')\n meas_file = kwargs.get('meas_file', 'magic_measurements.txt')\n magfile = kwargs.get('magfile', '')\n specnum = int(kwargs.get('specnum', 0))\n labfield = int(kwargs.get('labfield', 0)) *1e-6\n phi = int(kwargs.get('phi', 0))\n theta = int(kwargs.get('theta', 0))\n peakfield = kwargs.get('peakfield', 0)\n if peakfield:\n peakfield = float(peakfield)*1e-3\n er_location_name = kwargs.get('er_location_name', '')\n samp_con = kwargs.get('samp_con', '1')\n codelist = kwargs.get('codelist', '')\n CR_cooling_times=kwargs.get('CR_cooling_times', None)\n\n # format and validate variables\n if magfile:\n try:\n input=open(magfile,'r')\n except:\n print(\"bad mag file name\")\n return False, \"bad mag file name\"\n else: \n print(\"mag_file field is required option\")\n print(main.__doc__)\n return False, \"mag_file field is required option\"\n \n if specnum!=0:\n specnum=-specnum\n if \"4\" in samp_con:\n if \"-\" not in samp_con:\n print(\"option [4] must be in form 4-Z where Z is an integer\")\n return False, \"option [4] must be in form 4-Z where Z is an integer\"\n else:\n Z=int(samp_con.split(\"-\")[1])\n samp_con=\"4\"\n if \"7\" 
in samp_con:\n if \"-\" not in samp_con:\n print(\"option [7] must be in form 7-Z where Z is an integer\")\n return False, \"option [7] must be in form 7-Z where Z is an integer\"\n else:\n Z=int(samp_con.split(\"-\")[1])\n samp_con=\"7\"\n\n if codelist:\n codes=codelist.split(':')\n else:\n print(\"Must select experiment type (-LP option)\")\n return False, \"Must select experiment type (-LP option)\"\n if \"AF\" in codes:\n demag='AF' \n LPcode=\"LP-DIR-AF\"\n if \"T\" in codes:\n demag=\"T\"\n if not labfield: LPcode=\"LP-DIR-T\"\n if labfield: LPcode=\"LP-PI-TRM\"\n if \"ANI\" in codes:\n if not labfield:\n print(\"missing lab field option\")\n return False, \"missing lab field option\"\n LPcode=\"LP-AN-TRM\"\n\n if \"TRM\" in codes: \n demag=\"T\"\n LPcode=\"LP-TRM\"\n #trm=1\n \n if \"CR\" in codes:\n demag=\"T\"\n # dc should be in the code\n if not labfield:\n print(\"missing lab field option\")\n return False, \"missing lab field option\"\n\n LPcode=\"LP-CR-TRM\" # TRM in different cooling rates\n if command_line:\n ind=args.index(\"-LP\")\n CR_cooling_times=args[ind+2].split(\",\")\n\n if \"ANI\" in codes:\n demag=\"T\"\n LPcode=\"LP-AN-TRM\"\n\n #print CR_cooling_time ,\"CR_cooling_time\"\n\n version_num=pmag.get_version()\n\n MagRecs=[]\n \n #--------------------------------------\n # Read the file\n # Assumption:\n # 1. different lab protocolsa are in different files\n # 2. measurements are in the correct order\n #--------------------------------------\n\n Data={}\n\n line_no=0\n\n for line in input.readlines():\n line_no+=1\n this_line_data={}\n line_no+=1\n instcode=\"\"\n if len(line)<2:\n continue\n if line[0]==\"#\": #HUJI way of marking bad data points\n continue\n \n rec=line.strip('\\n').split()\n specimen=rec[0]\n date=rec[2].split(\"/\")\n hour=rec[3].split(\":\")\n treatment_type=rec[4]\n treatment=rec[5].split(\".\")\n dec_core=rec[6]\n inc_core=rec[7]\n dec_geo=rec[8]\n inc_geo=rec[9]\n dec_tilted=rec[10]\n inc_tilted=rec[11]\n moment_emu=float(rec[12])\n\n if specimen not in list(Data.keys()):\n Data[specimen]=[]\n \n # check duplicate treatments:\n # if yes, delete the first and use the second\n\n if len(Data[specimen])>0:\n if treatment==Data[specimen][-1]['treatment']:\n del(Data[specimen][-1])\n print(\"-W- Identical treatments in file %s magfile line %i: specimen %s, treatment %s ignoring the first. 
\" %(magfile, line_no, specimen,\".\".join(treatment)))\n\n this_line_data={}\n this_line_data['specimen']=specimen\n this_line_data['date']=date\n this_line_data['hour']=hour\n this_line_data['treatment_type']=treatment_type\n this_line_data['treatment']=treatment\n this_line_data['dec_core']=dec_core\n this_line_data['inc_core']=inc_core\n this_line_data['dec_geo']=dec_geo\n this_line_data['inc_geo']=inc_geo\n this_line_data['dec_tilted']=dec_tilted\n this_line_data['inc_tilted']=inc_tilted\n this_line_data['moment_emu']=moment_emu \n Data[specimen].append(this_line_data)\n\n \n print(\"-I- done reading file %s\"%magfile)\n\n #--------------------------------------\n # Convert to MagIC\n #--------------------------------------\n \n specimens_list=list(Data.keys())\n specimens_list.sort()\n\n\n MagRecs=[]\n for specimen in specimens_list:\n for i in range(len(Data[specimen])):\n this_line_data=Data[specimen][i]\n methcode=\"\"\n MagRec={}\n MagRec[\"er_specimen_name\"]=this_line_data['specimen']\n if specnum!=0:\n MagRec[\"er_sample_name\"]=this_line_data['specimen'][:specnum]\n else:\n MagRec[\"er_sample_name\"]=this_line_data['specimen']\n\n if samp_con==\"1\":\n MagRec[\"er_site_name\"]=MagRec[\"er_sample_name\"][:-1]\n elif samp_con==\"2\":\n parts=MagRec[\"er_sample_name\"].split('-')\n MagRec[\"er_site_name\"]= parts[0]\n elif samp_con==\"3\":\n parts=MagRec[\"er_sample_name\"].split('.')\n MagRec[\"er_site_name\"]= parts[0]\n elif samp_con=='4':\n MagRec[\"er_site_name\"]=MagRec[\"er_sample_name\"][0:-Z]\n elif samp_con=='5':\n MagRec[\"er_site_name\"]=MagRec[\"er_sample_name\"]\n elif samp_con=='7':\n MagRec[\"er_site_name\"]=MagRec[\"er_sample_name\"][0:Z] \n else:\n MagRec[\"er_site_name\"]=MagRec[\"er_sample_name\"] # site=sample by default\n \n if er_location_name:\n MagRec['er_location_name']=er_location_name\n else:\n MagRec['er_location_name']=MagRec[\"er_site_name\"]\n \n MagRec[\"measurement_temp\"]='%8.3e' % (273) # room temp in kelvin\n MagRec[\"measurement_magn_moment\"]='%10.3e'% (float(this_line_data['moment_emu'])*1e-3) # moment in Am^2 (from emu)\n MagRec[\"measurement_dec\"]=this_line_data['dec_core']\n MagRec[\"measurement_inc\"]=this_line_data['inc_core']\n date=this_line_data['date']\n hour=this_line_data['hour'] \n if float(date[2])>80:\n yyyy=\"19\"+date[2]\n else:\n yyyy=\"20\"+date[2]\n if len (date[0])==1:\n date[0]=\"0\"+date[0]\n if len (date[1])==1:\n date[1]=\"0\"+date[1]\n MagRec[\"measurement_date\"]=\":\".join([yyyy,date[0],date[1],hour[0],hour[1],\"00.00\"])\n MagRec[\"measurement_time_zone\"]='JER'\n MagRec['er_analyst_mail_names'] =user \n MagRec[\"er_citation_names\"]=\"This study\"\n MagRec[\"magic_instrument_codes\"]=\"HUJI-2G\"\n MagRec[\"measurement_flag\"]=\"g\"\n MagRec[\"measurement_positions\"]=\"1\"\n MagRec[\"measurement_positions\"]=\"1\"\n MagRec[\"measurement_standard\"]=\"u\"\n MagRec[\"measurement_description\"]=\"\"\n #MagRec[\"treatment_temp\"]='%8.3e' % (float(treatment[0])+273.) 
# temp in kelvin\n \n #---------------------------------------- \n # AF demag\n # do not support AARM yet\n #----------------------------------------\n \n if demag==\"AF\":\n treatment_type=this_line_data['treatment_type']\n # demag in zero field\n if LPcode != \"LP-AN-ARM\":\n MagRec[\"treatment_ac_field\"]='%8.3e' %(float(this_line_data['treatment'][0])*1e-3) # peak field in tesla\n MagRec[\"treatment_dc_field\"]='0'\n MagRec[\"treatment_dc_field_phi\"]='0'\n MagRec[\"treatment_dc_field_theta\"]='0'\n if treatment_type==\"N\":\n methcode=\"LP-DIR-AF:LT-NO\"\n elif treatment_type==\"A\":\n methcode=\"LP-DIR-AF:LT-AF-Z\"\n else:\n print(\"ERROR in treatment field line %i... exiting until you fix the problem\" %line_no)\n print(this_line_data)\n return False, \"ERROR in treatment field line %i... exiting until you fix the problem\" %line_no\n \n # AARM experiment \n else:\n print(\"Dont supprot AARM in HUJI format yet. sorry... do be DONE\")\n MagRec[\"magic_method_codes\"]=methcode\n MagRec[\"magic_experiment_name\"]=specimen+ \":\" + LPcode\n MagRec[\"measurement_number\"]=\"%i\"%i\n MagRec[\"measurement_description\"]=\"\"\n\n MagRecs.append(MagRec)\n \n #----------------------------------------\n # Thermal: \n # Thellier experiment: \"IZ\", \"ZI\", \"IZZI\", pTRM checks\n # Thermal demag\n # Thermal cooling rate experiment\n # Thermal NLT\n #----------------------------------------\n\n\n if demag==\"T\": \n\n treatment=this_line_data['treatment']\n treatment_type=this_line_data['treatment_type']\n \n \n #----------------------------------------\n # Thellier experimet\n #----------------------------------------\n\n if LPcode == \"LP-PI-TRM\" : # Thelllier experiment\n\n \n\n MagRec[\"magic_experiment_name\"]=specimen+ \":\" + LPcode\n methcode=LPcode \n \n if treatment_type==\"N\" or ( (treatment[1]=='0' or treatment[1]=='00') and float(treatment[0])==0):\n LT_code=\"LT-NO\"\n MagRec[\"treatment_dc_field_phi\"]='0' \n MagRec[\"treatment_dc_field_theta\"]='0' \n MagRec[\"treatment_dc_field\"]='0'\n MagRec[\"treatment_temp\"]='273.'\n \n elif treatment[1]=='0' or treatment[1]=='00':\n LT_code=\"LT-T-Z\"\n MagRec[\"treatment_dc_field_phi\"]='0' \n MagRec[\"treatment_dc_field_theta\"]='0' \n MagRec[\"treatment_dc_field\"]='%8.3e'%(0)\n MagRec[\"treatment_temp\"]='%8.3e' % (float(treatment[0])+273.) # temp in kelvin\n\n # check if this is ZI or IZ:\n # check if the same temperature already measured:\n methcode=\"LP-PI-TRM:LP-PI-TRM-ZI\"\n for j in range (0,i):\n if Data[specimen][j]['treatment'][0] == treatment[0]:\n if Data[specimen][j]['treatment'][1] == '1' or Data[specimen][j]['treatment'][1] == '10':\n methcode=\"LP-PI-TRM:LP-PI-TRM-IZ\"\n else:\n methcode=\"LP-PI-TRM:LP-PI-TRM-ZI\"\n \n \n elif treatment[1]=='1' or treatment[1]=='10':\n LT_code=\"LT-T-I\"\n MagRec[\"treatment_dc_field\"]='%8.3e' % (labfield) # labfield in tesla (convert from microT)\n MagRec[\"treatment_dc_field_phi\"]='%7.1f' % (phi) # labfield phi\n MagRec[\"treatment_dc_field_theta\"]='%7.1f' % (theta) # labfield theta\n MagRec[\"treatment_temp\"]='%8.3e' % (float(treatment[0])+273.) 
# temp in kelvin\n\n # check if this is ZI or IZ:\n # check if the same temperature already measured:\n methcode=\"LP-PI-TRM:LP-PI-TRM-IZ\"\n for j in range (0,i):\n if Data[specimen][j]['treatment'][0] == treatment[0]:\n if Data[specimen][j]['treatment'][1] == '0' or Data[specimen][j]['treatment'][1] == '00':\n methcode=\"LP-PI-TRM:LP-PI-TRM-ZI\"\n else:\n methcode=\"LP-PI-TRM:LP-PI-TRM-IZ\"\n \n elif treatment[1]=='2' or treatment[1]=='20':\n LT_code=\"LT-PTRM-I\"\n MagRec[\"treatment_dc_field\"]='%8.3e' % (labfield) # labfield in tesla (convert from microT)\n MagRec[\"treatment_dc_field_phi\"]='%7.1f' % (phi) # labfield phi\n MagRec[\"treatment_dc_field_theta\"]='%7.1f' % (theta) # labfield theta\n MagRec[\"treatment_temp\"]='%8.3e' % (float(treatment[0])+273.) # temp in kelvin\n methcode=\"LP-PI-TRM:LP-PI-TRM-IZ\"\n\n else:\n print(\"ERROR in treatment field line %i... exiting until you fix the problem\" %line_no)\n return False, \"ERROR in treatment field line %i... exiting until you fix the problem\" %line_no\n \n MagRec[\"magic_method_codes\"]=LT_code+\":\"+methcode\n MagRec[\"measurement_number\"]=\"%i\"%i \n MagRec[\"measurement_description\"]=\"\"\n MagRecs.append(MagRec)\n #continue\n \n \n #----------------------------------------\n # demag experimet\n #----------------------------------------\n\n\n if LPcode == \"LP-DIR-T\" :\n MagRec[\"magic_experiment_name\"]=specimen+ \":\" + LPcode\n methcode=LPcode \n \n if treatment_type==\"N\":\n LT_code=\"LT-NO\"\n else:\n LT_code=\"LT-T-Z\"\n \n methcode=LPcode+\":\"+\"LT-T-Z\"\n MagRec[\"treatment_dc_field_phi\"]='0' \n MagRec[\"treatment_dc_field_theta\"]='0' \n MagRec[\"treatment_dc_field\"]='%8.3e'%(0)\n MagRec[\"treatment_temp\"]='%8.3e' % (float(treatment[0])+273.) # temp in kelvin\n MagRec[\"magic_method_codes\"]=LT_code+\":\"+methcode\n MagRec[\"measurement_number\"]=\"%i\"%i \n MagRec[\"measurement_description\"]=\"\"\n MagRecs.append(MagRec)\n #continue\n \n\n #----------------------------------------\n # ATRM measurements\n # The direction of the magnetization is used to determine the\n # direction of the lab field.\n #----------------------------------------\n \n if LPcode ==\"LP-AN-TRM\" :\n \n MagRec[\"magic_experiment_name\"]=specimen+ \":\" + LPcode\n methcode=LPcode \n\n if float(treatment[1])==0:\n MagRec[\"magic_method_codes\"]=\"LP-AN-TRM:LT-T-Z\"\n MagRec[\"treatment_dc_field_phi\"]='0'\n MagRec[\"treatment_dc_field_theta\"]='0'\n MagRec[\"treatment_temp\"]='%8.3e' % (float(treatment[0])+273.) 
# temp in kelvin\n MagRec[\"treatment_dc_field\"]='0'\n else:\n MagRec[\"magic_method_codes\"]=\"LP-AN-TRM:LT-T-I\"\n inc=float(MagRec[\"measurement_inc\"]);dec=float(MagRec[\"measurement_dec\"])\n if abs(inc)<45 and (dec<45 or dec>315): # +x\n tdec,tinc=0,0\n MagRec[\"measurement_number\"]='1'\n if abs(inc)<45 and (dec<135 and dec>45):\n tdec,tinc=90,0\n MagRec[\"measurement_number\"]='2' # +y\n if inc>45 :\n tdec,tinc=0,90\n MagRec[\"measurement_number\"]='3' # +z\n if abs(inc)<45 and (dec<225 and dec>135):\n tdec,tinc=180,0\n MagRec[\"measurement_number\"]='4' # -x\n if abs(inc)<45 and (dec<315 and dec>225):\n tdec,tinc=270,0\n MagRec[\"measurement_number\"]='5'# -y\n if inc<-45 :\n tdec,tinc=0,-90\n MagRec[\"measurement_number\"]='6'# -z\n if float(treatment[1])==7 or float(treatment[1])==70:\n # alteration check\n #methcode=\"LP-AN-TRM:LT-PTRM-I\"\n MagRec[\"magic_method_codes\"]=\"LP-AN-TRM:LT-PTRM-I\"\n MagRec[\"measurement_number\"]='7'# -z\n\n \n MagRec[\"treatment_dc_field_phi\"]='%7.1f' %(tdec)\n MagRec[\"treatment_dc_field_theta\"]='%7.1f'% (tinc)\n MagRec[\"treatment_temp\"]='%8.3e' % (float(treatment[0])+273.) # temp in kelvin\n MagRec[\"treatment_dc_field\"]='%8.3e'%(labfield)\n MagRec[\"measurement_description\"]=\"\"\n MagRecs.append(MagRec)\n #continue\n\n #----------------------------------------\n # NLT measurements\n # or TRM acquisistion experiment\n #----------------------------------------\n\n \n if LPcode == \"LP-TRM\" :\n MagRec[\"magic_experiment_name\"]=specimen+ \":\" + LPcode\n MagRec[\"magic_method_codes\"]=\"LP-TRM:LT-T-I\"\n if float(treatment[1])==0:\n labfield=0\n else:\n labfield=float(float(treatment[1]))*1e-6\n MagRec[\"treatment_temp\"]='%8.3e' % (float(treatment[0])+273.) # temp in kelvin \n MagRec[\"treatment_dc_field\"]='%8.3e' % (labfield) # labfield in tesla (convert from microT)\n MagRec[\"treatment_dc_field_phi\"]='%i' % (int(phi)) # labfield phi\n MagRec[\"treatment_dc_field_theta\"]='%i' % (int(theta)) # labfield theta\n MagRec[\"measurement_number\"]=\"%i\"%i \n MagRec[\"measurement_description\"]=\"\"\n MagRecs.append(MagRec)\n #continue\n \n\n #----------------------------------------\n # Cooling rate experiments\n #----------------------------------------\n \n if LPcode ==\"LP-CR-TRM\":\n index=int(treatment[1][0])\n #print index,\"index\"\n #print CR_cooling_times,\"CR_cooling_times\"\n #print CR_cooling_times[index-1]\n #print CR_cooling_times[0:index-1]\n if index==7 or index==70: # alteration check as final measurement\n meas_type=\"LT-PTRM-I:LP-CR-TRM\"\n CR_cooling_time=CR_cooling_times[-1]\n elif index==0 or index==00: # baseline\n meas_type=\"LT-T-Z:LP-CR-TRM\"\n CR_cooling_time=CR_cooling_times[0]\n else: \n meas_type=\"LT-T-I:LP-CR-TRM\"\n CR_cooling_time=CR_cooling_times[index-1]\n MagRec[\"magic_method_codes\"]=meas_type \n MagRec[\"magic_experiment_name\"]=specimen+ \":\" + LPcode\n MagRec[\"treatment_temp\"]='%8.3e' % (float(treatment[0])+273.) 
# temp in kelvin \n MagRec[\"treatment_dc_field\"]='%8.3e' % (labfield) # labfield in tesla (convert from microT)\n MagRec[\"treatment_dc_field_phi\"]='%7.1f' % (phi) # labfield phi\n MagRec[\"treatment_dc_field_theta\"]='%7.1f' % (theta) # labfield theta\n MagRec[\"measurement_number\"]=\"%i\"%index\n MagRec[\"measurement_description\"]=\"cooling_rate\"+\":\"+CR_cooling_time+\":\"+\"K/min\"\n #MagRec[\"measurement_description\"]=\"%.1f minutes per cooling time\"%int(CR_cooling_time)\n MagRecs.append(MagRec)\n #continue\n\n pmag.magic_write(meas_file,MagRecs,'magic_measurements')\n print(\"-I- results put in \",meas_file)\n return True, meas_file", "def main():\n filename = \"data/exercise.csv\"\n analyze(filename)", "def run(md_file, interact):\n lines = open(md_file, 'r').readlines()\n blocks = extract_blocks(lines)\n run_py(blocks, interact=interact)", "def main():\r\n run_processes('tests.csv', 'labs.csv')", "def main():\n parser = ArgumentParser(usage='%(prog)s [options] ecommonsMetadata.csv')\n parser.add_argument(\"-d\", \"--date\", dest=\"date\",\n help=\"Date on or after that an ETD was published for \\\n creating DOIs. Put in format YYYY-MM\")\n parser.add_argument(\"datafile\", help=\"eCommons metadata worked from.\")\n\n args = parser.parse_args()\n\n if not len(sys.argv) > 0:\n parser.print_help()\n parser.exit()\n\n workingdir = csvparse(args.datafile, args.date)\n doiparse(workingdir)\n print('ANVL files available in: ' + workingdir)", "def run(self):\n # params\n work_dir = self.param_required(\"work_dir\")\n\n # initial sequence loading, using ensembl-analysis scripts \n self.initial_sequence_loading(work_dir)\n\n # load data from the corresponding core db tables\n external_db_map = self.load_map_from_core_db(\"external_db\", [\"db_name\", \"external_db_id\"], work_dir) # for external_db\n attrib_type_map = self.load_map_from_core_db(\"attrib_type\", [\"code\", \"attrib_type_id\"], work_dir) # for attrib_type\n seq_region_map = self.load_map_from_core_db(\"seq_region\", [\"name\", \"seq_region_id\"], work_dir) # for seq_region\n\n # update synonyms and seq_region_attribs\n unversion = self.param(\"unversion_scaffolds\")\n is_primary_assembly = self.from_param(\"manifest_data\", \"agp\", not_throw = True) is None\n seq_region_file = self.from_param(\"manifest_data\", \"seq_region\", not_throw = True)\n\n # add seq_region synonyms\n self.add_sr_synonyms(seq_region_file,\n seq_region_map,\n external_db_map,\n self.pjc(work_dir, \"seq_region_syns\"),\n unversion = unversion)\n\n # add seq_region attributes\n self.add_sr_attribs(seq_region_file,\n seq_region_map,\n attrib_type_map,\n self.pjc(work_dir, \"seq_region_attr\"),\n unversion = unversion)\n\n # add seq_region EBI and BRC4 name attributes in the \"BRC4 mode\"\n # special case of attributes adding with default values derived from seq_region names\n # do not add if preparing to swap RefSeq and GeneBank ids; in this case attributes to be added at a later stage in pipeline\n # (easier to insert then to update)\n if self.param(\"brc4_mode\") and not self.param(\"swap_gcf_gca\"):\n self.add_sr_ebi_brc4_names(seq_region_file,\n seq_region_map,\n attrib_type_map,\n self.pjc(work_dir, \"seq_region_ebi_brc4_name\"),\n unversion = unversion)\n\n # add karyotype related data\n self.add_karyotype_data(seq_region_file,\n seq_region_map,\n attrib_type_map,\n self.pjc(work_dir, \"karyotype\"),\n unversion = unversion)" ]
[ "0.5694265", "0.5572508", "0.5570067", "0.55660534", "0.55130184", "0.5468869", "0.5449003", "0.54461205", "0.5285306", "0.52705884", "0.52448237", "0.52008086", "0.5169603", "0.5167817", "0.5165798", "0.5160243", "0.5145471", "0.51369846", "0.51311797", "0.512673", "0.5118558", "0.50833434", "0.50798327", "0.50763357", "0.5072122", "0.5067454", "0.50645304", "0.5063487", "0.50583893", "0.50538975" ]
0.5930959
0
Initialize a player at Python Casino, we give new players 100 chips to play
def __init__(self, name="Player"): self.name = name self.chips = 100 self.hand1 = [] self.hand2 = [] self.bet = 0 self.lastbet = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def newPlayer():\r\n pass", "def __init__(self):\n\n self.name = 'KuhnPoker'\n self.num_players = 2", "def initGame(self):\n self.map = {}\n self.blocks = Group()\n self.Coins =Group()\n self.players = Group()\n self.player1 = Player(1525,75,2)\n self.players.add(self.player1)\n if self.playernum == 2:\n self.player2 = Player(75,825,1)\n self.players.add(self.player2)\n else:\n self.player2 = False", "def __init__(self, number_players=1000):\n self.player_list = []\n for i in range(number_players):\n self.player_list.append(Player())", "def __init__(self, player):\n\t\tself.player = player", "def _initialize_game(wager_credits):\n player = {}\n player['chips'] = wager_credits\n player['round'] = 0\n player['blackjack'] = 0\n player['won'] = 0\n player['lost'] = 0\n player['push'] = 0\n player['bust'] = 0\n return player", "def __init__(self, players):\n\n # Define the players\n self.players = players\n\n # Define who starts the game\n self.nplayer = 1 \n\n # Define the board\n self.board = [0] * 9", "def __init__(self, player):\n self.player = player", "def __init__(self, player_id):\n self.player_id = player_id\n self.hand = []\n self.name = [\"Anikó\",\n \"Bori\",\n \"Nagyapa\",\n \"Kinga\",\n \"Jocó\",\n \"Nagyi\",\n \"Éva\",\n \"Robi\",\n \"Józsi\"][player_id]", "def __init__(self, player_id = 0):\n all_players = ['adiumy', 'amanda', 'beastie', 'emule', 'gavroche', 'hexley', 'kiki', 'konqi', 'nolok', 'pidgin', 'puffy', 'sara_the_racer', 'sara_the_wizard', 'suzanne', 'tux', 'wilber', 'xue']\n self.kart = all_players[np.random.choice(len(all_players))]", "def initialize_players():\n return [Player(name, Hand([]), 0) for name in PLAYER_NAMES]", "def __init__(self, player_name):\n self._player_name = player_name\n self._hand = Deck() \n self._coder = Deck()", "def __init__(self, players):\n\n self._players = players\n self._game = None", "def __init__(self, players, num_of_players):\r\n self.players = players\r\n self.num_of_players = num_of_players\r\n self.active_players = num_of_players\r\n self.dealer = Dealer()\r\n self.card_stack = CardStack()\r\n self.money_stack = MoneyStack()\r\n self.cur_player = 0\r\n self.round_num = 0\r\n self.round_player_money = 0", "def __init__(self, players, piles=None):\n self.players = players\n self.piles = piles if (piles != None) else [5, 5, 5, 5]\n self.nplayer = 1 # player 1 starts.", "def __init__(self):\n super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n self.player_count: int = None\n self.player_hand_0: arcade.SpriteList = None\n self.player_hand_1: arcade.SpriteList = None\n self.deck: arcade.SpriteList = None\n self.pile: arcade.SpriteList = None", "def init_players(self):\n self.spaceships.append(self.player)\n SoundManager.add_sfx(\n self.player.states['exploded']['sfx'],\n self.player\n )", "def __init__(self, min_player_count):\n self.min_player_count = min_player_count", "def __init__(self, player_control, players=None):\r\n self.player_control = player_control\r\n self.players = {} # copy for restoration\r\n if players is not None:\r\n for player in players.values():\r\n self.add_player(player)", "def __init__(self, parent, players):\n self.parent = parent\n self.players = players\n self.table_cards = []\n self.pot = 0", "def __init__(self, name=\"Player\", resources=[0,0,0,0,0,0,0,0], xor_resources=None,\\\n current_hand=None, structures=None, starting_gold=3, discounted_resources=None):\n if structures != None:\n self.structures = structures # by type? Should we have a structure type? 
\n else:\n self.structures = []\n \n self.name = name\n self.wonders = None \n player.west_natural= False\n player.west_manufactured = False\n player.east_natural= False\n player.east_manufactured= False\n\n if current_hand == None:\n self.current_hand = None\n else:\n self.current_hand = current_hand #I dont know if we need this\n self.starting_gold = starting_gold", "def __init__(self, players):\n self.players = players\n self.board = Board()", "def __init__(self, num_players):\n self.num_players = num_players\n self.firework = [[], [], [], [], []]\n self.nb_blue_stone = MAX_BLUE_STONE\n self.nb_red_stone = MAX_RED_STONE\n self.draw = None\n self.hands = None\n self.fill_draw()\n random.shuffle(self.draw)\n self.discard = []\n self.draw_initial_hands()", "def spawn_players(self) -> None:\n #Create the player\n self.player1 = Player(self.sensitivity, self.screen_width, self.screen_height, self.screen_width//(3/2), self.screen_height-50, self.player_lives, self.fps, self.player1_bullet, Direction.UP, self.debug)\n\n #Create the AI\n self.player2 = AIPlayer(self.sensitivity, self.screen_width, self.screen_height, self.screen_width//3, self.screen_height-50, self.player_lives, self.fps, self.player2_bullet, Direction.UP, 1, True, self.debug)", "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "def setUp(self):\n self.player = Player()", "def __init__(self, initial=[1, 3, 5, 7]):\n self.piles = initial.copy()\n self.player = 0\n self.winner = None", "def __init__(self, initial=[1, 3, 5, 7]):\n self.piles = initial.copy()\n self.player = 0\n self.winner = None", "def __init__(self):\n self.players = {1: [\"Player_a\", \"\\u25CF\"], 2: [\"Player_b\", \"\\u25CB\"]}\n self.current_player = 1\n self.playing_player = self.players[1]\n self.grid = [[\" \"] * 6 for x in range(7)]", "def __init__(self, players):\n\n self._players = players\n self._current_player = players.get()" ]
[ "0.6672925", "0.6669602", "0.6631138", "0.6495722", "0.6478701", "0.6469059", "0.6440115", "0.64392686", "0.64156747", "0.6413498", "0.6401054", "0.63888377", "0.63830507", "0.6313076", "0.62933445", "0.6278405", "0.6247309", "0.62469554", "0.62101966", "0.6190493", "0.6186686", "0.6168503", "0.61679024", "0.6147953", "0.6116136", "0.6114239", "0.6113349", "0.6113349", "0.610728", "0.61039394" ]
0.6768657
0
Receive a card dealt during the deal round
def dealt_card(self, card):
    self.hand1.append(card)
    print(f"{self.name} was dealt a {card}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deal(self):\n dealt_card = self.deck_of_cards.pop()\n print(\"You have been dealt the {} \".format(dealt_card.value) \\\n + \"of {}.\".format(dealt_card.suit) + \"\\n\")", "def deal_card(self):\n return self._deal(1)[0]", "def deal(self):\n\n if self.dealer: # Has cards in hand\n self.dealer.reset()\n\n if self.player: # Has cards in hand\n self.player.reset()\n\n dealer_first = self.deck.draw()\n dealer_second = self.deck.draw()\n dealer_second.flip()\n self.dealer.take_card(dealer_first)\n self.dealer.take_card(dealer_second)\n\n player_first = self.deck.draw()\n player_second = self.deck.draw()\n player_first.flip()\n player_second.flip()\n self.player.take_card(player_first)\n self.player.take_card(player_second)\n\n if self.verbose:\n print('Player bets:', self.player_bet)\n for player in (self.player, self.dealer):\n print(player, 'dealt:')\n for card in player:\n if card.face():\n print(' '*3, str(card)+':', 'face up')\n else:\n print(' '*3, str(card)+':', 'face down')", "def deal_cards(self):\n aux = random.randint(0, len(self.deck))\n card = self.deck[aux]\n self.deck.pop(aux)\n print(f\"Received: {card}\")\n return card", "def testDiscTender(self):\n self.setupTransaction()\n if not checkout.pay_card(card_name='Discover'):\n tc_fail(\"Failed to pay with Discover credit tender\")\n self.handleMessages()", "def deal_card(self):\n \n return self.deck.pop()", "def deal_card(self):\n return self.deck.pop()", "def deal(this_deck):\n dealt_card = this_deck.popleft()\n\n return dealt_card", "def deal_card(self):\r\n dealt = self.deck.pop(-1)\r\n self.graveyard.append(dealt)\r\n return dealt", "def waitForCard(self):\n try:\n cardService = self.cardrequest.waitforcard()\n self.timer.stop()\n\n cardService.connection.connect()\n\n self.cardUid = self.getUID(cardService)\n self.ATR = self.getATR(cardService)\n\n account = self.action.getAccount(self.cardUid)\n \n if account is None:\n self.warning.emit(WARN_NO_ACCOUNT)\n else:\n if account['statement'] == STA_USER_ACTIVE:\n for device in account['devices']:\n if device['uid'] == self.cardUid:\n if device['status'] == STA_DEVICE_ACTIVE:\n # all is ok\n self.cardDetected.emit(account['balance'])\n elif device['status'] == STA_DEVICE_LOST:\n self.warning.emit(WARN_DEVICE_LOST)\n elif device['status'] == STA_DEVICE_STOLEN:\n self.warning.emit(WARN_DEVICE_STOLEN)\n elif device['status'] == STA_DEVICE_DELETED:\n self.warning.emit(WARN_DEVICE_DELETED)\n else:\n self.warning.emit(WARN_DEVICE_DELETED)\n break\n elif account['statement'] == STA_USER_INACTIVE:\n self.warning.emit(WARN_ACCOUNT_INACTIVE)\n elif account['statement'] == STA_USER_DELETED:\n self.warning.emit(WARN_ACCOUNT_DELETED)\n else:\n self.warning.emit(WARN_ACCOUNT_DELETED)\n\n except CardRequestTimeoutException:\n self.updateWaiting.emit()\n # init variables\n self.cardUid = None\n self.ATR = None", "def deal_card(self, player: BaseBlackjackPlayer, is_open: bool = True) -> str:\n card = self.deck.pick_card(discard=True, is_open=is_open)\n player.hand.cards.append(card)\n return f\"\\n{player.username} received {card}. 
Total is: {player.hand.value}\"", "def deal_demo():\n deck = get_deck()\n print(hand_to_string(deck))\n print(hand_to_string(get_hand(deck)))\n print(hand_to_string(get_hand(deck)))", "def deal_cards(self, agent, param):\n return agent.deal(param, big_blind, small_blind, self.bet_hist, self.pot)", "def deal(self):\n self.dealer.hit(self.deck)\n self.dealer.hit(self.deck)\n self.player.hit(self.deck)\n self.player.hit(self.deck)\n\n if self.player.sum_cards() == 21:\n self.round_winner = True\n self.print_hands()\n print(\"BLACKJACK! You win!\")", "def Send_deal(self, dealtCards, round):\n serializedDeal = [[c.serialize() for c in hand] for hand in dealtCards]\n self.Send({\"action\": \"deal\", \"hands\": serializedDeal, \"round\": round})", "def dealPlayer(self, player):\n self.send(Message(text=\"You have been dealt: \" + str(player.hand)), thread_id=player.fbid, thread_type=ThreadType.USER)", "def hit(self, player):\n\n hit_card = self.deck.draw()\n hit_card.flip()\n player.take_card(hit_card)\n\n if self.verbose:\n print(player, 'receives', hit_card)", "def cards_to_deal(cls, context={}):\n\t\treturn cls.CARDS_TO_DEAL", "def testDebitTender(self):\n self.setupTransaction()\n if not checkout.pay_card(card_name='Debit'):\n tc_fail(\"Failed to pay with debit tender\")\n # This is an attempt to prevent PHYK-85 from happening\n self.setupTransaction()\n if not checkout.pay_card(card_name='Debit'):\n tc_fail(\"Failed to pay with debit tender\")\n self.handleMessages()", "def demote(self):\n if self.rank != \"A\":\n raise TypeError(\"Card must be an Ace\")\n else:\n self.value = 1", "def deal(self):\n card = self.cards.pop()\n return card", "async def deal(self, ctx):\n frames = ['( •_•)', '( •_•)>⌐■-■', '(⌐■_■)', '(⌐■_■) Deal', '(⌐■_■) Deal with', '(⌐■_■) Deal with it.']\n msg = await ctx.send(frames[0])\n for frame in frames[1:]:\n await asyncio.sleep(0.3)\n await msg.edit(content = frame)", "def dealer_turn(self, s):\r\n action = None\r\n while not s.is_terminal and action != Action.STICK:\r\n action = self.dealer.policy(s)\r\n if action == Action.HIT:\r\n s.dealer_sum += self.take_card()\r\n s.is_terminal = self.check_bust(s.dealer_sum)\r\n return s", "def deal_card():\r\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\n return (random.choice(cards))", "def receive_card(self, card: Card) -> None:\n\t\tself.deck.append(card)\n\t\t\n\t\t# Sorts the Deck by type and colour for aesthetic purposes\n\t\t\"\"\"self.deck.sort(key=lambda x: repr(x.type))\n\t\tself.deck.sort(key=lambda x: repr(x.colour))\"\"\"", "def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n card = random.choice(cards)\n return card", "def dealer_turn(self):\n self.dealer.reveal()\n show_table_later(self.player, self.dealer, self.pot)\n while self.dealer.hand.value < 17:\n self.dealer.take_card(self.deck)\n show_table_later(self.player, self.dealer, self.pot)", "def takeoff_second():\n\tglobal c2\n\tglobal a2\n\tglobal BUF_SIZE\n\tglobal state\n\n\tmsg = c2.recv(BUF_SIZE) # wait for the armed message\n\tprint a2, ' >> ', msg\n\tif msg != 'Armed':\n\t\terror(msg)\n\t\tstate = 9 # exit failure\n\telse:\n\t\tnew_msg = {}\n\t\tnew_msg['msg'] = 'TAKEOFF'\n\t\tnew_msg['arg1'] = init2[2]\n\t\tc2.send(json.dumps(new_msg))\n\t\tstate += 1", "def deal(self):\n\t\tplayerList = self.getPlayers()\n\t\tstart = self.curDealerSeatNo + 1\n\t\tfor i in range(len(playerList)*2):\n\t\t\tplayerList[(start + i) % len(playerList)].hand.append(self.deck.pop())\n\t\t\tplayerList[(start + i) % len(playerList)].isHandLive = 
True", "def action_hit(self) -> None:\n print(self.deal_card(self.user))" ]
[ "0.7164214", "0.6649279", "0.6613201", "0.6463644", "0.64456797", "0.6287789", "0.62788486", "0.6256954", "0.6247799", "0.6185928", "0.618543", "0.6180968", "0.61769384", "0.61759967", "0.61480325", "0.6139512", "0.613029", "0.6088372", "0.6074345", "0.60608476", "0.6047473", "0.6033551", "0.6019128", "0.60093415", "0.6006691", "0.60047096", "0.59797513", "0.59735936", "0.59485435", "0.593862" ]
0.7149964
1
Return the value of the players hand. Still need to handle split hands somehow
def hand_value(self):
    return deck.bj_hand_value(self.hand1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def player_hand_value(self, hand_idx=0):\n return self._get_hand_value(self.players[hand_idx]['hand'])", "def _get_hand_value(self):\n\t\tvalue_list = []\n\t\tfor index, hand in enumerate(self.player_hand):\n\t\t\tif self.status[index] == 'won':\n\t\t\t\tvalue_list.append(hand.bet)\n\t\t\telif self.status[index] == 'blackjack':\n\t\t\t\tvalue_list.append(hand.bet * 1.5)\n\t\t\telif self.status[index] == 'push':\n\t\t\t\tvalue_list.append(0)\n\t\t\telse:\n\t\t\t\tvalue_list.append(0-hand.bet)\n\t\treturn value_list", "def hand_value(hand):\n val = 0 \n for card in hand:\n val += card.value\n\n return val", "def calculate_value(self, hand):\n global FACE_CARDS\n #could refactor the 2 hand possiblities into methods of a Dealer and Player Class\n if hand == \"player\":\n if self.player_hand[-1].value in FACE_CARDS:\n self.player_value += 10\n elif self.player_hand[-1].value == \"A\":\n self.player_value += 11\n self.player_ace_count += 1\n else:\n self.player_value += int(self.player_hand[-1].value)\n\n if self.player_value > 21:\n if self.player_ace_count > self.player_almost_bust:\n #To prevent a Bust, your Ace became a one\n self.player_value -= 10\n self.player_almost_bust += 1\n else:\n self.player_lose()\n elif self.player_value == 21:\n self.blackjack = True\n self.endgame()\n\n elif hand == \"dealer\":\n if len(self.dealer_hand) > 1:\n if self.dealer_hand[-1].value in FACE_CARDS:\n self.dealer_value += 10\n elif self.dealer_hand[-1].value == \"A\":\n self.dealer_value += 11\n self.dealer_ace_count += 1\n else:\n self.dealer_value += int(self.dealer_hand[-1].value)\n\n if self.dealer_value > 21:\n if self.dealer_ace_count > self.dealer_almost_bust:\n #To prevent a Bust, the Dealer's Ace became a one\n self.dealer_value -= 10\n self.dealer_almost_bust += 1\n else:\n self.player_win()\n elif self.dealer_value == 21:\n self.player_lose()", "def get_value(self):\n global VALUES\n hand_value = 0\n has_ace = False\n\n for card in self.hand:\n v = VALUES[card.get_rank()]\n hand_value += v\n if card.get_rank() is 'A':\n has_ace = True\n\n if not has_ace:\n return hand_value\n else:\n if hand_value + 10 <= 21:\n return hand_value + 10\n else:\n return hand_value", "def poker(hands):\n try:\n print \"The winning hand had: \" + \\\n pranks[hand_rank(allmax(hands,key=hand_rank)[0])[0]]\n except ValueError:\n print \"The winning hand had: \" + \\\n pranks[hand_rank(allmax(hands,key=hand_rank))[0]]\n return allmax(hands, key=hand_rank)", "def get_hand(self):\n return self.hands[self.state.active]", "def determine_hand_value(hand):\n\thand_value = [False, False, False, False, False, False, False, False, False]\t\n\tvalue = is_straight_flush(hand)\n\tif value:\n\t\thand_value[8] = True\n\t\treturn hand_value, value[1]\n\tvalue = is_four_of_a_kind(hand)\n\tif value:\n\t\thand_value[7] = True\n\t\treturn hand_value, value[1]\n\tvalue = is_full_house(hand)\n\tif value:\n\t\thand_value[6] = True\n\t\treturn hand_value, value[1]\n\tvalue = is_flush(hand)\n\tif value:\n\t\thand_value[5] = True\n\t\treturn hand_value, value[1]\n\tvalue = is_straight(hand)\n\tif value:\n\t\thand_value[4] = True\n\t\treturn hand_value, value[1]\n\tvalue = is_three_of_a_kind(hand)\n\tif value:\n\t\thand_value[3] = True\n\t\treturn hand_value, value[1]\n\tvalue = is_two_pair(hand)\n\tif value:\n\t\thand_value[2] = True\n\t\treturn hand_value, value[1]\n\tvalue = is_pair(hand)\n\tif value:\n\t\thand_value[1] = True\n\t\treturn hand_value, value[1]\n\tvalue = is_high_card(hand)\n\tif value:\n\t\thand_value[0] = True\n\t\treturn 
hand_value, value[1]", "def value(self, hand):\n return sum(self.accelerations[hand]) / (sum(self.velocities[hand])+.1)", "def get_hand(self):\n return self._hand", "def get_hand(self):\n return self.cards", "def _get_hand_value(self, hand, allow_soft_limit=True):\n hand_values = [0]\n for face, suit in hand:\n card_value = self._face_value(face)\n hand_values = [value + card_value for value in hand_values]\n if face == 'ace' and allow_soft_limit:\n hand_values_ace = [value - 10 for value in hand_values if value < 21]\n hand_values += hand_values_ace\n # Get the higehst value that's 21 or less. If none found, get the bust value\n hand_values.sort(reverse=True) # Highest number First\n for value in hand_values:\n hand_value = value\n if hand_value <= 21: # Found the highest number <= 21\n break\n return hand_value", "def dealer_hand_value(self):\n return self._get_hand_value(self.dealer_hand, allow_soft_limit=self.allow_soft_limit)", "def stand(hand=bj.player1.hand):\r\n phv = bj.player1.hand_value_check(hand) # check player hand value\r\n phv = [x for x in phv if x <= 21]\r\n if hand == bj.player1.hand:\r\n if len(phv) > 0:\r\n bj.player1.final_hand_val = max(phv)\r\n else:\r\n bj.player1.final_hand_val = \"bust\"\r\n else:\r\n if len(phv) > 0:\r\n bj.player1.final_hand2_val = max(phv)\r\n else:\r\n bj.player1.final_hand2_val = \"bust\"", "def hand():\n return PokerHand()", "def test_hand_values(hand, result):\n from poker_rankings import PokerHand\n from collections import defaultdict\n heroes_hand = PokerHand(hand)\n assert heroes_hand._hand_value == result", "def get_value(self):\r\n value, aces = 0, 0\r\n for card in self.hand:\r\n value += VALUES[card.get_rank()]\r\n # Keep track of the aces in Hand\r\n if card.get_rank() == \"A\":\r\n aces += 1\r\n if aces >= 1 and value + 10 <= 21:\r\n value += 10\r\n return value", "def value(self, card):\n return self.valores[self.deck.index(card)]", "def get_value(self):\n \n value = 0\n ace = False\n\n for card in self.hand:\n value += VALUES[card.get_rank()]\n \n if (card.get_rank() == 'A'):\n ace = True\n \n if not ace:\n return value\n else:\n if (value + 10) <= 21:\n return (value + 10)\n else:\n return value", "def get_hp():\n\n return character['HP']", "def get_main_hand_equipped(self):\n\t\treturn self.equippedMainHand", "def enemy_trump(self, hand):\n card = None\n for i in range(len(hand)):\n if hand[i].trump:\n card = hand[i]\n break\n return card", "def handDecision(handIn):", "def get_suit(self):\r\n return self.suit", "def get_user_input(self, game, hand, message, allowed_actions):\n if self.first_turn:\n hand = self.hands[0]\n if hand.cards == 2:\n card1, card2 = hand.cards\n if card1.get_value() == card2.get_value():\n return 'split'\n return 'double'\n else:\n return 'stand'", "def hand_points(hand):\n points = [[]]\n branch = 1\n for card in hand:\n if not card[\"is_hidden\"]:\n if card[\"value\"].isnumeric():\n for possibility in range(branch):\n points[possibility].append(int(card[\"value\"]))\n elif card[\"value\"] == \"A\":\n for possibility in range(branch):\n # Ace is 1 or 11. 
Creating the two possibility\n points.append(points[possibility] + [11]) \n points[possibility].append(1)\n branch += 1\n else:\n # Left are the face value of 10\n for possibility in range(branch):\n points[possibility].append(10)\n\n score = list(zip([sum(branch) for branch in points], points))\n score.sort(key=lambda x: x[0], reverse=True)\n\n for total, points in score:\n if total == 21 and len(hand) == 2:\n return total, \"BlackJack!\"\n if total <= 21:\n if 1 in points and 11 in points:\n return total, None\n if 1 in points: \n return total, \"Soft\"\n if 11 in points:\n return total, \"Hard\"\n else:\n return total, None\n\n # If you get there, you have lost or you had empty hand \n # or all card in hand was hiddien\n if score:\n return score[-1][0], None\n else:\n return 0, None", "def hand_value_check(self, hand):\r\n hand_value = 0\r\n ace = 0\r\n result = []\r\n a = 0\r\n for card in hand: # calculate value of a hand\r\n if card.value < 10:\r\n a = card.value\r\n elif card.value in range(10, 14):\r\n a = 10\r\n elif card.value == 14: # keep track of Aces that may be counted both as 11 and as 1\r\n a = 11\r\n ace += 1\r\n hand_value += a\r\n\r\n if ace > 0: # if hand had aces, return all possible hand values\r\n for i in range(0, ace + 1):\r\n result.append(hand_value)\r\n hand_value -= 10\r\n self.display_hand_val = result\r\n return result\r\n else:\r\n result.append(hand_value)\r\n self.display_hand_val = result\r\n return result", "def value(self):\n return self.piece_behavior.value", "def hand(self, id):\n return self.players[id].cards", "def winner(self):\n for i, hand in enumerate(self._hands):\n if not len(hand):\n return i\n return None" ]
[ "0.7838407", "0.7544803", "0.7197065", "0.7069113", "0.68920386", "0.68238187", "0.68197185", "0.6810919", "0.68102187", "0.68066204", "0.6653587", "0.66281724", "0.6549728", "0.6535241", "0.6513264", "0.6500152", "0.64739597", "0.63902885", "0.63755286", "0.6328881", "0.62096316", "0.6203147", "0.6170512", "0.616982", "0.61618406", "0.6161711", "0.6154311", "0.611921", "0.61033946", "0.6081369" ]
0.808111
0
Lost hand, lost bet
def lose(self, dlr):
    print(f"Sorry {self.name}, your total of {sum(self.hand1)} didn't beat the dealers {dlr}")
    self.lastbet = self.bet
    self.bet = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lose(self) -> None:\n self._actual_money -= self._bet", "def rough_outcome(self) -> float:\n # HUYNH YOU PRICK WHY THE FUCK DO YOU MAKE US WRITE THIS SHIT EVEN IT'S NOT USED ANYWHERE\n # pick move based on this may not be optimal but better than random\n # return 1 if win immediately\n # return -1 if all states reachable will result the other player win\n # return 0 if otherwise ??? what the fuck does this mean\n # look two states forward\n pass", "def event_house_wins(self) -> None:\n print(\"You lose\")\n self.user.lose_balance(self.user.bet)", "def event_player_blackjack(self) -> None:\n win_amount = self.user.bet + 1.5\n print(\"Congratulations, you win:\", win_amount)\n self.user.win_balance(win_amount)", "def lose(self):\n super().lose(self.current_bet)\n return self ## fluent", "def rough_outcome(self) -> float:\n if is_win(self):\n return 1\n elif is_lose(self):\n return -1\n return 0", "def bet(self, amount):\r\n\r\n if self.players[self.active_player].credits < self.big_blind:\r\n message = \"Player {} won! Not enough money remaining.\".format(self.players[(self.active_player + 1) %\r\n len(self.players)].name)\r\n self.game_message.emit(message)\r\n self.restart()\r\n if self.players[(self.active_player + 1) % len(self.players)].credits < self.big_blind:\r\n message = \"Player {} won! Not enough money remaining.\".format(self.players[self.active_player].name)\r\n self.game_message_warning.emit(message)\r\n self.restart()\r\n\r\n if amount == 0:\r\n message = \"Raises must be larger than zero!\"\r\n self.game_message_warning.emit(message)\r\n\r\n elif self.previous_bet + amount > self.players[self.active_player].credits:\r\n message = \"Not enough money!\"\r\n self.game_message_warning.emit(message)\r\n else:\r\n self.pot += amount\r\n self.new_pot.emit()\r\n\r\n self.players[self.active_player].credits -= (self.previous_bet + amount)\r\n self.new_credits.emit()\r\n\r\n output_text = \"{} bet ${} and raised ${}\".format(self.players[self.active_player].name, self.previous_bet,\r\n amount)\r\n\r\n self.previous_bet = (self.previous_bet + amount)\r\n self.actions += 1\r\n\r\n self.new_output.emit(output_text)\r\n\r\n self.active_player = (self.active_player + 1) % len(self.players)\r\n\r\n # Update the players to hide their cards when it is not their turn\r\n for player in self.players:\r\n player.flip_cards()\r\n\r\n self.progress_game()", "def showWorstBetUse(self) :\n worstBetUse = self.level_history[0].bet\n for level in self.level_history :\n worstBetUse = level.bet if worstBetUse > level.bet else worstBetUse\n Scenario.messageGetWorstBetUse(worstBetUse)", "def player_lose(self):\n global chips\n chips = chips - self.final_bet\n self.defeat = True\n placed_bet = False", "def endRoundPayout(player, betted, playerStatus, multiplier): \n print(player.getName().upper(), playerStatus.name) \n if playerStatus == status.WON:\n player += (betted*multiplier) \n elif playerStatus == status.LOST:\n player -= (betted*multiplier) \n elif playerStatus == status.DRAW:\n print()\n else:\n print(\"PLAYER STATUS HASNT CHANGED {playerStatus.name}\")", "def evaluate_game(self):\n self.dealer.hit_until_hold()\n message = self.bold_message(self.dealer.final_str_with_hand()+ \"\\n\")\n if self.dealer.has_blackjack():\n message += \"The Dealer has Blackjack. 
All players without blackjack will lose the round.\\n\\n\"\n for player in self.players:\n if not player.has_blackjack():\n message += \" \" + player.mention_user() + \" lost \" + self.bold_message(str(player.lose_bet())) + \" memes\\n\"\n else:\n message += \" \" + player.mention_user() + \" also had blackjack so they gained/lost no memes.\\n\"\n elif self.dealer.is_bust():\n message += \"The Dealer is busted. All players left in the game win.\\n\\n\"\n for player in self.players:\n if player.is_busted or player.is_bust():\n message += \" \" + player.mention_user() + \" lost \" + self.bold_message(str(player.lose_bet())) + \" memes\\n\"\n else:\n message += \" \" + player.mention_user() + \" won \" + self.bold_message(str(player.gain_bet())) + \" memes\\n\"\n else:\n # players with a higher point total win, players with a lower point total than dealer lose\n dealer_hand_value = min(i for i in self.dealer.get_hand_values())\n message += \"The Dealer has a hand value of {}. All non-busted players above this value win!\\n\\n\".format(dealer_hand_value)\n for player in self.players:\n player_hand_value = min(i for i in player.get_hand_values())\n for value in player.get_hand_values(): # picks the largest value below 21 as the value to use when comparing\n if 21 >= value > player_hand_value:\n player_hand_value = value\n if not player.is_bust() and player_hand_value > dealer_hand_value:\n message += \" \" + player.mention_user() + \" won \" + self.bold_message(str(player.gain_bet())) + \" memes\\n\"\n elif not player.is_bust() and player_hand_value == dealer_hand_value:\n message += \" \" + player.mention_user() + \" tied and gained/lost no memes.\\n\"\n else:\n message += \" \" + player.mention_user() + \" lost \" + self.bold_message(str(player.lose_bet())) + \" memes\\n\"\n return message", "def test_bet(self):\n hand = self._hand\n self.assertEqual(hand.bet.amount, 150)", "def on_place_bet (self,event):\n global placed_bet\n placed_bet = True\n arcade.draw_text(f\"Value: {self.dealer_value}\", 280, 450, arcade.color.BLACK, 16)\n arcade.draw_text(f\"Value: {self.player_value}\", 280, 250, arcade.color.BLACK, 16)\n \n self.final_bet = self.bet\n self.dealer_hand[1].face_up()\n self.player_hand[0].face_up()\n self.player_hand[1].face_up()", "def playGame(self):\n BET_AMOUNT=1\n bets = 0\n win = 0\n loss=0\n result = 0\n cash=0\n for i in range(self.trials):\n cash=self.stake\n #loop continue till stake amount goes to 0 or stake amount reaches the goal\n while cash > 0 or cash == self.GOAL:\n bets+=1 #increments bet by 1\n result=random.randint(0,1)\n if result == 1:\n cash+=BET_AMOUNT\n win+=1\n else:\n cash-=BET_AMOUNT\n loss+=1\n\n print(\"No of times games won=\",win)\n print(\"No of times games lost=\",loss)\n print(\"Percentage of game won=\",100.0 * win / trials) #calculates winning percentage\n print(\"Percentage of game lost=\",100.0 * loss / trials) #calculates loss percentage", "def raise_bet(self, points):\n return self.call(points=points)\n # self.points -= points\n # self.bet += points\n # return points", "def vanilaScore(self,attended,state,W):", "def blind_bet(self):\n self.this_player.bet(SMALL_BLIND_BET)\n self.other_player.bet(BIG_BLIND_BET)\n if SHOW_MESSAGE:\n print(\"Making blind bets.\")\n print(\"Player1:\")\n self.player1.show()\n print(\"Player2:\")\n self.player2.show()", "def win(self) -> int:\n earnings: int = self._bet\n\n # If it has a blackjack, the earnings increments 1.5 times\n for hand in self._hands:\n if hand.has_blackjack():\n earnings = round(earnings * 
1.5)\n\n self._actual_money += earnings\n return earnings", "def check_win_lose(self):\n if self.b.get_player_i() == 7: # player got to the bank\n return 1 # win\n if self.b.get_chaser_i() == self.b.get_player_i(): # chaser catch the player\n return 2 # lose\n return 0 # nothing", "def won(self):\r\n return None", "def balance(self, player):\n print 'hand of %s: %s'%(player.name,player.cards.hand)\n print 'hand of %s: %s'%(self.name,self.cards.hand)\n if player.cards.hand == self.cards.hand:\n return 0\n elif player.cards.hand > self.cards.hand:\n return player.bet_amount*2\n else:\n return -player.bet_amount", "def event_player_wins(self) -> None:\n win_amount = self.user.bet\n print(\"Congratulations, you win:\", win_amount)\n self.user.win_balance(self.user.bet)", "def showWorstGainWon(self) :\n worstGainWon = self.level_history[0].profit\n for level in self.level_history :\n worstGainWon = level.profit if ((worstGainWon > level.profit) and (level.result == 1)) else worstGainWon\n Scenario.messageGetWorstGainWon(worstGainWon)", "def raise_bet(value):\r\n\r\n global total_bet, dealer_bet, in_play, bottom_alert\r\n if value > player.get_cash() or not in_play:\r\n bottom_alert = \"You cannot bet $%i right now.\" % (value)\r\n elif in_play:\r\n player.spend_cash(value)\r\n dealer_bet += value\r\n total_bet += value * 2\r\n bottom_alert = \"\"", "def betting_round(self, method, params):\n self.bet_history += [[]]\n current_bets = [self.starting_player] * len(self.agents)\n \n max_bet = 0\n if method == self.deal_cards:\n max_bet = big_blind\n current_bets[self.starting_player] = small_blind\n current_bets[(self.starting_player + 1) % len(self.agents)] = big_blind\n\n (self.all_in[self.starting_player], bet) = self.normalize_bet(self.chips[self.starting_player], method(self.agents[self.starting_player], params[self.starting_player]), max_bet)\n self.in_game[self.starting_player] = (not self.all_in[self.starting_player])\n current_bets[self.starting_player] = bet\n self.chips[self.starting_player] -= bet\n check = True if bet == 0 else False\n max_bet = max(max_bet, bet)\n self.pot += bet\n self.bet_history[-1] += [bet]\n\n raised_player = self.starting_player\n i = (raised_player + 1) % len(self.agents)\n\n if method == self.deal_cards:\n # raised_player = (self.starting_player + 1) % len(agents)\n check = False\n if bet > max_bet:\n raised_player = i\n max_bet = bet\n\n if bet == 0:\n self.in_game[i] = False\n self.in_game_count -= 1\n\n while (i != raised_player) and (not self.all_in[i]) and (current_bets[i] <= max_bet):\n if self.in_game[i]:\n (self.all_in[i], bet) = self.normalize_bet(self.chips[i], method(self.agents[i], params[i]), max_bet)\n self.in_game[i] = (not self.all_in[i])\n delta_bet = max(0, bet - current_bets[i])\n current_bets[i] = bet\n self.chips[i] -= delta_bet\n self.pot += delta_bet\n self.bet_history[-1] += [bet]\n\n if bet > max_bet:\n check = False\n raised_player = i\n max_bet = bet\n\n if bet == 0 and not check:\n self.in_game[i] = False\n self.in_game_count -= 1\n\n i = (i + 1) % len(self.agents)", "def is_game_lost(self):\n values = [self.hand[i]._lvalue + self.hand[i]._rvalue for i in range(len(self.hand))]\n return not sum_in_list_dyn(values, self.number_point)", "def player_lose(self):\r\n\r\n self.summary = (\" \" * 83) + \"YOU LOSE\"\r\n print(\"Player loses against opponent.\\n\")\r\n self.opp_wins += 1", "def all_in():\r\n\r\n raise_bet(player.get_cash())", "def player_win(self):\n global chips\n global placed_bet\n\n chips = (self.final_bet*2 + chips)\n 
self.victory = True\n placed_bet = False", "def add_bet(self,bet):\n \n #subtract bet from the purse\n self.purse -= bet\n \n #add bet to existing bet\n self.bet += bet\n \n return self.bet" ]
[ "0.69771945", "0.66398084", "0.65543616", "0.6549002", "0.64223045", "0.64099866", "0.6402642", "0.6375774", "0.6362669", "0.6361766", "0.6273635", "0.6248365", "0.6205552", "0.6203644", "0.6169682", "0.61347765", "0.6123261", "0.6119863", "0.61127377", "0.6111896", "0.6092984", "0.6084716", "0.6058699", "0.6053911", "0.6044568", "0.60435927", "0.6041492", "0.60165375", "0.6016231", "0.60098886" ]
0.7257465
0
Checks if creates data collection.
def test_creates_data_collection(self):
    data_collection = create_data_collection(read_config_file("test/data_collection.yaml"))
    self.assertIsInstance(data_collection, DataCollection)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_collection(self):\n pass", "def check_for_new_data(self):\n return", "def _validate_create_data(self, data):\n return", "def data_loaded_check(self):\n return True", "def assertExists(self):\n for db in self._db_tree:\n assert(db in self._datastore.conn.database_names)\n for collection in db['collections']:\n assert(collection['name'] in self._datastore[db['database']].collection_names())", "def test_collections_not_available(self):\n # Acquire SDK client for mutations\n client = self.sdk_client_pool.get_client_for_bucket(self.bucket)\n\n scope_name = self.bucket_util.get_random_name()\n col_name = self.bucket_util.get_random_name()\n\n try:\n client.create_collection(CbServer.default_collection)\n self.log_failure(\"Collection with default name created\")\n except CouchbaseException as e:\n if \"First character must not be _ or %\" not in str(e):\n self.log_failure(\"Create default collection invalid message\")\n\n client.create_scope(scope_name)\n client.create_collection(col_name, scope=CbServer.default_scope)\n client.create_collection(col_name, scope=scope_name)\n\n # Create collection with same name\n try:\n client.create_collection(col_name, scope=scope_name)\n except CollectionExistsException:\n pass\n\n # Create scope under invalid scope\n try:\n client.create_collection(col_name, scope=\"scope_unavailable\")\n except ScopeNotFoundException:\n pass\n\n client.drop_collection(CbServer.default_scope, col_name)\n client.drop_collection(scope_name, col_name)\n\n # Drop already dropped collection\n try:\n client.drop_collection(scope_name, col_name)\n except CollectionNotFoundException:\n pass\n\n self.sleep(10, \"Wait for meta kv refresh\")\n client.select_collection(scope_name, col_name)\n result = client.crud(\"create\", \"key\", \"value\")\n if result[\"status\"] is True:\n self.log_failure(\"Collection create successful\")\n elif SDKException.AmbiguousTimeoutException \\\n not in str(result[\"error\"]):\n self.log_failure(\"Invalid exception during doc create\")\n\n # Drop scope\n client.drop_scope(scope_name)\n\n # Drop scope which was already dropped\n try:\n client.drop_scope(scope_name)\n except ScopeNotFoundException:\n pass\n\n # Release the acquired client\n client.select_collection(CbServer.default_scope,\n CbServer.default_collection)\n self.sdk_client_pool.release_client(client)\n self.validate_test_failure()", "def test_collection_not_exists(self):\n def validate_vb_detail_stats():\n failed = durability_helper.verify_vbucket_details_stats(\n self.bucket, self.cluster_util.get_kv_nodes(self.cluster),\n vbuckets=self.cluster.vbuckets,\n expected_val=verification_dict)\n if failed:\n self.log_failure(\"vBucket_details validation failed\")\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)\n\n sync_write_enabled = DurabilityHelper.is_sync_write_enabled(\n self.bucket_durability_level, self.durability_level)\n num_cols_in_bucket = 0\n for _, scope in self.bucket.scopes.items():\n for _, _ in scope.collections.items():\n num_cols_in_bucket += 1\n\n verification_dict = dict()\n verification_dict[\"ops_create\"] = num_cols_in_bucket * self.num_items\n verification_dict[\"ops_update\"] = 0\n verification_dict[\"ops_delete\"] = 0\n verification_dict[\"rollback_item_count\"] = 0\n verification_dict[\"sync_write_aborted_count\"] = 0\n verification_dict[\"sync_write_committed_count\"] = 0\n\n durability_helper = DurabilityHelper(self.log,\n len(self.cluster.kv_nodes),\n durability=self.durability_level)\n\n drop_scope = 
self.input.param(\"drop_scope\", False)\n if self.scope_name != CbServer.default_scope:\n self.scope_name = self.bucket_util.get_random_name()\n if self.collection_name != CbServer.default_collection:\n self.collection_name = self.bucket_util.get_random_name()\n\n # Doc generator used for mutations\n doc_gen = doc_generator(\"test_col_not_exists\", 0, 10)\n\n # Acquire SDK client for mutations\n client = self.sdk_client_pool.get_client_for_bucket(\n self.bucket,\n self.scope_name,\n self.collection_name)\n\n doc_ttl, _ = \\\n SDKExceptionTests.__get_random_doc_ttl_and_durability_level()\n self.log.info(\"Creating docs with doc_ttl %s into %s:%s:%s\"\n % (doc_ttl,\n self.bucket.name,\n self.scope_name,\n self.collection_name))\n\n retry_reason = SDKException.RetryReason\n while doc_gen.has_next():\n key, value = doc_gen.next()\n result = client.crud(\"create\", key, value,\n exp=doc_ttl,\n durability=self.durability_level,\n timeout=30)\n if self.collection_name == CbServer.default_collection:\n if result[\"status\"] is False:\n self.log_failure(\"Create doc failed for key: %s\" % key)\n else:\n verification_dict[\"ops_create\"] += 1\n if sync_write_enabled:\n verification_dict[\"sync_write_committed_count\"] += 1\n self.bucket.scopes[\n self.scope_name].collections[\n self.collection_name].num_items += 1\n elif result[\"status\"] is True:\n self.log_failure(\"Create didn't fail as expected for key: %s\"\n % key)\n elif (SDKException.AmbiguousTimeoutException\n not in str(result[\"error\"])\n or retry_reason.COLLECTION_NOT_FOUND\n not in str(result[\"error\"])) \\\n and (\n SDKException.RequestCanceledException\n not in str(result[\"error\"])\n or retry_reason.COLLECTION_MAP_REFRESH_IN_PROGRESS\n not in str(result[\"error\"])):\n self.log_failure(\"Invalid exception for key %s: %s\"\n % (key, result[\"error\"]))\n\n validate_vb_detail_stats()\n # Create required scope/collection for successful CRUD operation\n self.create_scope_collection()\n\n # Reset doc_gen itr value for retry purpose\n doc_gen.reset()\n doc_ttl, _ = \\\n SDKExceptionTests.__get_random_doc_ttl_and_durability_level()\n self.log.info(\"Creating docs with doc_ttl %s into %s:%s:%s\"\n % (doc_ttl,\n self.bucket.name,\n self.scope_name,\n self.collection_name))\n op_type = \"create\"\n if self.collection_name == CbServer.default_collection:\n op_type = \"update\"\n\n while doc_gen.has_next():\n key, value = doc_gen.next()\n result = client.crud(op_type, key, value,\n exp=doc_ttl,\n durability=self.durability_level)\n if result[\"status\"] is False:\n self.log_failure(\"Create fail for key %s: %s\"\n % (key, result))\n else:\n if op_type == \"create\":\n verification_dict[\"ops_create\"] += 1\n self.bucket.scopes[\n self.scope_name].collections[\n self.collection_name].num_items += 1\n else:\n verification_dict[\"ops_update\"] += 1\n\n if sync_write_enabled:\n verification_dict[\"sync_write_committed_count\"] += 1\n validate_vb_detail_stats()\n self.validate_test_failure()\n\n if drop_scope:\n self.log.info(\"Dropping scope %s\" % self.scope_name)\n self.bucket_util.drop_scope(self.cluster.master,\n self.bucket,\n self.scope_name)\n else:\n self.log.info(\"Dropping collection %s:%s\" % (self.scope_name,\n self.collection_name))\n self.bucket_util.drop_collection(self.cluster.master,\n self.bucket,\n self.scope_name,\n self.collection_name)\n validate_vb_detail_stats()\n self.validate_test_failure()\n\n # Reset doc_gen itr value for retry purpose\n doc_gen.reset()\n while doc_gen.has_next():\n key, value = doc_gen.next()\n 
result = client.crud(\"create\", key, value,\n exp=doc_ttl,\n durability=self.durability_level)\n if result[\"status\"] is True:\n self.log_failure(\"Create doc succeeded for dropped collection\")\n validate_vb_detail_stats()\n self.validate_test_failure()\n\n # Re-create the dropped collection\n self.create_scope_collection(create_scope=drop_scope)\n\n if self.collection_name != CbServer.default_collection:\n doc_gen.reset()\n while doc_gen.has_next():\n key, value = doc_gen.next()\n result = client.crud(\"create\", key, value,\n exp=doc_ttl,\n durability=self.durability_level)\n if result[\"status\"] is False:\n self.log_failure(\"Create failed after collection recreate \"\n \"for key %s: %s\" % (key, result[\"error\"]))\n else:\n verification_dict[\"ops_create\"] += 1\n if sync_write_enabled:\n verification_dict[\"sync_write_committed_count\"] += 1\n self.bucket.scopes[\n self.scope_name].collections[\n self.collection_name].num_items += 1\n validate_vb_detail_stats()\n\n # Release the acquired client\n self.sdk_client_pool.release_client(client)\n self.validate_test_failure()", "def create_collections(self):\n\n ''''''", "def can_create(self):\n return True", "async def ensure_collection(self, collection):\n if await self.does_collection_exist(collection):\n return\n # Create Solr collection\n try:\n # Collection creation in API v2 doesn't support collection.configName yet.\n # So using old API (/solr/...).\n response = await self.get(\n '/solr/admin/collections',\n params={\n 'action': 'CREATE',\n 'name': collection,\n 'collection.configName': APPSCALE_CONFIG_SET_NAME,\n 'replicationFactor': self._settings.replication_factor,\n 'autoAddReplicas': True,\n 'numShards': self._settings.shards_number,\n 'maxShardsPerNode': self._settings.max_shards_per_node,\n 'waitForFinalState': True,\n }\n )\n logger.info('Successfully created collection {} ({})'\n .format(collection, response.body))\n except SolrError as err:\n if 'collection already exists' in err.error_detail:\n logger.info('Collection {} already exists'.format(collection))\n elif 'Cannot create collection ' in err.error_detail:\n logging.warning('Solr message: {}'.format(err.error_detail))\n logging.warning('Scheduling deletion of collection {}'\n .format(collection))\n ioloop.IOLoop.current().spawn_callback(\n self.delete_collection, collection\n )\n raise\n else:\n logger.warning('Failed to create collection {}'.format(collection))\n raise\n # Update collections cache in background\n ioloop.IOLoop.current().spawn_callback(self.list_collections)", "def test_initial_validation(self) -> None:\n self.collection.validate()", "def has_data(self, *args, **kwargs):\n return False", "def check_for_data():\n if not (os.path.exists(ep.get_test_data_path()) or os.path.exists(ep.get_dbn_weight_path())):\n return False\n return True", "def _has_data(cls):\n return User.objects.count() > 0", "def requiresData():\n return True", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.list_query_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_query_path))\n if not osp.exists(self.list_gallery_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_gallery_path))", "def initDataBase():\n if len(Elems):\n if not addElem2Specie():\n return False\n return True", "def can_insert(data):\n return 
hasattr(data, 'read')", "def can_create(self): # pylint: disable=unused-argument,possibly-unused-variable\n valid_create = True\n for field in value_fields:\n if not field:\n valid_create = False\n return valid_create", "def checkPersistence(self, _, __): # pylint: disable=C0103\n return False", "def create_collection(collection_json):\n print(\"collection_checker.create_collection()\")\n title = collection_json['title']\n collection_id = collection_dao.create_collection(title)\n\n list_book_ids = collection_json['book_ids']\n for book_id in list_book_ids:\n if BookDao.contains(book_id):\n continue\n else:\n abort(404, 'Invalid book ID')\n\n for book_id in list_book_ids:\n book = BookDao.get_book_object(book_id)\n collection_dao.append_collection(collection_id, book)\n return collection_dao.get_collection(collection_id)", "def _handle_exists_collection(name: str, exists: Optional[str], db: Database) -> None:\n\n if exists == \"fail\":\n if db[name].count() > 0:\n raise ValueError(f\"Collection '{name}' already exists.\")\n return\n\n if exists == \"replace\":\n if db[name].count() > 0:\n db[name].drop()\n return\n\n if exists == \"append\":\n return\n\n raise ValueError(f\"'{exists}' is not valid for if_exists\")", "def test_create(self):\n self.assertTrue(Category.objects.exists())", "def has_data(self):\n return len(self.data) > 0", "def _create(self, postData):\n if self.infos is None:\n r = self.connection.session.post(self.getIndexesURL(), params = {\"collection\" : self.collection.name}, data = json.dumps(postData, default=str))\n data = r.json()\n if (r.status_code >= 400) or data['error']:\n raise CreationError(data['errorMessage'], data)\n self.infos = data", "def exist(self):", "def test_guess_and_set_use_collection_no_configuration(self) -> None:\n\n self.checker.guess_and_set_use_collection()\n actual = self.checker.use_collection\n expected = False\n\n self.assertEqual(expected, actual)", "def check_dataset(self):\n url = self.metadata.metadata_url\n document = Document.objects.get(\n metadata=self.metadata,\n document_type=DocumentEnum.METADATA.value,\n is_original=True,\n )\n original_document = document.content\n self.check_document(url, original_document)", "def test_create_collection_with_metadata(self):\n c1 = self.hiarc_collections.create_collection(\n self.hiarc_util.create_collection(self.hiarc_util.get_test_metadata()))\n gc = self.hiarc_collections.get_collection(c1.key)\n\n assert self.hiarc_util.compare_entity_to_entity(c1, gc)\n gc.metadata['startDate'] = dateutil.parser.parse(\n gc.metadata['startDate'])\n self.assertDictEqual(self.hiarc_util.get_test_metadata(), gc.metadata)", "def check_Data(self):\r\n \r\n if self._target_data is None:\r\n self.processData()" ]
[ "0.69012666", "0.66905195", "0.6603279", "0.64278567", "0.63041574", "0.61867595", "0.61684585", "0.6142971", "0.6142184", "0.60509443", "0.60087866", "0.60020673", "0.59476244", "0.5895808", "0.5882238", "0.58364767", "0.5814158", "0.58005524", "0.5793292", "0.57574344", "0.5718903", "0.57113653", "0.57096606", "0.57052505", "0.5702629", "0.5691472", "0.5688347", "0.56856936", "0.5684233", "0.5682289" ]
0.69225866
0
Retrieves the current IAM policy data for listing example ```python import pulumi import pulumi_gcp as gcp policy = gcp.bigqueryanalyticshub.get_listing_iam_policy(project=google_bigquery_analytics_hub_listing["listing"]["project"], location=google_bigquery_analytics_hub_listing["listing"]["location"], data_exchange_id=google_bigquery_analytics_hub_listing["listing"]["data_exchange_id"], listing_id=google_bigquery_analytics_hub_listing["listing"]["listing_id"]) ```
def get_listing_iam_policy(data_exchange_id: Optional[str] = None, listing_id: Optional[str] = None, location: Optional[str] = None, project: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetListingIamPolicyResult: __args__ = dict() __args__['dataExchangeId'] = data_exchange_id __args__['listingId'] = listing_id __args__['location'] = location __args__['project'] = project opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('gcp:bigqueryanalyticshub/getListingIamPolicy:getListingIamPolicy', __args__, opts=opts, typ=GetListingIamPolicyResult).value return AwaitableGetListingIamPolicyResult( data_exchange_id=pulumi.get(__ret__, 'data_exchange_id'), etag=pulumi.get(__ret__, 'etag'), id=pulumi.get(__ret__, 'id'), listing_id=pulumi.get(__ret__, 'listing_id'), location=pulumi.get(__ret__, 'location'), policy_data=pulumi.get(__ret__, 'policy_data'), project=pulumi.get(__ret__, 'project'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_listing_iam_policy_output(data_exchange_id: Optional[pulumi.Input[str]] = None,\n listing_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[Optional[str]]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetListingIamPolicyResult]:\n ...", "def policy_info(self) -> 'outputs.PolicyInfoResponse':\n return pulumi.get(self, \"policy_info\")", "def policy_data(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy_data\")", "def read(self, policy_name):\n path = self.vault.normalize(\"/sys/policies/acl/\" + policy_name)\n address = self.vault.vault_adress + \"/v1\" + path\n logging.debug(\"Reading the policy: %s\", address)\n response = self.vault.requests_request(\n \"GET\", address, headers=self.vault.token_header\n )\n policy_details = response.json()[\"data\"][\"policy\"]\n return policy_details", "def get_iam_policy_output(folder: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetIamPolicyResult]:\n ...", "def policy_info(self) -> pulumi.Input['PolicyInfoArgs']:\n return pulumi.get(self, \"policy_info\")", "def get_policy(client, policy_name):\n response = client.describe_firewall_policy(\n FirewallPolicyName=policy_name,\n )\n return response", "def policy_data(self) -> str:\n return pulumi.get(self, \"policy_data\")", "def policy_data(self) -> str:\n return pulumi.get(self, \"policy_data\")", "def policy_data(self) -> str:\n return pulumi.get(self, \"policy_data\")", "def get_iam_policy(\n self,\n ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_iam_policy\" not in self._stubs:\n self._stubs[\"get_iam_policy\"] = self.grpc_channel.unary_unary(\n \"/google.iam.v1.IAMPolicy/GetIamPolicy\",\n request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,\n response_deserializer=policy_pb2.Policy.FromString,\n )\n return self._stubs[\"get_iam_policy\"]", "def get_iam_policy(\n self,\n ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_iam_policy\" not in self._stubs:\n self._stubs[\"get_iam_policy\"] = self.grpc_channel.unary_unary(\n \"/google.iam.v1.IAMPolicy/GetIamPolicy\",\n request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,\n response_deserializer=policy_pb2.Policy.FromString,\n )\n return self._stubs[\"get_iam_policy\"]", "def policy(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy\")", "def policy(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy\")", "def policy(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy\")", "def get_policy(usage_id):\r\n return policy.get(policy_key(usage_id), {})", "def policy_data(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_data\")", "def get_policy(self):\n return self.agent.get_policy()", "def get_bucket_iam_policy_output(bucket: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBucketIamPolicyResult]:\n ...", "def get_policy(self, *args, **kwargs):\r\n pass", "def policy(self) -> 
pulumi.Output['outputs.ServicePolicy']:\n return pulumi.get(self, \"policy\")", "def get_policy(self):\n try:\n LOG.debug(\"Searching for retention_policy in K2.\")\n return self.client.search(\"retention_policies\",\n name=\"Best_Effort_Retention\").hits[0]\n except Exception as ex:\n LOG.exception(\"Retention policy search failed in K2.\")\n raise KaminarioCinderDriverException(reason=ex)", "def _retrieve(self):\n # Get the projects for which we will retrieve the IAM policies.\n try:\n project_numbers = self.dao.get_project_numbers(\n self.RESOURCE_NAME, self.cycle_timestamp)\n except dao_errors.MySQLError as e:\n raise inventory_errors.LoadDataPipelineError(e)\n\n # Retrieve data from GCP.\n # Not using iterator since we will use the iam_policy_maps twice.\n iam_policy_maps = []\n for project_number in project_numbers:\n iam_policy = self.safe_api_call('get_project_iam_policies',\n self.RESOURCE_NAME,\n project_number)\n if iam_policy:\n iam_policy_map = {'project_number': project_number,\n 'iam_policy': iam_policy}\n iam_policy_maps.append(iam_policy_map)\n return iam_policy_maps", "def get_policy(self):\n\n return", "def get_sp_policy(self, context, id):\n # handling policy method in RPC\n response = self.dns_manager.get_sp_policy(context, id)\n return response", "def _get_policies(self):\n flag, response = self._commcell_object._cvpysdk_object.make_request('GET', self._POLICY)\n\n if flag:\n if response.json() and 'taskDetail' in response.json():\n policies = response.json()['taskDetail']\n policies_dict = {}\n\n for policy in policies:\n temp_name = policy['task']['taskName'].lower()\n temp_id = str(policy['task']['taskId']).lower()\n policies_dict[temp_name] = temp_id\n\n return policies_dict\n else:\n raise SDKException('Response', '102')\n else:\n response_string = self._commcell_object._update_response_(response.text)\n raise SDKException('Response', '101', response_string)", "def list_policies(self):\n return self.con.list_policies(\n Scope='Local'\n )", "def get_policies():\r\n policy = policies.values()\r\n return policy", "def policy(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy\")", "def policy(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy\")" ]
[ "0.7828049", "0.6415348", "0.6217417", "0.6160221", "0.6139532", "0.6135994", "0.61296153", "0.61258733", "0.61258733", "0.61258733", "0.6070894", "0.6070894", "0.6063281", "0.6063281", "0.6063281", "0.5996258", "0.5977472", "0.59605813", "0.59031934", "0.59001213", "0.5898236", "0.589177", "0.58666795", "0.58343625", "0.57984203", "0.57910246", "0.577401", "0.57719254", "0.57711434", "0.57711434" ]
0.7959973
0
Retrieves the current IAM policy data for listing example ```python import pulumi import pulumi_gcp as gcp policy = gcp.bigqueryanalyticshub.get_listing_iam_policy(project=google_bigquery_analytics_hub_listing["listing"]["project"], location=google_bigquery_analytics_hub_listing["listing"]["location"], data_exchange_id=google_bigquery_analytics_hub_listing["listing"]["data_exchange_id"], listing_id=google_bigquery_analytics_hub_listing["listing"]["listing_id"]) ```
def get_listing_iam_policy_output(data_exchange_id: Optional[pulumi.Input[str]] = None, listing_id: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[Optional[str]]] = None, project: Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetListingIamPolicyResult]: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_listing_iam_policy(data_exchange_id: Optional[str] = None,\n listing_id: Optional[str] = None,\n location: Optional[str] = None,\n project: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetListingIamPolicyResult:\n __args__ = dict()\n __args__['dataExchangeId'] = data_exchange_id\n __args__['listingId'] = listing_id\n __args__['location'] = location\n __args__['project'] = project\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('gcp:bigqueryanalyticshub/getListingIamPolicy:getListingIamPolicy', __args__, opts=opts, typ=GetListingIamPolicyResult).value\n\n return AwaitableGetListingIamPolicyResult(\n data_exchange_id=pulumi.get(__ret__, 'data_exchange_id'),\n etag=pulumi.get(__ret__, 'etag'),\n id=pulumi.get(__ret__, 'id'),\n listing_id=pulumi.get(__ret__, 'listing_id'),\n location=pulumi.get(__ret__, 'location'),\n policy_data=pulumi.get(__ret__, 'policy_data'),\n project=pulumi.get(__ret__, 'project'))", "def policy_info(self) -> 'outputs.PolicyInfoResponse':\n return pulumi.get(self, \"policy_info\")", "def policy_data(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy_data\")", "def read(self, policy_name):\n path = self.vault.normalize(\"/sys/policies/acl/\" + policy_name)\n address = self.vault.vault_adress + \"/v1\" + path\n logging.debug(\"Reading the policy: %s\", address)\n response = self.vault.requests_request(\n \"GET\", address, headers=self.vault.token_header\n )\n policy_details = response.json()[\"data\"][\"policy\"]\n return policy_details", "def get_iam_policy_output(folder: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetIamPolicyResult]:\n ...", "def policy_info(self) -> pulumi.Input['PolicyInfoArgs']:\n return pulumi.get(self, \"policy_info\")", "def get_policy(client, policy_name):\n response = client.describe_firewall_policy(\n FirewallPolicyName=policy_name,\n )\n return response", "def policy_data(self) -> str:\n return pulumi.get(self, \"policy_data\")", "def policy_data(self) -> str:\n return pulumi.get(self, \"policy_data\")", "def policy_data(self) -> str:\n return pulumi.get(self, \"policy_data\")", "def get_iam_policy(\n self,\n ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_iam_policy\" not in self._stubs:\n self._stubs[\"get_iam_policy\"] = self.grpc_channel.unary_unary(\n \"/google.iam.v1.IAMPolicy/GetIamPolicy\",\n request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,\n response_deserializer=policy_pb2.Policy.FromString,\n )\n return self._stubs[\"get_iam_policy\"]", "def get_iam_policy(\n self,\n ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_iam_policy\" not in self._stubs:\n self._stubs[\"get_iam_policy\"] = self.grpc_channel.unary_unary(\n \"/google.iam.v1.IAMPolicy/GetIamPolicy\",\n request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,\n response_deserializer=policy_pb2.Policy.FromString,\n )\n return self._stubs[\"get_iam_policy\"]", "def policy(self) -> pulumi.Output[str]:\n return 
pulumi.get(self, \"policy\")", "def policy(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy\")", "def policy(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"policy\")", "def get_policy(usage_id):\r\n return policy.get(policy_key(usage_id), {})", "def policy_data(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy_data\")", "def get_policy(self):\n return self.agent.get_policy()", "def get_bucket_iam_policy_output(bucket: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBucketIamPolicyResult]:\n ...", "def get_policy(self, *args, **kwargs):\r\n pass", "def policy(self) -> pulumi.Output['outputs.ServicePolicy']:\n return pulumi.get(self, \"policy\")", "def get_policy(self):\n try:\n LOG.debug(\"Searching for retention_policy in K2.\")\n return self.client.search(\"retention_policies\",\n name=\"Best_Effort_Retention\").hits[0]\n except Exception as ex:\n LOG.exception(\"Retention policy search failed in K2.\")\n raise KaminarioCinderDriverException(reason=ex)", "def _retrieve(self):\n # Get the projects for which we will retrieve the IAM policies.\n try:\n project_numbers = self.dao.get_project_numbers(\n self.RESOURCE_NAME, self.cycle_timestamp)\n except dao_errors.MySQLError as e:\n raise inventory_errors.LoadDataPipelineError(e)\n\n # Retrieve data from GCP.\n # Not using iterator since we will use the iam_policy_maps twice.\n iam_policy_maps = []\n for project_number in project_numbers:\n iam_policy = self.safe_api_call('get_project_iam_policies',\n self.RESOURCE_NAME,\n project_number)\n if iam_policy:\n iam_policy_map = {'project_number': project_number,\n 'iam_policy': iam_policy}\n iam_policy_maps.append(iam_policy_map)\n return iam_policy_maps", "def get_policy(self):\n\n return", "def get_sp_policy(self, context, id):\n # handling policy method in RPC\n response = self.dns_manager.get_sp_policy(context, id)\n return response", "def _get_policies(self):\n flag, response = self._commcell_object._cvpysdk_object.make_request('GET', self._POLICY)\n\n if flag:\n if response.json() and 'taskDetail' in response.json():\n policies = response.json()['taskDetail']\n policies_dict = {}\n\n for policy in policies:\n temp_name = policy['task']['taskName'].lower()\n temp_id = str(policy['task']['taskId']).lower()\n policies_dict[temp_name] = temp_id\n\n return policies_dict\n else:\n raise SDKException('Response', '102')\n else:\n response_string = self._commcell_object._update_response_(response.text)\n raise SDKException('Response', '101', response_string)", "def list_policies(self):\n return self.con.list_policies(\n Scope='Local'\n )", "def get_policies():\r\n policy = policies.values()\r\n return policy", "def policy(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy\")", "def policy(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"policy\")" ]
[ "0.7957954", "0.6418271", "0.62199444", "0.6163126", "0.61404836", "0.6138383", "0.6131887", "0.6128473", "0.6128473", "0.6128473", "0.6072734", "0.6072734", "0.60656893", "0.60656893", "0.60656893", "0.5997886", "0.59795076", "0.5963106", "0.59044737", "0.59022045", "0.5900357", "0.58940774", "0.5868748", "0.583658", "0.580018", "0.57945734", "0.57767355", "0.57758206", "0.5772992", "0.5772992" ]
0.7827125
1
initialize with location of my articles and outdir
def __init__(self, workdir = "archived_links", outdir = "tmp"): self.workdir = workdir self.outdir = outdir self.bigdf = "" self.ArticlesLoaded = False self.clf = ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init(self) -> None:\n logger.debug(f\"[nbtutorial]: Outdir is: {self.outdir}\")", "def __init__(self, texts_path, slug, metadata):\n self.texts_path = os.path.abspath(texts_path)\n\n self.slug = slug\n\n self.metadata = metadata", "def __init__(self, output_dir: str):\n self.output_dir = output_dir\n makedirs(self.output_dir, exist_ok=True)", "def __init__(self, out_dir):\n self.env = Environment(loader=PackageLoader(\"emu\", \"templates\"))\n self.dest = out_dir", "def __init__(self, directory, file_name, logger, cache_path):\n image_storage = ROOT_DIR / cache_path / \"images\"\n image_storage.mkdir(exist_ok=True)\n self.img_storage = image_storage\n self.dir = Path(directory).absolute()\n self.full_path_to_file = self.dir / file_name\n self.logger = logger\n self.template = Path(__file__).resolve().parent / \"templates\" / \"news.html\"", "def __init__(self, base_path=\"\", structure_dir=\"structure\", content_path=\"\", structure_filename=\"\"):\n self.base_path = base_path\n self.structure_dir = structure_dir\n self.content_path = content_path\n self.structure_filename = structure_filename\n self.setup_md_to_html_converter()", "def __init__(self, stylesDir, outputDir, imagesDir, scriptsDir, templatesDir):\n self.html = \"\"\n self.style = None\n self.name = None\n self.stylesDir = Path(stylesDir)\n self.outputDir = Path(outputDir)\n self.imagesDir = Path(imagesDir)\n self.scriptsDir = Path(scriptsDir)\n self.templatesDir = Path(templatesDir)\n if not self.outputDir.exists(): \n self.outputDir.mkdir()", "def __init__(self, outBaseDir = \"../data/\"):\n self.baseUrl = \"http://guvitimed.jhuapl.edu/\"\n self.outBaseDir = outBaseDir", "def __init__(self, config):\n\n self.config = config\n self.dir_helper = DirectoryHelper(config)\n\n self.dir_helper.prepare_working_directory()\n print '[i] working directory prepeared'\n\n self.temp_dir = self.dir_helper.temp_dir\n self.source_tex_file = self.dir_helper.publication_path + self.config.TEX_FILE\n self.publication_path = self.dir_helper.publication_path\n print '[i] main tex file: \"{}\"'.format(self.source_tex_file)\n\n self.dest_tex_file = self.temp_dir + self.config.TEX_FILE", "def __init__(self):\n root_dir = os.path.dirname(os.path.abspath(__file__))\n self.base_dir = root_dir + \"/data/index/\" # base directory location for all indexes", "def initialize_options(self):\n self.input_dir = getcwd()\n self.output_dir = path.join(getcwd(), 'dependency', 'static', 'apidocs')", "def __init__(self, output_dir):\n self.output_dir = os.path.abspath(output_dir)\n # Create the file if it doesn't already exist\n os.makedirs(self.output_dir, exist_ok=True)\n self.f = None\n self.data = None\n self.L = None", "def init(self):\n\n self.checkDirectory(self.output_dir,\"output\")\n self.checkDirectory(self.working_dir,\"working\")", "def __init__(self, dirname):\n self._dirname = dirname", "def __init__(self, *args):\n super().__init__(*args)\n\n self.output_dir = os.path.join(self.config.results_dir, \"cowinner\")\n self.merged_dir = os.path.join(self.output_dir, \"merged\")", "def __init__(self, dirname, sites, news_types):\n self.dirname = dirname\n self.sites = []\n self.news_types = []\n if type(sites) == str:\n self.sites.append(sites)\n if type(news_types) == str:\n self.news_types.append(news_types)\n else:\n self.sites = sites\n self.news_types = news_types\n self.list_news_path = list(self.get_list_news_files())\n # self.list_news_path = Parallel(n_jobs=-1)(delayed(list(self.get_list_news_files())))\n # self.feature_type = feature_type", 
"def __init__(self):\n self.filepath = os.path.dirname(__file__)\n self.filepath = os.path.join(self.filepath, \"Datenbank\")\n self.filepath_render_database = os.path.join(self.filepath, self.render_database)\n self.filepath_object_database = os.path.join(self.filepath, self.object_database)\n self.filepath_output_database = os.path.join(self.filepath, self.output_database)\n self.create_database()", "def __init__(self):\n self.destFolderSSD = 'C:/earthquake/'\n self.destFolderHDD = 'F:/myProjects/cmps242/earthquake/data/'\n self.destFolder = self.destFolderSSD\n\n pass", "def __init__(self):\n self.cur_path = os.path.dirname(__file__)", "def __init__(self, dir, public = False):\n\t\tself.dir = dir", "def init(path):\n\n def create_unexisted_dir(directory, element):\n \"\"\"\n create_unexisted_dir(directory, element)-> create unexisted directory.\n\n This function create directory if there are unexisted directory in the\n path.\n\n Required argument:\n directory -- a full path of directory.\n element -- a directory's name.\n \"\"\"\n directory += \"/\" + element\n if get_file_type(directory) == 0:\n mkdir(directory)\n return directory\n\n path = path.split(\"/\")\n path[0] = \"/\"\n directory = \"\"\n for element in path[1:]:\n directory = create_unexisted_dir(directory, element)\n folders = ['.lgit', '.lgit/objects', '.lgit/commits', '.lgit/snapshots']\n\n return ([mkdir(directory + \"/\" + folder) for folder in folders],\n add_content_file(directory + \"/.lgit/index\"),\n add_content_file(directory + \"/.lgit/config\",\n environ['LOGNAME'] + '\\n'))", "def __init__(self, data_dir, base_url):\n self.data_dir = data_dir\n self.base_url = base_url", "def __init__(self, doc_root):\n self.doc_root = doc_root\n site = yaml_load('data/site.yaml')['site']\n self.site = site", "def _initialize_directory(self):\n self._generate_settings()\n if os.path.exists(self.target_path):\n sys.exit(\"WARNING: %s already exists, exiting\" % self.target_path)\n self._print_initialize_message()\n self._create_directories()\n self._create_general_config_file()\n self._create_default_pipeline_config_file()\n self._create_filelist()\n print", "def __init__(self): \n self.path=os.getcwd()+'/'", "def __init__(self, projectDir, configFile=None, outfp=sys.stdout):\n super(WorldfileMultiple, self).__init__(projectDir, configFile, outfp)", "def __init__(self, dirs):\n self.dirs = dirs\n self.fps = self.get_filepaths()", "def setUp(self):\n self._wiki = None\n self._app = None\n self.rootdir = mkdtemp()\n self.create_file(u'config.py', self.config_content)", "def __init__(self):\n\n self.this_dir = os.path.dirname(os.path.abspath(__file__))\n self.db_path = self._discover_in_dir(self.this_dir)", "def __init__(self, config):\n self.config = config\n\n self.data_dir = self.config['data_dir']\n if not os.path.exists(self.data_dir):\n raise FileNotFoundError('data directory not found {}.'.format(self.data_dir))\n self.save_dir = self.config['resparams']['save_dir']\n if not os.path.exists(self.save_dir):\n os.makedirs(self.save_dir)" ]
[ "0.6814485", "0.63777536", "0.63513637", "0.6347799", "0.631205", "0.62790346", "0.62379956", "0.6223603", "0.61957836", "0.6175018", "0.6114833", "0.6083057", "0.6074342", "0.6068505", "0.6055408", "0.6051235", "0.60070103", "0.59881234", "0.5972294", "0.59503233", "0.59262776", "0.591193", "0.59054554", "0.59036005", "0.5885768", "0.58345497", "0.58266705", "0.5813106", "0.580141", "0.5799977" ]
0.66417944
1
Test that view renders data from model
def test_the_view_render_Contact_instance(self): my_info = self.response.context_data['info'] self.assertIsInstance(my_info, Contact) model_instance = Contact.objects.first() self.assertIn(model_instance.name, self.response.content) self.assertIn(model_instance.surname, self.response.content) self.assertIn(model_instance.email, self.response.content) self.assertIn(model_instance.bio, self.response.content) self.assertIn(model_instance.skype, self.response.content) self.assertIn(model_instance.contacts, self.response.content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_context_data(self):\n self.view.object = self.obj\n context = self.view.get_context_data()\n self.assertIn(\"code\", context)\n self.assertIn(\"edit\", context)\n self.assertTrue(context[\"edit\"])", "def test():\n return render_template(\n 'test.html',\n title='Test',\n time=datetime.now(),\n message='test your model',\n )", "def test_get(self):\n response = self._get()\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, self.template_name)\n self.assertTrue('object' in response.context)\n self.assertEquals(response.context['object'], self.obj)\n self.assertEquals(self.model.objects.count(), 1)", "def test_get(self):\n response = self._get()\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, self.template_name)\n self.assertTrue('form' in response.context)\n self.assertFalse(response.context['form'].is_bound)\n self.assertEquals(self.model.objects.count(), 0)", "def test_TodoList_views_get(self):\n # testing the urls\n client = Client()\n response = client.get(reverse('TodoList'))\n self.assertEqual(response.status_code,200)\n #self.assertTemplateUsed(response,'webapp/todolistmodel.html')", "def test_view_displays_all(self):\n set_up_one_user(self, 1, 0)\n login = self.client.login(username='test', password='2HJ1vRV0Z&3iD')\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(str(response.context['user']), 'test')\n self.assertEqual(len(response.context['data']), 1)", "def view(self):", "def test_person_loads_and_renders(self):\n response = self.client.get(reverse('main'))\n request_data = RequestData.objects.latest('pub_date')\n try:\n template = '{% load edit_link %}{% edit_link object %}'\n context = {'object': request_data}\n rendered = Template(template).render(Context(context))\n except:\n self.fail(\"raised exception while template rendering\")\n self.assertEquals(rendered, '<a href=\"/admin/hello/requestdata/%s/\">(admin)</a>' % str(request_data.pk))", "def test_get_view_html(self):\n response = self.setup_get_html_test('/api/view/1')\n self.assertEqual(response.status_code, 200)", "def test_visualisations_get_visualisation_render_data(self):\n pass", "def test_view(self):\n self.assertEqual(status.HTTP_200_OK, self.response.status_code)", "def test_get_object(self, detail_view, employee_model):\n\n employee = Mock()\n employee_model.objects.get.return_value = Mock()\n detail_view.get_object.return_value = employee\n\n emp = detail_view.get_object(1)\n self.assertEqual(employee, emp)", "def test_context_data(self):\n factory = RequestFactory()\n request = factory.get('/movies/')\n response = MoviesListView.as_view()(request)\n self.assertIsNotNone(response.context_data['movies'])", "def test_view():\n\treturn \"this is a response\"", "def testGetModelsData(self):\n models = models_logic._getModelsData()\n self.assertTrue(models)", "def test_get_context_data(self):\n # First test it with no data at all\n response = self.client.get(self.url)\n for key in self.context_keys:\n self.assertIn(key, response.context, f\"'{key}' should be in context of DetailedView\")\n\n # Now test with an Ailment but no Employees\n AilmentFactory()\n response = self.client.get(self.url)\n for key in self.context_keys:\n self.assertIn(key, response.context, f\"'{key}' should be in context of DetailedView\")\n\n # Test with a birthplace to see if it appears in top birthplaces\n place = PlaceFactory()\n EmployeeFactory(place_of_birth=place)\n response = 
self.client.get(self.url)\n self.assertIn(str(place), response.context, \"Top place of birth should be in context of DetailedView\")", "def test_render_data(self):\n url = '{}?is_bigcourse=0'.format(reverse(\n 'completion_data_view', kwargs={\n 'course_id': self.course.id}))\n self.response = self.staff_client.get(url)\n data = json.loads(self.response.content.decode())\n self.assertEqual(data['data'],[[False]])\n\n self.response = self.staff_client.get(url)\n self.assertEqual(self.response.status_code, 200)\n data = json.loads(self.response.content.decode())\n self.assertEqual(len(data['data']), 12)\n self.assertEqual(\n data['data'][-1], ['[email protected]', 'student', '', '', '0/1', '0/1', 'No'])", "def test_get(self):\n response = self._get()\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, self.template_name)\n self.assertTrue('object' in response.context)\n self.assertEquals(response.context['object'], self.obj)\n self.assertTrue('form' in response.context)\n self.assertFalse(response.context['form'].is_bound)\n self.assertEquals(response.context['form'].instance, self.obj)\n self._assert_no_change()", "def test_home_view_two_object(self):\n self.create_obj()\n UserData.objects.create(\n first_name=\"aaaaaa\",\n last_name=\"aaaaa\",\n date_of_birth='1998-02-23',\n bio=\"aaa\",\n email=\"[email protected]\",\n jabber=\"aaaaa\",\n skype=\"aaaaa\",\n other_contacts=\"aaaaa\"\n )\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(UserData.objects.first(), response.context['data'])", "def test_basic(self):\n #* using request factory is independent of existence of the path and calls the view as a function.\n #* it may well be get('nonesense/') unless this argument is needed in the view function\n #* it calls the view as a regular function and tests its effects\n request = self.factory.get('/solos/1/')\n # since view is class based\n response =SoloDetailView.as_view()(\n request,\n pk=self.drum_solo.pk\n )\n\n self.assertEqual(response.status_code, 200) # check that the drum is in database\n self.assertEqual(response.context_data['solo'].artist, 'Rich') # check that this drummers' name is correct\n # check that the template used for this detail is correct\n with self.assertTemplateUsed('solos/solo_detail.html'):\n # use render since we use the request factory\n response.render()", "def test_render_student_view(self):\r\n html = self.module_system.render(self.vertical, 'student_view', {}).content\r\n self.assertIn(self.test_html_1, html)\r\n self.assertIn(self.test_html_2, html)", "def testViewData(self):\n try:\n entryD = self.__mU.doImport(self.__instanceSavePath, fmt=\"pickle\")\n for entryId in entryD:\n for entityId, eD in entryD[entryId][\"selected_polymer_entities\"].items():\n\n analD = eD[\"anal_instances\"] if \"anal_instances\" in eD else {}\n for asymId, aD in analD.items():\n logger.info(\"entryId %s entityId %s asymId %s analD: %r\", entryId, entityId, asymId, aD)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_detail_views(self):\n obj = self.create_post(title='Some new title for new test')\n response = self.client.get(obj.get_absolute_url())\n # TODO You need to check that the description and title are present in the html returned from the server Dilshad\n self.assertEqual(response.status_code, 200)", "def test_graphs_view_html(self):\n self.assertTemplateUsed(self.response, 'charts/graphs.html')", "def 
test_view_uses_correct_template(self):\n factory = RequestFactory()\n request = factory.get('/movies/')\n response = MoviesListView.as_view()(request)\n self.assertEqual(response.template_name, 'movies.html')", "def test_get_context_data(self):\n\n view = HomeView()\n context = view.get_context_data()\n self.assertEqual(context.get('title'), 'Letterpress', \"'Letterpress' should be in context\")\n self.assertEqual(context.get('nbar'), 'home', \"nbar: home should be in context\")", "def test_get_item(self):\n self.assertEqual(self.expected_described_model, self.mapped_model[\"described_model_type\"])", "def testRenderToResponse(self):\n self.datagrid.render_listview_to_response()", "def test_model_endpoint(request, client, fixture_name, detail_attr, comparison_attr):\n instance = request.getfixturevalue(fixture_name)\n model = instance.__class__\n resource_name = model.__name__.lower()\n\n # test list endpoint\n response = client.get(api_reverse(\"%s-list\" % resource_name))\n check_response(response)\n results = response.json()['results']\n assert results\n assert len(results) == model.objects.count()\n\n # test detail endpoint\n response = client.get(api_reverse(\"%s-detail\" % resource_name, args=[getattr(instance, detail_attr)]))\n check_response(response)\n results = response.json()\n assert results[comparison_attr] == getattr(instance, comparison_attr)", "def test_get_context_data(self):\n i = IndexView()\n self.assertIsInstance(i, IndexView, \"Should be an instance of IndexView\")\n context = i.get_context_data()\n\n self.assertIsNotNone(context, \"Context should not be None\")\n self.assertIsNotNone(context['sponsors'], \"Sponsors was None\")\n self.assertIsNotNone(context['communities'], \"Communities was None\")\n self.assertIsNotNone(context['news_items'], \"News items was none\")\n self.assertIsNotNone(context['images'], \"Images was none\")\n self.assertIsNotNone(context['journal_entries'], \"Journal entries was none\")" ]
[ "0.6840645", "0.66080403", "0.6523233", "0.64715046", "0.6451231", "0.6444405", "0.64431345", "0.6432887", "0.6404577", "0.64019704", "0.6368436", "0.63485414", "0.634387", "0.63366467", "0.63298786", "0.63038164", "0.62876505", "0.62627774", "0.62624705", "0.6258345", "0.6240217", "0.6209774", "0.61798334", "0.6178892", "0.6178617", "0.6162072", "0.6159339", "0.6152802", "0.6152444", "0.6139211" ]
0.7052593
0
Test for form date field validation
def test_form_date_validation(self): form = My_add_data_form(data={'date': date(1800, 05, 03)}) self.assertEqual(form.errors['date'], ['You already dead now']) form = My_add_data_form(data={'date': date(2200, 05, 03)}) self.assertEqual(form.errors['date'], ['You not born yet'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_date_field():", "def validate(self, test_data):\n if not isinstance(test_data, datetime.date):\n raise ValidationError('Invalid type/value.', 'datetime.date',\n type(test_data))", "def check_date_format(form, field):\n try:\n field.data = datetime.datetime.strptime(field.data, '%d/%m/%Y')\n field.data = field.data.strftime('%d/%m/%Y')\n except ValueError:\n raise ValidationError('Invalid date format.')", "def validate_date(value):\n if date_regex.fullmatch(value):\n return True\n else:\n return False", "def test_date_invalid_data(self):\n try:\n SelectedMenuForm()\n except TypeError as error:\n self.assertEqual(type(error), TypeError)", "def clean_date(self):\n input_day = self.cleaned_data.get('day')\n input_date = self.cleaned_data.get('date')\n if input_date < datetime.date.today():\n raise forms.ValidationError(\"Can not create a lesson in the past.\")\n elif input_date.strftime(\"%A\").lower() != input_day:\n raise forms.ValidationError(input_date.strftime(\"%d-%m-%Y\")+\" does not fall on a \"+input_day.title()+\".\")\n return input_date", "def test_date_valid_data(self):\n selected_menu_form = SelectedMenuForm(self.possible_meals_choices)\n\n self.assertTrue(selected_menu_form)", "def _validate(year, month, day):\n if day is not None and month is None:\n raise ValueError(\"Day without month\")\n if day is None:\n day = 1\n if month is None:\n month = 1\n if year is None:\n year = 2000\n # actual validation happens here\n datetime.date(year, month, day)", "def validate_date_field(self, field: dict, value: str):\n if field.get(\"required\") and value.strip() == \"\":\n return f\"{field.get('label')} is required!\"\n\n try:\n datetime.datetime.strptime(value, self.config.get(\"date_format\"))\n except ValueError:\n return f\"{field.get('label')} should be a date with the format provided in \" \\\n f\"config {self.config.get('date_format')}\"\n\n return \"\"", "def test_validate_date_entry_returns_correct_ValueError(self):\n date_string = \"2018-21-01\"\n date_format = settings.DATE_FORMATS['iso 8601']\n\n error_text = \"{} is not valid in format {}\".format(\n date_string,\n date_format['UI format']\n )\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (error_text, None)\n\n self.assertEqual(result, expected_result)", "def test_validate_date_entry_returns_correct_outOfBounds_if_future(self):\n date_string = \"3018-01-21\"\n date_format = settings.DATE_FORMATS['iso 8601']\n\n error_text = \"dates in the future are not permitted\"\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (error_text, None)\n\n self.assertEqual(result, expected_result)", "def test_dob_field(self):\n dob_field = self.record.find('field[@name=\\'dob\\']')\n dob_date = datetime.strptime(dob_field.text, '%Y-%m-%d %H:%M:%S')\n self.assertTrue(isinstance(dob_date, datetime), 'Date isn\\'t correct')", "def test_datetime_field():", "def clean_date(self):\r\n from datetime import datetime\r\n\r\n date = self.cleaned_data[\"date\"]\r\n if date < datetime.now():\r\n self.add_error(\"date\", \"You cannot add a date for the past.\")\r\n return date", "def test_validate_date_entry_returns_correct_iso_date(self):\n date_string = \"2018-01-21\"\n date_format = settings.DATE_FORMATS['iso 8601']\n date_object = datetime.datetime.strptime(\n date_string,\n date_format['datetime format'])\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (None, date_object)\n\n self.assertEqual(result, 
expected_result)", "def _validate(self, date, format):\n try:\n datetime.datetime.strptime(date, format) # format = \"%m/%d/%Y\"\n return True\n except ValueError:\n return False", "def _validate(self, date, format):\n try:\n datetime.datetime.strptime(date, format) # format = \"%m/%d/%Y\"\n return True\n except ValueError:\n return False", "def test_hotshot_check_date_error(self):\n try:\n check_date('N/A', 'N/A', '20.11.2015')\n except ValueError as error:\n self.assertEqual(type(error), ValueError)", "def test_charter_form_end_date(self):\n\n data = {\n \"language\": 1,\n \"countries\": 1,\n \"start_date_month\": \"1\",\n \"start_date_day\": \"1\",\n \"start_date_year\": \"2015\",\n \"end_date_month\": \"1\",\n \"end_date_day\": \"1\",\n \"end_date_year\": \"2015\",\n \"number\": \"12345\",\n \"lead_dept\": 1,\n \"contact_person\": \"Vicky Leong\",\n \"created_by\": \"Vicky Leong\"\n }\n charter_form = CharterForm(data=data)\n result = charter_form.is_valid()\n self.assertFalse(result)", "def form_DateDifferentEmpty(request):\n schema = schemaish.Structure()\n schema.add('myDateField', schemaish.Date())\n form = formish.Form(schema, 'form')\n form['myDateField'].widget = formish.Input(empty=datetime.date.today())\n return form", "def test_validate_date_entry_returns_correct_outOfBounds_if_past(self):\n date_string = \"1899-12-12\"\n date_format = settings.DATE_FORMATS['iso 8601']\n\n error_string = \"dates before {} are not permitted\"\n date_fmt = \"%Y-%m-%d\"\n earliest_date = self.menu.OPTIONS['earliest allowed date']\n earliest_date_string = earliest_date.strftime(date_fmt)\n\n error_text = error_string.format(earliest_date_string)\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (error_text, None)\n\n self.assertEqual(result, expected_result)", "def valid_date(date):\n import datetime\n try:\n datetime.datetime.strptime(date, '%Y-%m-%d')\n except ValueError:\n raise ValueError(\"Incorrect data format, should be YYYY-MM-DD\")", "def test_patient_one_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, datetime.date(2000, 2, 13))", "def test_invalid_date(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 1233, 'date_of_expense': 'fgjfj'})\n self.assertEqual(res.status_code, 400)\n results = json.loads(res.data)\n self.assertEqual(results['message'], 'The date fgjfj does not match the format DD-MM-YYYY')", "def test_invalid_dob(self):\n file = SimpleUploadedFile(\n \"test.csv\",\n b\"msisdn,facility code,id type,messaging consent,edd year,edd month,\"\n b\"edd day,dob year,dob month,dob day,baby dob year,baby dob month,\"\n b\"baby dob day\\n\"\n b\"+27820001001,123456,none,true,2021,2,3,1990,2,29,,,\\n\",\n )\n form = MomConnectImportForm(\n data={\"source\": \"MomConnect Import\"}, files={\"file\": file}\n )\n instance = form.save()\n self.assertEqual(instance.status, MomConnectImport.Status.ERROR)\n [error] = instance.errors.all()\n self.assertEqual(\n error.error,\n \"Failed validation: Invalid date of birth date, day is out of range for \"\n \"month\",\n )", "def test_date_entry_returns_correct_value_for_date(self):\n date_string = \"2018-01-21\"\n date_format = settings.DATE_FORMATS['iso 8601']\n self.menu.OPTIONS['date format'] = date_format\n\n user_input = [date_string]\n\n with patch('builtins.input', 
side_effect=user_input):\n result = self.menu.date_entry()\n\n expected_result = (\n None,\n datetime.datetime.strptime(date_string,\n date_format['datetime format'])\n )\n\n self.assertEqual(result, expected_result)", "def dateFieldValidator(field):\n if not (field[\"type\"] == \"datetime\" or field[\"type\"] == \"date\"):\n raise ValueError(\"DateFieldValidator error: field type \" + field[\"type\"])\n if \"format\" in field:\n format_string = field[\"format\"]\n # The following is borrowed from datapackage.py...\n\n # Order of the replacements is important since month and minutes\n # can be denoted in a similar fashion\n replacement_order = [('hh', '%m'), (':mm', ':%M'), ('ss', '%S'),\n ('yyyy', '%Y'), ('yy', '%y'), ('mm', '%m'),\n ('dd', '%d')]\n\n # For each replacement we substitute (and ignore the case)\n for (old, new) in replacement_order:\n format_string = re.sub(\"(?i)%s\" % old, new, format_string)\n if field[\"type\"] == \"datetime\":\n return lambda x: datetime.datetime.strptime(x, format_string)\n else:\n return lambda x: datetime.datetime.strptime(x, format_string).date()\n else:\n if field[\"type\"] == \"datetime\":\n return lambda x: datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S%Z')\n else:\n return lambda x: datetime.datetime.strptime(x, '%Y-%m-%d').date()", "def form_Date(request):\n schema = schemaish.Structure()\n schema.add('myDateField', schemaish.Date())\n form = formish.Form(schema, 'form')\n return form", "def test_sell_ticket_valid_date(self, *_):\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"[email protected]\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket name\n self.type('#name_sell', \"ticketname\")\n self.type(\"#price_sell\", \"10\")\n self.type(\"#quantity_sell\", \"1\")\n self.type(\"#exp_date_sell\", \"09212020\")\n self.click('#submit-sell')\n # Assert that the valid error message is shown.\n self.assert_text(\"Invalid ticket date\", \"#message\")", "def validate_date(self, date_to_validate):\r\n try:\r\n if date_to_validate != datetime.datetime.strptime(date_to_validate, \"%Y-%m-%d\").strftime('%Y-%m-%d'):\r\n raise ValueError\r\n return True\r\n except ValueError:\r\n return False" ]
[ "0.79683304", "0.7457315", "0.74449104", "0.73319423", "0.73196423", "0.7264576", "0.7140698", "0.7116886", "0.70986587", "0.70432013", "0.7036628", "0.6971", "0.69465804", "0.69303936", "0.69096756", "0.69046116", "0.69046116", "0.68596894", "0.6810483", "0.67967176", "0.6786685", "0.67754626", "0.67676437", "0.67418945", "0.67231244", "0.6709284", "0.67066354", "0.67024297", "0.6697615", "0.667217" ]
0.84299916
0
Test that view return errors in Json format
def test_that_view_return_errors_in_json(self): self.client.login(username='admin', password='admin') url = reverse("to_form", args=str(self.my_instance.id)) response = self.client.post(url, data={'name': 'Oleg'}, format='json') self.assertEqual(response.status_code, 200) for c in json.loads(response.content): self.assertEqual(['This field is required.'], json.loads(response.content)[c])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_invalid_json(self):\r\n data = {\"Testing invalid\"}\r\n response = self.client.post(\r\n reverse('verify_student_results_callback'),\r\n data=data,\r\n content_type='application/json',\r\n HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB: testing',\r\n HTTP_DATE='testdate'\r\n )\r\n self.assertIn('Invalid JSON', response.content)\r\n self.assertEqual(response.status_code, 400)", "def test_renderer_works_correctly_with_error_detail(self):\n rendered = self.renderer.render(\n data=ErrorDetail(\"Test\", code=status.HTTP_400_BAD_REQUEST),\n media_type=\"application/json\",\n renderer_context={},\n )\n self.assertEqual(rendered.decode(), '\"Test\"')", "def test_invalid_JSON_returns_error(self):\n\n response = self.client.post(\n reverse('transcript:record_telegram'),\n content_type='application/json',\n data='''{\"something\":''')\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response.content, b\"Could not parse JSON\")\n self.assertEqual(Message.objects.count(), 0)", "def test_view(self):\n self.assertEqual(status.HTTP_200_OK, self.response.status_code)", "def json_error(message):\n return json_response(isError=True, message=message)", "def test_get_json_data_throw_not_found_error(self):\n response = self.app.test_client().get('/test/get_json_data/99999')\n self.assertEqual(response.json['status'], 'failure')\n self.assertEqual(response.json['error'], 'Test not found')", "def test_validation_error_standard(self):\n\n drf_validation_error = serializers.ValidationError({\n 'test_field': [\n ErrorDetail('test_message', 'test_code', )\n ]})\n\n expected_error_view = {\n 'type': 'test_code',\n 'title': 'test_message',\n 'status': drf_validation_error.status_code,\n 'detail': 'test_message',\n 'instance': self.context['request'].path,\n }\n\n response = custom_exception_handler(\n drf_validation_error,\n self.context,\n )\n\n assert response is not None\n assert isinstance(response, Response)\n assert response.data['test_field'] == [expected_error_view]", "def _err_response(msg):\r\n return HttpResponse(json.dumps({'success': False, 'error': msg}),\r\n mimetype=\"application/json\")", "def json_err(msg: str) -> Response:\n return jsonify({\"success\": False, \"error\": msg})", "def on_response_validation_error(err):\n return jsonify(message='Bad response'), 500", "def test_response_json(self):\n response = self.client.search()\n self.assertTrue(isinstance(response.json, dict))\n\n\n # with invalid json\n from rubber import settings\n settings.RUBBER_MOCK_HTTP_RESPONSE = \"\"\";;;\"\"\"\n \n response = self.client.search()\n self.assertIsNone(response.json)", "def test_invalid_data_types(self):\n response=self.check_invalid_data_type()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],\"Require int or float type\")\n self.assertEqual(response.status_code, 200)", "def test_jsonIsCorrect(self):\n\n elements = [\n 'LaunchSite',\n 'OperationalStatus',\n 'OrbitalStatus',\n 'Source',\n 'CatalogEntry',\n 'TLE',\n 'DataSource',\n ]\n\n for element in elements:\n # Dynamicly instanciate the view class\n request = self.factory.get('/api/v1/%s/?format=json' % element.lower())\n view_class = globals()['%sViewSet' % element]\n view = view_class.as_view({'get': 'list'})\n response = view(request).render()\n\n self.assertTrue(is_correct_json(response.content.decode('utf8')))", "def test_error_post(self):\n Parameters = Parameters()\n response = self.client.open(\n '/error',\n method='POST',\n data=json.dumps(Parameters),\n 
content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def response(self):\r\n error = unicode(self)\r\n return HttpResponseBadRequest(json.dumps({'error': error}))", "def test_graphs_view_status_code(self):\n self.assertEqual(self.response.status_code, 200)", "def test_login_view_fail(self):\n url = reverse('xds_api:login')\n\n response = self.client.post(url, self.userDict_login_fail)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_validation_error_json():\n error = ValidationError(\n type=\"Syntax Error\",\n data={\"data\": [1, 2, 3]},\n )\n\n assert ValidationError(**json.loads(error.json())) == error", "def error_response(error_text):\n return Response(json.dumps({'error' : error_text}), status=404, mimetype='application/json')", "def test_cli_format_error_handler_broken_json():\n resp = MagicMock()\n resp.json.side_effect = ValueError(\"\")\n resp.text = \"Not JSON\"\n output = format_utils.cli_format_error_handler(resp)\n assert 'Error: Unable to decode response. Value: Not JSON' in output", "def display_error():\n return flask.jsonify(flask.request.args)", "def testExtendedErrorMessage(self):\n\n json_message = current.xml.json_message\n\n msg = json_message(False, 405, message=\"Test\")\n msg = json.loads(msg)\n self.assertEqual(len(msg), 3)\n self.assertEqual(msg[\"status\"], \"failed\")\n self.assertEqual(msg[\"statuscode\"], \"405\")\n self.assertEqual(msg[\"message\"], \"Test\")", "def test_404_search_results_unprocesssable(self):\n data = {'searchTerm':'wxyz'}\n res = self.client().post('/search', \n data=json.dumps(data),\n content_type='application/json')\n self.data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n json_res = json.loads(res.get_data(as_text=False))", "def jsonable_server_error(request, template_name='500.html'):\r\n if request.is_ajax():\r\n msg = {\"error\": \"The edX servers encountered an error\"}\r\n return HttpResponseServerError(json.dumps(msg))\r\n else:\r\n return server_error(request, template_name=template_name)", "def jsonable_error(status=500, message=\"The Studio servers encountered an error\"):\r\n def outer(func):\r\n @functools.wraps(func)\r\n def inner(request, *args, **kwargs):\r\n if request.is_ajax():\r\n content = json.dumps({\"error\": message})\r\n return HttpResponse(content, content_type=\"application/json\",\r\n status=status)\r\n else:\r\n return func(request, *args, **kwargs)\r\n return inner\r\n return outer", "def testDefaultErrorMessage(self):\n\n json_message = current.xml.json_message\n\n msg = json_message(False)\n msg = json.loads(msg)\n self.assertEqual(len(msg), 2)\n self.assertEqual(msg[\"status\"], \"failed\")\n self.assertEqual(msg[\"statuscode\"], \"404\")", "def test_invalid_countries(self):\n data = self.valid_payload\n data['nationality'] = 500\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n # self.assertEqual(response.data, 'ey')", "def test_form_errors(self):\n form = self.response.context.get('form')\n self.assertTrue(form.errors)", "def _test_request_error(self, view_name, view_kwargs, data, mock_request):\r\n self._setup_mock_request(mock_request, include_depth=(view_name == \"create_sub_comment\"))\r\n\r\n response = self.client.post(reverse(view_name, kwargs=view_kwargs), data=data)\r\n self.assertEqual(response.status_code, 400)\r\n for call in 
mock_request.call_args_list:\r\n self.assertEqual(call[0][0].lower(), \"get\")", "def _error_response(self):\r\n response_dict = {'success': False, 'version': 1}\r\n self.send_response(\r\n 400, content=json.dumps(response_dict),\r\n headers={'Content-type': 'application/json'}\r\n )" ]
[ "0.6963432", "0.68196243", "0.6738075", "0.6731617", "0.66677034", "0.66568595", "0.6615065", "0.65761465", "0.65392554", "0.6538512", "0.6524916", "0.6521766", "0.6519895", "0.6510612", "0.6499493", "0.6459137", "0.6440112", "0.6433431", "0.64058393", "0.6382526", "0.6369236", "0.6302503", "0.62885183", "0.6288374", "0.6282862", "0.62715024", "0.62587655", "0.62556326", "0.62523824", "0.62483627" ]
0.8210779
0
Test that the view saves data if the form is valid
def test_that_view_saves_data_if_form_valid(self): self.client.login(username='admin', password='admin') url = reverse("to_form", args=str(self.my_instance.id)) response = self.client.post(url, data={'name': 'Oleg', 'surname': 'Senyshyn', 'date': date(1995, 05, 03), 'email': '[email protected]', 'skype': 'sen9a1990'}, format='json') self.assertEqual('Data has been edit', json.loads(response.content)['ok']) my_instance = Contact.objects.first() self.assertEqual('Oleg', my_instance.name) self.assertEqual('Senyshyn', my_instance.surname) self.assertEqual(date(1995, 05, 03), my_instance.date) self.assertEqual('[email protected]', my_instance.email) self.assertEqual('sen9a1990', my_instance.skype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_submit_form_using_valid_data():", "def test_valid_form_true(self):\n form = UserRegisterForm(data=self.data)\n self.assertTrue(form.is_valid())", "def test_form_valid(self):\n form = Mock()\n form.cleaned_data = Mock()\n self.view.form_valid(form)\n self.assertIsInstance(form.instance.modified_by, User)\n self.assertEqual(form.instance.modified_by, self.request.user)", "def test_cleaned_data_worked(self):\n pass", "def test_saving(self):\n if self.form.is_valid():\n self.compound = self.form.save()\n self.assertIsNotNone(self.compound.id)", "def test_done(self, mock_redirect, mock_save):\n mock_cleaned_data = {\n \"code\": \"tst\",\n \"questionnaire\": self.questionnaire,\n }\n self.view.get_all_cleaned_data = Mock(return_value=mock_cleaned_data)\n self.view.done(self.view.get_form_list)\n self.assertEqual(mock_save.call_count, 1)\n self.assertEqual(mock_redirect.call_count, 1)", "def form_valid(self, form):\n form.save()\n return super().form_valid(form)", "def test_blank(self):\n form_data = self.form_data('')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def form_valid(self, form, factura_form, remito_form, ot_linea_form):\n form.save()\n factura_form.save()\n remito_form.save()\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())", "def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.validate():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)", "def test_form_create(self):\n create = {\n 'title': 'Last Post (Final)',\n 'content': '### Goodbye!',\n 'is_published': False,\n }\n\n form = self.form_cls(create)\n print(form.errors)\n\n form.save()\n\n actual = models.Entry.objects.get(slug='last-post-final')\n self.assertEquals(actual.title, create['title'])\n self.assertEquals(actual.content.raw, create['content'])\n self.assertIsNone(actual.published_timestamp)", "def test_form_valid(self):\n self.client.login(username='mike', password='password')\n form = {'title': 'Merry Christmas!',\n 'body': 'Happy New Year!'}\n response = self.client.post(reverse_lazy('create_journal'),\n form)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(response.url, reverse_lazy('journal'))", "def form_valid(self, form, factura_form, remito_form, ot_linea_form):\n self.object = form.save()\n factura_form.instance = self.object\n factura_form.save()\n remito_form.instance = self.object\n remito_form.save()\n ot_linea_form.instance = self.object\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())", "def form_valid(self, form):\n if self.fields is None:\n self.object.__dict__.update({\n field.name:form.cleaned_data[field.name] for field in form.visible_fields()\n })\n else:\n self.object.__dict__.update({\n field:form.cleaned_data[field] for field in self.fields\n })\n self.object.save()\n if self.request.is_ajax():\n return self.ajax_form_valid()\n else:\n return HttpResponseRedirect(self.get_success_url())", "def test_view_with_bad_blank_data(self):\n site = Site.test_objects.create_site('site name')\n response = self.client.post(\n reverse('streamwebs:camera_point_add',\n kwargs={'site_slug': site.site_slug}\n ), {\n 'camera_point-TOTAL_FORMS': '3', # 3 for now\n 'camera_point-INITIAL_FORMS': '0', # none are prefilled\n 'camera_point-MAX_NUM_FORMS': '3',\n 'camera_point-MIN_NUM_FORMS': '3',\n\n 'form-TOTAL_FORMS': '3', # 3 for now\n 'form-INITIAL_FORMS': '0', # none are prefilled\n 'form-MAX_NUM_FORMS': '3',\n 'form-MIN_NUM_FORMS': '3',\n }\n )\n 
self.assertFormError(response, 'camera_form', 'cp_date',\n 'This field is required.')\n self.assertTemplateUsed(\n response,\n 'streamwebs/datasheets/camera_point_add.html'\n )", "def test_that_view_return_errors_in_json(self):\n\n self.client.login(username='admin', password='admin')\n url = reverse(\"to_form\", args=str(self.my_instance.id))\n response = self.client.post(url, data={'name': 'Oleg'}, format='json')\n self.assertEqual(response.status_code, 200)\n for c in json.loads(response.content):\n self.assertEqual(['This field is required.'], json.loads(response.content)[c])", "def form_valid(self, form, factura_form, ot_linea_form):\n form.save()\n factura_form.save()\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())", "def test_make_form():", "def post(self, request, *args, **kwargs):\n form = self.get_form(self.form_class)\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)", "def test_post_invalid(self):\n self.post_data['name'] = ''\n response = self._post()\n self.assertEquals(self.model.objects.count(), 0)\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, self.template_name)\n self.assertTrue('form' in response.context)\n self.assertTrue(response.context['form'].is_bound)\n self.assertFalse(response.context['form'].is_valid())", "def form_valid(self, form, factura_form, ot_linea_form):\n self.object = form.save()\n factura_form.instance = self.object\n factura_form.save()\n ot_linea_form.instance = self.object\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())", "def post(self, request, *args, **kwargs): # pylint: disable=unused-argument\n formset = self.get_formset()\n if formset.is_valid():\n return self.formset_valid(formset)\n return self.formset_invalid(formset)", "def form_valid(self, form, ot_linea_form):\n form.save()\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())", "def form_valid(self, form, ot_linea_form):\n form.save()\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())", "def testFormValidates(self):\n sdq1 = getattr(self.s1, 'sdq1')\n app = self.app\n dummy_controller_state = ControllerState(\n id='base_edit',\n context=sdq1,\n button='submit',\n status='success',\n errors={},\n next_action=None,)\n controller = self.portal.portal_form_controller\n controller_state = controller.validate(dummy_controller_state, app.REQUEST, ['validate_base',])\n errors = controller_state.getErrors()\n errors = sdq1.post_validate(self.app.REQUEST, errors)\n assert errors == {}, \"Validation error raised: %s\" % controller_state.getErrors()", "def test_form_errors(self):\n form = self.response.context.get('form')\n self.assertTrue(form.errors)", "def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.is_valid():\n return self.form_valid(form)\n else:\n self.form_invalid_init(form=form)\n self.form_invalid_add_global_errormessages(form=form)\n return self.form_invalid(form)", "def test_valid_form(self):\n\n data = {'category': ['103','109'] }\n form = CategoriesForm(data=data)\n self.assertTrue(form.is_valid())", "def test_saves_user_on_save(self):\n person = Person.objects.get(user__username='admin')\n personform = PersonForm(instance=person, data={'user': person.user.pk, 'name': 'has_changed'})\n \n if personform.is_valid():\n person = personform.save()\n self.assertEquals(User.objects.get(pk=person.user.pk).first_name, \\\n \"has_changed\")\n else:\n self.fail(personform.errors)\n 
# self.fail(\"personform not valid\")", "def post(self, request, *args, **kwargs):\n self.object = None\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n if form.is_valid():\n return self.form_valid(form, request)\n else:\n return self.form_invalid(form, request)" ]
[ "0.75905186", "0.6972983", "0.68898875", "0.68876064", "0.6843631", "0.68388474", "0.6799494", "0.6762418", "0.6688027", "0.66799116", "0.6619247", "0.6603506", "0.6595378", "0.65945286", "0.6561139", "0.65168655", "0.6509908", "0.64848745", "0.6478526", "0.6477208", "0.6453057", "0.6432566", "0.6424713", "0.6424713", "0.6422211", "0.63883257", "0.63829255", "0.6381678", "0.6366585", "0.6313156" ]
0.806071
0
Loads patient Procedure observations
def load(cls): # Loop through procedures and build patient procedure lists: procs = csv.reader(file(PROCEDURES_FILE,'U'),dialect='excel-tab') header = procs.next() for proc in procs: cls(dict(zip(header,proc))) # Create a procedure instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_patient(self, patient):\n \n # Maintain patient count \n self.count_total_patients += 1\n \n # Allocate patient to subunit/session\n self.allocate_patient(patient)\n \n # Add to appropriate _population lists\n patient_dict = {'negative': self._pop.negative_patients, \n 'positive': self._pop.positive_patients,\n 'recovered': self._pop.recovered_patients,\n 'died': self._pop.died_patients}\n \n # Add to population dictionary of all patients\n patient_dict[patient.status].append(patient)", "def load_patient_data():\n data_file = open(\"test_data.txt\", \"r\")\n still_finding_patients = True\n my_patients = []\n while still_finding_patients is True:\n name_line = next(data_file)\n if name_line != \"END\":\n name_line = name_line.split()\n fname = name_line[0]\n lname = name_line[1]\n age = next(data_file).strip()\n gender = next(data_file).strip().casefold()\n tsh_data = next(data_file)\n tsh_data = tsh_data.strip().split(\",\")\n tsh_data.remove(\"TSH\")\n new_patient = create_patient(fname, lname, age, gender, tsh_data)\n my_patients.append(new_patient)\n else:\n still_finding_patients = False\n data_file.close()\n return my_patients", "def load_ps(self):\n self.ps = self.read_var(self.psvar)\n self.test_shape(self.psvar, self.ps.shape, 2)", "def load_p(self):\n self.p = self.read_var(self.pvar)\n new_arr = []\n for p in range(np.shape(self.p)[0]):\n new_arr.append(p)\n self.p = new_arr\n self.p = np.array(self.p)\n self.test_shape(self.pvar, self.p.shape, 1)", "def _load_protein():\n data_file = os.path.join(data_dir, \"protein/CASP.csv\")\n data = pd.read_csv(data_file, sep=\",\")\n X = data.values[:, 1:]\n y = data.values[:, 0]\n return X, y", "def load_patient_data(db_dir, patient_id, include_labels=True, unzipped=False):\n signal = load_signal(db_dir, patient_id, unzipped=unzipped)\n if include_labels:\n labels = load_labels(db_dir, patient_id, unzipped=unzipped)\n return signal, labels\n else:\n return signal, None", "def load(fidfile, procfile):\r\n dic, data = _ng.varian.read_fid(fidfile)\r\n procs = _ng.varian.read_procpar(procfile)\r\n\r\n offset = [float(i) for i in procs['tof']['values']][0]\r\n magfreq = [float(i) for i in procs['sfrq']['values']][0]\r\n rangeHz = [float(i) for i in procs['sw']['values']][0]\r\n\r\n rangeppm = rangeHz / magfreq\r\n offsetppm = offset / magfreq\r\n\r\n # Fourier transform\r\n data = _ng.proc_base.fft(data)\r\n data = data / _np.max(data)\r\n\r\n u = data.real.sum(axis=0)\r\n v = data.imag.sum(axis=0)\r\n\r\n w = _np.linspace(rangeppm - offsetppm, -offsetppm, u.size)\r\n\r\n result = _containers.Data(w[::-1], u[::-1], v[::-1])\r\n return result", "def load_data(self) -> None:", "def get_patient_dict():\r\n return common.get_dict_all(get_patient_filename(), None)", "def load(cls):\n \n # Loop through problems and build patient problem lists:\n probs = csv.reader(file(PROBLEMS_FILE,'U'),dialect='excel-tab')\n header = probs.next() \n for prob in probs:\n cls(dict(zip(header,prob))) # Create a problem instance ", "def load_data(self):", "def load(prep_id):\n module_logger.debug(\"In load. 
Specified ID: %s\", prep_id)\n\n session = iHMPSession.get_session()\n module_logger.info(\"Got iHMP session.\")\n\n module_logger.info(\"Retrieving data for %s.\", __name__)\n prep_data = session.get_osdf().get_node(prep_id)\n\n prep = HostSeqPrep.load_host_seq_prep(prep_data)\n\n return prep", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexSession._loadData(self, data)", "def load(self, p):\n return", "def load_raw_data(prot, shuffle=True):\n dna_seq = []\n binding = []\n data_file = os.path.join(DATA_DIR, prot, 'train')\n with open(data_file, 'r') as f:\n datas = f.readlines()\n if shuffle:\n random.shuffle(datas)\n dna_seq.extend(data.split()[0] for data in datas)\n binding.extend(eval(data.split()[1]) for data in datas)\n return dna_seq, binding", "def main():\n\n args = get_args()\n \n patient_years_dict = {}\n\n isolates_to_seq = []\n\n with open(args.input_file, \"r\") as infile1:\n for line in infile1:\n if not line.startswith(\"PID\"):\n line_elements = line.strip().split(\"\\t\")\n patient_anumber = line_elements[1].split(\"_\")[0]\n patient_year = line_elements[2].split(\"-\")[0]\n if patient_anumber not in patient_years_dict:\n patient_years_dict[patient_anumber] = {patient_year:[line]}\n else:\n if patient_year not in patient_years_dict[patient_anumber]:\n patient_years_dict[patient_anumber][patient_year] = [line]\n if line not in patient_years_dict[patient_anumber][patient_year]:\n patient_years_dict[patient_anumber][patient_year].append(line)\n \n for patient in patient_years_dict:\n for year in patient_years_dict[patient]:\n print(f\"Checking patient {patient} and year {year}...\")\n wgs_codes = []\n pfge_codes = []\n isolate_dates = []\n isolate_lines = []\n for isolate in patient_years_dict[patient][year]:\n isolate_elements = isolate.strip().split(\"\\t\")\n wgs_codes.append(int(isolate_elements[5]))\n pfge_codes.append(int(isolate_elements[4]))\n isolate_dates.append(isolate_elements[2])\n isolate_lines.append(isolate)\n \n if any(wgs_codes):\n print(f\"\\tWGS present, moving on to next year/patient.\")\n continue\n else:\n print(f\"\\tWGS missing, checking PFGE...\\n\\tPFGE presence/absence codes for {year} are: {pfge_codes}\")\n if any(pfge_codes):\n pfge_index = pfge_codes.index(1)\n isolate_to_seq = isolate_dates[pfge_index]\n isolate_line_to_seq = isolate_lines[pfge_index]\n print(f\"\\tPFGE present, selecting first isolate with PFGE to sequence...\\n\\tIsolate to sequence is ---> {isolate_to_seq} <--- out of possible isolates {isolate_dates}.\")\n isolates_to_seq.append(isolate_line_to_seq)\n else:\n print(f\"\\tPFGE missing...\\n\\tSelecting 1st available isolate for sequencing ---> {isolate_dates[0]} <--- out of available isolates {isolate_dates}.\")\n isolates_to_seq.append(isolate_lines[0])\n\n header = f\"PID\\tADATE\\tCULTDAT\\tvalues\\tPFGE_Isolates\\tSequenced_Isolates\\n\" \n to_write = \"\".join(isolates_to_seq)\n with open(args.output_file, \"w\") as outfile1:\n outfile1.write(header + to_write)", "def load_LFPprobes(self, cond_dict):\n preprocess_ind = search_preproc(self.preprocess, cond_dict)\n if not preprocess_ind: # checks if the condition is previously run\n print(\"no preprocessing with these parameters is done\")\n return\n\n cond = self.preprocess[preprocess_ind[0]]\n for probe_id in self.probes.keys():\n # first prepare the file name\n filename = os.path.join(self.result_path, 'PrepData', '{}_{}{}_pres{}s.pkl'.format(\n probe_id, cond['cond_name'], int(cond['srate']),cond['prestim']))\n # second load each probe and add it to 
the ROI list\n self.probes[probe_id] = ProbeF.LFPprobe.from_file(filename)\n self.loaded_cond = cond['cond_name']", "def get_patients(self):\n return", "def get_data(year):\n base = \"data\"\n\n if year == 1999:\n xpt_files_use = xpt_files\n elif year == 2000:\n xpt_files_use = [\"DEMO.XPT\", \"TELO_B.XPT\", \"BMX.XPT\"]\n else:\n raise ValueError(\"Please provide the files names!\")\n\n # Retain these variables.\n vars = [\n [\"SEQN\", \"RIAGENDR\", \"RIDAGEYR\", \"RIDRETH1\"],\n [\"SEQN\" , \"TELOMEAN\", \"TELOSTD\"],\n [\"SEQN\", \"BMXWT\", \"BMXHT\", \"BMXBMI\", \"BMXLEG\", \"BMXARMC\"],\n ]\n\n # Load each individual file and keep only the variables of interest\n da = []\n for idf, fn in enumerate(xpt_files_use):\n df = pd.read_sas(os.path.join(base, fn))\n df = df.loc[:, vars[idf]]\n da.append(df)\n\n # SEQN is a common subject ID that can be used to merge the files.\n # These are cross sectional (wide form) files, so there is at most\n # one row per subject. Subjects may be missing from a file if they\n # did not participate in those assessments. All subjects should be\n # present in the demog file.\n dx = pd.merge(da[0], da[1], left_on=\"SEQN\", right_on=\"SEQN\")\n dx = pd.merge(dx, da[2], left_on=\"SEQN\", right_on=\"SEQN\")\n\n # Recode sex as an indicator for female sex\n dx[\"Female\"] = (dx.RIAGENDR == 2).astype(np.int)\n\n # Recode the ethnic groups\n dx[\"RIDRETH1\"] = dx.RIDRETH1.replace({1: \"MA\", 2: \"OH\", 3: \"NHW\", 4: \"NHB\", 5: \"OR\"})\n\n # Drop rows with any missing data in the variables of interest\n dx = dx.dropna()\n\n return dx", "def load_primals_into_pyomo_model(self):\n for ndx, nlp in self._nlps.items():\n nlp.load_primals_into_pyomo_model()", "def load_primals_into_pyomo_model(self):\n for ndx, nlp in self._nlps.items():\n nlp.load_primals_into_pyomo_model()", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def get_patient_data(self, client):\n for patient in self._monitored_patients.get_patient_list():\n # print(\"Requesting data for \" + patient.first_name+\" \"+patient.last_name+\"...\")\n patient.update_data(client.get_patient_data(patient.id))", "def load_data(data_fn,variables_fn,URL,VariableSet,Drop_extra=True):\r\n \r\n #If the data file exists locally, load it using pandas\r\n breakhere=1\r\n try: df = pd.read_csv(data_fn)\r\n except: \r\n #If the data file does not exist locally, download and save locally, then open\r\n print('Downloading data file (~430mb)')\r\n urllib.request.urlretrieve(URL,data_fn)\r\n print('Download Complete')\r\n df = pd.read_csv(data_fn)\r\n \r\n print('\\n************\\nNOTE RE: MIXED TYPE WARNING - These variables contain:')\r\n print(' 294 - HQ ID of person providing most of the information for this form')\r\n print(' 295 - HQ ID of 1st Other providing information for this form')\r\n print(' 701 - HF2 Cross wave ID (xwaveid) - 0001')\r\n print(' 702 - HF2 Cross wave ID (xwaveid) - 0002')\r\n print('These variables are not used in this project so the mixed type warning can be ignored\\n************')\r\n \r\n #Read the variables descriptor file\r\n variables = pd.read_csv(variables_fn)\r\n \r\n #extract the name of the response variable\r\n responseVar = variables['Variable'][variables['Response']=='y'].values[0]\r\n \r\n if Drop_extra:\r\n #Store the current number of recores\r\n NumRecords = df.shape[0]\r\n #remove entries with negative response variables (non-responding person, N/A, not asked, etc)\r\n DataSubset = pd.DataFrame(df[df[responseVar]>-1])\r\n #Notify the user how of how many 
records were dropped\r\n print('\\n{} records dropped due to lack of response variable (non-responding person, N/A, etc)'.format(NumRecords-DataSubset.shape[0]))\r\n \r\n #Extract the variable names for the variable set being analyzed\r\n KeepVars = variables['Variable'][variables[VariableSet]=='y']\r\n \r\n #Keep only the variables in the variable set\r\n DataSubset = DataSubset[KeepVars]\r\n \r\n else:\r\n #Return all variables and records\r\n DataSubset = df\r\n \r\n return DataSubset,variables,responseVar", "def createPatient(self):\n p = Prescription()\n p.patient_id = self.patients.data\n p.medication = self.medication.data\n p.frequency = self.frequency.data\n p.start_dt = self.start_dt.data\n p.end_dt = self.end_dt.data\n p.noti_type = self.noti_type.data\n return p", "def __init__(self, viruses, maxPop):\n\n Patient.__init__(self,viruses,maxPop)\n self.Prescriptions = []", "def _loadData(self, data):\n Episode._loadData(self, data)\n PlexSession._loadData(self, data)", "def get_programs(self):\n self.logger.info(\"Preparing programs...\")\n current_dir = Path()\n dir_path = current_dir / \"data\" / \"break_data\" / \"programs\"\n\n file_name = \"programs_\" + self.dataset_split + \".pkl\"\n if not (dir_path / file_name).is_file():\n self.create_matching_programs(dir_path, file_name)\n data = load_obj(dir_path, file_name)\n\n self.logger.info(\"Programs ready.\")\n return data", "def read_pro(path,var_to_plot= None):\n\n # Which variables are you interested in?\n\n if var_to_plot:\n var_codes = ['0500','0501',pro_code_dict(var_to_plot,inverse=True)]\n else:\n var_codes = ['0500', '0501', '0502', '0503', '0506', '0508',\n '0509', '0511', '0512', '0513', '0515',\n '0516', '0521', '0535', '0540', '0541']\n\n # Set up the dictionary to be returned. Dictionary is organised by variable name.\n\n code_dict = pro_code_dict(return_all=True)\n\n variables = {}\n for var in var_codes:\n variables[code_dict[var]] = []\n\n # Open the .PRO file\n\n with open(path, \"r\") as f:\n\n # Iterate line by line\n\n for line in f.readlines():\n\n # If the variable code (first four chars) matches the variable of interest,\n # append that line to the list of lines\n\n if line[:4] in var_codes:\n variables[code_dict[line[:4]]].append(line)\n\n\n # Now remove the header data\n\n for variable in variables.keys():\n\n variables[variable].pop(0)\n\n snowpro_list = [snowpro_from_snapshot(date_index, variables) for date_index in range(len(variables['Date']))]\n\n return (snowpro_list)", "def load(self):\n self.data = NSPSpecIO().read(self.path)" ]
[ "0.6553638", "0.5726791", "0.5658638", "0.56560385", "0.5612213", "0.5544604", "0.5427802", "0.533888", "0.5330403", "0.5310503", "0.5299247", "0.52846605", "0.5251733", "0.5122264", "0.5118435", "0.50937766", "0.50423414", "0.50294584", "0.50182426", "0.4999633", "0.4999633", "0.49912623", "0.49823967", "0.49760243", "0.4968931", "0.4966912", "0.4963316", "0.4961419", "0.49596906", "0.49595696" ]
0.68310565
0
Returns a tab-separated string representation of a procedure
def asTabString(self): dl = [self.pid, self.date, self.snomed, self.name[:20]] s = "" for v in dl: s += "%s\t"%v return s[0:-1] # Throw away the last tab
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def asTabString(self):\n dl = [self.pid, self.start, self.snomed, self.name[:20]]\n s = \"\"\n for v in dl:\n s += \"%s\\t\"%v \n return s[0:-1] # Throw away the last tab", "def print_para_table(s):\n if MODE == 1:\n t = [['Parameter', 'Value', 'Unit'],\n ['Number of bends', NBENDS, '/'], \n ['Width', WIDTH, 'm'],\n ['Depth', DEPTH, 'm'],\n ['Length', LAMBDA*(NBENDS+1), 'm'],\n ['Arc wavelength', LAMBDA, 'm'],\n ['Slope', SLOPE, '/'],\n ['Streamwise resolution', DS, 'm'],\n ['Transverse resolution', np.around(INTERVAL, decimals=4), 'm'],\n ['Streamwise # of pts', s.size + 2*int(LAMBDA/2/DS), '/'],\n ['Transverse # of pts', NUM*2+1, '/']]\n elif MODE == 2:\n if FNAME[0].islower():\n f = FNAME[0].upper() + FNAME[1:]\n else:\n f = FNAME\n t = [['Parameter', 'Value', 'Unit'],\n ['River name', f.rsplit('.', 1)[0], '/'],\n ['Width', WIDTH, 'm'],\n ['Depth', DEPTH, 'm'],\n ['Length', np.round(s[-1], decimals=2), 'm'],\n ['Slope', SLOPE, '/'],\n ['Streamwise resolution', np.round(np.mean(np.diff(s)), decimals=2), 'm'],\n ['Transverse resolution', np.round(INTERVAL, decimals=2), 'm'],\n ['Streamwise # of pts', s.size, '/'],\n ['Transverse # of pts', NUM*2+1, '/']]\n print(tabulate(t, tablefmt='psql', stralign='right', headers='firstrow'))", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\",\" \").capitalize(), args[k]] for k in keys])\n print(t.draw())", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n tab = Texttable()\n tab.add_rows([[\"Parameter\", \"Value\"]])\n tab.add_rows([[k.replace(\"_\", \" \").capitalize(), args[k]] for k in keys])\n print(tab.draw())", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\", \" \").capitalize(), args[k]] for k in keys])\n print(t.draw())", "def console(pro):\n transect = pro['transect' ] \n nm = pro['nm120r' ].flatten()#[pro['m120swr_'][0,:]]\n t = pro['t120r' ].flatten()#[pro['m120swr_'][0,:]]\n sb = pro['sbliner' ].flatten()#[0][:-1]\n NASC = pro['NASC120swr'].flatten()#[pro['m120swr_'] ]\n pc = pro['pc120swr' ].flatten()#[pro['m120swr_'] ]\n \n # Preallocate table object\n table = io.StringIO()\n \n # Outline alignment and format for table lines, header, and data\n line = '+{:-^10}+{:-^11}+{:-^25}+{:-^8}+{:-^13}+{:-^11}+ \\n'\n header = '{:<9} | {:<9} | {:<23} | {:>6} | {:>11} |{:>12} \\n'\n data = '| {:<3d} | {:<9.3f} | {:<15} | {:>6.1f} | {:>11.2f} | {:>9.1f} | \\n'\n \n # Write table lines and header\n table.write(line.format('','','','','',''))\n table.write(header.format('| Transect','N. 
miles','Time','Seabed','NASC','% samples |')) \n table.write(line.format('','','','','','')) \n \n # Populate table with data\n for nmi, ti, sbi, NASCi, pci in zip(nm, t, sb, NASC, pc):\n table.write(data.format(transect, nmi, ti, sbi, NASCi, pci))\n \n # Close table with a line\n table.write(line[:-2].format('','','','','',''))\n \n # Print table in the console\n table = table.getvalue() \n print(table)", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\",\" \").capitalize(),args[k]] for k in keys])\n print(t.draw())", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\",\" \").capitalize(),args[k]] for k in keys])\n print(t.draw())", "def primitive_procedure_names():\n return mapp(car, primitive_procedures)", "def __str__(self):\n dictt = self.getFullDict()\n return \"SymbolTable(\\n{}\\n)\".format(pprint.pformat(dictt))", "def pretty_print(self):\n pt = PrettyTable()\n for i in self.files_summary:\n pt.field_names = [\"File Name\", \"Classes\", \"Functions\", \"Lines\", \"Characters\"]\n pt.add_row(list([i, self.files_summary[i][\"class\"], self.files_summary[i][\"function\"], self.files_summary[i][\"line\"], self.files_summary[i][\"char\"]]))\n print(pt) #Using a Print statement here because i tried to return self.pt and it didnt give me anything but the print works", "def __str__(self):\n return str(self.pk) + ' - ' + self.procedure_class", "def display_table(a, m):\n # Initialize string\n result = ''\n result += '{'\n\n # Add all polynomials to the string, given they are already a string\n for i in a:\n for j in i[:-1]:\n result += display_poly(j, m)\n result += ', '\n\n # Add the last one here to prevent unneeded comma\n result += display_poly(i[-1], m)\n result += '; '\n\n # Remove final semicolon and close the brace\n result = result[:-2]\n result += '}'\n\n return result", "def procedure(self):\n return self.__procedure", "def ascii_table(self, tablefmt=\"pipe\"):\n methods = self.methods\n xvalues = self.xvalues\n plot_matrix = self.plot_matrix\n\n import tabulate\n # https://pypi.python.org/pypi/tabulate\n aug_table = np.hstack((np.array(methods)[:, np.newaxis], plot_matrix))\n return tabulate.tabulate(aug_table, xvalues, tablefmt=tablefmt)", "def test_create_procedure(self):\n self.assertEqual([\"CREATE\", \"DEFINER\", \"=\", \"test\", \"PROCEDURE\", \"test\",\n [[\"IN\", \"a1\", \"INT\"], [\"IN\", \"a_2\", \"VARCHAR(255)\"], [\"OUT\", \"a.b\", \"BOOL\"]], \"COMMENT\", \"\\\"returns union\\\"\"],\n grammar._CREATE_PROCEDURE.parseString(\n \"CREATE DEFINER = `test` PROCEDURE test(a1 INT,IN a_2 VARCHAR(255), OUT `a.b` BOOL) COMMENT \\\"returns union\\\"\").asList())", "def raw_out(self):\n\t\treturn '\\t'.join(self.raw_out_tab)", "def __str__(self):\n return str(self.t1)+\"<-->t1, \\t\"+str(self.t2)+\"<-->t2, \\t\"+str(self.phi)+\"<-->phi, \\t\"+str(self.m)+\"<-->m, \\t\"+str(self.t31)+\"<-->t31, \\t\"+str(self.t32)+\"<-->t32, \\n\"", "def pddl_rep(self):\n rep = ''\n rep += \"(:action \" + self.name + \"\\n\"\n rep += \"\\t:parameters \" + str(self.parameters) + \"\\n\"\n if len(self.precondition) > 1:\n rep += \"\\t:precondition (and\\n\"\n else:\n rep += \"\\t:precondition \\n\"\n for precon in self.precondition:\n rep += \"\\t\\t\" + str(precon) + \"\\n\"\n if len(self.precondition) > 1:\n rep += \"\\t)\\n\"\n if len(self.effect) > 1:\n rep += \"\\t:effect (and\\n\"\n 
else:\n rep += \"\\t:effect \\n\"\n for eff in self.effect:\n rep += \"\\t\\t\" + str(eff) + \"\\n\"\n if len(self.effect) > 1:\n rep += \"\\t)\\n\"\n rep += \")\\n\"\n return rep", "def print_table1(df, eval_dir):\n\n out_file = os.path.join(eval_dir, 'table1.txt')\n\n header_string = ' & '\n line_string = 'METHOD '\n\n\n for s_idx, struc_name in enumerate(['LV', 'RV', 'Myo']):\n for measure in ['dice', 'assd']:\n\n header_string += ' & {} ({}) '.format(measure, struc_name)\n\n dat = df.loc[df['struc'] == struc_name]\n\n if measure == 'dice':\n line_string += ' & {:.3f}\\,({:.3f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n else:\n line_string += ' & {:.2f}\\,({:.2f}) '.format(np.mean(dat[measure]), np.std(dat[measure]))\n\n if s_idx < 2:\n header_string += ' & '\n line_string += ' & '\n\n header_string += ' \\\\\\\\ \\n'\n line_string += ' \\\\\\\\ \\n'\n\n with open(out_file, \"w\") as text_file:\n text_file.write(header_string)\n text_file.write(line_string)\n\n return 0", "def tab_delim_table(self):\n self.generate()\n\n header = ' \\t '.join([r'{: ^7}'.format(col) for col in self.columns])\n lines = []\n for row in self.rows:\n bits = []\n for col in self.columns:\n if col in self.formatters:\n bits.append(self.formatters[col].format(row[col]))\n else:\n bits.append(self.formatters.get(col, '{: ^7}').format(row[col] if row[col] else ''))\n lines.append(' \\t '.join(bits))\n\n return \"{}\\n{}\".format(header, '\\n'.join(lines))", "def tabuleiro_str(tab): \r\n if not eh_tabuleiro(tab):\r\n raise ValueError('tabuleiro_str: o argumento e invalido')\r\n else:\r\n res = ''\r\n for j in range(3):\r\n for i in range(3):\r\n if tab[j][i]==1:\r\n res+=' X '\r\n elif tab[j][i]==-1:\r\n res+=' O '\r\n else:\r\n res+=' '\r\n if i < 2:\r\n res+='|'\r\n if j<2:\r\n res+='\\n-----------\\n'\r\n return res", "def write_p(pred):\n return PRED_T.format(pred[0], ARG_SEP.join(pred[1]))", "def tabulate(self) -> str:\n items = [\n ('Number of stations', self._num_stations),\n ('Loss probability', self.drop_prob),\n ]\n\n for node in range(self._num_stations):\n items.append((f'[[ STATION #{node} ]]', ''))\n\n ssize = self.system_size[node]\n qsize = self.queue_size[node]\n busy = self.busy[node]\n\n ssize_pmf = [ssize.pmf(x) for x in range(ssize.truncated_at + 1)]\n qsize_pmf = [qsize.pmf(x) for x in range(qsize.truncated_at + 1)]\n busy_pmf = [busy.pmf(x) for x in range(busy.truncated_at + 1)]\n\n items.extend([\n ('System size PMF', str_array(ssize_pmf)),\n ('System size average', ssize.mean),\n ('System size std.dev.', ssize.std),\n ('Queue size PMF', str_array(qsize_pmf)),\n ('Queue size average', qsize.mean),\n ('Queue size std.dev.', qsize.std),\n ('Busy PMF', str_array(busy_pmf)),\n ('Utilization', self.get_utilization(node)),\n ('Drop probability', self.drop_prob[node]),\n ('Delivery probability', self.delivery_prob[node]),\n ('Departures, average', self.departures[node].avg),\n ('Departures, std.dev.', self.departures[node].std),\n ('Response time, average', self.response_time[node].avg),\n ('Response time, std.dev.', self.response_time[node].std),\n ('Wait time, average', self.wait_time[node].avg),\n ('Wait time, std.dev.', self.wait_time[node].std),\n ('End-to-end delays, average', self.delivery_delays[node].avg),\n ('End-to-end delays, std.dev.', self.delivery_delays[node].std),\n ])\n return tabulate(items, headers=('Param', 'Value'))", "def __str__(self):\n sorted_table = InferenceUtils.get_n_best(self._table, max(len(self._table), 1))\n\n result = []\n for key, value in 
sorted_table.items():\n result.append('P(%s):=%f\\n' % (str(key), value))\n\n return ''.join(result)[:-1] if len(result) > 0 else ''", "def print_table(table):\r\n print('/-----------------------------------------------------------------------------------\\\\')\r\n for item in table:\r\n\r\n while len(item[1]) <= 22:\r\n item[1] += ' '\r\n\r\n while len(item[2]) <= 27:\r\n item[2] += ' '\r\n\r\n while len(item[0]) <= 15:\r\n item[0] += ' '\r\n\r\n print('| '+item[0]+' | '+item[1]+'| '+item[2]+' |')\r\n\r\n print('\\\\-----------------------------------------------------------------------------------/')", "def __str__(self):\n out = \"phase polynomial = \\n\"\n out += str(self.poly)\n out += \"\\naffine function = \\n\"\n out += \" (\"\n for row in range(self.num_qubits):\n wrote = False\n for col in range(self.num_qubits):\n if self.linear[row][col] != 0:\n if wrote:\n out += \" + x_\" + str(col)\n else:\n out += \"x_\" + str(col)\n wrote = True\n if self.shift[row] != 0:\n out += \" + 1\"\n if row != self.num_qubits - 1:\n out += \",\"\n out += \")\\n\"\n return out", "def tabout(things, file=sys.stdout):\n print(\"\\t\".join([str(x) for x in things]), file=file)\n file.flush()", "def pformat(class_instance):\n s = ''\n for var, val in vars(class_instance).items():\n s += var + ': ' + str(val) + '\\n'\n return s[:-1]", "def __str__(self):\n\t\treturn pprint.pformat({'servicename':self.servicename,'doc_lines':self.doc_lines,'sourcefile':self.sourcefile,'methods':self.methods})" ]
[ "0.62828636", "0.61583054", "0.57344764", "0.57325906", "0.57238656", "0.57168484", "0.57098097", "0.57098097", "0.5668657", "0.5587102", "0.55370563", "0.54839957", "0.5471588", "0.54644656", "0.542793", "0.53945804", "0.53495973", "0.5316048", "0.5290959", "0.52773845", "0.5270728", "0.52207613", "0.520993", "0.51909554", "0.51552963", "0.5144754", "0.5117192", "0.510735", "0.5107278", "0.5097223" ]
0.62052697
1
Initiate the root XML, parse it, and return a dataframe
def process_data(self): structure_data = self.parse_root(self.root) dict_data = {} for d in structure_data: dict_data = {**dict_data, **d} df = pd.DataFrame(data=list(dict_data.values()), index=dict_data.keys()).T return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_XML(xml_file, df_cols):\n \n xtree = et.parse(xml_file)\n xroot = xtree.getroot()\n rows = []\n \n for node in xroot: \n res = []\n #res.append(node.attrib.get(df_cols[0]))\n for el in df_cols: \n if node is not None and node.find(el) is not None:\n res.append(node.find(el).text)\n else: \n res.append(None)\n rows.append({df_cols[i]: res[i] \n for i, _ in enumerate(df_cols)})\n \n out_df = pd.DataFrame(rows, columns=df_cols)\n \n return out_df", "def _xmlRead(self):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n \r\n logger.debug(\"{0:s}xmlFile: {1:s} parse Xml ...\".format(logStr,self.xmlFile)) \r\n tree = ET.parse(self.xmlFile) # ElementTree \r\n root = tree.getroot() # Element\r\n\r\n self.dataFrames=Xm._xmlRoot2Dfs(root)\r\n\r\n #fixes and conversions\r\n self._convertAndFix()\r\n\r\n #Views\r\n self._vXXXX()\r\n \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))", "def parse_ncbi_xml(root):\n \n ii = 0\n bigdf = pd.DataFrame()\n for i in root:\n # basic sample information \n sample_id = i.attrib['id']\n sample_title = i[1][0].text\n taxonomy_name = i[1][1].attrib['taxonomy_name']\n try:\n organism_name = i[1][1][0].text\n except IndexError:\n organism_name = 'NA'\n # check for sequencing data\n has_sra = False\n for child in i[0]:\n if 'SRA' in list(child.attrib.values()):\n has_sra =True\n try:\n comment_text = i[1][2].findall('Paragraph')[0].text\n except IndexError:\n comment_text = 'NA'\n if has_sra:\n # add this to each row\n add_extra = [sample_id, sample_title, taxonomy_name, organism_name, comment_text]\n add_extra_header = ['sample_id', 'sample_title', 'taxonomy_name', 'organism_name', 'comment_text']\n # get resistance table\n table = i[1][2].findall('Table')[0]\n table_title = table.attrib['class']\n table_header = [col.text for col in table.find('Header').getchildren()] + add_extra_header\n rows = [c for c in table.find('Body').getchildren()]\n table_text = []\n for r in rows:\n row_text = [cell.text for cell in r.getchildren()] + add_extra\n table_text.append(row_text)\n table_df = pd.DataFrame(table_text, columns=table_header)\n\n # standardize the columns in this \n # we want the Antibiotic, Resistance phenotype, Measurement, Measurement sign, Measurement units\n # columns. 
If theres not that, put NA\n keep_columns = ['Antibiotic', 'Resistance phenotype', 'Measurement', 'Measurement sign', 'Measurement units'] + add_extra_header\n for k in keep_columns:\n if k not in table_df.columns:\n table_df[k] = 'NA'\n table_df = table_df[keep_columns]\n # add other info to the table\n # table_df['sample_id']= sample_id\n # table_df['sample_title']= sample_title\n # table_df['taxonomy_name']= taxonomy_name\n # table_df['organism_name']= organism_name\n bigdf = bigdf.append(table_df)\n ii +=1\n return(bigdf)", "def parse_ncbi_xml_single(root):\n\n assert(len(root)==1)\n root = root[0]\n\n # basic sample information \n sample_id = root.attrib['id']\n sample_accession = root.attrib['accession']\n taxonomy_name = root[1][1].attrib['taxonomy_name']\n sample_title = root[1][0].text\n\n # check for sequencing data\n has_sra = False\n for child in root[0]:\n if 'SRA' in list(child.attrib.values()):\n has_sra =True\n try:\n comment_text = root[1][2].findall('Paragraph')[0].text\n except IndexError:\n comment_text = 'NA'\n if has_sra:\n # add this to each row\n add_extra = [sample_id, sample_accession, sample_title, taxonomy_name, comment_text]\n add_extra_header = ['sample_id', 'sample_accession', 'sample_title', 'taxonomy_name', 'comment_text']\n # get resistance table\n table = root[1][2].findall('Table')[0]\n table_title = table.attrib['class']\n table_header = [col.text for col in table.find('Header').getchildren()] + add_extra_header\n rows = [c for c in table.find('Body').getchildren()]\n table_text = []\n for r in rows:\n row_text = [cell.text for cell in r.getchildren()] + add_extra\n table_text.append(row_text)\n table_df = pd.DataFrame(table_text, columns=table_header)\n\n # standardize the columns in this \n # we want the Antibiotic, Resistance phenotype, Measurement, Measurement sign, Measurement units\n # columns. 
If theres not that, put NA\n keep_columns = ['Antibiotic', 'Resistance phenotype', 'Measurement', 'Measurement sign', 'Measurement units'] + add_extra_header\n for k in keep_columns:\n if k not in table_df.columns:\n table_df[k] = 'NA'\n table_df = table_df[keep_columns]\n return(table_df)", "def read_xml(path_to_dir):\n\n # Test wheter there is only one *.evx file in the directory.\n assert len(glob.glob(path_to_dir + \"*.evx\")) == 1\n\n # Open *.evx file and get time of the first EEG sample.\n with open(glob.glob(path_to_dir + \"*.evx\")[0], mode='r', encoding=\"utf-8\") as xml_file:\n xml_tree = etree.parse(xml_file)\n root = xml_tree.getroot()\n\n # Store this information in a data-frame in a datetime/timestamp format.\n df = pd.DataFrame()\n for child_of_root in root:\n\n if child_of_root.attrib[\"strId\"] == \"Technical_ExamStart\":\n\n time_event = child_of_root.find(\"event\")\n\n # Timestamp in unix time.\n unix_time = time_event.attrib[\"time\"]\n\n # Timestamp in DateTime.\n dt_time = time_event.find(\"info\").attrib[\"time\"]\n\n # TODO: Make sure the timestamps will be possible to compare between tz (utc) naive and tz aware formats.\n timezone_info = dt_time.find('+')\n df[\"UNIXTIME\"] = pd.to_datetime([unix_time], unit=\"us\").tz_localize(\"UTC\") + \\\n pd.Timedelta(hours=int(dt_time[timezone_info + 1: dt_time.find('+') + 3]))\n\n df[\"DateTime\"] = pd.to_datetime([dt_time], infer_datetime_format=True).tz_localize('UTC') + \\\n pd.Timedelta(hours=int(dt_time[timezone_info + 1: dt_time.find('+') + 3]))\n\n return df", "def xml(self):\n return parse_xml(self, tab=\"\\t\", id=self.id or \"\")", "def extract_xml(filename, columns, path_to_listings):\n \n tree = etree.parse(filename)\n root = tree.getroot()\n rows = get_rows(root.xpath(path_to_listings), columns)\n\n return pd.DataFrame(rows,columns=[col['name'] for col in columns])", "def parsexml(self):\n raise NotImplementedError", "def read_xml(self):\n pass", "def read_xml(self):\n connection = urlopen(self.url)\n in_xml = connection.read()\n state = ElementTree.fromstring(in_xml)\n records = []\n record = []\n\n # Specific to CHP\n # TODO(David) Nested for loops are bad. Change this to be more\n # efficient, possibly use generators.\n for center in state:\n rec_center = center.attrib['ID']\n\n for dispatch in center:\n rec_dispatch = dispatch.attrib['ID']\n\n for log in dispatch:\n record = [rec_center, rec_dispatch]\n\n record.append(log.attrib['ID'])\n\n log_time = log.find('LogTime').text.strip('\"')\n log_type = log.find('LogType').text.strip('\"')\n location = log.find('Location').text.strip('\"')\n loc_desc = log.find('LocationDesc').text.strip('\"')\n area = log.find('Area').text.strip('\"')\n\n record.append(log_time)\n record.append(log_type)\n record.append(location)\n record.append(loc_desc)\n record.append(area)\n\n latlon = log.find('LATLON').text.strip('\"')\n\n (lat, lon) = latlon.split(':')\n lat = str(lat[:2]) + '.' + str(lat[2:])\n lon = '-' + str(lon[:3]) + '.' 
+ str(lon[3:])\n\n record.append(lat)\n record.append(lon)\n\n records.append(record)\n\n self.records = records", "def xml2df(path):\n XMLdata = open(path).read()\n\n XML = ET.XML(XMLdata)\n\n all_records = []\n for i, child in enumerate(XML):\n record = {}\n for subchild in child:\n record[subchild.tag] = subchild.text\n for subsubchild1 in subchild:\n for subsubchild in subsubchild1:\n record[subsubchild.tag] = subsubchild.text\n \n all_records.append(record)\n df = pd.DataFrame(all_records)\n df = df.rename(columns={ 'value' : 'polarity'})\n del df['sentiment']\n\n \"\"\"Tweet Tokenize\"\"\"\n tknzr = TweetTokenizer()\n\n contents = []\n for content in df['content']:\n contents.append(' '.join(tknzr.tokenize(content)))\n \n return contents, df['polarity']", "def parse(self):\n \n root = self.xml_tree.getroot()\n \n #run for creating tables\n for child in root[1]:\n if child.attrib['type'] == 'Database - Table':\n self.add_table(child)\n \n \n #if table_dict empty -> wrong type of dia diagram\n if self.table_dict == {}: ###\n self.err.print_error(\"parser:database_wrong_dia\") ###\n e_code = self.err.exit_code[\"parser\"] ###\n ###\n exit(e_code) ###\n \n \n #run for adding references\n for child in root[1]:\n if child.attrib['type'] == 'Database - Reference':\n self.add_reference(child)\n \n return", "def test_parse_pi_xml_02(self):\n source = os.path.join(DATA_DIR, \"GDresults_dam.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.get_series():\n pass\n self.assertTrue(True)", "def _populate_from_xml_file(self, xml):\n '''\n example from API: http://www.ga.gov.au/www/argus.argus_api.survey?pSurveyNo=921\n\n <?xml version=\"1.0\" ?>\n <ROWSET>\n <ROW>\n <SURVEYID>921</SURVEYID>\n <SURVEYNAME>Goomalling, WA, 1996</SURVEYNAME>\n <STATE>WA</STATE>\n <OPERATOR>Stockdale Prospecting Ltd.</OPERATOR>\n <CONTRACTOR>Kevron Geophysics Pty Ltd</CONTRACTOR>\n <PROCESSOR>Kevron Geophysics Pty Ltd</PROCESSOR>\n <SURVEY_TYPE>Detailed</SURVEY_TYPE>\n <DATATYPES>MAG,RAL,ELE</DATATYPES>\n <VESSEL>Aero Commander</VESSEL>\n <VESSEL_TYPE>Plane</VESSEL_TYPE>\n <RELEASEDATE/>\n <ONSHORE_OFFSHORE>Onshore</ONSHORE_OFFSHORE>\n <STARTDATE>05-DEC-96</STARTDATE>\n <ENDDATE>22-DEC-96</ENDDATE>\n <WLONG>116.366662</WLONG>\n <ELONG>117.749996</ELONG>\n <SLAT>-31.483336</SLAT>\n <NLAT>-30.566668</NLAT>\n <LINE_KM>35665</LINE_KM>\n <TOTAL_KM/>\n <LINE_SPACING>250</LINE_SPACING>\n <LINE_DIRECTION>180</LINE_DIRECTION>\n <TIE_SPACING/>\n <SQUARE_KM/>\n <CRYSTAL_VOLUME>33.6</CRYSTAL_VOLUME>\n <UP_CRYSTAL_VOLUME>4.2</UP_CRYSTAL_VOLUME>\n <DIGITAL_DATA>MAG,RAL,ELE</DIGITAL_DATA>\n <GEODETIC_DATUM>WGS84</GEODETIC_DATUM>\n <ASL/>\n <AGL>60</AGL>\n <MAG_INSTRUMENT>Scintrex CS2</MAG_INSTRUMENT>\n <RAD_INSTRUMENT>Exploranium GR820</RAD_INSTRUMENT>\n </ROW>\n </ROWSET>\n '''\n # turn the XML doc into a Python object\n root = objectify.fromstring(xml)\n\n if hasattr(root.ROW, 'SURVEYNAME'):\n self.survey_name = root.ROW.SURVEYNAME\n if hasattr(root.ROW, 'STATE'):\n self.state = root.ROW.STATE\n if hasattr(root.ROW, 'OPERATOR'):\n self.operator = root.ROW.OPERATOR\n if hasattr(root.ROW, 'CONTRACTOR'):\n self.contractor = root.ROW.CONTRACTOR\n if hasattr(root.ROW, 'PROCESSOR'):\n self.processor = root.ROW.PROCESSOR\n if hasattr(root.ROW, 'SURVEY_TYPE'):\n self.survey_type = root.ROW.SURVEY_TYPE\n if hasattr(root.ROW, 'DATATYPES'):\n self.data_types = root.ROW.DATATYPES\n if hasattr(root.ROW, 'VESSEL'):\n self.vessel = root.ROW.VESSEL\n if hasattr(root.ROW, 'VESSEL_TYPE'):\n self.vessel_type = root.ROW.VESSEL_TYPE\n if 
hasattr(root.ROW, 'RELEASEDATE'):\n self.release_date = datetime.strptime(root.ROW.RELEASEDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.RELEASEDATE.text is not None else None\n if hasattr(root.ROW, 'ONSHORE_OFFSHORE'):\n self.onshore_offshore = root.ROW.ONSHORE_OFFSHORE\n if hasattr(root.ROW, 'STARTDATE'):\n self.start_date = datetime.strptime(root.ROW.STARTDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.STARTDATE.text is not None else None\n if hasattr(root.ROW, 'ENDDATE'):\n self.end_date = datetime.strptime(root.ROW.ENDDATE.text, \"%Y-%m-%dT%H:%M:%S\") if root.ROW.ENDDATE.text is not None else None\n if hasattr(root.ROW, 'WLONG'):\n self.w_long = root.ROW.WLONG\n if hasattr(root.ROW, 'ELONG'):\n self.e_long = root.ROW.ELONG\n if hasattr(root.ROW, 'SLAT'):\n self.s_lat = root.ROW.SLAT\n if hasattr(root.ROW, 'NLAT'):\n self.n_lat = root.ROW.NLAT\n if hasattr(root.ROW, 'LINE_KM'):\n self.line_km = root.ROW.LINE_KM\n if hasattr(root.ROW, 'TOTAL_KM'):\n self.total_km = root.ROW.TOTAL_KM\n if hasattr(root.ROW, 'LINE_SPACING'):\n self.line_spacing = root.ROW.LINE_SPACING\n if hasattr(root.ROW, 'LINE_DIRECTION'):\n self.line_direction = root.ROW.LINE_DIRECTION\n if hasattr(root.ROW, 'TIE_SPACING'):\n self.tie_spacing = root.ROW.TIE_SPACING\n if hasattr(root.ROW, 'SQUARE_KM'):\n self.square_km = root.ROW.SQUARE_KM\n if hasattr(root.ROW, 'CRYSTAL_VOLUME'):\n self.crystal_volume = root.ROW.CRYSTAL_VOLUME\n if hasattr(root.ROW, 'UP_CRYSTAL_VOLUME'):\n self.up_crystal_volume = root.ROW.UP_CRYSTAL_VOLUME\n if hasattr(root.ROW, 'DIGITAL_DATA'):\n self.digital_data = root.ROW.DIGITAL_DATA\n if hasattr(root.ROW, 'GEODETIC_DATUM'):\n self.geodetic_datum = root.ROW.GEODETIC_DATUM\n if hasattr(root.ROW, 'ASL'):\n self.asl = root.ROW.ASL\n if hasattr(root.ROW, 'AGL'):\n self.agl = root.ROW.AGL\n if hasattr(root.ROW, 'MAG_INSTRUMENT'):\n self.mag_instrument = root.ROW.MAG_INSTRUMENT\n if hasattr(root.ROW, 'RAD_INSTRUMENT'):\n self.rad_instrument = root.ROW.RAD_INSTRUMENT", "def test_parse_pi_xml_08(self):\n source = os.path.join(DATA_DIR, \"GDresults_dam.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.get_series():\n pass\n self.assertTrue(True)", "def test_parse_pi_xml_06(self):\n source = os.path.join(DATA_DIR, \"no_events.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.get_series():\n self.assertEqual(None, df)", "def load_xml(self,filename):\n self.initvars()\n source = iter(ET.iterparse(filename, events = ('start','end')))\n self.name = source.next()[1].tag\n for event,elem in source:\n if event == 'end' and elem.tag == 'row':\n row = [None]*self.numcols()\n for name,val in elem.attrib.items():\n try:\n idx = self.getColIndex(name)\n except ColumnNotFoundError:\n idx = len(self.cols)\n row.append(None)\n # Add new column to the table\n self.cols.append(set([name]))\n for oldrow in self.data:\n oldrow.append(None)\n row[idx] = val\n self.data.append(row)\n self.initTypes()", "def test_parse_pi_xml_09(self):\n source = os.path.join(DATA_DIR, \"no_events.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.get_series():\n self.assertEqual(None, df)", "def from_xml(self, xml):\n\n node = parseString(xml).documentElement\n self.from_node(node)", "def parser(self):\n\t\tdom = ET.parse(self.input_filename)\n\t\tself.doc = dom.getroot()", "def test_parse_pi_xml_06(self):\n source = os.path.join(DATA_DIR, \"no_events.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.bulk_get_series(chunk_size=5):\n self.assertEqual(None, df)", "def build_dataframe(self):\n #Freq 0.0 
2.5\n #ElementID NodeID Item\n #6901 6901 angle 0.000000+0.000000j 0.000000+0.000000j\n # sc 13.847674-0.461543j 13.855294-0.462052j\n # sd 0.625892-0.020861j 0.623742-0.020717j\n # se -12.178029+0.405894j -12.185331+0.406381j\n # sf 1.043753-0.034788j 1.046222-0.034953j\n # 6904 angle 0.000000+0.000000j 0.000000+0.000000j\n # sc -1.660571-0.416504j -1.663256-0.416978j\n # sd -2.790551+0.024178j -2.789738+0.024356j\n # se 0.627616+0.450933j 0.629571+0.451455j\n # sf 1.757596+0.010251j 1.756053+0.010121j\n #6902 6901 angle 0.000000+0.000000j 0.000000+0.000000j\n headers = self.headers\n column_names, column_values = self._build_dataframe_transient_header()\n self.data_frame = self._build_pandas_transient_element_node(\n column_values, column_names,\n headers, self.element_node, self.data)", "def test_parse_pi_xml_09(self):\n source = os.path.join(DATA_DIR, \"no_events.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.bulk_get_series(chunk_size=5):\n self.assertEqual(None, df)", "def loadFromDom(self, root):\n if hasattr(root, \"documentElement\"):\n self.xml = root\n else:\n # Encase the given tree fragment in a Document\n self.xml = createRootNode()\n self.xml.appendChild(self.xml.importNode(root, True))\n self.preprocess()", "def _parse_xml(self, response):\n if response.startswith('\\n'):\n response = response[1:]\n tree = etree.fromstring(response)\n return tree", "def parseXML(xmlFile):\n\n tree = etree.parse(xmlFile)\n root = tree.getroot() \n transitionTable = dict()\n transitionTable = getTransitions(tree, root, transitionTable)\n return tree, root, transitionTable", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def setUp(self):\n\n self.parser = XMLParser()\n self.xml = self.parser.parse(self.XMLDATA)", "def parse(self):\n\n if self.__xml_string is None:\n return\n else:\n self.__node_stack = []\n\n for token, content, loc in XML(self.__xml_string):\n\n # This next line presents a problem processing XML with special\n # formatting characters. It generates an exception. Since it is\n # only debug, we'll just comment this out until the XML is\n # updated to remove the special characters.\n #\n # DEBUG.debug( 'token, content, loc are %s=%s %s=%s %s=%s'\n # % (type(token).__name__,str(token),\n # type(content).__name__,str(content),\n # type(loc).__name__,str(loc) ) )\n\n if token == \"START\":\n name = content[0]\n attr = content[1]\n self._startElement(name, attr)\n elif token == \"TEXT\":\n self._cData(content)\n elif token == \"END\":\n name = content[0]\n self._endElement(name)\n\n return self.__root" ]
[ "0.65164536", "0.6279615", "0.6141911", "0.6138031", "0.6130271", "0.60411644", "0.6022583", "0.60092944", "0.5845552", "0.5839543", "0.58087265", "0.5732619", "0.56914544", "0.5677152", "0.5674767", "0.56603354", "0.56579065", "0.5635685", "0.5619518", "0.5595719", "0.5527254", "0.5525866", "0.5517543", "0.54022133", "0.53811586", "0.53782016", "0.53686124", "0.53686124", "0.53608376", "0.5352899" ]
0.6824071
0
Generate the Doxygen XML. We do not have to remove any old XML or similar, since we use the index.xml file to parse the rest. So if some stale information is in the output folder it is ok; we will not use it anyway
def generate(self): # Write Doxyfile doxyfile_content = DOXYFILE_TEMPLATE.format( name="wurfapi", output_path=self.output_path, source_path=" ".join(self.source_paths), recursive="YES" if self.recursive else "NO", extra="", ) doxyfile_path = os.path.join(self.output_path, "Doxyfile") with open(doxyfile_path, "w") as doxyfile: doxyfile.write(doxyfile_content) # @todo: Doxygen generates a bunch of warnings. We should # propagate these somehow - if you want to know what # has not been documented etc. result = self.runner.run( command=self.doxygen_executable + " Doxyfile", cwd=self.output_path ) # Doxygen reports warnings on stderr. So if we have some output # there raise it. self._suppress_incorrect_warnings(result.stderr) if result.stderr.output and self.warnings_as_error: raise wurfapi.doxygen_error.DoxygenError(result.stderr.output) # The Doxygen XML is written to the 'xml' subfolder of the # output directory return os.path.join(self.output_path, "xml")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_doxygen(self):\n if not getattr(self, \"doxygen_conf\", None):\n self.generator.bld.fatal(\"No doxygen configuration file supplied.\")\n if not isinstance(self.doxygen_conf, Node.Node):\n self.generator.bld.fatal(\"'doxygen_conf' must be a Node.\")\n\n self.create_task(\n \"doxygen\",\n self.doxygen_conf,\n cwd=self.bld.path.get_bld().abspath(),\n output=Context.STDERR,\n )", "def generate_documentation(self):\n self.generate_api_docs()\n build.main([\n self.SOURCE_DIR,\n self.BUILD_DIR,\n ])", "def makeDocFile(self):\n\n f_out = \"%s/%s-doc.php\" % (self.dir_out, self.project_id)\n version = max(self.versions)\n\n with open(f_out, 'w') as f:\n f.write(\"<!DOCTYPE html>\\n\" \\\n \"<html xmlns=\\\"http://www.w3.org/1999/xhtml\\\">\\n\" \\\n \"<head>\\n\" \\\n \"<meta http-equiv=\\\"Content-Type\\\" content=\\\"text/html; charset=utf-8\\\"/>\\n\" \\\n \"\\n\" \\\n \"<title>Kit&Pack − Ultimate Power Booster</title>\\n\" \\\n \"<link rel=\\\"shortcut icon\\\" type=\\\"image/png\\\" href=\\\"../favicon.png\\\"/>\" \\\n \"<link rel=\\\"stylesheet\\\" type=\\\"text/css\\\" href=\\\"../css/doc-2.css\\\" />\\n\"\n \"\\n\" \\\n \"</head>\\n\" \\\n \"<body>\\n\" \\\n \"\\n\" \\\n \"<h1>Ultimate Power Booster</h1>\" \\\n \"\\n\")\n\n # Write a list of other versions of the documentation\n f.write(\"<p>Versions de cette documentation.</p>\\n\")\n f.write(\"<ul>\\n\")\n for v in self.versions:\n f.write(\"\\t<li><a href=\\\"%s.php\\\">%s</a></li>\\n\" % (\n v, v))\n f.write(\"</ul>\\n\\n\")\n\n f.write(\"<?php\\n\" \\\n \"include(\\\"%s.php\\\")\\n\" \\\n \"?>\\n\" \\\n \"\\n\" \\\n \"</body>\\n\" \\\n \"</html>\" % (version))", "def generate_api_docs(self):\n if self.API_OUTPUT_DIR:\n args = [\n # Put documentation for each module on its own page\n '-e',\n # don't create the \"modules.rst\" file (the table of contents\n # file) as this is already provided by the package's main rst\n # file.\n '-T',\n # Overwrite existing files\n '--force',\n '-o', self.API_OUTPUT_DIR,\n # the package to generate docs from\n self.PROJECT_DIR\n ]\n excludes = [\n os.path.join(self.PROJECT_DIR, p)\n if not os.path.isabs(p) else p\n for p in self.API_EXCLUDE_DIRS\n ]\n apidoc.main(args + excludes)", "def html():\n builtdocs = path(\"docs\") / options.sphinx.builddir / \"html\"\n destdir = path(PACKAGE) / \"docs\"\n destdir.rmtree()\n builtdocs.move(destdir)", "def buildDocumentation():\n helptext = 'usage: build_doc.py <output format> <type of documentation>' \\\n '\\n - html: for html output' \\\n '\\n - pdf: for pdf output' \\\n '\\n\\n - all: complete documentation' \\\n '\\n - dev: only developer documentation' \\\n '\\n - user: only user documentation'\n if len(sys.argv) != 3:\n print helptext\n sys.exit(1)\n\n if sys.argv[1] not in ['pdf', 'html']:\n print helptext\n sys.exit(1)\n if sys.argv[2] not in ['all', 'dev', 'user']:\n print helptext\n sys.exit(1)\n\n copyfile('docs/index_%s.rst.template' % sys.argv[2], 'index.rst') # copy main file into root directory\n os.system('sphinx-build -b %s -c docs -D master_doc=index . 
docs/output/%s/%s' % (sys.argv[1], sys.argv[1], sys.argv[2]))\n os.remove('index.rst') # delete config file from root directory", "def run():\n build_no_documentation()\n build_sphinx_build()\n #build_sphinx_pdf()\n build_graphviz_files()", "def generate():\n local('cd doc && make clean && make html')", "def doc(self):\n from distutils.dir_util import copy_tree\n\n def copy_tree_checker(src, dst):\n \"\"\"Wrap copy_tree to avoid pydoit error.\"\"\"\n copy_tree(src, dst)\n return True\n\n return {\n \"actions\": [\n (create_dir, [\"build/doc/source\"]),\n (copy_tree_checker, [\"docs\", \"build/doc/source\"]),\n TaskCreator.get_sphinx() + \"-apidoc -o build/doc/source --force --separate --module-first \" + self.project_name_sc,\n TaskCreator.get_sphinx() + \"-build -j auto -n build/doc/source build/doc/html\"\n ],\n \"verbosity\": 2\n }", "def docs():\n sh('sphinx-build -W -b html docs docs/_build/html')", "def write_api_docs(self, outdir):\r\n if not os.path.exists(outdir):\r\n os.mkdir(outdir)\r\n # compose list of modules\r\n modules = self.discover_modules()\r\n self.write_modules_api(modules,outdir)", "def generate_docs(root_dir, session):\n ...", "def generate_html(repo_dir, package_dir, module):\n apidir = os.path.join(repo_dir, 'doc', 'api')\n print(f\"Generating {module} API docs in {apidir!r}\")\n if subprocess.call(['sphinx-apidoc', '-Tef', '-o', apidir,\n os.path.join(package_dir, module),\n os.path.join(package_dir, module, 'test'),\n os.path.join(package_dir, module, 'scripts')]):\n raise RuntimeError(f'API doc generation failed for {module}')", "def build(self) -> None:\n\n print(\"Genereting files..\")\n self.doc = self.doc + r'\\end{document}'\n\n f = open(\"latex\\\\\" + self.report_name + '.tex', 'w')\n f.write(self.doc)\n f.close()\n\n os.chdir('latex')\n\n cmd = ['pdflatex', '-interaction', 'nonstopmode', self.report_name + '.tex']\n #cmd = ['pdflatex', '-interaction', self.report_name + '.tex']\n\n for i in range(2):\n proc = subprocess.Popen(cmd)\n proc.communicate()\n retcode = proc.returncode\n if not retcode == 0:\n os.chdir('..')\n raise ValueError('Error {} executing command: {}'.format(retcode, ' '.join(cmd)))\n\n os.unlink(self.report_name + '.aux')\n os.unlink(self.report_name + '.lof')\n os.unlink(self.report_name + '.log')\n os.unlink(self.report_name + '.lot')\n os.unlink(self.report_name + '.out')\n os.unlink(self.report_name + '.toc')\n\n os.chdir('..')", "def generate_docs(self) -> List[Path]:\n outputs = []\n for file in self.files:\n if (stem := file.stem) == \"__init__\":\n # We might have more than one __init__.py file depending on package structure and these files shouldn't\n # contain methods, so we don't want to convert them\n continue\n\n if not (doc := get_doc(file)):\n continue # No docstring returned, skip this file\n doc = doc[33:] # First 33 characters are not required for our docs\n\n # Write the output we've generated to a file\n (output := self.directory / f\"{stem}.md\").write_text(generate_header(stem) + doc)\n outputs.append(output)\n return outputs", "def build_docs(session):\n envbindir = session.bin\n session.install(\"-e\", \".[all,docs]\")\n with session.chdir(\"docs/\"):\n session.run(\n \"sphinx-autobuild\",\n \"-j\",\n \"auto\",\n \"--open-browser\",\n \"-qT\",\n \".\",\n f\"{envbindir}/../tmp/html\",\n )", "def load_xml(self, only_files=None):\n xmldir = os.path.join(self._build_root, 'docs', 'html', 'doxygen', 'xml')\n self._docset = xml.DocumentationSet(xmldir, self._reporter)\n if only_files:\n if isinstance(only_files, 
collections.Iterable):\n filelist = [x.get_relpath() for x in only_files]\n self._docset.load_file_details(filelist)\n else:\n self._docset.load_file_details()\n else:\n self._docset.load_details()\n self._docset.merge_duplicates()\n self._load_dirs()\n self._load_modules()\n self._load_files()\n if not only_files:\n self._load_namespaces()\n self._load_classes()\n self._load_members()", "def main():\n # We know that qidoc build will set the correct cwd\n qibuild_dir = \"..\"\n qibuild_dir = os.path.abspath(qibuild_dir)\n this_file = __file__\n this_dir = os.path.dirname(this_file)\n cmake_api = os.path.join(this_dir, \"../source/advanced/cmake/api\")\n cmake_api = os.path.abspath(cmake_api)\n if not os.path.exists(cmake_api):\n os.makedirs(cmake_api)\n qibuild_cmake = os.path.join(qibuild_dir, \"cmake\", \"qibuild\")\n for filename in DOCUMENTED_FILES:\n cmake_file = os.path.join(qibuild_cmake, filename + \".cmake\")\n rst_file = os.path.join(cmake_api, filename + \".rst\")\n gen_cmake_doc(cmake_file, rst_file)", "def _write_member_documentation_pages(\n documenter: sphinx.ext.autodoc.Documenter):\n for entry in _get_documenter_members(documenter):\n if entry.is_inherited:\n continue\n if (entry.overload and entry.overload.overload_id and\n re.fullmatch('[0-9]+', entry.overload.overload_id)):\n logger.warning('Unspecified overload id: %s', entry.object_name)\n member_rst_path = os.path.join(documenter.env.app.srcdir, 'python', 'api',\n entry.page_name + '.rst')\n objtype = entry.documenter.objtype\n member_content = ''\n if objtype == 'class':\n member_content += ':duplicate-local-toc:\\n\\n'\n member_content += sphinx_utils.format_directive(\n 'tensorstore-python-apidoc',\n options=dict(\n fullname=entry.full_name,\n objtype=objtype,\n importname=entry.import_name,\n objectdescription=True,\n subscript=entry.subscript,\n overload=cast(ParsedOverload, entry.overload).overload_id,\n ),\n )\n pathlib.Path(member_rst_path).write_text(member_content)\n _write_member_documentation_pages(entry.documenter)", "def gen_cmake_doc(cmake_file, rst_file):\n should_skip = False\n basedir = os.path.dirname(rst_file)\n if not os.path.exists(basedir):\n os.makedirs(basedir)\n if not os.path.exists(rst_file):\n should_skip = False\n else:\n dest_mtime = os.stat(rst_file).st_mtime\n src_mtime = os.stat(cmake_file).st_mtime\n if src_mtime < dest_mtime:\n should_skip = True\n if should_skip:\n return\n print(\"Generating\", rst_file)\n with open(cmake_file, \"r\") as fp:\n txt = fp.read()\n rst = gen_rst(txt)\n with open(rst_file, \"w\") as fp:\n fp.write(\".. Generated by %s\\n.. DO NOT EDIT\\n\\n\" % sys.argv[0])\n fp.write(rst)", "def generate(env):\n## doxyfile_scanner = env.Scanner(## DoxySourceScan,\n## \"DoxySourceScan\",\n## scan_check = DoxySourceScanCheck,\n##)\n\n if targz.exists(env):\n srcdist_builder = targz.makeBuilder(srcDistEmitter)\n\n env['BUILDERS']['SrcDist'] = srcdist_builder", "def write_index(self, outdir, froot='gen', relative_to=None):\r\n if self.written_modules is None:\r\n raise ValueError('No modules written')\r\n # Get full filename path\r\n path = os.path.join(outdir, froot+self.rst_extension)\r\n # Path written into index is relative to rootpath\r\n if relative_to is not None:\r\n relpath = outdir.replace(relative_to + os.path.sep, '')\r\n else:\r\n relpath = outdir\r\n idx = open(path,'wt')\r\n w = idx.write\r\n w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\\n\\n')\r\n w('.. 
toctree::\\n\\n')\r\n for f in self.written_modules:\r\n w(' %s\\n' % os.path.join(relpath,f))\r\n idx.close()", "def test_html_documentation(self):\n app = Sphinx(\n self.source_dir,\n self.config_dir,\n self.output_dir,\n self.doctree_dir,\n buildername='html',\n warningiserror=True,\n )\n app.build(force_all=self.all_files)", "def createManPage(target, source, env):\n os.system('asciidoc -d manpage -b docbook -o alock.xml ' + str(source[0]))\n os.system('xmlto man alock.xml')\n os.remove('alock.xml')\n return None", "def create_gen_xml(self, out_file):\n\n param_list = []\n msg = []\n msg_type = []\n dep_node = []\n for line in self.full_ed_lines:\n param_list.append(line.text())\n dep_pkg = param_list[6].split(', ')\n if dep_pkg[len(dep_pkg) - 1] == '':\n dep_pkg.pop()\n for dep in self.manager.wid.sub_list:\n dep_node.append(dep['msg_type'])\n for dep in self.manager.wid.pub_list:\n dep_node.append(dep['msg_type'])\n for dep in dep_node:\n a, b = dep.split('/')\n msg.append(a)\n msg_type.append(b)\n f = open('../genkernel/templates/package_rosgen.xml')\n o = open(out_file, 'a')\n flag = 0\n while 1:\n line = f.readline()\n if not line: break\n for i in range(6):\n line = line.replace('[{0}]'.format(i), param_list[i])\n line = line.replace('[7]', param_list[7])\n if line.find('[6]') != -1:\n for dep in dep_pkg:\n line_dep = '\\t<depend>{0}</depend>\\n'.format(dep)\n o.write(line_dep)\n flag = 1\n elif line.find('[8]') != -1:\n for dep, tp in zip(msg, msg_type):\n line_dep = '\\t\\t<depend type=\"{1}\">{0}</depend>\\n'.format(dep, tp)\n o.write(line_dep)\n flag = 1\n elif line.find('<subscribers>') != -1:\n o.write('\\t\\t<subscribers>\\n')\n for sub in self.manager.wid.sub_list:\n o.write('\\t\\t\\t<sub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(sub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(sub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(sub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(sub['queue_size']))\n o.write('\\t\\t\\t</sub>\\n')\n o.write('\\t\\t</subscribers>\\n')\n flag = 1\n elif line.find('<publishers>') != -1:\n o.write('\\t\\t<publishers>\\n')\n for pub in self.manager.wid.pub_list:\n o.write('\\t\\t\\t<pub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(pub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(pub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(pub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(pub['queue_size']))\n o.write('\\t\\t\\t</pub>\\n')\n o.write('\\t\\t</publishers>\\n')\n flag = 1\n if flag == 0:\n o.write(line)\n else:\n flag = 0\n o.close()\n f.close()\n self.changed = False", "def run(self):\n name_desc = self.__class__.name_sphinx\n settings = self.state.document.settings\n env = settings.env if hasattr(settings, \"env\") else None\n docname = None if env is None else env.docname\n tag = self.options.get('tag', '').strip()\n n = self.__class__.node_class('')\n n[\"breftag\"] = tag\n n[\"brefsort\"] = self.options.get('sort', 'title').strip()\n n[\"brefsection\"] = self.options.get(\n 'section', True) in (True, \"True\", \"true\", 1, \"1\")\n n[\"brefcontents\"] = self.options.get(\n 'contents', False) in (True, \"True\", \"true\", 1, \"1\", \"\", None, \"None\")\n n['docname'] = docname\n if env is not None:\n targetid = 'index%slist-%s' % (name_desc,\n env.new_serialno('index%slist' % name_desc))\n targetnode = nodes.target('', '', ids=[targetid])\n 
return [targetnode, n]\n else:\n return [n]", "def write_root_index(self):\n self.logger.info('writing package index')\n temp_dir = self.output_path / 'simple'\n with tempfile.NamedTemporaryFile(mode='w', dir=str(temp_dir),\n encoding='utf-8',\n delete=False) as index:\n try:\n index.file.write('<!DOCTYPE html>\\n')\n index.file.write(\n tag.html(\n tag.head(\n tag.title('Pi Wheels Simple Index'),\n tag.meta(name='api-version', value=2),\n ),\n tag.body(\n (tag.a(package, href=package), tag.br())\n for package in self.package_cache\n )\n )\n )\n except BaseException:\n index.delete = True\n raise\n else:\n os.fchmod(index.file.fileno(), 0o644)\n os.replace(index.name,\n str(self.output_path / 'simple' / 'index.html'))", "def buildHTML(self):\n\n # TODO: make this configurable via a dialog\n os.chdir(self.file_path.parent)\n proc = subprocess.Popen(\n [\"make\", \"clean\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n for line in proc.stdout:\n print(\"stdout: \" + str(line.rstrip(), encoding='utf8'))\n print('----------------')\n proc = subprocess.Popen(\n [\"make\", \"html\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n proc.wait()\n for line in proc.stdout:\n print(\"stdout: \" + str(line.rstrip(), encoding='utf8'))\n\n # Load corresponding HTML file from newly-built Sphinx docs\n self.preview.load_html(self.output_html_path)", "def configure(conf):\n conf.find_program(\"doxygen\", var=\"DOXYGEN\")\n conf.find_program(\"dot\", var=\"DOT\")", "def build_python_api_main(outputdir, components):\n mainrst_filename = Path(outputdir, 'index.rst')\n # list documented (python) packages\n docpython_dir = Path(outputdir, 'python')\n packages = [f for f in docpython_dir.glob('*')]\n packages = [p.name for p in packages]\n # (ugly) trick to print components in the expected order.\n pack = {}\n for p in components:\n for pname in packages:\n if pname.count(p) > 0:\n pack[pname] = components[p]\n packages = [p[0] for p in sorted(pack.items(), key=operator.itemgetter(1))] \n\n if len(packages) > 0:\n with open(mainrst_filename, 'a') as f:\n # label = '.. _siconos_python_reference:\\n\\n\\n'\n title = 'Siconos Python API reference'\n title += '\\n' + len(title) * '#' + '\\n\\n'\n title += 'This is the documentation of '\n title += '`python <https://www.python.org/>`_ '\n title += 'interface to Siconos.\\n\\n\\n'\n header = '.. toctree::\\n :maxdepth:3\\n\\n'\n f.write(title)\n f.write(header)\n for p in packages:\n if p in modules_docs:\n title = p.replace('_','.') + ': ' + modules_docs[p]\n directive = title + ' <python/' + p + '/autodoc>\\n'\n else:\n directive = 'python/' + p + '/autodoc\\n\\n'\n directive = textwrap.indent(directive, ' ')\n f.write(directive)\n f.write('\\n')" ]
[ "0.70324063", "0.6946403", "0.68050563", "0.66510713", "0.6509056", "0.6443243", "0.6337205", "0.63226926", "0.62693423", "0.62597907", "0.623974", "0.62392175", "0.6145338", "0.6020383", "0.6006961", "0.6003898", "0.5965673", "0.5950159", "0.5910592", "0.5896202", "0.58665496", "0.58571017", "0.5848018", "0.5842549", "0.58302075", "0.581624", "0.5749026", "0.5748675", "0.5724701", "0.5724263" ]
0.77589625
0
Sidebar widget to select your data view
def select_data_view() -> str:
    st.sidebar.markdown('### Select your data view:')
    view_select = st.sidebar.selectbox('', DATA_VIEWS, index=0). \
        replace(' (NEW)', '')

    return view_select
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_layout() -> None:\n\n st.sidebar.title(\"Menu\")\n app_mode = st.sidebar.selectbox(\"Please select a page\", [' I. Homepage',\n \"II. Download data\" ,\n \"III. Statistic Data\",\n ' IV. AGF Indices',\n ' V. Notes',\n \" VI. Rank of patient\" ])\n \n if app_mode == ' I. Homepage':\n load_homepage() \n elif app_mode == \"III. Statistic Data\":\n leyer.leyer() \n elif app_mode == ' IV. AGF Indices':\n single.AGF_indices() \n elif app_mode == \"II. Download data\":\n download_data.download_data() \n elif app_mode == ' V. Notes':\n text_input.text_input()\n elif app_mode == \" VI. Rank of patient\":\n rank_of_patient.rank_of_patient()", "def get_sidebar_layout(self):\n return [\n dbc.Card(children=self.get_sidebar_filters(), body=True),\n dbc.Card(children=self.get_sidebar_filter_save(), body=True, className=\"mt-2\"),\n dbc.Card(children=self.get_sidebar_saved_filter_names(), body=True, className=\"mt-2\")\n ]", "def sidebar_menu(items, selected):\n\n return {\n 'items': items,\n 'selected': selected,\n }", "def __sidebar(self, conf):\n result = \"\"\n\n i = conf[\"conf_json\"][0]\n result += \"\"\"\n\n <h4>%s</h4>\n <ul id=\"sidebar_menu\" class=\"sidebar_menu\">\n <li><a class=\"btn active\" href=\"#description\">DESCRIPTION</a></li>\n <li><a class=\"btn\" href=\"#parameters\">PARAMETERS</a></li>\n <li><a class=\"btn\" href=\"#operations\">OPERATIONS</a>\n <ul class=\"sidebar_submenu\">%s</ul>\n </li>\n <li><a class=\"btn active\" href=\"/\">HOME</a></li>\n </ul>\n \"\"\" % (\n i[\"title\"],\n \"\".join(\n [\n \"<li><a class='btn' href='#%s'>%s</a></li>\" % (op[\"url\"], op[\"url\"])\n for op in conf[\"conf_json\"][1:]\n ]\n ),\n )\n return result", "def various(self):\n # Changer le default d'un tb, ici ne rien mettre au niveau le plus haut\n context = self\n from imio.dashboard.utils import getCollectionLinkCriterion\n criterion = getCollectionLinkCriterion(context)\n criterion.default = u''\n from eea.facetednavigation.criteria.interfaces import ICriteria\n ICriteria(context).criteria._p_changed = True\n\n # Réparer la vue de la page pst\n context.setLayout('view')\n from imio.project.pst.setuphandlers import configure_faceted_folder\n configure_faceted_folder(context, xml='default_dashboard_widgets.xml', default_UID=None)", "def getWidget(self):", "def widget(self, request, group):", "def dashboard():", "def edit_sidebar(request):\n sidebar = models.Sidebar.load()\n\n if request.POST and 'yaml' in request.POST:\n yaml_data = request.POST['yaml']\n if not sidebar:\n sidebar = models.Sidebar(yaml=yaml_data)\n else:\n sidebar.yaml = yaml_data\n\n error_message = None\n try:\n sidebar.put()\n except yaml.YAMLError:\n error_message = 'Invalid YAML'\n except KeyError, error:\n error_message = 'Invalid YAML, missing key %s' % error\n\n if error_message:\n return utility.respond(request, 'admin/edit_sidebar',\n {'yaml': yaml_data,\n 'error_message': error_message})\n\n return http.HttpResponseRedirect(urlresolvers.reverse('views.admin.index'))\n\n else:\n yaml_data = ''\n if sidebar:\n yaml_data = sidebar.yaml\n return utility.respond(request, 'admin/edit_sidebar', {'yaml': yaml_data})", "def sidebar_click(sidebar_item):\n #print('sidebar_click')\n sidebar = driver.find_element_by_class_name('m-menu__nav')\n return click_on(sidebar_item, scope=sidebar)", "def _setupSideMenu(self):\r\n dataGroup = QGroupBox(\"Select Data\")\r\n dataGroup.setLayout(self._setupDataGroup())\r\n self.addWidget(dataGroup, 1)\r\n\r\n modelsGroup = QGroupBox(\"Select Hazard Functions\")\r\n 
modelsGroup.setLayout(self._setupModelsGroup())\r\n self.addWidget(modelsGroup, 2)\r\n\r\n metricsGroup = QGroupBox(\"Select Covariates\")\r\n metricsGroup.setLayout(self._setupMetricsGroup())\r\n self.addWidget(metricsGroup, 2)\r\n\r\n self.runButton = QPushButton(\"Run Estimation\")\r\n self.runButton.clicked.connect(self._emitRunModelSignal)\r\n self.addWidget(self.runButton, 1)\r\n\r\n self.addStretch(1)\r\n\r\n # signals\r\n self.sheetSelect.currentIndexChanged.connect(self._emitSheetChangedSignal) # when sheet selection changed\r", "def on_right_click_theory(self, event):\n try:\n id, data_class_name, _ = \\\n self.tree_ctrl_theory.GetSelection().GetData()\n _, _ = self.parent.get_data_manager().get_by_id(id_list=[id])\n except:\n return\n if self.data_menu is not None:\n menu_enable = (data_class_name == \"Data2D\")\n self.data_menu.Enable(self.editmask_id, False)\n self.data_menu.Enable(self.plot3d_id, menu_enable)\n self.PopupMenu(self.data_menu)", "def showSideBar(self):\n\t\treturn False", "def visit_sidebar(self, node):\n\n return BaseTranslator.visit_admonition(self, node)", "def app():\n # Add title to the page\n st.title(\"Welcome to the Data Info page\")\n\n # Add subheader for the section\n st.subheader(\"View Data\")\n\n # Load the dataset\n X, y = load_data()\n df = pd.concat([X, y], axis=1)\n\n # Create an expansion option to check the data\n with st.expander(\"View data\"):\n st.dataframe(df)\n\n # Create a section to columns values\n # Give subheader\n st.subheader(\"Columns Summary:\")\n\n # Create a checkbox to get the summary.\n if st.checkbox(\"View Summary\"):\n st.dataframe(df.describe())\n\n # Create multiple check box in row\n col_name, col_dtype, col_data = st.columns(3)\n\n # Show name of all dataframe\n with col_name:\n if st.checkbox(\"Column Names\"):\n st.dataframe(df.columns)\n\n # Show datatype of all columns \n with col_dtype:\n if st.checkbox(\"Columns data types\"):\n dtypes = df.dtypes.apply(lambda x: x.name)\n st.dataframe(dtypes)\n \n # Show data for each columns\n with col_data: \n if st.checkbox(\"Columns Data\"):\n col = st.selectbox(\"Column Name\", list(df.columns))\n st.dataframe(df[col])\n \n # Add image for your data describtion.\n #st.image(\"./images/iris_classification_model.jpg\")\n\n # Add info about your dataset\\\n # st.write(\"Data Info\")\n\n # Add the link to you dataset\n # st.markdown(\"\"\"\n # <p style=\"font-size:24px\">\n # <a \n # href=\"https://github.com/ShishirShekhar/car-price-prediction/blob/main/about.py\"\n # target=_blank\n # style=\"text-decoration:none; color:red\"\n # >Dataset\n # </a> \n # </p>\n # \"\"\", unsafe_allow_html=True\n # )", "def main_menu_toolbar():\n\n pass", "def on_pushButton_view_clicked(self):\n content = unicode(self.comboBox.currentText())\n if content == \"职称表\":\n data = self.sql_client.get_zc_info()\n self.fill_tableview(data)\n elif content == \"文化表\":\n data = self.sql_client.get_wh_info()\n self.fill_tableview(data)\n elif content == \"部门表\":\n data = self.sql_client.get_bm_info()\n self.fill_tableview(data)", "def admin_sidebar(context):\n request = context.get('request')\n\n request_context = {\n 'count_users': User.objects.count(),\n 'count_review_groups': Group.objects.count(),\n 'count_default_reviewers': DefaultReviewer.objects.count(),\n 'count_oauth_applications': Application.objects.count(),\n 'count_repository': Repository.objects.accessible(\n request.user, visible_only=False).count(),\n 'count_webhooks': WebHookTarget.objects.count(),\n 'count_hosting_accounts': 
HostingServiceAccount.objects.count(),\n 'version': get_version_string(),\n }\n\n # We're precomputing URLs in here, rather than computing them in the\n # template, because we need to always ensure that reverse() will be\n # searching all available URL patterns and not just the ones bound to\n # request.current_app.\n #\n # current_app gets set by AdminSite views, and if we're in an extension's\n # AdminSite view, we'll fail to resolve these URLs from within the\n # template. We don't have that problem if calling reverse() ourselves.\n request_context.update({\n 'url_%s' % url_name: reverse('admin:%s' % url_name)\n for url_name in ('auth_user_add',\n 'auth_user_changelist',\n 'hostingsvcs_hostingserviceaccount_add',\n 'hostingsvcs_hostingserviceaccount_changelist',\n 'notifications_webhooktarget_add',\n 'notifications_webhooktarget_changelist',\n 'oauth_application_add',\n 'oauth_application_changelist',\n 'reviews_defaultreviewer_add',\n 'reviews_defaultreviewer_changelist',\n 'reviews_group_add',\n 'reviews_group_changelist',\n 'scmtools_repository_add',\n 'scmtools_repository_changelist')\n })\n\n return RequestContext(request, request_context)", "def create_widgets(self):", "def layout_data_list(self):\n # Add splitter\n w, h = self.parent.GetSize()\n splitter = wx.SplitterWindow(self)\n splitter.SetMinimumPaneSize(50)\n splitter.SetSashGravity(1.0)\n\n file_sizer = wx.BoxSizer(wx.VERTICAL)\n file_sizer.SetMinSize(wx.Size(w/13, h*2/5))\n theory_sizer = wx.BoxSizer(wx.VERTICAL)\n theory_sizer.SetMinSize(wx.Size(w/13, h*2/5))\n\n self.tree_ctrl = DataTreeCtrl(parent=splitter,\n style=wx.SUNKEN_BORDER,\n root=\"Available Data\")\n\n self.tree_ctrl.Bind(CT.EVT_TREE_ITEM_CHECKING, self.on_check_item)\n self.tree_ctrl.Bind(CT.EVT_TREE_ITEM_MENU, self.on_right_click_data)\n # Create context menu for page\n self.data_menu = wx.Menu()\n id = wx.NewId()\n name = \"Data Info\"\n msg = \"Show Data Info\"\n self.data_menu.Append(id, name, msg)\n wx.EVT_MENU(self, id, self.on_data_info)\n\n id = wx.NewId()\n name = \"Save As\"\n msg = \"Save Theory/Data as a file\"\n self.data_menu.Append(id, name, msg)\n wx.EVT_MENU(self, id, self.on_save_as)\n\n quickplot_id = wx.NewId()\n name = \"Quick Plot\"\n msg = \"Plot the current Data\"\n self.data_menu.Append(quickplot_id, name, msg)\n wx.EVT_MENU(self, quickplot_id, self.on_quick_plot)\n\n self.plot3d_id = wx.NewId()\n name = \"Quick 3DPlot (Slow)\"\n msg = \"Plot3D the current 2D Data\"\n self.data_menu.Append(self.plot3d_id, name, msg)\n wx.EVT_MENU(self, self.plot3d_id, self.on_plot_3d)\n\n self.editmask_id = wx.NewId()\n name = \"Edit Mask\"\n msg = \"Edit Mask for the current 2D Data\"\n self.data_menu.Append(self.editmask_id, name, msg)\n wx.EVT_MENU(self, self.editmask_id, self.on_edit_data)\n\n self.tree_ctrl_theory = DataTreeCtrl(parent=splitter,\n style=wx.SUNKEN_BORDER,\n root=\"Available Theory\")\n self.tree_ctrl_theory.Bind(CT.EVT_TREE_ITEM_CHECKING,\n self.on_check_item)\n self.tree_ctrl_theory.Bind(CT.EVT_TREE_ITEM_MENU,\n self.on_right_click_theory)\n splitter.SplitHorizontally(self.tree_ctrl, self.tree_ctrl_theory)\n self.sizer1.Add(splitter, 1, wx.EXPAND | wx.ALL, 10)", "def open_viewer(self):\r\n choice = self.thoughts_lst.get(tk.ACTIVE)\r\n subject = self.refference[choice]\r\n tbl = self.home_table[subject]\r\n view = kit.SQL_pull('*', tbl, 'subject_id = \"{}\"'.format(subject))\r\n obj = kit.class_fill(tbl, view[0])\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jv.Viewer(self.session, obj)", "def 
__init__(self):\r\n super().__init__()\r\n self._setupSideMenu()", "def on_help(self, event):\n\n #import documentation window here to avoid circular imports\n #if put at top of file with rest of imports.\n from documentation_window import DocumentationWindow\n\n _TreeLocation = \"user/sasgui/guiframe/data_explorer_help.html\"\n _doc_viewer = DocumentationWindow(self, -1, _TreeLocation, \"\",\n \"Data Explorer Help\")", "def layout():\n # Needs db connection! (Set up tunnel if testing app locally)\n _ids = get_doc_ids_from_db() \n dropdown_dates = {num2str_month(_id): _id for _id in _ids}\n\n children_list = [\n html.Div([\n html.H2('Top women and men quoted'),\n html.P('''\n In this section, we display the top women and men quoted as sources in a\n given month. First, select a month from the dropdown menu below.\n ''')],\n ),\n html.Br(),\n dcc.Dropdown(\n id='date-dropdown',\n options=[\n {'label': date_str, 'value': date_num}\n for date_str, date_num in dropdown_dates.items()\n ],\n value=_ids[-1],\n className='dropdown'\n ),\n html.Br(),\n html.Div(\n dcc.Loading(\n id='topic-load-progress',\n children=[\n dcc.Store(id='top-sources-topic-data'),\n dcc.Store(id='top-sources-stats'),\n ])\n ),\n html.Br(),\n html.H4('Topics and gender representation among sources'),\n dcc.Markdown('''\n First, we display the *gender prominence* for each topic discovered in the \n selected month. Gender prominence is a measure we use to study whether a\n given topic features quotes by men or women more prominently.\n '''),\n html.Div([\n dcc.Graph(id='top-sources-outlet-gender-heatmap'),\n ]),\n html.Br(),\n html.Div([\n html.H4('Top quoted sources by gender'),\n html.P(f'''\n Next, we display the top {NUM_SOURCES_TO_SHOW} sources per gender for the given month.\n Hover over the dots to display the total number of articles in which each source was \n quoted.\n '''),\n html.Div([\n dcc.Graph(id='top-sources-dotplot', className='chart'),\n ]),\n ]),\n html.H4('Observations'),\n dcc.Markdown(id='user-comment-display'),\n ]\n return children_list", "def add_sidebar(self, widget: Component) -> None:\n self._root.add_sidebar(widget)", "def viewWidgetCreated(self, view, plot):\n return", "def home(request):\n\n context = {\n \"resource_id\": request.GET.get(\"resource_id\"),\n \"aggregation_id\": request.GET.get(\"aggregation_path\"),\n \"geoserver_url\": app.get_custom_setting(\"geoserver_url\"),\n \"hydroserver_url\": app.get_custom_setting(\"hydroserver_url\"),\n \"max_layers\": app.get_custom_setting(\"max_layers\")\n }\n\n return render(request, 'hydroshare_data_viewer/home.html', context)", "def onInvoke():\n if dock.isVisible():\n dock.toggleViewAction().trigger()\n else:\n dock.setFloating(True)\n pos = QtGui.QCursor.pos()\n dock.move(pos.x() - dock.size().width() / 2,\n pos.y() - dock.size().height() / 2)\n dock.setVisible(True)", "def menuItem(*args):\n\toptionsWindow()", "def show_main_edit():\n db = get_db()\n now = datetime.datetime.now()\n rooms = app.config[\"HC_CONFIG\"].values()\n rooms.sort(key=config_lexer.Room.sortkey)\n # Get the three newest counts from the database, sorted by the\n # user-provided time\n newest_counts = db.get_newest_counts_for_user(\n 3, session[\"username\"], hc_db.NewestSort.ENTERED_TIME\n )\n recent_counts = []\n for count in newest_counts:\n room_rows = db.get_roomdata_for_count_id(count[\"id\"])\n # I couldn't think of a short, descriptive name for this variable.\n some_dict = {\"id\": count[\"id\"], \"date\": count[\"entered_time\"], \"counts\": {}}\n for row in 
room_rows:\n some_dict[\"counts\"][row[\"room\"]] = row[\"people_count\"]\n some_dict[\"counts\"] = OrderedDict(\n sorted(some_dict[\"counts\"].items(), key=sort_count_data)\n )\n recent_counts.append(some_dict)\n if is_admin(session[\"username\"]):\n buttons = [\n NavButton(url_for(\"show_admin\"), \"Administration\"),\n NavButton(url_for(\"show_help\"), \"Help\"),\n NavButton(url_for(\"logout\"), \"Log Out\"),\n ]\n else:\n buttons = [\n NavButton(url_for(\"show_help\"), \"Help\"),\n NavButton(url_for(\"logout\"), \"Log Out\"),\n ]\n return render_template(\n \"main-edit.html\",\n buttons=buttons,\n rooms=rooms,\n recent_counts=recent_counts,\n datewhen=now.strftime(\"%Y-%m-%d\"),\n timewhen=now.strftime(\"%H:%M\"),\n )" ]
[ "0.55691195", "0.5513806", "0.5493136", "0.5451674", "0.5418005", "0.5393736", "0.53791744", "0.5376035", "0.5333957", "0.53197217", "0.53023964", "0.5265653", "0.5262883", "0.52579665", "0.5177404", "0.5163641", "0.51526636", "0.5104715", "0.510281", "0.5094196", "0.5078733", "0.50778455", "0.5075264", "0.5071688", "0.5060323", "0.5055614", "0.50260115", "0.50143474", "0.5012948", "0.5012896" ]
0.61881065
0
Sidebar widget to select fiscal year
def select_fiscal_year(view_select) -> str:
    if 'Wage Growth' in view_select:
        working_fy_list = FY_LIST[:-1]
    else:
        working_fy_list = FY_LIST

    st.sidebar.markdown('### Select fiscal year:')
    fy_select = st.sidebar.selectbox('', working_fy_list, index=0).split(' ')[0]

    return fy_select
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_calender_year(self, year):\n self.single_selection_from_kendo_dropdown(self.calender_year_kendo_dropdown_locator, year)", "def set_start_year(self, year):\n return self.form.set_value(\"output period \\\"year from\\\"\", str(year))", "def MonthYearFieldWidget(field, request):\n return z3c.form.widget.FieldWidget(field, MonthYearWidget(request))", "def showNextYear(self):\n pass", "def yearShown(self):\n return self.currentYear", "def set_finish_year(self, year):\n return self.form.set_value(\"output period \\\"year to\\\"\", str(year))", "def get_date_display(self, context):\n return '{year}'.format(year=self.get_year())", "def showPreviousYear(self):\n pass", "def test_year_filtering(self):\n # Get a valid date\n entry = Entry.objects.get(id=1)\n params = {\"year\": entry.publication_date.year}\n\n self._test_filtering(**params)", "def closeyear(year):\n\n # Return the specific year\n return int(year % 4)", "def get_current_fiscal_year(self):\n current_date = datetime.today().date()\n for year in self.fiscal_years.all():\n if year.begin_date < current_date < year.end_date:\n return year\n return None", "def increment_year(self):", "def set_year (self, year):\n self.year = year", "def _get_fiscalyear(self, cr, uid, context={}, period_id=False):\n\n if period_id:\n period_obj = self.pool.get(\n 'account.period').browse(cr, uid, period_id)\n fiscalyear_id = period_obj.fiscalyear_id.id\n else:\n fiscalyear_obj = self.pool.get('account.fiscalyear')\n ids = fiscalyear_obj.find(cr, uid, time.strftime(\n '%Y-%m-%d'), context=context)\n fiscalyear_id = ids\n return fiscalyear_id", "def run_year(self, year):\n pass", "def set_Year(self, value):\n super(GetTimestampFromDateParametersInputSet, self)._set_input('Year', value)", "def get_current_year() -> int:\n return datetime.now().year", "def year_archive(request, year):\n articles = Article.objects.filter(pub_date__year=year)\n context = { 'year': year, 'articles': articles }\n return render(request, 'news/year_archive.html', context)", "def get_year_form(answer, answer_store, metadata, error_messages, label, guidance, group_instance=0):\n\n class YearDateForm(Form):\n year = None\n\n @property\n def data(self):\n data = super().data\n if not data['year']:\n return None\n\n return '{:04d}'.format(int(data['year']))\n\n validate_with = [OptionalForm()]\n\n if answer['mandatory'] is True:\n validate_with = validate_mandatory_date(error_messages, answer)\n\n error_message = get_bespoke_message(answer, 'INVALID_DATE')\n validate_with.append(YearCheck(error_message))\n\n if 'minimum' in answer or 'maximum' in answer:\n min_max_validation = validate_min_max_date(answer, answer_store, metadata, 'yyyy', group_instance=group_instance)\n validate_with.append(min_max_validation)\n\n YearDateForm.year = CustomIntegerField(\n label=label,\n validators=validate_with,\n description=guidance,\n )\n\n return YearDateForm", "def year(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"year\")", "def year_dropdown_populator():\n start_year_unique = funding_data['start_year'].unique()\n\n year_list = []\n for i in start_year_unique:\n if i == -1:\n # print({'label': i, 'value': i})\n # NA values has been changes to -1\n year_list.append({'label': 'NA', 'value': -1})\n else:\n x = int(i)\n # print({'label': x, 'value': i})\n year_list.append({'label': i, 'value': i})\n return year_list", "def testFutureYear(self):\n sdq1 = getattr(self.s1, 'sdq1')\n self.app.REQUEST.form['endingYear'] = ''\n self.app.REQUEST.form['futureYears'] = '5'\n app = 
self.app\n dummy_controller_state = ControllerState(\n id='base_edit',\n context=sdq1,\n button='submit',\n status='success',\n errors={},\n next_action=None,)\n controller = self.portal.portal_form_controller\n controller_state = controller.validate(dummy_controller_state, app.REQUEST, ['validate_base',])\n errors = controller_state.getErrors()\n errors = sdq1.post_validate(self.app.REQUEST, errors)\n assert errors == {}, \"Validation error raised: %s\" % errors", "def do_ry(self, arg):\n self.do_timesheet('report year')", "def yearname(self):\n return self.strftime(\"%Y\")", "def test_spider_gets_specific_year(self):\n spider = Eia923Spider()\n resp = factories.TestResponseFactory(eia923=True)\n\n result = spider.form_for_year(resp, 2007)\n\n assert result is not None\n assert result.url == \"https://www.eia.gov/electricity/data/eia923/\" \\\n \"archive/xls/f906920_2007.zip\"\n assert result.meta[\"year\"] == 2007\n\n for year in range(2001, 2019):\n result = spider.form_for_year(resp, year)\n assert result is not None", "def update_dropdowns_years_options(df_in, aux):\n\n df = u.uos.b64_to_df(df_in)\n return lay.get_options(df[c.cols.YEAR].unique().tolist())", "def get_year():\n try:\n year = input(\"Enter Year: \")\n year = int(year)\n if year > 2021 or year < 2000:\n os.system('cls')\n print(\"Accepted Values: 2000-2021\")\n return get_year()\n else:\n os.system('cls')\n return year\n except ValueError:\n os.system('cls')\n print(\"Accepted Values: 2000-2021\")\n return get_year()", "def create_dropdown_list_years(movies):\n dropdown = ''\n movies.sort(key=lambda x: x.year, reverse=True)\n y = []\n for movie in movies:\n y.append(movie.year)\n y = list(set(y))\n for x in y:\n dropdown += dropdown_tile_year.format(id_year=x,\n year=x)\n \n return dropdown", "def setYear(self, *args):\n return _libsbml.Date_setYear(self, *args)", "def get_year(self):\n return self.year" ]
[ "0.6576069", "0.6364317", "0.62365025", "0.6131743", "0.6076331", "0.5969902", "0.5813705", "0.57639", "0.5758694", "0.5715589", "0.56626064", "0.5603968", "0.5560014", "0.5438583", "0.54234385", "0.53938276", "0.5356743", "0.5356063", "0.5349551", "0.5347137", "0.53393054", "0.53263825", "0.5311115", "0.5307799", "0.5303358", "0.52942204", "0.5241037", "0.5238227", "0.5232996", "0.5214949" ]
0.7023844
0
Sidebar widget to select pay rate conversion (hourly/annual)
def select_pay_conversion(fy_select, pay_norm, view_select) -> int:
    st.sidebar.markdown('### Select pay rate conversion:')
    conversion_select = st.sidebar.selectbox('', PAY_CONVERSION, index=0)

    if conversion_select == 'Hourly':
        if view_select != 'Trends':
            pay_norm = FISCAL_HOURS[fy_select]  # Number of hours per FY
        else:
            pay_norm = 2080  # Number of hours per FY

    return pay_norm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cb_radio(label):\n global pm_rate\n rate_dict = {'0.2 Step': 0.2, '1.0 Step': 1.0}\n pm_rate = rate_dict[label]", "def select_rates_tab(self):\n self.select_static_tab(self.rates_tab_locator, True)", "def render_investip():\n\tlinewidth = 2\n\n\tst.sidebar.markdown('# Dashboard')\n\tstock = st.sidebar.selectbox('Stock:', stocks)\n\n\tstartdd = datetime.datetime(2020, 3, 1)\n\tstartdd = st.sidebar.date_input('start-date', value=startdd)\n\n\tendd = datetime.datetime.now()\n\tendd = st.sidebar.date_input('end-date', value=endd)\n\n\tt0 = stock\n\tt0_ohlc = extract(ticker=t0, start_date=startdd, end_date=endd)\n\tt0_df = pd.DataFrame({f'{t0}-Close': t0_ohlc.Close})\n\n\t# st.write(t0_ohlc)\n\tmpf.plot(t0_ohlc, type='candle',volume=True,show_nontrading=False, title=t0, figscale=1.)\n\t# tdf = plot_ticker(t0, df=t0_df, start_date=startdd, end_date=endd)\n\tst.pyplot()\n\n\n\tst.sidebar.markdown('## Stock Correlation')\n\tstock_returns = st.sidebar.checkbox('Enable', value=True, key='cb_corrs')\n\tif stock_returns:\n\t\tst.markdown('## Stock Correlation')\n\t\tstock_selection = st.sidebar.multiselect('Stocks', stocks, def_stocks)\n\t\tplot_stock_correlations(stock_selection, startdd, endd)\n\t\tst.pyplot()\n\n\t# trading_context = True\n\tst.sidebar.markdown('## Returns')\n\tstock_returns = st.sidebar.checkbox('Enable', value=True, key='cb_returns')\n\tif stock_returns:\n\t\tst.markdown('## Stock Returns')\n\t\tst.markdown('''### Daily Stock returns\n[EWMA](https://www.investopedia.com/articles/07/ewma.asp)''')\n\t\tspan = st.sidebar.slider('span', 2, 21, value=5)\n\t\tplot_historical(t0, t0_ohlc, span=span, linewidth=linewidth)\n\t\tst.pyplot()\n\n\n\t# trading_context = True\n\tst.sidebar.markdown('## Volatility')\n\ttrading_context = st.sidebar.checkbox('Enable', value=False, key='cb_volatility')\n\tif trading_context:\n\t\tst.markdown('## Volatility & Risk')\n\t\tst.markdown('''### Daily differences between High & Low\nWe model these ranges with [Inverse Gamma PDF](https://en.wikipedia.org/wiki/Inverse-gamma_distribution).\nGreen lines denote +/- 1 stdev.\n''')\n\t\tf, ax = plt.subplots(1, 2, figsize=(14,6), sharex=False)\n\t\tf.suptitle(f'{t0} High-Low Daily')\n\t\tmmd = t0_ohlc.High - t0_ohlc.Low\n\t\t# mmd.dropna(inplace=True)\n\t\tmmd.plot(color='r', ax=ax[0], lw=linewidth)\n\n\t\tmu, sigma = mmd.dropna().mean(), mmd.dropna().std()\n\t\tzval = 1.#96\n\t\t# TODO: try one-tail limit to get outliers\n\t\t_=ax[0].axhline(y=mu, color='k', lw=linewidth)\n\t\t_=ax[0].axhline(y=mu-zval*sigma, color='g', lw=linewidth)\n\t\t_=ax[0].axhline(y=mu+zval*sigma, color='g', lw=linewidth)\n\n\t\tp95 = mmd.dropna().quantile(.95)\n\t\t_=ax[0].axhline(y=p95, color='b', lw=linewidth, label='p95')\n\t\t_=ax[1].axvline(p95, color='b', lw=linewidth, label='p95')\n\n\t\twith warnings.catch_warnings():\n\t\t warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\t\t print(invgamma.fit(mmd))\n\t\t sns.distplot(mmd, fit=invgamma, kde=False, ax=ax[1])\n\t\t_=ax[1].axvline(mmd.values[-1], color='r', label='last', lw=linewidth)\n\t\t_=ax[1].axvline(mu, color='k', label='mean', lw=linewidth)\n\t\t_=ax[1].legend()\n\t\tst.pyplot()\n\n\t\tst.markdown('''### Daily Average True Range (ATR)\nImplementation follows [ATR](https://kodify.net/tradingview/indicators/average-true-range/).\nCheck [Investopedia](https://www.investopedia.com/terms/a/atr.asp) for more info.''')\n\n\t\tatr_df = pd.DataFrame({\n\t\t\tf'{t0}-High-Low': t0_ohlc.High - t0_ohlc.Low,\n\t\t\tf'{t0}-High-PrevCloseAbs': abs(t0_ohlc.High - 
t0_ohlc.Close.shift(1)),\n\t\t\tf'{t0}-Low-PrevCloseAbs': abs(t0_ohlc.Low - t0_ohlc.Close.shift(1)),\n\t\t}).max(axis=1)\n\t\tatr_df = pd.DataFrame({\n\t\t\tf'{t0}-true-range': atr_df,\n\t\t})\n\t\tatr_df[f'{t0}-ATR14'] = atr_df.iloc[:, 0].rolling(14).mean()\n\t\t# st.write(atr_df)\n\n\t\tf, ax = plt.subplots(1, 2, figsize=(14,6), sharex=False)\n\t\tf.suptitle(f'{t0} True Range & SMA14')\n\t\tatr_df.plot(ax=ax[0], lw=linewidth)\n\n\t\twith warnings.catch_warnings():\n\t\t warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n\t\t #print(invgamma.fit(f'{t0}-true-range'))\n\t\t sns.distplot(atr_df[f'{t0}-true-range'], fit=invgamma, kde=False, ax=ax[1])\n\t\t_=ax[1].axvline(atr_df[f'{t0}-true-range'].values[-1], color='b', label='last', lw=linewidth)\n\t\t_=ax[1].axvline(atr_df[f'{t0}-ATR14'].values[-1], color='r', label='last', lw=linewidth)\n\t\t_=ax[1].legend()\n\t\tst.pyplot()\n\n\n\n\t# do_strategy_analysis = True\n\tst.sidebar.markdown('## Trading Strategy')\n\tdo_strategy_analysis = st.sidebar.checkbox('Enable', value=False, key='cb_stra')\n\tif do_strategy_analysis:\n\t\tst.markdown('## Trading Strategy')\n\t\tst.markdown('[investopedia](https://www.investopedia.com/articles/active-trading/052014/how-use-moving-average-buy-stocks.asp)')\n\t\tshort_window = st.sidebar.slider('short_window', 2, 21, 3)\n\t\tlong_window = st.sidebar.slider('long_window', 3, 50, 5)\n\t\tplot_strategy(t0, t0_df, short_window, long_window)\n\t\tst.pyplot()\n\n\t# do_corr_analysis = False\n\tst.sidebar.markdown('## Correlation analysis')\n\tdo_corr_analysis = st.sidebar.checkbox('Enable', value=False, key='cb_corr')\n\tif do_corr_analysis:\n\t\tst.markdown('## Correlation analysis')\n\t\tt1= 'GC=F' # # SP500 'GC=F'\n\t\tt2 = 'CL=F' # '^GSPC' # '^DJI' # DJ30 'CL=F'\n\t\tt1 = st.sidebar.selectbox('REF1:', stocks, index=stocks.index(t1))\n\t\tt2 = st.sidebar.selectbox('REF2:', stocks, index=stocks.index(t2))\n\t\tif st.sidebar.button('Reset'):\n\t\t\tt1 = 'GC=F' # # SP500 'GC=F'\n\t\t\tt2 = 'CL=F' # '^GSPC' # '^DJI' # DJ30 'CL=F'\n\t\t\t# t1 = st.sidebar.selectbox('ref1:', stocks, index=stocks.index(t1))\n\t\t\t# t2 = st.sidebar.selectbox('ref2:', stocks, index=stocks.index(t2))\n\n\t\[email protected](persist=True, show_spinner=False)\n\t\tdef get_dataframes(t1, t2, startdd, endd):\n\t\t\tt1_ohlc = extract(ticker=t1, start_date=startdd, end_date=endd)\n\t\t\tt2_ohlc = extract(ticker=t2, start_date=startdd, end_date=endd)\n\t\t\treturn t1_ohlc, t2_ohlc\n\n\t\tt1_ohlc, t2_ohlc = get_dataframes(t1, t2, startdd, endd)\n\t\tt1_df = pd.DataFrame({f'{t1}-Close': t1_ohlc.Close})\n\t\tt2_df = pd.DataFrame({f'{t2}-Close': t2_ohlc.Close})\n\n\t\t#print(t0_ohlc.shape)\n\t\t#t0_ohlc.head()\n\t\t# print(t1_ohlc.shape)\n\t\t# ticker_ohlc.head()\n\t\t# ticker_ohlc.info()\n\n\t\ttdf = t0_df.join(t1_df).join(t2_df).interpolate().dropna()\n\t\t# tdf.head(10)\n\n\t\t# t0_ohlc.corr(t1_ohlc)\n\t\t#ax = t0_ohlc.Close.plot()\n\t\t#t1_ohlc.Close.plot(ax=ax)\n\n\t\timport numpy as np\n\t\tprint('glocal corrleation1: ', t0_ohlc.Close.corr(t1_ohlc.Close))\n\t\tprint('glocal corrleation2: ', t0_ohlc.Close.corr(t2_ohlc.Close))\n\n\t\tp_window_size = 5\n\t\tr_window_size = 5\n\t\tcentering = False\n\n\n\t\tmodf = lambda x: x\n\t\t#modf = np.log10\n\n\n\t\tmain_stat = f'[{t0}]-mean-roll{p_window_size}'\n\t\talt_stat_1 = f'[{t1}]-mean-roll{p_window_size}'\n\t\talt_stat_2 = f'[{t2}]-mean-roll{p_window_size}'\n\t\t# df_rc = pd.DataFrame({\n\t\t# main_stat : tdf.iloc[:, 
0].apply(modf).rolling(window=p_window_size,center=centering).mean(),\n\t\t# alt_stat_1: tdf.iloc[:, 1].apply(modf).rolling(window=p_window_size,center=centering).mean(),\n\t\t# alt_stat_2: tdf.iloc[:, 2].apply(modf).rolling(window=p_window_size,center=centering).mean(),\n\t\t# })\n\t\tcom_val = 0.2\n\t\tdf_rc = pd.DataFrame({\n\t\t main_stat : tdf.iloc[:, 0].apply(modf).ewm(span=p_window_size, adjust=False).mean(),\n\t\t alt_stat_1: tdf.iloc[:, 1].apply(modf).ewm(span=p_window_size, adjust=False).mean(),\n\t\t alt_stat_2: tdf.iloc[:, 2].apply(modf).ewm(span=p_window_size, adjust=False).mean(),\n\t\t})\n\n\t\tdf_rc = df_rc.interpolate()\n\t\tdf_rc[f'[{t0}]-[{t1}]-corr-roll{r_window_size}'] = df_rc[main_stat].rolling(window=r_window_size, center=centering).corr(df_rc[alt_stat_1])\n\t\tdf_rc[f'[{t0}]-[{t2}]-corr-roll{r_window_size}'] = df_rc[main_stat].rolling(window=r_window_size, center=centering).corr(df_rc[alt_stat_2])\n\n\t\tf, ax = plt.subplots(3,1,figsize=(16,10),sharex=True)\n\t\t#df_rc.iloc[:,0].plot(ax=ax[0], legend=True)\n\t\tdf_rc.iloc[:,1].plot(ax=ax[0], legend=True, color='gold')\n\t\tdf_rc.iloc[:,2].plot(ax=ax[1], legend=True, color='darkred')\n\t\tdf_rc.iloc[:,3].plot(ax=ax[2], legend=True, color='gold')\n\t\tdf_rc.iloc[:,4].plot(ax=ax[2], legend=True, color='darkred')\n\t\tax[2].axhline(y=0, lw=1, color='black')\n\t\t#t0_ohlc.Close.rolling(window=r_window_size,center=True).mean().plot(ax=ax[0])\n\t\t#t1_ohlc.Close.rolling(window=r_window_size,center=True).mean().plot(ax=ax[1])\n\t\t# ax[0].set(xlabel='Frame',ylabel='Smiling Evidence')\n\t\t# ax[1].set(xlabel='Frame',ylabel='Pearson r')\n\t\t_=plt.suptitle(f\"{t0} Close rolling correlation to {t1}, {t2}\")\n\n\t\tst.pyplot()\n\n\n\t\tf,ax=plt.subplots(1, 2, figsize=(16,8),sharex=False)\n\n\t\t_= df_rc.plot.scatter(x=df_rc.columns[1],\n\t\t y=df_rc.columns[2],\n\t\t c=df_rc.columns[0],\n\t\t colormap='viridis',\n\t\t # legend=None,\n\t\t ax=ax[0])\n\n\t\tprint(df_rc.columns)\n\t\tnewr_p = df_rc.iloc[-1, 0]\n\t\tt1_p = df_rc.iloc[-1, 1]\n\t\tt2_p = df_rc.iloc[-1, 2]\n\t\tt1_c = df_rc.dropna().iloc[-1, 3]\n\t\tt2_c = df_rc.dropna().iloc[-1, 4]\n\t\tprint('current_corr:', (t1_c, t2_c))\n\n\t\t# figure out circle size\n\t\taaaa = df_rc.iloc[:, 1].aggregate([np.max, np.min])\n\t\txrange = np.ceil(aaaa.values[0] - aaaa.values[1])\n\t\tprint(aaaa.values[0], aaaa.values[1], xrange)\n\t\txradius = xrange / 20.\n\n\t\tcircle = plt.Circle((t1_p, t2_p), xradius, color='r', fill=False)\n\t\tax[0].add_artist(circle)\n\t\t#ax[0].set_xlabel(f'GOLD Price {t1_p:.4f}')\n\t\t#ax[0].set_ylabel(f'OIL Price {t2_p:.4f}')\n\t\t# ax[0].legend().set_visible(False)\n\n\t\t_= df_rc.plot.scatter(x=df_rc.columns[-2],\n\t\t y=df_rc.columns[-1],\n\t\t c=df_rc.columns[0],\n\t\t colormap='viridis',\n\t\t # legend=True,\n\t\t #linestyle=\n\t\t ax=ax[1])\n\n\t\t# figure out circle size\n\t\taaaa = df_rc.iloc[:, -2].aggregate([np.max, np.min])\n\t\txrange = np.ceil(aaaa.values[0] - aaaa.values[1])\n\t\tprint(aaaa.values[0], aaaa.values[1], xrange)\n\t\txradius = xrange / 20.\n\n\t\tcircle1 = plt.Circle((t1_c, t2_c), xradius, color='r', fill=False)\n\t\tax[1].add_artist(circle1)\n\t\t#ax[1].set_ylabel('OIL Correlation')\n\t\t#_= ax[1].set_xlabel('GOLD Correlation')\n\n\n\t\tst.pyplot()", "def on_CurrentradioButton_clicked(self):\n # TODO: not implemented yet\n # raise NotImplementedError\n print(\"Select current cash deposit. 
If you select it, cash will be released after 7 days\")\n self.select_actor = \"Month0\"", "def __init__(self):\n\n super().__init__()\n\n self.rates = dict()\n self.currencies = list()\n self.getData() # Fetch the data from the csv online file\n\n # Initialization of the currencies choice dropdown boxes\n self.from_currency = QComboBox()\n self.from_currency.addItems(self.currencies)\n self.to_currency = QComboBox()\n self.to_currency.addItems(self.currencies)\n\n self.from_amount = QDoubleSpinBox()\n self.from_amount.setRange(0.01, 10000000.00)\n self.from_amount.setValue(1.00)\n self.to_amount = QLabel('1.00')\n self.from_currency_label = QLabel('From Currency:')\n self.to_currency_label = QLabel('To Currency:')\n self.from_amount_label = QLabel('Amount to convert:')\n self.to_amount_label = QLabel('Result of conversion based on most recent rates:')\n\n self.from_calendar = QCalendarWidget()\n self.to_calendar = QCalendarWidget()\n self.rates_plot = pg.PlotWidget()\n self.from_date = QDate()\n self.to_date = QDate()\n self.last_clicked = \"\"\n\n hint_font = QFont()\n hint_font.setItalic(True)\n self.graph_hint = QLabel('Hint: you can interact with the graph using your mouse')\n self.graph_hint.setFont(hint_font)\n\n\n self.initUI()", "def get_rate(parent=None):\n dialog = RateDialog(parent)\n dialog.exec_()\n rate = dialog.rate\n return rate", "def getActiveCurrency():", "def getActiveCurrencies():", "def on_chosen_currency(self):\n main_currency_title = self.choose_currency.currentText()\n # the string needs to be modified to be compatible with the database values\n main_currency = main_currency_title.replace(\" \", \"_\").lower()\n relation_currency = self.choose_relation_currency.currentText().replace(\" \", \"_\").lower()\n # graph\n if len(load_all(main_currency)) < 2:\n gui_warnings.on_loading_values()\n else:\n try:\n canvas = Canvas(relation_currency, self)\n canvas.plot(main_currency)\n except ValueError:\n pass # plots empty graph if main_currency = relation_currency\n self.clear_graph_layout(self.graph_layout)\n self.graph_layout.addWidget(canvas)\n # title\n self.gui_title.setText(main_currency_title)\n # table\n self.currency_table.setRowCount(0)\n currency_list = [\n \"Brazilian Real\",\n \"American Dollar\",\n \"European Euro\",\n \"British Pound\",\n \"Japanese Yen\",\n \"Swiss Frank\",\n \"Canadian Dollar\",\n \"Australian Dollar\"\n ]\n for currency in currency_list:\n temp = currency_list[currency_list.index(currency)]\n currency_list[currency_list.index(currency)] = currency_list[0]\n currency_list[0] = temp\n if main_currency_title == currency:\n self.currency_table.setHorizontalHeaderLabels((*currency_list[1:], \"Date\"))\n # from https://www.youtube.com/watch?v=l2OoXj1Z2hM&t=411s\n records = enumerate(load_all(main_currency))\n for row_num, row_data in records:\n self.currency_table.insertRow(row_num)\n for column_num, data in enumerate(row_data):\n self.currency_table.setItem(\n row_num, column_num, QTableWidgetItem(str(data))\n )", "def daily_bond_yc_html(request, template_name=\"daily_bond_yc.html\"):\n resp = HttpResponse()\n date = request.GET.get(\"d\",\"20120823\")\n country = request.GET.get(\"ct\",\"US\")\n currency = request.GET.get(\"cu\",\"USD\")\n currency = currency.upper()\n rating = request.GET.get(\"rt\",\"AAA\") \n\n list_bond_html =\"\"\"\n <TR>\n <th>Duration</th>\n <th>CURRENT YLD</th>\n <th>PREV YLD</th>\n <th>CHANGE</th>\n <th>1 WK YLD</th>\n <th>1 MO YLD</th>\n <th>6 MO YLD</th>\n </TR>\n \"\"\"\n #YLD:USTB:USD:GOV:DLY:1M\n key = \"\"\n 
list_bond_html =\"\" \n def formatstr(fstr): \n \tif(str!=None):\n \t return '%.3f' % round(string.atof(fstr),3) if(fstr != None) else None\n \t \n durations = ('1M','3M','6M','1Y','2Y','3Y','5Y','7Y','10Y','20Y','30Y')\n #durations = ('1M','3M' )\n for dur in durations:\n key = 'YLD:USTB:'+currency+':GOV:DLY:'+dur\n list_bond_html+= \"<TR>\"\n list_bond_html+= \"<th>\"+ dur +\" </th> \"\n list_bond_html+= \"<TD>\"+formatstr(r.hget(key,date)) +\" </TD> \"\n list_bond_html+= \"<TD>\"+formatstr(r.hget(key,date)) +\"</TD> \"\n list_bond_html+= \"<TD>\"+ \"0\" +\"</TD> \" \n key = 'YLD:USTB:'+currency+':GOV:AV1W:'+dur\n list_bond_html+= \"<TD>\"+formatstr(r.hget(key,date)) +\"</TD>\" \n key = 'YLD:USTB:'+currency+':GOV:AV1M:'+dur\n list_bond_html+= \"<TD>\"+formatstr(r.hget(key,date)) +\"</TD>\" \n key = 'YLD:USTB:'+currency+':GOV:AV6M:'+dur\n list_bond_html+= \"<TD>\"+formatstr(r.hget(key,date)) +\"</TD>\"\n list_bond_html+= \"</TR>\"\n resp.write(list_bond_html)\n return resp", "def updateUI(self):\n\n try:\n # Getting the values selected by the user\n from_ = self.from_currency.currentText()\n to = self.to_currency.currentText()\n from_amt = Decimal(self.getMostRecentRelevantRate(self.rates[from_]))\n to_amt = Decimal(self.getMostRecentRelevantRate(self.rates[to]))\n amt = Decimal(self.from_amount.value())\n\n # Calculating the new conversion value\n amount = (to_amt / from_amt) * amt\n self.to_amount.setText('%.02f' % amount)\n\n # Getting the dates selected by the user\n self.from_date = self.from_calendar.selectedDate().toPyDate()\n self.to_date = self.to_calendar.selectedDate().toPyDate()\n\n # Updating the graph only if something in relationship with it changes\n if self.last_clicked != 'amount':\n # Update the dates selected according to the user selection if the user selects a negative range\n if self.to_date < self.from_date:\n if self.last_clicked == 'from':\n date = self.from_calendar.selectedDate()\n self.to_calendar.setSelectedDate(date)\n self.to_date = date.toPyDate()\n else:\n date = self.to_calendar.selectedDate()\n self.from_calendar.setSelectedDate(date)\n self.from_date = date.toPyDate()\n\n # Getting and calculating the currencies rates according to the range selected by the user\n from_rates = self.getRatesInRange(self.rates[from_])\n to_rates = self.getRatesInRange(self.rates[to])\n conv_rates = self.getConvRates(from_rates, to_rates)\n\n # Getting the number of days included in the range\n nb_days = (self.to_date - self.from_date).days + 1\n date_range = range(0, nb_days)\n\n # Clearing the graph and the legend\n self.rates_plot.clear()\n self.legend.scene().removeItem(self.legend)\n self.legend = self.rates_plot.addLegend()\n\n # Updating the graph with our new values\n self.rates_plot.setXRange(0, nb_days)\n self.rates_plot.setYRange(0, max(from_rates + to_rates + conv_rates))\n self.rates_plot.plot(date_range, from_rates, pen='b', symbol='x', symbolPen='b', symbolBrush=0.2, name=from_)\n self.rates_plot.plot(date_range, to_rates, pen='r', symbol='o', symbolPen='r', symbolBrush=0.2, name=to)\n self.rates_plot.plot(date_range, conv_rates, pen='g', symbol='+', symbolPen='g', symbolBrush=0.2, name='conversion rate')\n except Exception as e:\n print('Failed to update UI')\n print(e)", "def interactive_utility_report_with_buttons(year: int):\n utility_category_id = Category().get_category_id_by_name('Utility')\n monthly_utility_total = Spending().get_specific_category_monthly_spending(year, [utility_category_id])\n monthly_utility_details = 
Spending().get_sub_category_monthly_spending_of_a_category(year, [utility_category_id])\n\n df_monthly_total = pd.DataFrame(monthly_utility_total, columns=['sum', 'mon'])\n df_monthly_details = pd.DataFrame(monthly_utility_details, columns=['sum', 'name', 'mon'])\n\n df_water = df_monthly_details[df_monthly_details['name'] == 'Water and Waste']\n df_electricity = df_monthly_details[df_monthly_details['name'] == 'Electricity']\n df_internet = df_monthly_details[df_monthly_details['name'] == 'Internet']\n df_gas = df_monthly_details[df_monthly_details['name'] == 'Nature Gas']\n df_mobile = df_monthly_details[df_monthly_details['name'] == 'Mobile Bill']\n\n fig = go.Figure()\n # Add the total amount into figure as bar chart\n fig.add_trace(\n go.Bar(\n x=df_monthly_total['mon'],\n y=df_monthly_total['sum'],\n name=f\"{year} utility\"\n )\n )\n # Add water\n fig.add_trace(\n go.Scatter(x=df_water['mon'], y=df_water['sum'], name='Water',\n visible=False, line=dict(color=\"#5DADE2\"))\n )\n # Add Electricity\n fig.add_trace(\n go.Scatter(x=df_electricity['mon'], y=df_electricity['sum'], name='Electricity',\n visible=False, line=dict(color=\"#F7DC6F\"))\n )\n # Add Internet\n fig.add_trace(\n go.Scatter(x=df_internet['mon'], y=df_internet['sum'], name='Internet',\n visible=False, line=dict(color=\"#82E0AA\"))\n )\n # Add Gas\n fig.add_trace(\n go.Scatter(x=df_gas['mon'], y=df_gas['sum'], name='Gas',\n visible=False, line=dict(color=\"#E74C3C\"))\n )\n # Add Mobile\n fig.add_trace(\n go.Scatter(x=df_mobile['mon'], y=df_mobile['sum'], name='Mobile',\n visible=False, line=dict(color=\"#BB8FCE\"))\n )\n\n fig.update_layout(\n updatemenus=[\n dict(\n type='buttons',\n direction='right',\n active=0,\n x=0.57,\n y=1.2,\n buttons=list([\n dict(\n label='Total Utility Spending',\n method='update',\n args=[\n {'visible': [True, False, False, False, False, False]},\n {'title': 'Total Utility Spending', \"annotations\": []}\n ]\n ),\n dict(\n label='Water',\n method='update',\n args=[\n {'visible': [True, True, False, False, False, False]},\n {'title': 'Total vs Water', \"annotations\": []}\n ]\n ),\n dict(\n label='Electricity',\n method='update',\n args=[\n {'visible': [True, False, True, False, False, False]},\n {'title': 'Total vs Electricity', \"annotations\": []}\n ]\n ),\n dict(\n label='Internet',\n method='update',\n args=[\n {'visible': [True, False, False, True, False, False]},\n {'title': 'Total vs Internet', \"annotations\": []}\n ]\n ),\n dict(\n label='Gas',\n method='update',\n args=[\n {'visible': [True, False, False, False, True, False]},\n {'title': 'Total vs Gas', \"annotations\": []}\n ]\n ),\n dict(\n label='Mobile',\n method='update',\n args=[\n {'visible': [True, False, False, False, False, True]},\n {'title': 'Total vs Mobile', \"annotations\": []}\n ]\n ),\n ])\n )\n ]\n )\n\n fig.update_layout(\n {\n 'width': 1200,\n 'height': 550\n },\n overwrite=True\n )\n\n aPlot = plotly.offline.plot(fig,\n config={\"displayModeBar\": False},\n show_link=False,\n include_plotlyjs=False,\n output_type='div')\n\n return aPlot", "def OnButtonRateOKButton(self, event):\r\n\t\tself.OnButtonOKButton()", "def interactive_annual_monthly_bar_report_with_buttons(year: int):\n condition_dict = {'key': 'category', 'operation': '!=', 'value': 16}\n average_monthly_amount = Spending().get_average_monthly_spending_amount(condition_dict=condition_dict)\n current_annual_monthly_amount = Spending().get_monthly_total_spending_of_a_year(year, include_doctor=False)\n\n df_average_monthly = 
pd.DataFrame(average_monthly_amount, columns=['avg', 'mon'])\n df_current_annual_monthly = pd.DataFrame(current_annual_monthly_amount, columns=['sum', 'mon'])\n\n fig = go.Figure()\n\n fig.add_trace(\n go.Bar(\n x=df_current_annual_monthly['mon'],\n y=df_current_annual_monthly['sum'],\n name=f'{year} monthly spending'\n )\n )\n\n fig.add_trace(\n go.Scatter(\n x=df_average_monthly['mon'],\n y=df_average_monthly['avg'],\n name='Average Monthly Spending',\n visible=True,\n line=dict(color=\"#33CFA5\", dash=\"dash\")\n )\n )\n\n fig.update_layout(\n updatemenus=[\n dict(\n type='buttons',\n direction='right',\n active=1,\n x=0.57,\n y=1.2,\n buttons=list([\n dict(\n label='Current Year',\n method='update',\n args=[\n {'visible': [True, False]},\n {'title': f'{year} monthly spending', \"annotations\": []}\n ]\n ),\n dict(\n label='Average',\n method='update',\n args=[\n {'visible': [True, True]},\n {'title': 'Compare with Average monthly', \"annotations\": []}\n ]\n ),\n ]),\n )\n ]\n )\n\n fig.update_layout(\n {\n 'width': 960,\n 'height': 550\n },\n overwrite=True\n )\n\n aPlot = plotly.offline.plot(fig,\n config={\"displayModeBar\": False},\n show_link=False,\n include_plotlyjs=False,\n output_type='div')\n\n return aPlot", "def RateSelect(self):\n\t\treturn self._get_attribute('rateSelect')", "def methodsGB():\n return render_template(\n 'methodsGB.html',\n year=datetime.now().year,\n )", "def OnButtonRateHelpButton(self, event):\r\n\t\twebbrowser.open(consts.URL_HELP_RATE)", "def on_chosen_currency_combobox(self, combobox):\n main_currency = combobox.currentText()\n main_currency = main_currency.replace(\" \", \"_\").lower()\n switch_cases = {\n \"brazilian_real\": \"R$\",\n \"american_dollar\": \"$\",\n \"european_euro\": \"€\",\n \"british_pound\": \"£\",\n \"japanese_yen\": \"¥\",\n \"swiss_frank\": \"CHF\",\n \"canadian_dollar\": \"$\",\n \"australian_dollar\": \"$\"\n }\n case = switch_cases.get(main_currency)\n symbol_top = self.currency_value_top.text().split()[0]\n symbol_bottom = self.currency_value_bottom.text().split()[0]\n if combobox == self.choose_currency_conversion_top:\n self.currency_value_top.setText(\"{} 1.0\".format(case))\n self.currency_value_bottom.setText(\"{} 1.0\".format(symbol_bottom))\n else:\n self.currency_value_bottom.setText(\"{} 1.0\".format(case))\n self.currency_value_top.setText(\"{} 1.0\".format(symbol_top))\n # resetting arg_nums everytime there's a new combobox click\n self.arg_nums = []", "def getUserCurrency():", "def info_widget(loc_classes, switch, weather):\r\n\r\n try:\r\n if loc_classes[\"country_iso\"]:\r\n info = {}\r\n iso = loc_classes[\"country_iso\"]\r\n\r\n \"\"\"FX-rate function\"\"\"\r\n info = fx_rate(iso)\r\n\r\n \"\"\"Language differing titles/phrases\"\"\"\r\n #German\r\n if switch == \"German\" or loc_classes['language'] == 'german':\r\n info[\"country\"] = loc_classes[\"country_de\"].title()\r\n info[\"title_euro\"] = \"Wechselkurse Euroländer\"\r\n info[\"title\"] = \"Wechselkurse\"\r\n #English:\r\n else:\r\n info[\"country\"] = loc_classes[\"country_en\"].title()\r\n info[\"title_euro\"] = \"FX box Euro countries\"\r\n info[\"title\"] = \"FX box\"\r\n\r\n\r\n \"\"\"GDP and population\"\"\"\r\n #World Band database needs iso3 country code\r\n iso_3 = db.execute(\"SELECT iso316_1_alpha_3 FROM data_hub_countries \\\r\n WHERE LOWER(iso3166_1_alpha_2)=:iso\",\r\n iso=iso)[0][\"iso316_1_alpha_3\"]\r\n #Country population in millions\r\n pop = db.execute(\"SELECT * FROM world_bank WHERE (CountryCode=:iso \\\r\n AND 
(SeriesCode='SP.POP.TOTL'))\",\r\n iso=iso_3)[0][\"2019\"]\r\n pop = round(int(pop) / (1000 * 1000), 1)\r\n info[\"pop\"] = pop\r\n #GDP per capita\r\n gdp = db.execute(\"SELECT * FROM world_bank WHERE (CountryCode=:iso \\\r\n AND (SeriesCode='NY.GDP.PCAP.CD'))\",\r\n iso=iso_3)[0][\"2019\"]\r\n #Convert from USD to EUR\r\n gdp_raw = 0.0\r\n gdp_cur = 0\r\n #Try/except loop, if fx-rate not available at API\r\n try:\r\n gdp_raw = round(float(gdp) / info[\"eur_usd\"])\r\n gdp_cur = \"Euro\"\r\n\r\n except:\r\n gdp_raw = round(float(gdp))\r\n gdp_cur = \"USD\"\r\n\r\n #1000 , splitter for readability\r\n locale.setlocale(locale.LC_ALL, '') # Use '' for auto, or force e.g. to 'en_US.UTF-8'\r\n gdp = f'{gdp_raw:n}'\r\n info[\"gdp\"] = gdp\r\n info[\"gdp_cur\"] = gdp_cur\r\n\r\n \"\"\"Capital, Internet domain, Country phone code\"\"\"\r\n #Capital\r\n capital = db.execute(\"SELECT capital FROM data_hub_countries \\\r\n WHERE LOWER(iso3166_1_alpha_2)=:iso\",\r\n iso=iso)[0][\"capital\"]\r\n info[\"capital\"] = capital\r\n #Internet domain\r\n internet = db.execute(\"SELECT tld FROM data_hub_countries \\\r\n WHERE LOWER(iso3166_1_alpha_2)=:iso\",\r\n iso=iso)[0][\"tld\"]\r\n info[\"internet\"] = internet\r\n #country phone code\r\n phone = db.execute(\"SELECT dial FROM data_hub_countries \\\r\n WHERE LOWER(iso3166_1_alpha_2)=:iso\",\r\n iso=iso)[0][\"dial\"]\r\n info[\"phone\"] = \"+\" + phone\r\n\r\n\r\n \"\"\"GMT time zone\"\"\"\r\n #Get time zone delta from weather dictionary\r\n time_zone = weather[0][\"hour_offset\"]\r\n zone = 0\r\n\r\n #Exception/error errorhandler\r\n if iso == \"cn\":\r\n gmt = \"+8\"\r\n\r\n else:\r\n if (int(time_zone) - time_zone) == 0:\r\n zone = round(time_zone)\r\n if zone > 0:\r\n gmt = \"+\" + str(zone)\r\n else:\r\n gmt = str(zone)\r\n else:\r\n zone = time_zone\r\n if zone > 0:\r\n gmt = \"+\" + str(zone)\r\n else:\r\n gmt = str(zone)\r\n\r\n info[\"time_zone\"] = gmt\r\n\r\n\r\n print(\"############\", info)\r\n return info\r\n\r\n except:\r\n print(\"######## ERROR #########\")\r\n return None", "def on_chosen_relation_currency(self):\n main_currency = self.choose_currency.currentText().replace(\" \", \"_\").lower()\n relation_currency = self.choose_relation_currency.currentText().replace(\" \", \"_\").lower()\n if len(load_all(main_currency)) < 2:\n gui_warnings.on_loading_values()\n else:\n try:\n canvas = Canvas(relation_currency, self)\n canvas.plot(main_currency.replace(\" \", \"_\").lower())\n except ValueError:\n pass\n self.clear_graph_layout(self.graph_layout)\n self.graph_layout.addWidget(canvas)", "def glycolysis_rate_cal (self) :\n x = self.mitochondria.get_atp()\n y = self.mitochondria.get_adp()\n a = self.atp\n b = self.adp\n self.adp_to_atp(self.mitochondria.atp_translocase(math.ceil((x*b - a*y)/(a+b+x+y))))\n if a<1 :\n return\n else :\n self.set_glycolysis(int(5*b/a))", "def rangeselector_time():\n return {\n \"bgcolor\": \"rgb(35, 149, 86)\",\n \"activecolor\": \"rgb(25, 108, 62)\",\n \"buttons\": [\n {\"count\": 12, \"label\": \"12h\", \"step\": \"hour\", \"stepmode\": \"backward\"},\n {\"count\": 24, \"label\": \"24h\", \"step\": \"hour\", \"stepmode\": \"backward\"},\n {\"count\": 48, \"label\": \"48h\", \"step\": \"hour\", \"stepmode\": \"backward\"},\n {\"count\": 3, \"label\": \"3d\", \"step\": \"day\", \"stepmode\": \"backward\"},\n {\"count\": 7, \"label\": \"7d\", \"step\": \"day\", \"stepmode\": \"backward\"},\n {\"step\": \"all\"},\n ],\n }", "def select_reference_rates_tab(self):\n 
self.select_static_tab(self.reference_rates_tab_locator, True)", "def viewPricing(request):\n user = request.user\n pf = PricingForm()\n pricing_estimated = {\"estimated_price\": 0.0}\n if request.method == 'POST':\n time_unit = int(request.POST['time_unit_selection'])\n estimated_time = float(request.POST['estimated_time'])\n complexity_rate = request.POST['complexity']\n discount = float(request.POST['discount'])\n pf = PricingForm(request.POST)\n if pf.is_valid():\n total_estimated_hours = estimated_time if not time_unit else (\n estimated_time * WORKING_DAY)\n estimated_price = __get_estimated_price(\n total_estimated_hours, complexity_rate, discount)\n pricing_estimated[\"estimated_price\"] = estimated_price\n return HttpResponse(\n json.dumps(pricing_estimated), content_type='application/json')\n if pf.errors:\n logger.debug(\"Form has errors, %s \", pf.errors)\n return render(request, 'pricing.html', locals())", "def set_exchange_rate(self, exchange_rate):\n self.set_value_into_input_field(self.exchange_rate_textbox_locator, exchange_rate, True)", "def avg_convergence_rate(request):\n t = StaticMicrotask.objects.all().filter(scoring_done=True).aggregate(Avg('hop_count'),Max('hop_count'),Min('hop_count'))\n \n avg_hop_count = t['hop_count__avg']\n max_hop_count = t['hop_count__max']\n min_hop_count = t['hop_count__min']\n \n data = {\n 'avg_hop_count':avg_hop_count,\n 'max_hop_count':max_hop_count,\n 'min_hop_count':min_hop_count\n }\n return render_to_response('my_admin_tools/menu/avg_convergence_rate.html',data,context_instance=RequestContext(request))", "def plot_fundingOvertime(df, col1, col2, col_transform = 1000000000, left=2015, right=2016.5):\n\n print('\\n*** INTERACTIVE MODE: HOVER OVER THE GRAPH TO SEE AWARD TOTALS FOR EACH YEAR***')\n grouped = pd.DataFrame(df.groupby([col1])[col2].sum())\n grouped.reset_index(inplace=True)\n\n# set amounts by billion dollars\n grouped[col2]=grouped[col2]/col_transform\n source = ColumnDataSource(grouped)\n\n# initialize the figure\n p = figure(plot_width = 1000,\n plot_height = 450,\n title = 'Award funding has increased over time with 2011 seeing the largest funding amounts')\n\n # create the plot\n p.line(x=col1,\n y=col2,\n line_width=6,\n source=source, color = 'green')\n\n # set formating parameters\n p.xgrid.grid_line_color = None\n p.ygrid.grid_line_color = None\n p.background_fill_color = \"AliceBlue\"\n p.title.text_font_size = \"16pt\"\n p.title.text_color = 'MidnightBlue'\n p.xaxis.axis_label_text_font_size = '15pt'\n p.yaxis.axis_label_text_font_size = '15pt'\n p.yaxis.axis_label = 'Amount awarded in US Billion'\n p.xaxis.major_label_text_font_size = '12pt'\n\n # add shaded box to highlight year with greatest funding\n box = BoxAnnotation(left=left, right=right,\n line_width=1,\n line_color='black',\n line_dash='dashed',\n fill_alpha=0.2,\n fill_color='green')\n # add box to plot\n p.add_layout(box)\n\n # create label for the box\n label = Label(x=2016,\n y=6.220,\n x_offset=12,\n text=\"$6.22 b.awarded in 2016\",\n text_baseline=\"middle\")\n\n # add to plot\n p.add_layout(label)\n\n # add interactive hover tool that shows the amount awarded\n hover = HoverTool()\n hover.tooltips = [(\"Total amount awarded \", \"@AwardAmount\")]\n\n hover.mode = 'vline'\n p.add_tools(hover)\n\n # export plots\n _=export_png(p, filename = img_path / 'fundingovertime.png')\n output_file(img_path/'fundingovertime.html')\n\n p.output_backend = \"svg\"\n export_svgs(p, filename=img_path/\"fundingovertime.svg\")\n\n #display plot\n show(p)", "def 
coin_rate(request, coin):\n coin_obj = get_object_or_404(Coins, symbol=coin.upper())\n ratings = Rating.objects.filter(name_coin = coin_obj)\n return render(request, 'scraper/coin_rate.html', {'ratings': ratings})", "def selection_settings():\n options = driver.find_element_by_xpath(\"/html/body/usgs-root/usgs-header/header/usgs-panel-chooser/nav/i[3]\")\n options.click()\n\n earthquake_catalog = driver.find_element_by_xpath(\"/html/body/usgs-root/div/usgs-settings/section/usgs-earthquakes-filter/a\")\n earthquake_catalog.click()\n\n custom_selection = driver.find_element_by_xpath(\"/html/body/main/div/form/section/div[2]/section/ul[1]/li[3]/label\") \n custom_selection.click()\n\n start_datetime = driver.find_element_by_xpath(\"/html/body/main/div/form/section/div[2]/section/ul[2]/li[1]/input\")\n start_datetime.click()\n start_datetime.clear()\n start_datetime.send_keys(input(\"Datetime:\"))\n start_datetime.send_keys(Keys.RETURN)\n time.sleep(1)\n\n search = driver.find_element_by_xpath(\"/html/body/main/div/form/footer/button\")\n search.click()\n\n time.sleep(1)\n\n options = driver.find_element_by_xpath(\"/html/body/usgs-root/usgs-header/header/usgs-panel-chooser/nav/i[3]\")\n options.click()\n\n time_zone = driver.find_element_by_xpath(\"/html/body/usgs-root/div/usgs-settings/section/usgs-time-zone/mat-radio-group/mat-list/mat-list-item[2]/div/mat-radio-button\")\n time_zone.click()\n time.sleep(3)\n\n return driver" ]
[ "0.55547404", "0.53186727", "0.5312881", "0.5295051", "0.5293411", "0.5169351", "0.5135066", "0.511413", "0.5101465", "0.50238144", "0.49694377", "0.49577525", "0.4831858", "0.48169535", "0.48148766", "0.4786951", "0.47772965", "0.47656235", "0.4762115", "0.47487792", "0.47405902", "0.47316712", "0.47157356", "0.46988595", "0.46974647", "0.46657923", "0.4661979", "0.4659908", "0.46342412", "0.46221495" ]
0.64868665
0
Sidebar widget to select trends for Trends page
def select_trends() -> str: trends_checkbox = st.sidebar.checkbox(f'Show all trends', True) if trends_checkbox: trends_select = TRENDS_LIST else: trends_select = st.sidebar.multiselect('Select your trends', TRENDS_LIST) return trends_select
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTrends(): \n api = authentication()\n names = [i.name for i in api.GetTrendsCurrent()]\n stringTrends = [i.strip('#') for i in names and ]\n trends = [i for i in stringTrends if i != \"\"]\n return trends", "def get_trends():\n return api.trends_available()", "def trending(request):\n\titems = Item.objects.all()\n\ttrending = []\n\n\tfor item in Item.objects.order_by('-dailyVisits'):\n\t\t#Include items that have been uploaded within the past day and havent been sold\n\t\tif (date.today() - item.datePosted).days <= 0 and item.sold_to == None:\n\t\t\tif (len(trending) <= 5):\n\t\t\t\ttrending.append(item)\n\t\telse:\n\t\t\titem.dailyVisits = 0\n\t\t\titem.save()\n\n\t#If there are not enough items in the trending list, add older items to the list\n\tif len(trending) <= 5:\n\t\tfor item in Item.objects.order_by('-dailyVisits'):\n\t\t\tif ((len(trending) <= 5) and (item.sold_to == None) and (item not in trending)):\n\t\t\t\ttrending.append(item)\n\n\tcontext_dict = {\"trendingItems\": trending[0:3], \"search_bar\" :Search_bar()}\n\treturn render(request, 'tailored/index.html', context_dict)", "def page_dashboard(state):\n\n st.title(\":chart_with_upwards_trend: Prediction Results Dashboard\")\n\n st.markdown(\"# Select Stocks to View Results:\")\n if state.finalized_data:\n for stock_data in state.finalized_data:\n st.write(\"---\")\n st.markdown(\"## \" + stock_data[\"stock\"])\n if st.checkbox(\"View Results for \" + stock_data[\"stock\"]):\n\n ############################################\n\n st.markdown(\"### Historical Predictions:\")\n\n df2 = pd.DataFrame.from_dict(stock_data[\"prev_predictions\"])\n\n select_lbl = (\n \"Enter the names of models for \" + stock_data[\"stock\"] + \":\"\n )\n models_selections = st.multiselect(\n label=select_lbl,\n options=df2.columns,\n ) # allow users to display specific model results on dataframe graph\n\n if not models_selections: # if nothing is selected show all models!\n st.line_chart(df2)\n else:\n st.line_chart(df2[models_selections])\n\n st.markdown(\n \"*Note:* 'Prices' are the actual prices for those days. 
The rest are model predictions for those days.\\nPrices (in USD) are on the y-axis, the day number in the data is on the x-axis.\"\n )\n\n ############################################\n\n st.markdown(\"### Future (Next-Day) Predictions:\")\n\n df = pd.DataFrame()\n df = df.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"swing_predictions\"]]\n )\n )\n df = df.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"next_day_predictions\"]]\n )\n )\n df = df.append(\n pd.DataFrame([stock_data[\"prediction_results\"][\"model_scores\"]])\n )\n\n df.index = [\n \"Swing Predicton\",\n \"Price Prediction ($)\",\n \"Model Fit Score\",\n ]\n df = df.transpose()\n df # display chart\n\n st.markdown(\n \"- The current price of the stock is *$\"\n + str(\n round(stock_data[\"prediction_results\"][\"current_prev_close\"], 2)\n )\n + \"*.\"\n )\n\n if state.period == \"1mo\":\n st.markdown(\"- *Recommended Model (for 1mo):* SVR-RBF\")\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n elif state.period == \"6mo\":\n st.markdown(\n \"- *Recommended Model (for 6mo):* SVR-Poly (most recommended), LR, EN, or Lasso.\"\n )\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n elif state.period == \"1y\":\n st.markdown(\"- *Recommended Model (for 1yr):* SVR-Poly\")\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n else:\n st.markdown(\n \"- *Note:* View the home screen for information about the best models and training data size combinations.\"\n )\n\n ############################################\n st.markdown(\"### View Other Information:\")\n\n if st.checkbox(\n \"View \" + stock_data[\"stock\"] + \"'s Model Efficiency Timings\"\n ):\n st.markdown(\"#### Model Efficiencies:\")\n st.markdown(\n \"Shows the time in seconds it took models to complete specific tasks:\"\n )\n df3 = pd.DataFrame()\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"training_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"testing_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"new_predictions_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"prev_predictions_times\"]]\n )\n )\n df3.index = [\n \"Training\",\n \"Testing/Scoring\",\n \"Future Predictions\",\n \"Historical Predictions\",\n ]\n df3 = df3.transpose()\n df3\n\n ############################################\n\n if st.checkbox(\"View \" + stock_data[\"stock\"] + \"'s Information\"):\n st.markdown(\"#### Company Information:\")\n for key in stock_data[\"stock_info\"].keys():\n st.write(\"*\", key + \":\", stock_data[\"stock_info\"][key])\n else:\n st.markdown(\n \"## Generate data to populate and initialize this page by going to the 'Settings' page and running the tool!\"\n )", "def trends(max: int = None, until: str = None):\n for post in client.trends(max=max, until=until):\n print(json.dumps(post))", "def add_trends(self, doc=None):\n if doc is None:\n doc = self.doc\n\n trend = Trends(self.palette['trends'])\n doc.add_root(trend.layout())\n LOG.info('trends added')\n return doc", "def trendingTweets():\n api = twitter.Api()\n trending_topics = api.GetTrendsWoeid(PHILA_WOEID)\n for topic in trending_topics:\n topicSearchTerm = topic.name\n trending_tweets = api.GetSearch(topicSearchTerm)\n for tweet in 
trending_tweets:\n util.safe_print(tweet.GetText())\n # pass", "def get_vax_trends(area=None):\n data = get_vax_trends_data(area)\n trends = []\n for d in VAX_DOSES:\n last_week_dt = data[6]['_id']\n count = data[0][d]\n last_week_count = data[6][d]\n diff = count - last_week_count\n if diff > 0:\n status = 'increase'\n elif diff < 0:\n status = 'decrease'\n else:\n status = 'stable'\n try:\n perc = f'{round(diff / last_week_count * 100)}%'\n except (ValueError, ZeroDivisionError):\n perc = 'n/a'\n trends.append({\n 'id': d,\n 'last_week_count': format_number(last_week_count),\n 'percentage': perc,\n 'title': VARS[d]['title'],\n 'colour': VARS[d][status]['colour'],\n \"icon\": VARS[d]['icon'],\n 'status_icon': VARS[d][status]['icon'],\n 'count': format_number(count),\n 'last_week_dt': format_datetime(last_week_dt, DOW_FMTY)\n })\n return trends", "def trendingTopics():\n api = twitter.Api()\n\n trending_topics = api.GetTrendsWoeid(PHILA_WOEID)\n for topic in trending_topics:\n util.safe_print(topic.name)", "def get_trending():\n trending = r.get('trending_store')\n return trending", "def trend_click(self, instance):\n if self.change_top_graph:\n self.top_plot.current_param = instance.text()\n self.change_top_graph = False\n for header in self.headers:\n if self.top_plot.current_param in header:\n self.top_plot.setLabel(axis='left', text=header)\n else:\n self.bottom_plot.current_param = instance.text()\n self.change_top_graph = True\n for header in self.headers:\n if self.bottom_plot.current_param in header:\n self.bottom_plot.setLabel(axis='left', text=header)", "def get(self):\n\n # Retrieve keyword from the HTML form. If no keyword provided, use a random suggested keyword.\n keyword = self.request.get(\"keyword\")\n if not keyword:\n suggested_keywords = [\"alarm clocks\", \"the future\", \"miller lite\", \"taco bell\", \"yoga\", \"netflix\",\n \"life\", \"traffic\", \"elon musk\", \"beards\", \"world trade\", \"pepsi\", \"amazon\"]\n indices = np.arange(len(suggested_keywords))\n random.shuffle(indices)\n keyword = suggested_keywords[indices[0]]\n\n # Get recent tweets based on the keyword, up to 300 maximum tweets.\n tweets = get_tweets(keyword, max_tweets=300)\n\n # Compute the sentiment of each tweet.\n v = VaderSentimentModel()\n sentiment_scores = [v.classify_sentiment(tw) for tw in tweets] # shape (ntweets,)\n\n # Label sentiment categorically, e.g. \"negative\" or \"positive\"\n M_sent = np.mean(sentiment_scores)\n map = {1 : \"positive\", 0 : \"negative\"}\n valence = map[int(M_sent > 0)]\n\n \"\"\"\n Create plots. \n \"\"\"\n\n #############\n # Plot #1:\n ############\n # Plot the distribution of tweets and sentiment.\n # Resources is CSS code that goes in the header of the HTML. Shared across all bokeh plots.\n # Script1 is javascript for this plot.\n # Div1 is an HTML container for the plot. Goes where you want the plot to appear.\n resources, script1, div1 = plot_tweets(tweets=tweets, sentiment_scores=sentiment_scores)\n\n #############\n # Plot #2:\n ############\n # Plot the key words that lead us to this classification.\n # Script2 is javascript for this plot.\n # Div2 is an HTML container for this plot. Goes where you want the plot to appear.\n # Requires the HTML to include the shared resources, generated above, in the <HEAD>\n script2, div2 = plot_reason(tweets=tweets, sentiment_scores=sentiment_scores)\n\n \"\"\"\n Create HTML output. 
\n \"\"\"\n\n # Load HTML template.\n # This is a functioning webpage, with some placeholders for the keywords and plots we have created.\n html_p = os.path.join(\"html\", \"index.html\")\n html = open(html_p, \"r\").read()\n\n # Fill in placeholders in the HTML with varibles we have created.\n term_to_value = {\n \"[[!KEYWORD]]\" : keyword,\n \"[[!VALENCE]]\" : valence,\n \"[[!BOKEH_SCRIPT]]\" : script1,\n \"[[!BOKEH_SCRIPT2]]\": script2,\n \"[[!BOKEH_DIV]]\" : div1,\n \"[[!BOKEH_RESOURCES]]\" : resources,\n \"[[!BOKEH_DIV2]]\" : div2\n }\n for term, val in term_to_value.items():\n html = html.replace(term, val)\n\n \"\"\"\n Write a response.\n This essentially returns HTML to the google app engine.\n This will render a webpage visible to the user. \n \"\"\"\n self.response.headers[\"Content-Type\"] = \"text/html\"\n self.response.write(html)", "def test_youtube_trends(dates):\n test = pycmc.charts.youtube.videos(dates[\"start\"])\n assert isinstance(test, list)\n assert len(test) > 90\n assert test[0][\"name\"] != \"\"\n assert test[0][\"id\"] != \"\"", "def various(self):\n # Changer le default d'un tb, ici ne rien mettre au niveau le plus haut\n context = self\n from imio.dashboard.utils import getCollectionLinkCriterion\n criterion = getCollectionLinkCriterion(context)\n criterion.default = u''\n from eea.facetednavigation.criteria.interfaces import ICriteria\n ICriteria(context).criteria._p_changed = True\n\n # Réparer la vue de la page pst\n context.setLayout('view')\n from imio.project.pst.setuphandlers import configure_faceted_folder\n configure_faceted_folder(context, xml='default_dashboard_widgets.xml', default_UID=None)", "def layout():\n # Needs db connection! (Set up tunnel if testing app locally)\n _ids = get_doc_ids_from_db() \n dropdown_dates = {num2str_month(_id): _id for _id in _ids}\n\n children_list = [\n html.Div([\n html.H2('Top women and men quoted'),\n html.P('''\n In this section, we display the top women and men quoted as sources in a\n given month. First, select a month from the dropdown menu below.\n ''')],\n ),\n html.Br(),\n dcc.Dropdown(\n id='date-dropdown',\n options=[\n {'label': date_str, 'value': date_num}\n for date_str, date_num in dropdown_dates.items()\n ],\n value=_ids[-1],\n className='dropdown'\n ),\n html.Br(),\n html.Div(\n dcc.Loading(\n id='topic-load-progress',\n children=[\n dcc.Store(id='top-sources-topic-data'),\n dcc.Store(id='top-sources-stats'),\n ])\n ),\n html.Br(),\n html.H4('Topics and gender representation among sources'),\n dcc.Markdown('''\n First, we display the *gender prominence* for each topic discovered in the \n selected month. 
Gender prominence is a measure we use to study whether a\n given topic features quotes by men or women more prominently.\n '''),\n html.Div([\n dcc.Graph(id='top-sources-outlet-gender-heatmap'),\n ]),\n html.Br(),\n html.Div([\n html.H4('Top quoted sources by gender'),\n html.P(f'''\n Next, we display the top {NUM_SOURCES_TO_SHOW} sources per gender for the given month.\n Hover over the dots to display the total number of articles in which each source was \n quoted.\n '''),\n html.Div([\n dcc.Graph(id='top-sources-dotplot', className='chart'),\n ]),\n ]),\n html.H4('Observations'),\n dcc.Markdown(id='user-comment-display'),\n ]\n return children_list", "def get_trends(self, types=None, limit=10, page=0, from_language='en'):\n if not types:\n types = ['DECK', 'DECK_GROUP']\n\n trendables = self.data_source.get_trends(types, limit, page,\n from_language)\n\n return trendables", "def dashboard():", "def trending(request):\n assert isinstance(request, HttpRequest)\n try:\n stocks = StockList.objects.all()\n hold = []\n count = 0\n except StockList.DoesNotExist:\n return print(\"No Stocks Available\")\n\n\n\n while len(hold) < 8:\n for stock in stocks:\n stock.trend = stock.positiveSentimentCount + stock.negativeSentimentCount\n if stock.trend>= count:\n hold.append(stock)\n count = stock.trend\n \n \n\n context = {\n 'title': 'Trending',\n 'year': datetime.now().year,\n 'user': request.user,\n 'stocks': stocks,\n 'hold': hold,\n\n }\n\n \n return render(\n request,\n 'app/trending.html',\n context,\n )", "def on_category(self):\n super(ToolSettings, self).on_category()\n selItems = self.tw_category.selectedItems() or []\n #--- Build Tree ---#\n if selItems:\n if hasattr(selItems[0], 'itemWidget'):\n if selItems[0].itemWidget is not None:\n if not selItems[0].itemWidget.__edited__:\n selItems[0].itemWidget._initWidget()\n selItems[0].itemWidget.buildTree()", "def taxonomy_plot(self,seasons):\n print('Formatting data.')\n no_of_ideograms=self.taxonomy_files()\n location=self.place.capitalize()+'-'+str(self.year)\n if seasons==True:\n seasons=self.weather.seasons(self.place)\n print('Done')\n self.conf.taxo_conf(no_of_ideograms, location, self.start_level, self.plot_level, seasons)", "def TwitterListener():\n l = StdOutListener()\n auth = OAuthHandler(config.CONSUMER_KEY, config.CONSUMER_SECRET)\n auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n stream = Stream(auth, l)\n api = API(auth_handler=auth)\n config.HASHTAGS = [x['name'] for x in api.trends_place(id=44418)[0]['trends']]\n\n print(\"Stream listener is up and running\")\n stream.filter(track=config.HASHTAGS)", "def get_regional_trends(region):\n trends = []\n doc = reg_trends_coll.find_one({REGION_KEY: region})\n if doc:\n trends = format_trends(doc[\"trends\"])\n return trends", "def _on_next_event(self, **kwargs):\n self.append_trends()\n self.grab_shots()", "def get_top_trends_from_twitter_api(country='Japan', exclude_hashtags=True):\n # this stupid WOEID requires yweather to get (a library), because YAHOO itself has stopped supporting it\n # WOEID\n woeid_client = yweather.Client()\n woeid = woeid_client.fetch_woeid(location=country)\n\n check_rate_limit()\n\n if exclude_hashtags :\n trends = api.GetTrendsWoeid(woeid, exclude='hashtags')\n else:\n trends = api.GetTrendsWoeid(woeid, exclude=None)\n\n output = []\n images_output = []\n for trend in trends:\n trend = trend.AsDict()\n\n # get volumes\n try:\n tw_volume = int(trend['tweet_volume']),\n except:\n tw_volume = [0]\n\n # match time with timezone\n 
timestamp_str = trend['timestamp'] # this is utc\n timestamp_dt = str_2_datetime(timestamp_str, input_format=time_format_twitter_trends).replace(tzinfo=pytz.utc)\n\n # timestamp_local = timestamp_dt.astimezone(tz=pytz.utc)\n timestamp_utc_str = datetime_2_str(timestamp_dt, output_format=time_format_full_with_timezone)\n\n output.append({\n \"label\": trend['name'],\n \"volume\": tw_volume,\n \"time\": timestamp_utc_str,\n \"query\": trend['query'],\n \"url\": trend['url'],\n })\n\n images_output.append({\n \"label\": trend['name'],\n \"time\": timestamp_utc_str,\n \"tweets\": analyze_trending_keyword(trend['name'], count=50)\n })\n\n output_json = json.dumps(output, ensure_ascii=False)\n images_output_json = json.dumps(images_output, ensure_ascii=False)\n return output_json, images_output_json", "def on_category(self):\n super(ProjectSettings, self).on_category()\n selItems = self.tw_category.selectedItems() or []\n #--- Build Tree ---#\n if selItems:\n if hasattr(selItems[0], 'itemWidget'):\n if selItems[0].itemWidget is not None:\n if not selItems[0].itemWidget.__edited__:\n selItems[0].itemWidget._initWidget()\n selItems[0].itemWidget.buildTree()", "def plot_mult_timetrends(data, geoids, cols, area, colors, markers, sharex,\n ylim_bottom = -150, ylim_top = 150, ylabel = 'Pct change in mobility', xlabels=None):\n ax = plt.axes(area, sharex = None)\n \n cols = cols\n plt.hlines(0,data.num_date.min(),data.num_date.max())\n i = 0\n for y in cols:\n pts = y[:12]\n \n# lim = ylim\n# plt.xlabel('date', fontsize=18)\n plt.ylabel(ylabel, fontsize=22)\n\n plt.yticks(fontsize=30) \n\n x_locator = FixedLocator(data.num_date[np.arange(0,data.shape[0],7)].tolist())\n ax.xaxis.set_minor_locator(x_locator)\n plt.grid(axis='x', which = 'both') \n \n plt.plot(data['num_date'], data[y], color = colors[i], linewidth=5)\n i = i+ 1\n plt.xticks(ticks = data.num_date[np.arange(0,data.shape[0],28)].tolist(),\n labels = xlabels, rotation=30, ha='right',\n fontsize=30)\n plt.ylim(ylim_bottom,ylim_top)\n\n return ax", "def youtube_trending(session=None):\n data = []\n url = 'https://www.youtube.com/feed/trending'\n soup = ph.get_soup(url, session)\n if not soup:\n ph.logger.error('No soup found for {}'.format(url))\n return data\n\n uls = soup.find_all('ul', attrs={'class': 'expanded-shelf-content-list'})\n i = 0\n for ul in uls:\n for li in ul.find_all('li'):\n result_data = {}\n try:\n result_data['link'] = 'https://www.youtube.com' + li.h3.a.attrs['href']\n result_data['title'] = li.h3.a.attrs['title']\n except AttributeError:\n continue\n else:\n result_data['position'] = i\n try:\n result_data['duration'] = _clean_youtube_duration(li.h3.span.text)\n except AttributeError:\n result_data['duration'] = ''\n try:\n result_data['user'] = li.find(attrs={'class': 'yt-lockup-byline'}).a.text\n except AttributeError:\n result_data['user'] = ''\n try:\n metadata = li.find(attrs={'class': 'yt-lockup-meta-info'})\n metadata = [x.text for x in metadata.findChildren()]\n except AttributeError:\n metadata = []\n try:\n result_data['uploaded'] = metadata[0]\n result_data['views'] = metadata[1]\n except IndexError:\n result_data['uploaded'] = ''\n result_data['views'] = ''\n\n data.append(result_data)\n i += 1\n\n return data", "def get_national_trends():\n trends = format_trends(list(nat_trends_coll.find({})))\n return trends", "def windowMenuActions( self, action ):\n\tif (action.text() == 'Weeklies'):\n if not hasattr(sharedDB, 'myWeekliesWidget'):\n\t\tsharedDB.myWeekliesWidget = 
weeklieswidget.WeekliesWidget(sharedDB.mainWindow)\n\t\t\n\t #sharedDB.myWeekliesWidget.CalculateWeeklies()\n\t sharedDB.myWeekliesWidget.dockWidget.show()\n\tif (action.text() == 'Assignments'):\t\t\n\t sharedDB.myAssignmentsWidget.dockWidget.show()\n\tif (action.text() == 'Attribute Editor'):\t\t\n\t sharedDB.myAttributeEditorWidget.dockWidget.show()", "def top_twenty(request):" ]
[ "0.6033017", "0.5886845", "0.54061174", "0.5199153", "0.51406914", "0.512278", "0.5105741", "0.50310594", "0.49992782", "0.49274763", "0.481228", "0.48026663", "0.47352293", "0.46986964", "0.46903226", "0.4664409", "0.466313", "0.46275526", "0.45833886", "0.45830315", "0.4561449", "0.45554802", "0.4545247", "0.45321473", "0.44909775", "0.44756022", "0.44709644", "0.4469059", "0.4458835", "0.4458129" ]
0.6717184
0
Sidebar widget to select minimum salary for Highest Earners page
def select_minimum_salary(df, step, college_select: str = ''): st.sidebar.markdown('### Enter minimum FTE salary:') sal_describe = df[SALARY_COLUMN].describe() number_input_settings = { 'min_value': 100000, 'max_value': int(sal_describe['max']), 'value': 500000, 'step': step } if college_select: t_df = df.loc[df[COLLEGE_NAME] == college_select] sal_describe = t_df[SALARY_COLUMN].describe() max_value = int(sal_describe['max']) number_input_settings['max_value'] = max_value if max_value > 100000: number_input_settings['min_value'] = 75000 number_input_settings['value'] = 100000 else: number_input_settings['min_value'] = 65000 number_input_settings['value'] = 75000 min_salary = st.sidebar.number_input('', **number_input_settings) return min_salary
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_min_expense(self):\n pass", "def setMinMax(self):\n currentIndustryNum = self.myParent.myIndustry[self.myIndustryData.id]\n oldIndustryNum = self.myParent.myOldIndustry[self.myIndustryData.id]\n self.setMinValue(-currentIndustryNum)\n if oldIndustryNum > currentIndustryNum:\n self.setMaxValue(oldIndustryNum-currentIndustryNum)\n elif self.isIndustryResearched() == 0:\n self.setMaxValue(0)\n else:\n max = self.getMaxFromFundsAvail()\n cityNum = (self.myParent.cities-self.myParent.citiesUsed)/self.myIndustryData.cities\n if max < cityNum:\n self.setMaxValue(max)\n else:\n self.setMaxValue(cityNum)", "def view_batter_bysalary(self):\n conn = rs.create_connection(\"dailyfantasyscraper.db\")\n salary = \"$\" + sal.get()\n position = \"P\"\n batter_salary = (salary, position)\n cur = conn.cursor()\n cur.execute(\n \"SELECT * FROM rotowiredk WHERE salary <= ? and position != ? \", batter_salary)\n result = cur.fetchall()\n conn.commit()\n conn.close()\n\n for item in result:\n print(item)\n tree.insert('', 'end', values=item)", "def largest_two():\n # Add your code below!", "def second_lowest():\n copy = movies.copy()\n name = copy.sort_values(['Total Gross']).reset_index(drop = True)['#1 Movie'].loc[1]\n return ('second_lowest', name)", "def best_sell(self):\n return Library.functions.best_sell(self._book)", "def _choose_best_option(self):", "def main():\n filename1=input(\"Enter the first filename :\")\n filename2 = input(\"Enter the second filename :\")\n city1, hilltownDict=readFile(filename1)\n city2, valleydaleDict=readFile(filename2)\n max=int (input(\"Enter the maximum number of items :\"))\n\n hilltownDict, valleydaleDict=profitcal(hilltownDict,valleydaleDict)\n hilltownList=convertToList(hilltownDict)\n valleydaleList=convertToList(valleydaleDict)\n\n sort_valley= selectionSort(valleydaleList)\n sort_hill = selectionSort(hilltownList)\n\n\n resultH,tpH=totalProfit(city1,sort_hill,max)\n resultV,tpV=totalProfit(city2,sort_valley,max)\n\n if tpH>tpV:\n print(resultH)\n\n elif tpV>tpH:\n print(resultV)\n\n else:\n print(\"Both city has same profit\" +\"\\n\" + resultH +\"\\n\" + resultV)", "def limit_weight(self, weight_max):\n # remove items with low values\n if self.total_weight > weight_max:\n items_sorted_by_fitness = sorted(self.items, key=lambda item: item.fitness, reverse=False)\n while items_sorted_by_fitness and self.total_weight > weight_max:\n least_fit_item = items_sorted_by_fitness.pop(0)\n if self.item_stats[least_fit_item.id] == 1:\n self.item_stats[least_fit_item.id] = 0\n self.update_values() # have to update each time an item is change to recompute weight", "def Salvage(self):\n pass", "def _check_amount_with_priority(self):\n\t\tfor slc in self:\n\t\t\tif slc.max_amount and self.search([('priority', '<', slc.priority), ('max_amount', '>=', slc.max_amount)]):\n\t\t\t\traise Warning(_(\"There are below slides [Priority less than %s] with bigger amount from [%s]\"\n\t\t\t\t \" which against the logic!!!\\n You can increase amount or handel priority\")\n\t\t\t\t % (slc.priority, slc.max_amount))", "def get_greatest_stock_price():\n greatest_stock_price = 0\n // your code here", "def select_sort_method():\n st.sidebar.markdown('### Sort method:')\n sort_select = st.sidebar.selectbox('', ['Alphabetically', 'FTE Salary'],\n index=1)\n return sort_select", "def best_price(self):\n # TODO rename this to \"display_lowest_price\" or something...\n price = self.lowest_price\n if price:\n return Decimal.quantize(price.per_kg, settings.DECIMAL_CENTS) # round to cents", 
"def youngest(self):\n # Your implementation here", "def main():\r\n\r\n st.title(\"Summarizer App\")\r\n menu = ['Home', 'About']\r\n choice = st.sidebar.selectbox(\"Menu\", menu)\r\n\r\n if choice == \"Home\":\r\n st.subheader(\"Summarization\")\r\n raw_text = st.text_area(\"Enter Text Here\")\r\n if st.button(\"Summarize\"):\r\n \r\n with st.expander(\"original Text\"):\r\n st.write(raw_text)\r\n\r\n # Layout\r\n c1, c2 = st.columns(2)\r\n with c1:\r\n with st.expander(\"LexRank Summary\"):\r\n my_summary = sumy_summarizer(raw_text)\r\n document_len = {\"Original\":len(raw_text), \r\n \"Summary\":len(my_summary)}\r\n st.write(document_len)\r\n st.write(my_summary) \r\n st.info(\"Rouge Score\")\r\n eval_df = evaluate_summary(my_summary, raw_text)\r\n st.dataframe(eval_df.T)\r\n eval_df['metrics'] = eval_df.index\r\n c = alt.Chart(eval_df).mark_bar().encode(\r\n x = 'metrics', y = 'rouge-1')\r\n st.altair_chart(c)\r\n\r\n with c2:\r\n with st.expander(\"LexRank Summary\"):\r\n my_summary = summarize(raw_text)\r\n document_len = {\"Original\":len(raw_text), \r\n \"Summary\":len(my_summary)}\r\n st.write(document_len)\r\n st.write(my_summary)\r\n\r\n st.info(\"Rouge Score\")\r\n eval_df = evaluate_summary(my_summary, raw_text)\r\n st.dataframe(eval_df.T)\r\n eval_df['metrics'] = eval_df.index\r\n c = alt.Chart(eval_df).mark_bar().encode(\r\n x = 'metrics', y = 'rouge-1')\r\n st.altair_chart(c)\r\n\r\n else:\r\n st.subheader(\"About\")\r\n st.write(\"Webpage for Summarization using NLP\")\r\n st.write(\"***\")\r\n st.write(\"### Packages Used\")\r\n st.write(\"pip install streamlit gensim==3.8.3 sumy gensim_sum_ext pandas altair seaborn rouge\")\r\n st.write(\"***\")\r\n st.write(\"This webpage is maintained by Ramakrishnan\")", "def display_score(self):\n if self.args:\n return self.display_score_for_group()\n return self.display_top_donor_for_each_group()", "def __statistics_students_enrolled(self):\n try:\n discipline_name = input(\"Give discipline discipline_name: \")\n self.__discipline_validator.validate_name(discipline_name)\n except DisciplineException as de:\n print(de)\n return\n\n menu_string = \"\\t1. Sort alphabetically \\n\\t\" \\\n \"2. Sort descending by average \\n\\t\" \\\n \"0. 
Exit\"\n command = self.__ui_read_command(menu_string)\n\n if command == '1':\n self.__ui_statistics_sort_alpha(discipline_name)\n elif command == '2':\n self.__ui_statistics_sort_avg(discipline_name)\n elif command == '0':\n return", "def main():\r\n print(\"Please enter the grade of two subjects in which you are struggling with\")\r\n subject1_grade = int(input(\"First subject's Grade: \"))\r\n subject2_grade = int(input(\"Second subject's Grade: \"))\r\n\r\n smaller_number = getSmaller(subject1_grade, subject2_grade)\r\n print(\"What is the name of the subject in which your grade is\", smaller_number, \"?\")\r\n subject_lowest_grade = input(\"Enter the subject's name: \")\r\n print(\"To improve your grade in\", subject_lowest_grade + \",\", \"you should talk with you professor.\")\r\n print(\"Also, you should schedule an appointment with an Academic Advisor\")", "def clicked_checkbox_upper_limit(self):\n spectral_model, proxy_index, index = self._get_selected_model(True)\n spectral_model.metadata[\"is_upper_limit\"] \\\n = self.checkbox_upper_limit.isChecked()\n self.measurement_view.update_row(proxy_index.row())\n self.summarize_current_table()\n self.refresh_plots()\n return None", "def worst_score(self):\r\n pass", "def calculate_risk_tol(*args):\n global total_score\n risk_tol_start = 0.0\n\n for risk_per_pg in risk_tol_per_qs.iterkeys():\n try:\n risk_tol_start = risk_tol_start + risk_tol_per_qs[risk_per_pg][-1] # this is the last item in the list of each information in the page\n except IndexError:\n pass\n total_score = risk_tol_start", "def top_students(grade_book, num_students=3):\n return sorted(grade_book, key=grade_book.get, reverse=True)[:num_students]", "def update_lowest_sell(self, limit):\n if limit.size == 0:\n #successor case\n limit = self.sell_tree.successor(limit)\n if limit is None:\n #no successor\n self.lowest_sell = None\n else: #have a successor, but dont know if it has orders or not\n if limit.size == 0:#limit has no orders but other limits in the tree might have orders\n if self.sell_tree.size == 0: #we know, no other limits have an order\n self.lowest_sell = None\n else: #other limits have an order\n while limit.size == 0:\n limit = self.sell_tree.successor(limit)\n # now our limit has a valid order, and we've found the first valid successor\n self.lowest_sell = limit.price\n else: #limit has an order, we found the valid successor!\n self.lowest_sell = limit.price", "def clicked_btn_find_upper_limit(self):\n spectral_model, proxy_index, index = self._get_selected_model(True)\n # Find the upper limit \n try:\n sigma = round(float(self.edit_ul_sigma.text()),1)\n except:\n logger.debug(\"Invalid sigma for finding limit\")\n return None\n upper_limit = spectral_model.find_upper_limit(sigma=sigma, start_at_current=True)\n # Refresh GUI\n self.measurement_view.update_row(proxy_index.row())\n self.summarize_current_table()\n self.update_fitting_options()\n self.refresh_plots()\n return None", "def _company_health_insurance(self, total_salary):\n company_health_insurance = 0\n\n if total_salary <= self.health_max_fee:\n company_health_insurance = total_salary *\\\n self.company_health_insurance_rate\n else:\n company_health_insurance = self.health_max_fee\\\n * self.company_health_insurance_rate\n #end if\n return company_health_insurance", "def valuable_customer():\r\n valuable_customer_dictionary = {}\r\n for customer in Records.customers_list:\r\n valuable_customer_dictionary.update({customer.name: customer.total})\r\n search_value = 
max(valuable_customer_dictionary.values())\r\n for name, value in valuable_customer_dictionary.items():\r\n if value == search_value:\r\n search_name = name\r\n print('The current most valuable customer is', search_name, 'with a total order value of $',\r\n search_value)\r\n input('Press enter to return to main menu\\n')\r\n return", "def upper_earning_limit(self):\n\t\treturn self._upper_earning_limit", "def adjust_salary_range(driver, salary):\n if salary == 'All': \n return\n index = ['', 'All', '40+', '60+', '80+', '100+', \n '120+', '160+', '180+', '200+'].index(salary)\n salary_button = \"html/body/div[3]/div/div[2]/div[1]/div[4]/form/div/ul/\" \\\n \"li[4]/fieldset/button\"\n salary_path = \"html/body/div[3]/div/div[2]/div[1]/div[4]/\" \\\n \"form/div/ul/li[4]/fieldset/div[1]/ol/li[{}\" \\\n \"]/div/label\".format(index)\n attempts = 1\n while True:\n try:\n elem = driver.find_element_by_xpath(salary_button)\n time.sleep(3)\n except Exception as e:\n attempts += 1\n if attempts > 25: \n break\n else:\n elem.click()\n time.sleep(3)\n driver.find_element_by_xpath(salary_path).click()\n break", "def _get_max_expense(self):\n pass" ]
[ "0.50486606", "0.5035432", "0.5007743", "0.49070513", "0.48892468", "0.4817341", "0.47293594", "0.4686217", "0.468501", "0.4654785", "0.4652862", "0.464481", "0.46400693", "0.46108285", "0.46024165", "0.45971134", "0.45901182", "0.45752954", "0.45622933", "0.45582008", "0.45527396", "0.45513022", "0.45391023", "0.45382112", "0.45357856", "0.4510129", "0.45096976", "0.44835946", "0.44796285", "0.44731036" ]
0.60446465
0
Wrapper around k8s.load_and_create_resource to create a SageMaker resource
def create_sagemaker_resource( resource_plural, resource_name, spec_file, replacements, namespace="default" ): reference, spec, resource = k8s.load_and_create_resource( resource_directory, CRD_GROUP, CRD_VERSION, resource_plural, resource_name, spec_file, replacements, namespace, ) return reference, spec, resource
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_resource(\n service_name: str, config_name: str = None, **resource_args\n):\n session = get_session(config_name)\n return session.resource(service_name, **resource_args)", "def create_resource():\n return wsgi.Resource(Controller())", "def create_resource():\n return wsgi.Resource(Controller(), serializer=ImageSerialize())", "def create_resource():\n deserializer = wsgi.JSONRequestDeserializer()\n serializer = wsgi.JSONResponseSerializer()\n return wsgi.Resource(Controller(), deserializer, serializer)", "def __init__(__self__, resource_name, opts=None, block_device_mappings=None, capacity_reservation_specification=None, credit_specification=None, description=None, disable_api_termination=None, ebs_optimized=None, elastic_gpu_specifications=None, elastic_inference_accelerator=None, iam_instance_profile=None, image_id=None, instance_initiated_shutdown_behavior=None, instance_market_options=None, instance_type=None, kernel_id=None, key_name=None, license_specifications=None, monitoring=None, name=None, name_prefix=None, network_interfaces=None, placement=None, ram_disk_id=None, security_group_names=None, tag_specifications=None, tags=None, user_data=None, vpc_security_group_ids=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['block_device_mappings'] = block_device_mappings\n __props__['capacity_reservation_specification'] = capacity_reservation_specification\n __props__['credit_specification'] = credit_specification\n __props__['description'] = description\n __props__['disable_api_termination'] = disable_api_termination\n __props__['ebs_optimized'] = ebs_optimized\n __props__['elastic_gpu_specifications'] = elastic_gpu_specifications\n __props__['elastic_inference_accelerator'] = elastic_inference_accelerator\n __props__['iam_instance_profile'] = iam_instance_profile\n __props__['image_id'] = image_id\n __props__['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior\n __props__['instance_market_options'] = instance_market_options\n __props__['instance_type'] = instance_type\n __props__['kernel_id'] = kernel_id\n __props__['key_name'] = key_name\n __props__['license_specifications'] = license_specifications\n __props__['monitoring'] = monitoring\n __props__['name'] = name\n __props__['name_prefix'] = name_prefix\n __props__['network_interfaces'] = network_interfaces\n __props__['placement'] = placement\n __props__['ram_disk_id'] = ram_disk_id\n __props__['security_group_names'] = security_group_names\n __props__['tag_specifications'] = tag_specifications\n __props__['tags'] = tags\n __props__['user_data'] = user_data\n __props__['vpc_security_group_ids'] = vpc_security_group_ids\n __props__['arn'] = None\n __props__['default_version'] = None\n __props__['latest_version'] = None\n super(LaunchTemplate, __self__).__init__(\n 
'aws:ec2/launchTemplate:LaunchTemplate',\n resource_name,\n __props__,\n opts)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n backend_type: Optional[pulumi.Input['InstanceBackendType']] = None,\n connection_name: Optional[pulumi.Input[str]] = None,\n current_disk_size: Optional[pulumi.Input[str]] = None,\n database_version: Optional[pulumi.Input['InstanceDatabaseVersion']] = None,\n disk_encryption_configuration: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionConfigurationArgs']]] = None,\n disk_encryption_status: Optional[pulumi.Input[pulumi.InputType['DiskEncryptionStatusArgs']]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n failover_replica: Optional[pulumi.Input[pulumi.InputType['InstanceFailoverReplicaArgs']]] = None,\n gce_zone: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input['InstanceInstanceType']] = None,\n ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IpMappingArgs']]]]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n maintenance_version: Optional[pulumi.Input[str]] = None,\n master_instance_name: Optional[pulumi.Input[str]] = None,\n max_disk_size: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n on_premises_configuration: Optional[pulumi.Input[pulumi.InputType['OnPremisesConfigurationArgs']]] = None,\n out_of_disk_report: Optional[pulumi.Input[pulumi.InputType['SqlOutOfDiskReportArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n replica_configuration: Optional[pulumi.Input[pulumi.InputType['ReplicaConfigurationArgs']]] = None,\n replica_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n root_password: Optional[pulumi.Input[str]] = None,\n satisfies_pzs: Optional[pulumi.Input[bool]] = None,\n scheduled_maintenance: Optional[pulumi.Input[pulumi.InputType['SqlScheduledMaintenanceArgs']]] = None,\n secondary_gce_zone: Optional[pulumi.Input[str]] = None,\n self_link: Optional[pulumi.Input[str]] = None,\n server_ca_cert: Optional[pulumi.Input[pulumi.InputType['SslCertArgs']]] = None,\n service_account_email_address: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input[pulumi.InputType['SettingsArgs']]] = None,\n state: Optional[pulumi.Input['InstanceState']] = None,\n suspension_reason: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSuspensionReasonItem']]]] = None,\n __props__=None):\n ...", "def __init__(__self__, resource_name, opts=None, allocated_capacity=None, command=None, connections=None, default_arguments=None, description=None, execution_property=None, glue_version=None, max_capacity=None, max_retries=None, name=None, number_of_workers=None, role_arn=None, security_configuration=None, tags=None, timeout=None, worker_type=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise 
TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['allocated_capacity'] = allocated_capacity\n if command is None:\n raise TypeError(\"Missing required property 'command'\")\n __props__['command'] = command\n __props__['connections'] = connections\n __props__['default_arguments'] = default_arguments\n __props__['description'] = description\n __props__['execution_property'] = execution_property\n __props__['glue_version'] = glue_version\n __props__['max_capacity'] = max_capacity\n __props__['max_retries'] = max_retries\n __props__['name'] = name\n __props__['number_of_workers'] = number_of_workers\n if role_arn is None:\n raise TypeError(\"Missing required property 'role_arn'\")\n __props__['role_arn'] = role_arn\n __props__['security_configuration'] = security_configuration\n __props__['tags'] = tags\n __props__['timeout'] = timeout\n __props__['worker_type'] = worker_type\n __props__['arn'] = None\n super(Job, __self__).__init__(\n 'aws:glue/job:Job',\n resource_name,\n __props__,\n opts)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n file_shares: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FileShareConfigArgs']]]]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n kms_key_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkConfigArgs']]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input['InstanceTier']] = None,\n __props__=None):\n ...", "def create_resource():\n #deserializer = ImageDeserializer()\n #serializer = ImageSerializer()\n return wsgi.Resource(Controller())", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_scaling_configuration_arn: Optional[pulumi.Input[str]] = None,\n encryption_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceEncryptionConfigurationArgs']]] = None,\n health_check_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceHealthCheckConfigurationArgs']]] = None,\n instance_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceInstanceConfigurationArgs']]] = None,\n network_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceNetworkConfigurationArgs']]] = None,\n observability_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceObservabilityConfigurationArgs']]] = None,\n service_name: Optional[pulumi.Input[str]] = None,\n source_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceSourceConfigurationArgs']]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def create_resource():\n return wsgi.Resource(WorkersController())", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n acl_name: Optional[pulumi.Input[str]] = None,\n auto_minor_version_upgrade: Optional[pulumi.Input[bool]] = None,\n data_tiering: Optional[pulumi.Input[bool]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n final_snapshot_name: Optional[pulumi.Input[str]] = None,\n kms_key_arn: Optional[pulumi.Input[str]] = None,\n 
maintenance_window: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n name_prefix: Optional[pulumi.Input[str]] = None,\n node_type: Optional[pulumi.Input[str]] = None,\n num_replicas_per_shard: Optional[pulumi.Input[int]] = None,\n num_shards: Optional[pulumi.Input[int]] = None,\n parameter_group_name: Optional[pulumi.Input[str]] = None,\n port: Optional[pulumi.Input[int]] = None,\n security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n snapshot_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n snapshot_name: Optional[pulumi.Input[str]] = None,\n snapshot_retention_limit: Optional[pulumi.Input[int]] = None,\n snapshot_window: Optional[pulumi.Input[str]] = None,\n sns_topic_arn: Optional[pulumi.Input[str]] = None,\n subnet_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tls_enabled: Optional[pulumi.Input[bool]] = None,\n __props__=None):\n ...", "def create(ctx, iface, resource_config, params, **_):\n\n lb_name = params.get(LB_NAME)\n if not lb_name:\n targs = \\\n utils.find_rels_by_node_type(\n ctx.instance,\n LB_TYPE)\n lb_name = \\\n targs[0].target.instance.runtime_properties[\n EXTERNAL_RESOURCE_ID]\n params.update({LB_NAME: lb_name})\n\n ctx.instance.runtime_properties[LB_NAME] = \\\n lb_name\n\n # Actually create the resource\n iface.create(params)", "def __init__(__self__,\n resource_name: str,\n args: RuntimeArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: ObjectStorageKeyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n agent_upgrade: Optional[pulumi.Input[pulumi.InputType['AgentUpgradeArgs']]] = None,\n client_public_key: Optional[pulumi.Input[str]] = None,\n extensions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MachineExtensionInstanceViewArgs']]]]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n location_data: Optional[pulumi.Input[pulumi.InputType['LocationDataArgs']]] = None,\n machine_name: Optional[pulumi.Input[str]] = None,\n mssql_discovered: Optional[pulumi.Input[str]] = None,\n os_profile: Optional[pulumi.Input[pulumi.InputType['OSProfileArgs']]] = None,\n os_type: Optional[pulumi.Input[str]] = None,\n parent_cluster_resource_id: Optional[pulumi.Input[str]] = None,\n private_link_scope_resource_id: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n service_statuses: Optional[pulumi.Input[pulumi.InputType['ServiceStatusesArgs']]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vm_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n acr: Optional[pulumi.Input[pulumi.InputType['ACRArgs']]] = None,\n aks_resource_id: Optional[pulumi.Input[str]] = None,\n app_name: Optional[pulumi.Input[str]] = None,\n branch_name: Optional[pulumi.Input[str]] = None,\n builder_version: Optional[pulumi.Input[str]] = None,\n deployment_properties: Optional[pulumi.Input[pulumi.InputType['DeploymentPropertiesArgs']]] = None,\n docker_build_context: Optional[pulumi.Input[str]] = None,\n dockerfile: Optional[pulumi.Input[str]] = None,\n dockerfile_generation_mode: 
Optional[pulumi.Input[Union[str, 'DockerfileGenerationMode']]] = None,\n dockerfile_output_directory: Optional[pulumi.Input[str]] = None,\n generation_language: Optional[pulumi.Input[Union[str, 'GenerationLanguage']]] = None,\n image_name: Optional[pulumi.Input[str]] = None,\n image_tag: Optional[pulumi.Input[str]] = None,\n language_version: Optional[pulumi.Input[str]] = None,\n last_workflow_run: Optional[pulumi.Input[pulumi.InputType['WorkflowRunArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n manifest_generation_mode: Optional[pulumi.Input[Union[str, 'ManifestGenerationMode']]] = None,\n manifest_output_directory: Optional[pulumi.Input[str]] = None,\n manifest_type: Optional[pulumi.Input[Union[str, 'GenerationManifestType']]] = None,\n namespace: Optional[pulumi.Input[str]] = None,\n oidc_credentials: Optional[pulumi.Input[pulumi.InputType['GitHubWorkflowProfileOidcCredentialsArgs']]] = None,\n port: Optional[pulumi.Input[str]] = None,\n repository_name: Optional[pulumi.Input[str]] = None,\n repository_owner: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n workflow_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n block_size_bytes: Optional[pulumi.Input[int]] = None,\n container_id: Optional[pulumi.Input[str]] = None,\n disk_file_format: Optional[pulumi.Input[Union[str, 'DiskFileFormat']]] = None,\n disk_size_gb: Optional[pulumi.Input[float]] = None,\n dynamic: Optional[pulumi.Input[bool]] = None,\n extended_location: Optional[pulumi.Input[pulumi.InputType['ExtendedLocationArgs']]] = None,\n hyper_v_generation: Optional[pulumi.Input[Union[str, 'HyperVGeneration']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n logical_sector_bytes: Optional[pulumi.Input[int]] = None,\n physical_sector_bytes: Optional[pulumi.Input[int]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n virtual_hard_disk_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, resource_name, opts=None, client_id=None, client_secret=None, client_timeout=None, initial_login=None, password=None, realm=None, root_ca_certificate=None, tls_insecure_skip_verify=None, url=None, username=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n if client_id is None:\n client_id = utilities.get_env('KEYCLOAK_CLIENT_ID')\n __props__['client_id'] = client_id\n if client_secret is None:\n client_secret = utilities.get_env('KEYCLOAK_CLIENT_SECRET')\n __props__['client_secret'] = client_secret\n if client_timeout is None:\n client_timeout = 
(utilities.get_env_int('KEYCLOAK_CLIENT_TIMEOUT') or 5)\n __props__['client_timeout'] = pulumi.Output.from_input(client_timeout).apply(json.dumps) if client_timeout is not None else None\n __props__['initial_login'] = pulumi.Output.from_input(initial_login).apply(json.dumps) if initial_login is not None else None\n if password is None:\n password = utilities.get_env('KEYCLOAK_PASSWORD')\n __props__['password'] = password\n if realm is None:\n realm = (utilities.get_env('KEYCLOAK_REALM') or 'master')\n __props__['realm'] = realm\n __props__['root_ca_certificate'] = root_ca_certificate\n __props__['tls_insecure_skip_verify'] = pulumi.Output.from_input(tls_insecure_skip_verify).apply(json.dumps) if tls_insecure_skip_verify is not None else None\n if url is None:\n url = utilities.get_env('KEYCLOAK_URL')\n __props__['url'] = url\n if username is None:\n username = utilities.get_env('KEYCLOAK_USER')\n __props__['username'] = username\n super(Provider, __self__).__init__(\n 'keycloak',\n resource_name,\n __props__,\n opts)", "def __init__(self,\n name: str = None,\n k8s_name: str = None,\n run_name: str = None,\n spec: {} = None,\n job_name: str = None,\n jar_params: {} = None,\n python_params: {} = None,\n spark_submit_params: {} = None,\n notebook_params: {} = None,\n existing_cluster_id: str = None,\n new_cluster: {} = None,\n libraries: {} = None,\n spark_jar_task: {} = None,\n spark_python_task: {} = None,\n spark_submit_task: {} = None,\n notebook_task: {} = None,\n timeout_seconds: int = None):\n\n if not spec:\n spec = {}\n\n if run_name:\n spec[\"run_name\"] = run_name\n if job_name:\n spec[\"job_name\"] = job_name\n if jar_params:\n spec[\"jar_params\"] = jar_params\n if python_params:\n spec[\"python_params\"] = python_params\n if spark_submit_params:\n spec[\"spark_submit_params\"] = spark_submit_params\n if notebook_params:\n spec[\"notebook_params\"] = notebook_params\n if new_cluster:\n spec[\"new_cluster\"] = new_cluster\n if existing_cluster_id:\n spec[\"existing_cluster_id\"] = existing_cluster_id\n if spark_jar_task:\n spec[\"spark_jar_task\"] = spark_jar_task\n if spark_python_task:\n spec[\"spark_python_task\"] = spark_python_task\n if spark_submit_task:\n spec[\"spark_submit_task\"] = spark_submit_task\n if notebook_task:\n spec[\"notebook_task\"] = notebook_task\n if libraries:\n spec[\"libraries\"] = libraries\n if timeout_seconds:\n spec[\"timeout_seconds\"] = timeout_seconds\n\n if not k8s_name and \"run_name\" in spec:\n k8s_name = spec[\"run_name\"]\n elif not k8s_name:\n raise ValueError(\"You need to provide a k8s_name or a run_name.\")\n\n super().__init__(\n k8s_resource={\n \"apiVersion\": \"databricks.microsoft.com/v1alpha1\",\n \"kind\": \"Run\",\n \"metadata\": {\n \"name\": k8s_name\n },\n \"spec\": spec\n },\n action=\"create\",\n success_condition=(\"status.metadata.state.life_cycle_state in \"\n \"(TERMINATED, SKIPPED, INTERNAL_ERROR)\"),\n attribute_outputs={\n \"name\": \"{.metadata.name}\",\n \"job_id\": \"{.status.metadata.job_id}\",\n \"number_in_job\": \"{.status.metadata.number_in_job}\",\n \"run_id\": \"{.status.metadata.run_id}\",\n \"run_name\": \"{.status.metadata.run_name}\",\n \"life_cycle_state\": \"{.status.metadata.state.life_cycle_state}\",\n \"result_state\": \"{.status.metadata.state.result_state}\",\n \"notebook_output_result\": \"{.status.notebook_output.result}\",\n \"notebook_output_truncated\": \"{.status.notebook_output.truncated}\",\n \"error\": \"{.status.error}\"\n },\n name=name)", "def __init__(__self__,\n resource_name: 
str,\n opts: Optional[pulumi.ResourceOptions] = None,\n application_insights_id: Optional[pulumi.Input[str]] = None,\n container_registry_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption: Optional[pulumi.Input[pulumi.InputType['WorkspaceEncryptionArgs']]] = None,\n friendly_name: Optional[pulumi.Input[str]] = None,\n high_business_impact: Optional[pulumi.Input[bool]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['WorkspaceIdentityArgs']]] = None,\n image_build_compute_name: Optional[pulumi.Input[str]] = None,\n key_vault_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n primary_user_assigned_identity: Optional[pulumi.Input[str]] = None,\n public_access_behind_virtual_network_enabled: Optional[pulumi.Input[bool]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n storage_account_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n v1_legacy_mode_enabled: Optional[pulumi.Input[bool]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: Optional[InstanceArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_renew: Optional[pulumi.Input[bool]] = None,\n auto_renew_period: Optional[pulumi.Input[int]] = None,\n cluster_name: Optional[pulumi.Input[str]] = None,\n data_center_name: Optional[pulumi.Input[str]] = None,\n disk_size: Optional[pulumi.Input[int]] = None,\n disk_type: Optional[pulumi.Input[str]] = None,\n enable_public: Optional[pulumi.Input[bool]] = None,\n instance_type: Optional[pulumi.Input[str]] = None,\n ip_white: Optional[pulumi.Input[str]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n major_version: Optional[pulumi.Input[str]] = None,\n node_count: Optional[pulumi.Input[int]] = None,\n password: Optional[pulumi.Input[str]] = None,\n pay_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[int]] = None,\n period_unit: Optional[pulumi.Input[str]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n add_on: Optional[pulumi.Input[pulumi.InputType['InstanceAddOnArgs']]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n blueprint_id: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, resource_name, opts=None, aws_kms_key_arn=None, content_config=None, content_config_permissions=None, input_bucket=None, name=None, notifications=None, output_bucket=None, role=None, thumbnail_config=None, 
thumbnail_config_permissions=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['aws_kms_key_arn'] = aws_kms_key_arn\n __props__['content_config'] = content_config\n __props__['content_config_permissions'] = content_config_permissions\n if input_bucket is None:\n raise TypeError(\"Missing required property 'input_bucket'\")\n __props__['input_bucket'] = input_bucket\n __props__['name'] = name\n __props__['notifications'] = notifications\n __props__['output_bucket'] = output_bucket\n if role is None:\n raise TypeError(\"Missing required property 'role'\")\n __props__['role'] = role\n __props__['thumbnail_config'] = thumbnail_config\n __props__['thumbnail_config_permissions'] = thumbnail_config_permissions\n __props__['arn'] = None\n super(Pipeline, __self__).__init__(\n 'aws:elastictranscoder/pipeline:Pipeline',\n resource_name,\n __props__,\n opts)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n crypto_key_backend: Optional[pulumi.Input[str]] = None,\n crypto_key_id: Optional[pulumi.Input[str]] = None,\n destroy_scheduled_duration: Optional[pulumi.Input[str]] = None,\n import_only: Optional[pulumi.Input[bool]] = None,\n key_ring_id: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n next_rotation_time: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n purpose: Optional[pulumi.Input['CryptoKeyPurpose']] = None,\n rotation_period: Optional[pulumi.Input[str]] = None,\n skip_initial_version_creation: Optional[pulumi.Input[bool]] = None,\n version_template: Optional[pulumi.Input[pulumi.InputType['CryptoKeyVersionTemplateArgs']]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n container_registry_name: Optional[pulumi.Input[str]] = None,\n instance_count: Optional[pulumi.Input[int]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tier: Optional[pulumi.Input[str]] = None,\n virtual_network_subnet_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def create(self, resource_name, data_dict):\n try:\n resource_cls = getattr(self, resource_name).resource\n except AttributeError:\n raise AttributeError(\"No resource named %s is defined.\" % resource_name)\n\n return resource_cls.create(self, data_dict)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n cluster_id: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n 
private_network_uuid: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[Union[str, 'Region']]] = None,\n size: Optional[pulumi.Input[Union[str, 'DatabaseSlug']]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ..." ]
[ "0.6695489", "0.65566033", "0.6527681", "0.63303506", "0.61349356", "0.6096821", "0.6095365", "0.6082456", "0.60742486", "0.6072159", "0.6070179", "0.60697085", "0.6067538", "0.6065695", "0.60196674", "0.60108477", "0.59737706", "0.59575313", "0.59364146", "0.5927485", "0.5908221", "0.587884", "0.5864971", "0.5854595", "0.5852691", "0.5818908", "0.5812263", "0.5801604", "0.57875985", "0.5774461" ]
0.7768747
0
Wrapper around k8s.load_and_create_resource to create an Adopted resource
def create_adopted_resource(replacements, namespace="default"): reference, spec, resource = k8s.load_and_create_resource( resource_directory, ADOPTED_RESOURCE_CRD_GROUP, CRD_VERSION, "adoptedresources", replacements["ADOPTED_RESOURCE_NAME"], "adopted_resource_base", replacements, namespace, ) return reference, spec, resource
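A minimal calling sketch for the wrapper above. It assumes the surrounding test module already provides k8s, resource_directory, ADOPTED_RESOURCE_CRD_GROUP and CRD_VERSION; every replacement key other than ADOPTED_RESOURCE_NAME is a hypothetical placeholder that would have to match whatever the adopted_resource_base template actually expects.

    # Hypothetical replacements: only ADOPTED_RESOURCE_NAME is read by the wrapper
    # itself; any other keys stand in for fields of the spec template.
    replacements = {
        "ADOPTED_RESOURCE_NAME": "adopted-bucket",
        "TARGET_RESOURCE_NAME": "my-existing-bucket",  # assumed template field
    }

    reference, spec, resource = create_adopted_resource(replacements, namespace="default")

    # The three values are passed straight through from k8s.load_and_create_resource;
    # the reference is typically kept for later lookups and cleanup.
    assert resource is not None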
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_resource(\n service_name: str, config_name: str = None, **resource_args\n):\n session = get_session(config_name)\n return session.resource(service_name, **resource_args)", "def create_sagemaker_resource(\n resource_plural, resource_name, spec_file, replacements, namespace=\"default\"\n):\n\n reference, spec, resource = k8s.load_and_create_resource(\n resource_directory,\n CRD_GROUP,\n CRD_VERSION,\n resource_plural,\n resource_name,\n spec_file,\n replacements,\n namespace,\n )\n\n return reference, spec, resource", "def create(ctx, iface, resource_config, params, **_):\n\n lb_name = params.get(LB_NAME)\n if not lb_name:\n targs = \\\n utils.find_rels_by_node_type(\n ctx.instance,\n LB_TYPE)\n lb_name = \\\n targs[0].target.instance.runtime_properties[\n EXTERNAL_RESOURCE_ID]\n params.update({LB_NAME: lb_name})\n\n ctx.instance.runtime_properties[LB_NAME] = \\\n lb_name\n\n # Actually create the resource\n iface.create(params)", "def create_sticky(ctx, iface, resource_config, **_):\n\n # Create a copy of the resource config for clean manipulation.\n params = \\\n dict() if not resource_config else resource_config.copy()\n\n lb_name = params.get(LB_NAME)\n policy_name = params.get(RESOURCE_NAME)\n\n if not lb_name:\n targs = \\\n utils.find_rels_by_node_type(\n ctx.instance,\n LB_TYPE)\n lb_name = \\\n targs[0].target.instance.runtime_properties[\n EXTERNAL_RESOURCE_ID]\n params.update({LB_NAME: lb_name})\n\n ctx.instance.runtime_properties[LB_NAME] = \\\n lb_name\n ctx.instance.runtime_properties[RESOURCE_NAME] = \\\n policy_name\n\n # Actually create the resource\n iface.create_sticky(params)", "def create_resource():\n return wsgi.Resource(Controller(), serializer=ImageSerialize())", "def create_resource(self, **kwargs):\n results = self.api.action.resource_create(**kwargs)\n # TODO: use `results` rather than re-download, using an isolation layer to standardize the re-structure\n self.get_ckan_metadata(True)\n if 'id' in results:\n self._import_resource_to_cache(kwargs['upload'], results['id'])\n return results", "def create_resource():\n return wsgi.Resource(Controller())", "def add_resource(client, api_id, parent_resource, sub_path):\n response = client.create_resource(\n restApiId=api_id,\n parentId=parent_resource['id'],\n pathPart=sub_path)\n file_name = \"{0}_resource.pickle\".format(sub_path)\n pickle_dictionary_to_file(response, file_name)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n add_on: Optional[pulumi.Input[pulumi.InputType['InstanceAddOnArgs']]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n blueprint_id: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n cpu_count: Optional[pulumi.Input[int]] = None,\n created_at: Optional[pulumi.Input[str]] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n is_static_ip: Optional[pulumi.Input[bool]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n public_ip_address: Optional[pulumi.Input[str]] = None,\n ram_size: Optional[pulumi.Input[float]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: 
Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"add_on\"] = add_on\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"availability_zone\"] = availability_zone\n __props__.__dict__[\"blueprint_id\"] = blueprint_id\n __props__.__dict__[\"bundle_id\"] = bundle_id\n __props__.__dict__[\"cpu_count\"] = cpu_count\n __props__.__dict__[\"created_at\"] = created_at\n __props__.__dict__[\"ip_address_type\"] = ip_address_type\n __props__.__dict__[\"ipv6_address\"] = ipv6_address\n __props__.__dict__[\"ipv6_addresses\"] = ipv6_addresses\n __props__.__dict__[\"is_static_ip\"] = is_static_ip\n __props__.__dict__[\"key_pair_name\"] = key_pair_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_ip_address\"] = private_ip_address\n __props__.__dict__[\"public_ip_address\"] = public_ip_address\n __props__.__dict__[\"ram_size\"] = ram_size\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"user_data\"] = user_data\n __props__.__dict__[\"username\"] = username\n return Instance(resource_name, opts=opts, __props__=__props__)", "def __init__(__self__, resource_name, opts=None, allocated_capacity=None, command=None, connections=None, default_arguments=None, description=None, execution_property=None, glue_version=None, max_capacity=None, max_retries=None, name=None, number_of_workers=None, role_arn=None, security_configuration=None, tags=None, timeout=None, worker_type=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['allocated_capacity'] = allocated_capacity\n if command is None:\n raise TypeError(\"Missing required property 'command'\")\n __props__['command'] = command\n __props__['connections'] = connections\n __props__['default_arguments'] = default_arguments\n __props__['description'] = description\n __props__['execution_property'] = execution_property\n __props__['glue_version'] = glue_version\n __props__['max_capacity'] = max_capacity\n __props__['max_retries'] = max_retries\n __props__['name'] = name\n __props__['number_of_workers'] = number_of_workers\n if role_arn is None:\n raise TypeError(\"Missing required property 'role_arn'\")\n __props__['role_arn'] = role_arn\n __props__['security_configuration'] = security_configuration\n __props__['tags'] = tags\n __props__['timeout'] = timeout\n __props__['worker_type'] = worker_type\n __props__['arn'] = None\n super(Job, __self__).__init__(\n 'aws:glue/job:Job',\n resource_name,\n __props__,\n opts)", "def create_resource():\n deserializer = wsgi.JSONRequestDeserializer()\n serializer = wsgi.JSONResponseSerializer()\n return 
wsgi.Resource(Controller(), deserializer, serializer)", "def pre_namespace_create(self, resource_dict):\n pass", "def __init__(__self__,\n resource_name: str,\n args: ObjectStorageKeyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: RuntimeArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def create(self, resource_name, data_dict):\n try:\n resource_cls = getattr(self, resource_name).resource\n except AttributeError:\n raise AttributeError(\"No resource named %s is defined.\" % resource_name)\n\n return resource_cls.create(self, data_dict)", "def create_resource():\n #deserializer = ImageDeserializer()\n #serializer = ImageSerializer()\n return wsgi.Resource(Controller())", "def create(ctx, iface, resource_config, **_):\n resource_id = \\\n utils.get_resource_id(\n ctx.node,\n ctx.instance,\n resource_config.get(VPN_CONNECTION_ID),\n use_instance_id=True\n )\n utils.update_resource_id(ctx.instance, resource_id)\n # Actually create the resource\n create_response = iface.create(resource_config)\n ctx.instance.runtime_properties['create_response'] = \\\n utils.JsonCleanuper(create_response).to_dict()\n ctx.instance.runtime_properties['VPN_CONNECTION_ID'] = \\\n resource_config.get(VPN_CONNECTION_ID)\n ctx.instance.runtime_properties['DESTINATION_CIDR_BLOCK'] = \\\n resource_config.get(DESTINATION_CIDR_BLOCK)", "def __init__(__self__, resource_name, opts=None, block_device_mappings=None, capacity_reservation_specification=None, credit_specification=None, description=None, disable_api_termination=None, ebs_optimized=None, elastic_gpu_specifications=None, elastic_inference_accelerator=None, iam_instance_profile=None, image_id=None, instance_initiated_shutdown_behavior=None, instance_market_options=None, instance_type=None, kernel_id=None, key_name=None, license_specifications=None, monitoring=None, name=None, name_prefix=None, network_interfaces=None, placement=None, ram_disk_id=None, security_group_names=None, tag_specifications=None, tags=None, user_data=None, vpc_security_group_ids=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['block_device_mappings'] = block_device_mappings\n __props__['capacity_reservation_specification'] = capacity_reservation_specification\n __props__['credit_specification'] = credit_specification\n __props__['description'] = description\n __props__['disable_api_termination'] = disable_api_termination\n __props__['ebs_optimized'] = ebs_optimized\n __props__['elastic_gpu_specifications'] = elastic_gpu_specifications\n __props__['elastic_inference_accelerator'] = elastic_inference_accelerator\n __props__['iam_instance_profile'] = iam_instance_profile\n __props__['image_id'] = image_id\n __props__['instance_initiated_shutdown_behavior'] = 
instance_initiated_shutdown_behavior\n __props__['instance_market_options'] = instance_market_options\n __props__['instance_type'] = instance_type\n __props__['kernel_id'] = kernel_id\n __props__['key_name'] = key_name\n __props__['license_specifications'] = license_specifications\n __props__['monitoring'] = monitoring\n __props__['name'] = name\n __props__['name_prefix'] = name_prefix\n __props__['network_interfaces'] = network_interfaces\n __props__['placement'] = placement\n __props__['ram_disk_id'] = ram_disk_id\n __props__['security_group_names'] = security_group_names\n __props__['tag_specifications'] = tag_specifications\n __props__['tags'] = tags\n __props__['user_data'] = user_data\n __props__['vpc_security_group_ids'] = vpc_security_group_ids\n __props__['arn'] = None\n __props__['default_version'] = None\n __props__['latest_version'] = None\n super(LaunchTemplate, __self__).__init__(\n 'aws:ec2/launchTemplate:LaunchTemplate',\n resource_name,\n __props__,\n opts)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n add_on: Optional[pulumi.Input[pulumi.InputType['InstanceAddOnArgs']]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n blueprint_id: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def add_resource(self, name, controller, ipaddress, ram, cpus, storage, owner=None, flavor='', compute=None, huge_pages=False):\n if compute is None: compute = controller\n args = { 'vm': name,\n 'controller': controller,\n 'ipaddress': ipaddress,\n 'ram': ram,\n 'cpus': cpus,\n 'storage': storage,\n 'flavor': flavor,\n 'compute': compute,\n 'hugepages': huge_pages,\n }\n if owner is not None:\n args['owner'] = owner\n self._NDL_API('addresource', args, None)", "def __init__(__self__,\n resource_name: str,\n args: Optional[InstanceArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def _create_resource(resource, **options):\n global _existing\n\n if _existing[resource]:\n print('{r} {k}:{v} already exists with id {i}.'.format(\n r=resource,\n k=args.tag,\n v=args.role,\n i=_existing[resource].id\n ))\n return True\n\n print('{v} a {r} with parameters: {p}...'.format(\n v='Would create' if dry else 'Creating',\n r=resource,\n p=str(options)\n ))\n\n if dry:\n return True\n\n # All easy cases out of the way, we now need to actually create something.\n r = None\n try:\n r = getattr(ec2, definitions[resource].create)(** options)\n # In some cases (instance) a list is returned instead of one item. Quack!\n try:\n r = r[0]\n except:\n pass\n _tag_resource(r)\n print('... 
{r} id {i} created.'.format(\n r=resource,\n i=r.id\n ))\n _existing[resource] = r\n return True\n except Exception as e:\n if r is None:\n print('Could not create resource {r}.'.format(\n r=resource\n ))\n traceback.print_exc()\n else:\n print('Could not tag resource {r}, id {i}.'.format(\n r=resource,\n i=r.id\n ))\n traceback.print_exc()\n _destroy_resource(resource)\n return False", "def __init__(__self__, resource_name, opts=None, destination_cidr_block=None, destination_ipv6_cidr_block=None, egress_only_gateway_id=None, gateway_id=None, instance_id=None, nat_gateway_id=None, network_interface_id=None, route_table_id=None, transit_gateway_id=None, vpc_peering_connection_id=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['destination_cidr_block'] = destination_cidr_block\n __props__['destination_ipv6_cidr_block'] = destination_ipv6_cidr_block\n __props__['egress_only_gateway_id'] = egress_only_gateway_id\n __props__['gateway_id'] = gateway_id\n __props__['instance_id'] = instance_id\n __props__['nat_gateway_id'] = nat_gateway_id\n __props__['network_interface_id'] = network_interface_id\n if route_table_id is None:\n raise TypeError(\"Missing required property 'route_table_id'\")\n __props__['route_table_id'] = route_table_id\n __props__['transit_gateway_id'] = transit_gateway_id\n __props__['vpc_peering_connection_id'] = vpc_peering_connection_id\n __props__['destination_prefix_list_id'] = None\n __props__['instance_owner_id'] = None\n __props__['origin'] = None\n __props__['state'] = None\n super(Route, __self__).__init__(\n 'aws:ec2/route:Route',\n resource_name,\n __props__,\n opts)", "def resource_create(resource_id, resource_type, resource_options=None, cibfile=None):\n return item_create(\n item=\"resource\",\n item_id=resource_id,\n item_type=resource_type,\n extra_args=resource_options,\n cibfile=cibfile,\n )", "def create(self, validated_data):\n resource = Resource.objects.create(**validated_data.get(\"resource\"))\n return Secret.objects.create(resource=resource)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n dry_run: Optional[pulumi.Input[bool]] = None,\n load_balancer_id: Optional[pulumi.Input[str]] = None,\n security_group_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, resource_name, opts=None, client_id=None, client_secret=None, client_timeout=None, initial_login=None, password=None, realm=None, root_ca_certificate=None, tls_insecure_skip_verify=None, url=None, username=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' 
instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n if client_id is None:\n client_id = utilities.get_env('KEYCLOAK_CLIENT_ID')\n __props__['client_id'] = client_id\n if client_secret is None:\n client_secret = utilities.get_env('KEYCLOAK_CLIENT_SECRET')\n __props__['client_secret'] = client_secret\n if client_timeout is None:\n client_timeout = (utilities.get_env_int('KEYCLOAK_CLIENT_TIMEOUT') or 5)\n __props__['client_timeout'] = pulumi.Output.from_input(client_timeout).apply(json.dumps) if client_timeout is not None else None\n __props__['initial_login'] = pulumi.Output.from_input(initial_login).apply(json.dumps) if initial_login is not None else None\n if password is None:\n password = utilities.get_env('KEYCLOAK_PASSWORD')\n __props__['password'] = password\n if realm is None:\n realm = (utilities.get_env('KEYCLOAK_REALM') or 'master')\n __props__['realm'] = realm\n __props__['root_ca_certificate'] = root_ca_certificate\n __props__['tls_insecure_skip_verify'] = pulumi.Output.from_input(tls_insecure_skip_verify).apply(json.dumps) if tls_insecure_skip_verify is not None else None\n if url is None:\n url = utilities.get_env('KEYCLOAK_URL')\n __props__['url'] = url\n if username is None:\n username = utilities.get_env('KEYCLOAK_USER')\n __props__['username'] = username\n super(Provider, __self__).__init__(\n 'keycloak',\n resource_name,\n __props__,\n opts)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n file_shares: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FileShareConfigArgs']]]]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n kms_key_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkConfigArgs']]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input['InstanceTier']] = None,\n __props__=None):\n ...", "def GenerateAssetForCreateRequest(args):\n module = dataplex_api.GetMessageModule()\n resource_spec_field = module.GoogleCloudDataplexV1AssetResourceSpec\n resource_spec = module.GoogleCloudDataplexV1AssetResourceSpec(\n name=args.resource_name,\n type=resource_spec_field.TypeValueValuesEnum(args.resource_type),\n )\n request = module.GoogleCloudDataplexV1Asset(\n description=args.description,\n displayName=args.display_name,\n labels=dataplex_api.CreateLabels(module.GoogleCloudDataplexV1Asset, args),\n resourceSpec=resource_spec,\n )\n discovery = GenerateDiscoverySpec(args)\n if discovery != module.GoogleCloudDataplexV1AssetDiscoverySpec():\n setattr(request, 'discoverySpec', discovery)\n return request", "def __init__(__self__,\n resource_name: str,\n args: Optional[TargetPoolArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ..." ]
[ "0.6452652", "0.6253703", "0.6171513", "0.6096159", "0.59922796", "0.59912336", "0.595014", "0.58938444", "0.5844623", "0.5840295", "0.58089936", "0.57912517", "0.57580245", "0.5738343", "0.573344", "0.5730226", "0.5724765", "0.5722394", "0.5716222", "0.5712392", "0.56978524", "0.5697366", "0.56828666", "0.56782913", "0.56513584", "0.5622816", "0.5604982", "0.5604254", "0.56011033", "0.55805737" ]
0.71058977
0
Get the scale for a unit
def get_scale(units, compartmentId, volume, extracellularVolume): if compartmentId == 'c': V = volume else: V = extracellularVolume if units == 'uM': return 1. / N_AVOGADRO / V * 1e6 elif units == 'mM': return 1. / N_AVOGADRO / V * 1e3 elif units == 'molecules': return 1. else: raise Exception('Invalid units "%s"' % units)
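A short usage sketch for the conversion above, assuming N_AVOGADRO is the module-level Avogadro constant the function relies on and that volumes are expressed in litres (both are assumptions, the snippet itself does not say). Multiplying a molecule count by the returned scale gives the amount in the requested unit.

    N_AVOGADRO = 6.02214076e23         # constant assumed by get_scale
    volume = 1e-15                     # cytosolic volume in litres (illustrative)
    extracellularVolume = 1e-12        # extracellular volume in litres (illustrative)

    copies = 1000                      # molecule count in the cytosol ('c')
    conc_uM = copies * get_scale('uM', 'c', volume, extracellularVolume)   # ~1.66 uM
    conc_mM = copies * get_scale('mM', 'c', volume, extracellularVolume)   # ~1.66e-3 mM
    count = copies * get_scale('molecules', 'c', volume, extracellularVolume)  # 1000.0, unchanged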
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getScale(self):\n return _libsbml.Unit_getScale(self)", "def scale(self):\n return self.scale_factor / CONSTANTS.AU", "def GetScale(self):\n ...", "def scale(self):\n return self._scale", "def scale(self) -> Tuple[float, float]:\n return self._scale", "def get_unit(scale):\n scale2unit = { 1e-9: 'nm',\n 1e-6: u'\\N{MICRO SIGN}m', #or hex id (lookup): u'\\u00B5'\n 1e-3: 'mm',\n 0.01: 'cm',\n 0.1:'dm',\n 1:'m',\n 1000:'km',\n # time\n 8.6400e4:'day',\n 3.1536e7:'yr',\n 3.1536e10:'ka',\n 3.1536e13:'Ma',\n #Pressure\n 1e9: 'GPa',\n 1e6: 'MPa',\n }\n return scale2unit[scale]", "def get_scale():\r\n\r\n \r\n return 0.5", "def dim_unit_scaling(in_unit, out_unit):\n\n unit_vals = {\n 'nm': 1e-9,\n 'um': 1e-6,\n 'mm': 1e-3,\n 'cm': 1e-2,\n 'm': 1.0,\n 'km': 1e3,\n }\n\n if in_unit not in unit_vals:\n raise ValueError(\n 'Invalid input unit {}. Must be one of {}'.format(\n in_unit, list(unit_vals.keys())\n )\n )\n if out_unit not in unit_vals:\n raise ValueError(\n 'Invalid input unit {}. Must be one of {}'.format(\n in_unit, list(unit_vals.keys())\n )\n )\n\n return unit_vals[in_unit]/unit_vals[out_unit]", "def getScale(self):\n \n dag_node = OpenMaya.MFnDagNode(self.thisObj)\n transform_node = OpenMaya.MFnTransform(dag_node.parent( 0 ))\n \n util = OpenMaya.MScriptUtil()\n util.createFromDouble(0.0, 0.0, 0.0)\n pointeur = util.asDoublePtr()\n transform_node.getScale(pointeur)\n \n sx = util.getDoubleArrayItem(pointeur, 0)\n sy = util.getDoubleArrayItem(pointeur, 1)\n sz = util.getDoubleArrayItem(pointeur, 2)\n\n return sx, sy, sz", "def get_scale(self):\n\n if not hasattr(self, \"scale\"):\n raise NotImplementedError(\"\"\"All end-use subclasses of Color should define\n a get_scale method.\"\"\")\n\n return self.scale", "def scale(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"scale\")", "def scale(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"scale\")", "def scale(self):\n return self.distribution.scale", "def get_scale(self):\r\n try: return self.scale[0], self.scale[1], self.scale[2]\r\n except: return self.scale, self.scale, self.scale", "def GetUserScale(*args, **kwargs):\n return _gdi_.DC_GetUserScale(*args, **kwargs)", "def unitsize(self, unit):\n\t\treturn self._unitsize[unit]", "def scaling(self):\n return self.__scaling", "def scale(self):\n return self._gev_bijector.scale", "def getScale(self):\n return self.factor**self.turnOn", "def _get_scaling(root):\n dpi = root.winfo_fpixels(\"1i\")\n scaling = dpi / 72.0\n logger.debug(\"dpi: %s, scaling: %s'\", dpi, scaling)\n return scaling", "def scaling(self):\n return self._scaling", "def scaling(self):\n return self._scaling", "def scale(self):\n return self._a", "def _scale_coord_to_meters(coord, unit):\n if unit == 'cm':\n return np.divide(coord, 100.)\n elif unit == 'mm':\n return np.divide(coord, 1000.)\n else:\n return coord", "def setScale(self, *args):\n return _libsbml.Unit_setScale(self, *args)", "def scale(self) -> Optional[pulumi.Input['ScaleArgs']]:\n return pulumi.get(self, \"scale\")", "def scale_value(self):\n return self._scale_value[2]", "def get_scale_op(self):\n\t\treturn self.variables.get('scale')", "def scale_parameter(self):\n return self._scale_parameter", "def get_zoom(self) -> float:\n transform = self.transform()\n cur_scale = (transform.m11(), transform.m22())\n return float(f\"{cur_scale[0] - 1.0:0.2f}\")" ]
[ "0.80368865", "0.7562799", "0.74804133", "0.73843277", "0.7313599", "0.73030704", "0.72814995", "0.7198944", "0.7182465", "0.71608347", "0.71563214", "0.71075463", "0.7067282", "0.7034129", "0.7005473", "0.69814765", "0.6953271", "0.69395196", "0.6914729", "0.6903628", "0.6891157", "0.6891157", "0.6853376", "0.68275654", "0.67949283", "0.6703553", "0.66870934", "0.664717", "0.66379094", "0.6575207" ]
0.7647739
1
Euclidean distance between vector and matrix.
def euclid_dist(vec, mat): return np.linalg.norm(mat - vec, axis=1)
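A quick check of the behaviour, assuming numpy is imported as np in the original module: the vector is broadcast against every row of the matrix, and the row-wise L2 norms come back as a 1-D array.

    import numpy as np

    mat = np.array([[0.0, 0.0],
                    [3.0, 4.0],
                    [6.0, 8.0]])
    vec = np.array([0.0, 0.0])

    # Distance from `vec` to each row of `mat`.
    print(euclid_dist(vec, mat))   # [ 0.  5. 10.]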
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def euclidean_distance(vector_x, vector_y):\n if len(vector_x) != len(vector_y):\n raise Exception('Vectors must be same dimensions')\n return math.sqrt(sum((vector_x[dim] - vector_y[dim]) ** 2 for dim in range(len(vector_x))))", "def get_distance(self,row_vector):\n d = row_vector-self.X_test\n \n return np.sqrt(np.dot(d,d)) # return the euclidean distance", "def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n distance_vector: np.ndarray = x - y\n distance = compute_norm(distance_vector)\n return distance", "def calculate_euclidean_distance(self, matrix, input, output_neuron):\n result = 0\n\n # Loop over all input data.\n diff = input - matrix[output_neuron]\n return np.sqrt(sum(diff*diff))", "def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:\n return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))", "def euclidean_distance(vec1, vec2):\n return numpy.linalg.norm(vec1 - vec2)", "def get_euclidean_vector(vector):\n\n return np.subtract(vector[1], vector[0])", "def calculateEuclideanDistance(vector):\r\n global euclideanDistance\r\n # create linkage matrix with the distance metric as euclidean distance\r\n # calculate the distances of the clusters by starting as singletons\r\n # and in each iteration will merge the two clusters which have the smallest distance\r\n # returns array of length n - 1\r\n # Z[i] will tell us which clusters were merged in the i-th iteration\r\n # each row has format [cluster1, cluster1, dist, sample_count].\r\n euclideanDistance = linkage(vector, metric='euclidean')", "def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n\n distance = np.linalg.norm(x - y)\n\n return distance", "def euclidean_distance(vec):\n\n x, y = vec\n distance = tf.math.sqrt(tf.math.reduce_sum(tf.math.square(x - y), axis=-1, keepdims=True))\n return distance", "def distance_metric(u, v):\n if len(u) != len(v):\n raise Exception(\n \"Distance metric not valid for differently sized vectors\")\n sum = 0.\n for i in range(len(u)):\n sum += ((u[i] - v[i]) ** 2)\n return math.sqrt(sum)", "def compute_euclidean_dist(vec1, vec2):\r\n assert len(vec1) == len(vec2)\r\n vec1 = np.array(vec1)\r\n vec2 = np.array(vec2)\r\n return np.sqrt(np.sum(np.square(vec2 - vec1)))", "def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:\n return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)", "def metric_euclid(vector):\n return la.norm(vector)", "def euclidean_distance(vector1, vector2):\n e_dist = [(v1 - v2) ** 2 for v1, v2 in zip(vector1, vector2)]\n e_dist = math.sqrt(sum(e_dist))\n return e_dist", "def euclidean(x_mat: 'Tensor', y_mat: 'Tensor', device: str = 'cpu') -> 'numpy.ndarray':\n device = tf.device('/GPU:0') if device == 'cuda' else tf.device('/CPU:0')\n\n with _get_tf_device(device):\n return tf.sqrt(\n tf.reduce_sum((tf.expand_dims(x_mat, 1) - tf.expand_dims(y_mat, 0)) ** 2, 2)\n ).numpy()", "def euclidean_distance(x1: np.ndarray, x2: np.ndarray) -> float:\n return np.sqrt(np.square(x1 - x2).sum())", "def _vector_dist(self, vec1, vec2):\r\n return sqrt(sum([(float(v1) - float(v2)) ** 2 for v1, v2 in\r\n zip(vec1, vec2)]))", "def get_euclid_dist(vec_1, vec_2):\n\n\treturn np.sqrt(np.sum(np.fabs(vec_1 - vec_2), axis=1)).flatten()", "def euclidean_distance(a, b):\n return np.linalg.norm(a - b)", "def get_distance(self, vec):\r\n\r\n sum = 0\r\n if len(self.weights) == len(vec):\r\n for i in range(len(vec)):\r\n sum += (self.weights[i] - vec[i]) * (self.weights[i] - vec[i])\r\n return 
np.sqrt(sum)\r\n else:\r\n sys.exit(\"Error: dimension of nodes != input data dimension!\")", "def euclidean_distance(vector_1, vector_2) -> float:\n\n\n before_square_root = 0\n for i in range(len(vector_1)):\n before_square_root += (vector_1[i] - vector_2[i])**2\n\n d = math.sqrt(before_square_root)\n print(d)\n return(d)", "def pairwise_euclidean_distance(M, axis=0):\n\n return pairwise_distance(M, 'euclidean', axis=axis)", "def distance(self, u, v):\n numerator = np.dot(u,v)\n denominator = np.linalg.norm(u) * np.linalg.norm(v)\n similarity = numerator/(denominator +1e-7)\n return similarity", "def __dist(u, v):\n return spatial.distance.euclidean(u, v)", "def distance(self, vector1, vector2):\n\t\tsum_sq = 0\n\t\tfor i in range(28):\n\t\t\tfor j in range(28):\n\t\t\t\tsum_sq += (vector1[i][j] - vector2[i][j])**2\n\t\treturn math.sqrt(sum_sq)", "def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.square(np.subtract(x1, x2))))", "def _euclid_distance(self, A, B, axis=1):\n return np.linalg.norm(A - B, axis=axis)", "def distance_matrix(data):\n D = numpy.zeros( (data.shape[0], data.shape[0]) )\n for i in xrange(data.shape[0]):\n for j in xrange(i):\n D[i,j] = numpy.linalg.norm(data[i,:]-data[j,:])\n D[j,i] = D[i,j]\n\n return D", "def distancematrix(vec1, vec2):\n v1, v2 = np.meshgrid(vec1, vec2)\n return np.abs(v1 - v2)" ]
[ "0.7465319", "0.73555523", "0.7279082", "0.7155775", "0.70827484", "0.7002339", "0.6978049", "0.69625634", "0.69552696", "0.68833405", "0.6845088", "0.684352", "0.6810883", "0.6807818", "0.6792588", "0.675617", "0.67391354", "0.668006", "0.6631312", "0.6624541", "0.6609537", "0.6573236", "0.65625995", "0.6560832", "0.653255", "0.65312594", "0.65285546", "0.65049106", "0.6495866", "0.6495561" ]
0.77098405
0
Generate a description for the combination of well, tile, channel and, optionally, depth and/or time
def generate_tile_description(tile, time=None, depth=None):
    desc = "s" + str(tile)
    if depth is not None:
        desc = desc + "_z" + str(depth)
    if time is not None:
        desc = desc + "_t" + str(time)
    return desc
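A few example calls showing the identifiers the helper builds: the tile index is prefixed with "s", depth with "_z" and time with "_t", and the depth suffix is always appended before the time suffix, whichever keyword arguments are supplied.

    generate_tile_description(3)                    # 's3'
    generate_tile_description(3, depth=2)           # 's3_z2'
    generate_tile_description(3, time=5)            # 's3_t5'
    generate_tile_description(3, time=5, depth=2)   # 's3_z2_t5'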
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def describe(self):\n branch = randint(0,62)\n \n if 0 <= branch <= 29: \n if self.casteOrder[0] == 'soldiers':\n if self.genesis == 'escape persecution': \n self.description = '{2}: A full service {3} for retired {1}'.format(choice(self.__class__.badjectives), choice(self.__class__.soldierSynonyms), self.name, choice(self.__class__.settlements[self.tech])) \n elif self.genesis == 'maintain control': \n self.description = 'The penal mining colony for {0} {1} on {2}'.format(choice(self.__class__.badjectives), choice(self.__class__.casteSynonyms[self.casteOrder[5]]), self.name, choice(self.__class__.settlements[self.tech])) \n elif self.genesis == 'explore the unknown':\n self.description = 'The frontier garrison and {2} {0} recently conquered {1}'.format(choice(self.__class__.techPreps[self.tech]), self.name, choice(self.__class__.settlements[self.tech]))\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'The militarized {3} and bonsai garden for the mandatory contemplation of {0} {1} {2}'.format(choice(self.__class__.seedIdeals), choice(self.__class__.techPreps[self.tech]), self.name, choice(self.__class__.settlements[self.tech]))\n elif self.genesis == 'spread the gospel':\n self.description = 'An outpost for crusaders who took up arms to defend {2} {3} on {0} {1}'.format(choice(self.__class__.techAdjectives[self.tech]), self.name, choice(self.__class__.badjectives), choice(self.__class__.priestSynonyms)) \n elif self.casteOrder[0] == 'scientists':\n if self.genesis == 'escape persecution': \n self.description = 'A sanctuary for {0} who challenged the dogma of powerful {1} {2} {3}'.format(choice(self.__class__.scientistSynonyms), choice(self.__class__.priestSynonyms), choice(self.__class__.techPreps[self.tech]), self.name) \n elif self.genesis == 'maintain control': \n self.description = '{0}, home to a coalition of {1} eugenicists and their {2} servants'.format(self.name, choice(self.__class__.scientistAdjectives), choice(self.__class__.casteAdjectives[self.casteOrder[5]]))\n elif self.genesis == 'explore the unknown':\n self.description = 'An Extremely Large Hadron Collider {0} {1} {2}'.format(choice(self.__class__.techPreps[self.tech]), choice(self.__class__.techAdjectives[self.tech]), self.name)\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'The research institute and {2} for abiogenesis on {0} {1}'.format(choice(self.__class__.techAdjectives[self.tech]), self.name, choice(self.__class__.settlements[self.tech]))\n elif self.genesis == 'spread the gospel':\n self.description = 'The Galactic Academy of Sciences founded by {0} {1} {2} {3}'.format(choice(self.__class__.scientistAdjectives), choice(self.__class__.scientistSynonyms), choice(self.__class__.techPreps[self.tech]), self.name)\n elif self.casteOrder[0] == 'laborers':\n if self.genesis == 'escape persecution':\n self.description = 'The {0} for unionized {1} {2} {3}'.format(choice(self.__class__.settlements[self.tech]), choice(self.__class__.laborerSynonyms), choice(self.__class__.techPreps[self.tech]), self.name)\n elif self.genesis == 'maintain control':\n self.description = '{0} {1}: divisional headquarters for the communist party in this region of the galaxy (where evidence of {2} has been redacted)'.format(choice(self.__class__.techAdjectives[self.tech]).capitalize(), self.name, choice(self.__class__.controlIdeals))\n elif self.genesis == 'explore the unknown':\n self.description = 'A team of robot {0} sent to survey {1} {2}'.format(choice(self.__class__.laborerSynonyms), 
choice(self.__class__.techAdjectives[self.tech]), self.name)\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'Drones sent to terraform {0} {1} and build {2}'.format(choice(self.__class__.techAdjectives[self.tech]), self.name, choice(self.__class__.seedPlaces))\n elif self.genesis == 'spread the gospel':\n self.description = 'The {0} {1} {2} where a hard day\\'s work is highly valued'.format(choice(self.__class__.settlements[self.tech]), choice(self.__class__.techPreps[self.tech]), self.name)\n elif self.casteOrder[0] == 'merchants':\n if self.genesis == 'escape persecution':\n self.description = '{0} {1}: a refuge for {2} who fled a communist revolution on their home planet'.format(choice(self.__class__.techAdjectives[self.tech]).capitalize(), self.name, choice(self.__class__.merchantSynonyms))\n elif self.genesis == 'maintain control':\n self.description = 'The monopolistic conglomerate of {0} {1} {2} {3}'.format(choice(self.__class__.merchantAdjectives), choice(self.__class__.merchantSynonyms), choice(self.__class__.techPreps[self.tech]), self.name)\n elif self.genesis == 'explore the unknown':\n self.description = '{3} {0}, home to a consortium of {1} seeking to monetize {2}'.format(self.name, choice(self.__class__.merchantSynonyms), choice(self.__class__.genesisIdeals[choice(self.__class__.genesisReasons)]), choice(self.__class__.techAdjectives[self.tech]).capitalize())\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'An oligarchy of wealthy {0} who recently opened a for-profit {1} on {2}'.format(choice(self.__class__.merchantSynonyms), choice(self.__class__.settlements[self.tech]), self.name)\n elif self.genesis == 'spread the gospel':\n self.description = 'An orbital printing press that rains down copies of {0} onto {1} {2}'.format(choice(self.__class__.merchantBooks), choice(self.__class__.techAdjectives[self.tech]), self.name)\n elif self.casteOrder[0] == 'artists':\n if self.genesis == 'escape persecution':\n self.description = 'The {0}\\' commune for the free and naked expression of {1} {2} {3}'.format(choice(self.__class__.artistSynonyms), choice(self.__class__.escapeIdeals), choice(self.__class__.techPreps[self.tech]), self.name)\n elif self.genesis == 'maintain control':\n self.description = 'The {0} with mandatory art classes on {1} {2}'.format(choice(self.__class__.settlements[self.tech]), choice(self.__class__.techAdjectives[self.tech]), self.name)\n elif self.genesis == 'explore the unknown':\n self.description = '{0} {1}, an observation deck where {2} observe naked and confined {3} to better understand the mysteries of {4}'.format(choice(self.__class__.techPreps[self.tech]).capitalize(), self.name, choice(self.__class__.artistSynonyms), choice(self.__class__.casteSynonyms[self.casteOrder[5]]), choice(self.__class__.genesisIdeals[choice(self.__class__.genesisReasons)]))\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'The {0} for {1} {2} who choose to better understand their craft through lovemaking on {3}'.format(choice(self.__class__.settlements[self.tech]), choice(self.__class__.seedAdjectives), choice(self.__class__.artistSynonyms), self.name)\n elif self.genesis == 'spread the gospel':\n self.description = 'A {0} on {1} that hosts an annual conference for pop {2}'.format(choice(self.__class__.settlements[self.tech]), self.name, choice(self.__class__.artistSynonyms))\n elif self.casteOrder[0] == 'priests':\n if self.genesis == 'escape persecution':\n self.description = '{3} {0}, home to an order 
of heretical {1} who reject the {2} doctrine of their people'.format(self.name, choice(self.__class__.priestSynonyms), choice(self.__class__.gospelAdjectives), choice(self.__class__.techAdjectives[self.tech]).capitalize())\n elif self.genesis == 'maintain control':\n self.description = 'The orthodox {0} for {1} {2} {3} {4}'.format(choice(self.__class__.gospelPlaces), choice(self.__class__.gospelAdjectives), choice(self.__class__.priestSynonyms), choice(self.__class__.techPreps[self.tech]), self.name)\n elif self.genesis == 'explore the unknown':\n self.description = '{0} {1}, a seminary for the metaphysical contemplation of {2}'.format(choice(self.__class__.techPreps[self.tech]).capitalize(), self.name, choice(self.__class__.gospelIdeals))\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'The marriage counseling clinic and devotional {2} {0} {1}'.format(choice(self.__class__.techPreps[self.tech]), self.name, choice(self.__class__.settlements[self.tech])) \n elif self.genesis == 'spread the gospel':\n self.description = 'A {0} mission on {1} {2} for converting {3} natives'.format(choice(self.__class__.gospelAdjectives), choice(self.__class__.techAdjectives[self.tech]), self.name, choice(self.__class__.badjectives))\n \n elif 30 <= branch <= 44:\n if self.tech == 'pre-industrial technology':\n if self.genesis == 'explore the unknown':\n self.description = '{0}, home to a historical reenactment society of {2} {1}'.format(self.name, choice(self.__class__.casteSynonyms[self.casteOrder[0]]), choice(self.__class__.badjectives))\n elif self.genesis == 'escape persecution':\n self.description = '{1}: a {2} of {0} Luddites'.format(choice(self.__class__.casteAdjectives[self.casteOrder[0]]), self.name, choice(self.__class__.preindustrialSettlements))\n elif self.genesis == 'maintain control':\n self.description = 'The {0} fiefdoms of {1}, where {2} serve their {3} liege lord'.format(choice(self.__class__.preindustrialAdjectives), self.name, choice(self.__class__.casteSynonyms[self.casteOrder[5]]), choice(self.__class__.casteAdjectives[self.casteOrder[0]]))\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'A harem of {0} {1} on medieval {2}'.format(choice(self.__class__.seedAdjectives), choice(self.__class__.casteSynonyms[self.casteOrder[0]]), self.name)\n elif self.genesis == 'spread the gospel':\n self.description = 'The Puritan {0} for evangelical {1} on {2}'.format(choice(self.__class__.preindustrialSettlements), choice(self.__class__.casteSynonyms[self.casteOrder[0]]), self.name)\n elif self.tech == 'machine technology':\n if self.genesis == 'explore the unknown':\n self.description = 'An array of radio telescopes outside the {0} on {1} {2}'.format(choice(self.__class__.machineSettlements), choice(self.__class__.machineAdjectives), self.name)\n elif self.genesis == 'escape persecution':\n self.description = 'Public housing for {0} {1} gentrified {2}'.format(choice(self.__class__.casteSynonyms[self.casteOrder[0]]), choice(self.__class__.machinePreps), self.name) \n elif self.genesis == 'maintain control':\n self.description = 'The juvenile detention center for {0} children who obsess over {1} on {2}'.format(choice(self.__class__.badjectives), choice(self.__class__.genesisIdeals[choice(self.__class__.genesisReasons)]), self.name)\n elif self.genesis == 'seed the galaxy with life':\n self.description = 'Just a giant cruise ship full of {0} {1} on {2}'.format(choice(self.__class__.seedAdjectives), choice(self.__class__.casteSynonyms[self.casteOrder[0]]), 
self.name)\n elif self.genesis == 'spread the gospel':\n self.description = 'A megachurch run by {0} {1}, broadcasting live from {2}'.format(choice(self.__class__.priestAdjectives), choice(self.__class__.casteSynonyms[self.casteOrder[0]]), self.name)\n elif self.tech == 'ubiquitous technology':\n if self.genesis == 'explore the unknown':\n self.description = 'A quantum data center for simulating the lives of {0} {1} {2} {3}'.format(choice(self.__class__.casteAdjectives[self.casteOrder[0]]), choice(self.__class__.casteSynonyms[choice(self.__class__.castes)]), choice(self.__class__.techPreps[self.tech]), self.name)\n elif self.genesis == 'escape persecution':\n self.description = 'The cryogenics facility for the preservation of {0} {1} {2} {3}'.format(choice(self.__class__.badjectives), choice(self.__class__.casteSynonyms[self.casteOrder[0]]), choice(self.__class__.techPreps[self.tech]), self.name)\n elif self.genesis == 'maintain control':\n self.description = 'Orbiting {0}: a socially stratified {1} governed by {2}'.format(self.name, choice(self.__class__.ubiqitousSettlements), choice(self.__class__.casteSynonyms[self.casteOrder[0]]))\n elif self.genesis == 'seed the galaxy with life':\n self.description = '{0} {1}, home to an assembly plant for android {2}'.format(choice(self.__class__.ubiqitousAdjectives).capitalize(), self.name, choice(self.__class__.casteSynonyms[self.casteOrder[0]]))\n elif self.genesis == 'spread the gospel':\n self.description = '{0}, home to an anthropological society of {1} who covertly inject the theme of {2} into the folklore of other civilizations'.format(self.name, choice(self.__class__.casteSynonyms[self.casteOrder[0]]), choice(self.__class__.genesisIdeals[choice(self.__class__.genesisReasons)]))\n \n elif 45 <= branch <= 62:\n if self.casteOrder[0] == 'laborers':\n if self.tech == 'pre-industrial technology':\n self.description = 'A kibbutz for {0} {1} on {2}'.format(choice(self.__class__.genesisAdjectives[self.genesis]), choice(self.laborerSynonyms), self.name)\n elif self.tech == 'machine technology':\n self.description = 'The worker-owned and operated {1} of {2} {0}'.format(self.name, choice(self.__class__.genesisPlaces[self.genesis]), choice(self.__class__.machineAdjectives))\n elif self.tech == 'ubiquitous technology':\n self.description = '{0}, host to a psychic hivemind of {1} {2}'.format(self.name, choice(self.__class__.genesisAdjectives[self.genesis]), choice(self.__class__.laborerSynonyms))\n elif self.casteOrder[0] == 'artists':\n if self.tech == 'pre-industrial technology':\n self.description = 'The conservatory for neoclassical {0} who explore the theme of {1} on {2}'.format(choice(self.__class__.artistSynonyms), choice(self.__class__.genesisIdeals[self.genesis]), self.name)\n elif self.tech == 'machine technology':\n self.description = 'A series of art installations conceived by {1} at various {0} on {2}'.format(choice(self.__class__.genesisPlaces[self.genesis]), choice(self.__class__.artistSynonyms), self.name)\n elif self.tech == 'ubiquitous technology':\n self.description = '{0}: a collective of AI {1} who parody the human theme of {2}'.format(self.name, choice(self.__class__.artistSynonyms), choice(self.__class__.genesisIdeals[self.genesis]))\n elif self.casteOrder[0] == 'priests':\n if self.tech == 'pre-industrial technology':\n self.description = '{0} {1}, home to a scriptorium dedicated to the penning of meditations on {2}'.format(choice(self.__class__.preindustrialAdjectives).capitalize(), self.name, 
choice(self.__class__.genesisIdeals[self.genesis]))\n elif self.tech == 'machine technology':\n self.description = 'Polygamous cultists who live among the {0} of {1} {2}'.format(choice(self.__class__.genesisPlaces[self.genesis]), choice(self.__class__.machineAdjectives), self.name)\n elif self.tech == 'ubiquitous technology':\n self.description = 'A pyramidic burial chamber and monument to {0} {1} {2}'.format(choice(self.__class__.genesisIdeals[self.genesis]), choice(self.__class__.ubiquitousPreps), self.name)\n elif self.casteOrder[0] == 'scientists':\n if self.tech == 'pre-industrial technology':\n self.description = '{0}, home to a collective of {1} {2} who reproduce scientific experiments from pre-industrial Earth'.format(self.name, choice(self.genesisAdjectives[self.genesis]), choice(self.__class__.scientistSynonyms))\n elif self.tech == 'machine technology':\n self.description = 'A society of {0} who work simulated jobs at {1} on {3} to better understand {2}'.format(choice(self.__class__.scientistSynonyms), choice(self.__class__.genesisPlaces[self.genesis]), choice(self.__class__.genesisIdeals[self.genesis]), self.name)\n elif self.tech == 'ubiquitous technology':\n self.description = 'The postdoctoral program for {0} tenure-track {1} {2} {3}'.format(choice(self.__class__.genesisAdjectives[self.genesis]), choice(self.__class__.scientistSynonyms), choice(self.__class__.ubiquitousPreps), self.name)\n elif self.casteOrder[0] == 'soldiers':\n if self.tech == 'pre-industrial technology':\n self.description = '{0}, where {1} {2} study their martial arts in the quiet isolation of a {3}'.format(self.name, choice(self.__class__.genesisAdjectives[self.genesis]), choice(self.__class__.soldierSynonyms), choice(self.__class__.preindustrialSettlements))\n elif self.tech == 'machine technology':\n self.description = '{0}, where {1} {2} spar each other to overcome their frustrations surrounding {3}'.format(self.name, choice(self.__class__.badjectives), choice(self.__class__.soldierSynonyms), choice(self.__class__.genesisIdeals[self.genesis]))\n elif self.tech == 'ubiquitous technology':\n self.description = 'The highly militarized {0} {1} {2}'.format(choice(self.__class__.genesisPlaces[self.genesis]), choice(self.__class__.ubiquitousPreps), self.name)\n elif self.casteOrder[0] == 'merchants':\n if self.tech == 'pre-industrial technology':\n self.description = 'An antique fair on {0} where 1% of the proceeds are donated to the study of {1}'.format(self.name, choice(self.__class__.genesisIdeals[self.genesis]))\n elif self.tech == 'machine technology':\n self.description = 'A chain of retail {0} near the {1} on {2}'.format(choice(self.__class__.genesisPlaces[self.genesis]), choice(self.__class__.machineSettlements), self.name)\n elif self.tech == 'ubiquitous technology':\n self.description = 'A pay-by-the-hour computer simulation {0} {1} where patrons can experience {2}'.format(choice(self.__class__.ubiquitousPreps), self.name, choice(self.__class__.genesisIdeals[self.genesis]))", "def get_description():\n desc = dict()\n today = datetime.date.today()\n desc['data'] = True\n desc['description'] = \"\"\"This plot presents one of two metrics indicating\n daily humidity levels as reported by a surface weather station. The\n first being mixing ratio, which is a measure of the amount of water\n vapor in the air. 
The second being vapor pressure deficit, which reports\n the difference between vapor pressure and saturated vapor pressure.\n The vapor pressure calculation shown here makes no accounting for\n leaf tempeature. The saturation vapor pressure is computed by the\n Tetens formula (Buck, 1981).\n\n <br />On 22 June 2016, this plot was modified to display the range of\n daily averages and not the range of instantaneous observations.\n \"\"\"\n desc['arguments'] = [\n dict(type='zstation', name='zstation', default='AMW',\n label='Select Station:', network='IA_ASOS'),\n dict(type='year', name='year', default=today.year,\n label='Select Year to Plot'),\n dict(type='select', name='var', default='mixing_ratio',\n label='Which Humidity Variable to Compute?', options=PDICT),\n ]\n return desc", "def generate_basic(self, time):\n output_text = ''\n try:\n output_text = 'for x=1 to ' + str(int(time)//4) + ':\\n'\n except:\n return\n for i, layer in enumerate(self.c_layers):\n c = ['0'] * 8\n b = ['0'] * 8\n d = ['0'] * 8\n\n c[self.num_sides - i - 1] = '1'\n for point in layer:\n if not point.on:\n continue\n\n index = 7 - int(point.toplevel[1])\n if point.toplevel[0] == \"b\":\n b[index] = '1'\n else:\n d[index] = '1'\n\n output_text += f' portb = %{\"\".join(b)}\\n'\n output_text += f' portd = %{\"\".join(d)}\\n'\n output_text += f' portc = %{\"\".join(c)}\\n'\n output_text += ' pause 1\\n'\n output_text += '\\n'\n\n output_text += ' next x\\n'\n return output_text", "def summary(self):\n self.tiles.refreshnames()\n self.glues.refreshnames()\n # self.check_consistent()\n info = {\n \"ntiles\": len(self.tiles),\n \"nrt\": len([x for x in self.tiles if not x.is_fake]),\n \"nft\": len([x for x in self.tiles if x.is_fake]),\n \"nends\": len(self.glues),\n \"ntends\": len(self.tiles.glues_from_tiles()),\n \"tns\": \" \".join(x.name for x in self.tiles if x.name),\n \"ens\": \" \".join(x.name for x in self.glues if x.name)\n # if (\"info\" in self.keys() and \"name\" in self[\"info\"].keys())\n # else \"\",\n }\n tun = sum(1 for x in self.tiles if x.name is None)\n if tun > 0:\n info[\"tns\"] += \" ({} unnamed)\".format(tun)\n eun = sum(1 for x in self.glues if x.name is None)\n if eun > 0:\n info[\"ens\"] += \" ({} unnamed)\".format(eun)\n if info[\"nft\"] > 0:\n info[\"nft\"] = \" (+ {} fake)\".format(info[\"nft\"])\n else:\n info[\"nft\"] = \"\"\n return \"TileSet: {nrt} tiles{nft}, {nends} ends, {ntends} ends in tiles.\\nTiles: {tns}\\nEnds: {ens}\".format(\n **info\n )", "def get_description():\n desc = {\"description\": __doc__, \"data\": True, \"cache\": 600}\n today = datetime.date.today()\n desc[\"arguments\"] = [\n dict(\n type=\"csector\",\n name=\"csector\",\n default=\"IA\",\n label=\"Select state/sector to plot\",\n ),\n dict(\n type=\"date\",\n name=\"sdate\",\n default=f\"{today.year}/01/01\",\n label=\"Start Date:\",\n min=\"2000/01/04\",\n max=today.strftime(\"%Y/%m/%d\"),\n ),\n dict(\n type=\"date\",\n name=\"edate\",\n default=today.strftime(\"%Y/%m/%d\"),\n label=\"End Date:\",\n min=\"2000/01/04\",\n max=today.strftime(\"%Y/%m/%d\"),\n ),\n dict(\n type=\"select\",\n name=\"d\",\n default=\"0\",\n options=PDICT,\n label=\"Select Drought Classification (at and above counted):\",\n ),\n dict(\n type=\"select\",\n name=\"w\",\n default=\"percent\",\n options=PDICT2,\n label=\"How to express time for plot:\",\n ),\n dict(type=\"cmap\", name=\"cmap\", default=\"plasma\", label=\"Color Ramp:\"),\n ]\n return desc", "def description():", "def get_description():\n desc = {\"description\": 
__doc__}\n sts = utc() - timedelta(hours=26)\n ets = utc() - timedelta(hours=2)\n desc[\"arguments\"] = [\n {\n \"type\": \"datetime\",\n \"name\": \"sts\",\n \"default\": sts.strftime(\"%Y/%m/%d %H00\"),\n \"label\": \"Start Timestamp (UTC):\",\n \"min\": \"1986/01/01 0000\",\n },\n {\n \"type\": \"datetime\",\n \"name\": \"ets\",\n \"default\": ets.strftime(\"%Y/%m/%d %H00\"),\n \"label\": (\n \"End Timestamp [inclusive] (UTC), \"\n \"interval must be less than 4 days\"\n ),\n \"min\": \"1986/01/01 0000\",\n },\n {\n \"type\": \"select\",\n \"options\": PDICT,\n \"default\": \"min\",\n \"name\": \"w\",\n \"label\": \"Which statistic to compute\",\n },\n {\n \"type\": \"csector\",\n \"name\": \"csector\",\n \"default\": \"IA\",\n \"label\": \"Select state/sector\",\n },\n {\n \"type\": \"select\",\n \"options\": PDICT2,\n \"default\": \"user\",\n \"label\": \"Plotting mode (user defined color-ramp or freezing)\",\n \"name\": \"mode\",\n },\n {\n \"type\": \"cmap\",\n \"name\": \"cmap\",\n \"default\": \"gnuplot2\",\n \"label\": \"Color Ramp:\",\n },\n ]\n return desc", "def get_description():\n desc = dict()\n desc['data'] = True\n desc['description'] = \"\"\"This plot displays the maximum and minimum change\n in high temperature between a given day and a given number of days prior\n to that date. The red bars are the largest difference between a maximum\n high over a period of days and the given day. The blue bars are the\n opposite.\"\"\"\n desc['arguments'] = [\n dict(type='station', name='station', default='IA0200',\n label='Select Station:', network='IACLIMATE'),\n dict(type='int', name='days', default='4',\n label='Number of Trailing Days:'),\n ]\n return desc", "def details(self) -> str:\n return f\"- **language**: [{self.language}]\\n\" \\\n f\"- **opengame**: [{self.opengame}]\\n\" \\\n f\"- **system**: [{self.system}]\\n\" \\\n f\"- **mode**: [{self.mode}]\\n\" \\\n f\"- **attributes**: [{self.attributes}]\\n \" \\\n f\"- **score_threshold**: [{self.score_threshold}]\\n \" \\\n f\"- **monsters**: [{self.monsters}]\\n\"", "def description(self) -> str:\r\n description = \"The player must aim to put the most possible units \" + \\\r\n \"of \" + colour_name(self.colour) + \" on the outer\" +\\\r\n \" perimeter.\"\r\n return description", "def get_description():\n desc = dict()\n desc['cache'] = 86400\n desc['data'] = True\n desc['description'] = \"\"\"This chart presents a histogram of issuance times\n for a given watch, warning, or advisory type for a given office.\"\"\"\n desc['arguments'] = [\n dict(type='networkselect', name='station', network='WFO',\n default='DMX', label='Select WFO:'),\n dict(type='phenomena', name='phenomena',\n default='WC', label='Select Watch/Warning Phenomena Type:'),\n dict(type='significance', name='significance',\n default='W', label='Select Watch/Warning Significance Level:'),\n ]\n return desc", "def description(self) -> str:\r\n descrip = 'The player must aim to put the most possible units of a ' \\\r\n 'given colour c on the outer perimeter of ' \\\r\n 'the board. The ' \\\r\n 'player’s score is the total number of unit cells ' \\\r\n 'of colour ' \\\r\n 'c that are on the perimeter. There is a ' \\\r\n 'premium on corner ' \\\r\n 'cells: they count twice towards the score. '\r\n return descrip", "def describe():", "def get_description():\n desc = {}\n desc[\"data\"] = True\n desc[\n \"description\"\n ] = \"\"\"For each year, the average first and last date of\n a given temperature is computed. 
The values are then averaged and plotted\n to represent the period between these occurences and also the number of\n days represented by the period.\n \"\"\"\n desc[\"arguments\"] = [\n dict(\n type=\"station\",\n name=\"station\",\n default=\"IATDSM\",\n label=\"Select Station\",\n network=\"IACLIMATE\",\n )\n ]\n return desc", "def _generate_overlay_file_name(self, well, channel, desc):\n \n return \"c\" + channel + \"_w\" + well + \"_\" + desc + \".png\"", "def GetDescription(cls):\n return textwrap.dedent('''\n This trace step includes a diagram of the Ego long. acceleration in the report.\n ''').strip()", "def get_description():\r\n return{\"I'll never yield!\":\"Grants a shield.\",\r\n \"Stay still!\":\"Affected unit cannot act in their turn.\"\r\n }", "def get_description():\n desc = {\"description\": __doc__, \"data\": True}\n desc[\"arguments\"] = [\n dict(\n type=\"station\",\n name=\"station\",\n default=\"IATDSM\",\n label=\"Select Station:\",\n network=\"IACLIMATE\",\n ),\n dict(\n type=\"select\",\n name=\"var\",\n default=\"spi\",\n options=PDICT,\n label=\"Select which metric to plot:\",\n ),\n dict(\n type=\"select\",\n name=\"c\",\n default=\"ncei91\",\n options=PDICT2,\n label=\"Which climatology to use for averages:\",\n ),\n dict(\n type=\"int\",\n name=\"days\",\n default=90,\n label=\"Over how many trailing days to compute the metric?\",\n ),\n ]\n return desc", "def get_description():\n desc = dict()\n desc[\"cache\"] = 3600\n desc[\"data\"] = True\n desc[\n \"description\"\n ] = \"\"\"This plot is not meant for interactive use, but a backend for\n SPS plots.\n \"\"\"\n desc[\"arguments\"] = [\n dict(\n type=\"text\",\n name=\"pid\",\n default=\"202012300005-KDVN-WWUS83-SPSDVN\",\n label=\"IEM generated up to 35 char product identifier:\",\n ),\n dict(\n type=\"int\",\n default=0,\n name=\"segnum\",\n label=\"Product Segment Number (starts at 0):\",\n ),\n ]\n return desc", "def description(self) -> str:\r\n descrip = 'The player must aim for the largest “blob” of a given ' \\\r\n 'colour c. A blob is a group of connected blocks with the ' \\\r\n 'same colour. Two blocks are connected if their sides ' \\\r\n 'touch; touching corners doesn’t count. The player’s score ' \\\r\n 'is the number of unit cells in the largest blob of colour ' \\\r\n 'c. 
'\r\n return descrip", "def __str__(self):\n return '<TuebingenMEG: %i samples, %i timepoints, %i channels>' \\\n % (self.nsamples, self.ntimepoints, len(self.channelids))", "def desc(self):\n return LandCell.desc(self) + \"; plant=\" + str(self.plant)", "def description(self):\n\n\t\treturn \"%d %s %s\" % (self.vintage, self.winery.name, self.name)", "def get_description():\n desc = {\"description\": __doc__}\n desc[\"data\"] = True\n desc[\"arguments\"] = [\n dict(\n type=\"select\",\n options=PDICT3,\n default=\"temps\",\n name=\"v\",\n label=\"Which Variable(s) to Plot\",\n ),\n dict(\n type=\"station\",\n name=\"station1\",\n default=\"IATDSM\",\n label=\"Select First Station:\",\n network=\"IACLIMATE\",\n ),\n dict(\n type=\"select\",\n name=\"c1\",\n label=\"Climatology Source for First Station:\",\n default=\"1951\",\n options=PDICT,\n ),\n dict(\n type=\"station\",\n name=\"station2\",\n default=\"IATDSM\",\n optional=True,\n label=\"Select Second Station (Optional):\",\n network=\"IACLIMATE\",\n ),\n dict(\n type=\"select\",\n name=\"c2\",\n label=\"Climatology Source for Second Station:\",\n default=\"1951\",\n options=PDICT,\n ),\n dict(\n type=\"select\",\n name=\"s\",\n label=\"For difference plot, should smoother be applied:\",\n default=\"0\",\n options=PDICT2,\n ),\n dict(\n type=\"year\",\n min=1880,\n name=\"sy1\",\n default=1991,\n label=\"Inclusive Start Year for First Station Period of Years:\",\n ),\n dict(\n type=\"year\",\n min=1880,\n name=\"ey1\",\n default=2020,\n label=\"Inclusive End Year for First Station Period of Years:\",\n ),\n dict(\n type=\"year\",\n min=1880,\n name=\"sy2\",\n default=1981,\n label=\"Inclusive Start Year for Second Station Period of Years:\",\n ),\n dict(\n type=\"year\",\n min=1880,\n name=\"ey2\",\n default=2010,\n label=\"Inclusive End Year for Second Station Period of Years:\",\n ),\n ]\n return desc", "def _generate_raw_file_name(self, well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"", "def __init__(self, name=\"\", description=\"\", time_units=\"s\", len_units=\"m\"):\n\n # Set general info\n self._type = 2 # observation well id\n self.time_units = time_units\n self.len_units = len_units\n\n self.parameters = {'full': True, # is full penetrating?\n 'r': 1., # radius, distance until pumping well in length units\n 'd': 0., # depth of well screen (from top) in length units\n 'l': 1.} # depth of well bottom in length units\n\n # Create drawdown data\n self.drawdown = _Data(dtype=1, name=name, description=description)\n self.drawdown.set_units(self.time_units, self.len_units)\n\n # Set results from models\n self.data = []", "def get_description():\n desc = {\"description\": __doc__, \"data\": True}\n dt = datetime.date.today() - datetime.timedelta(days=1)\n desc[\"arguments\"] = [\n {\n \"type\": \"select\",\n \"name\": \"mode\",\n \"default\": \"normal\",\n \"label\": \"Application Mode:\",\n \"options\": PDICT,\n },\n dict(\n type=\"state\",\n name=\"state\",\n default=\"IA\",\n label=\"Select State:\",\n ),\n dict(\n type=\"date\",\n name=\"date\",\n default=dt.strftime(\"%Y/%m/%d\"),\n label=\"Retroactive Date of Plot\",\n ),\n dict(\n type=\"int\",\n name=\"d1\",\n default=30,\n label=\"Over how many trailing days to compute the metric?\",\n ),\n dict(\n type=\"int\",\n name=\"d2\",\n default=60,\n label=\"Over how many trailing days to compute the metric?\",\n ),\n dict(\n type=\"int\",\n name=\"d3\",\n default=90,\n label=\"Over how many trailing days to compute the metric?\",\n 
),\n ]\n return desc", "def get_description():\n d = dict()\n d['data'] = True\n d['report'] = True\n d['description'] = \"\"\" \"\"\"\n d['arguments'] = [\n dict(type='station', name='station', default='IA2203',\n label='Select Station'),\n ]\n return d", "def get_description():\n desc = {\"description\": __doc__, \"data\": True}\n today = datetime.date.today()\n thisyear = today.year\n desc[\"arguments\"] = [\n dict(\n type=\"station\",\n name=\"station\",\n default=\"IATDSM\",\n label=\"Select Station:\",\n network=\"IACLIMATE\",\n ),\n dict(\n type=\"select\",\n options=PDICT,\n name=\"var\",\n default=\"precip\",\n label=\"Accumulate Precipitation or Snow?\",\n ),\n dict(\n type=\"year\",\n name=\"year1\",\n default=thisyear,\n label=\"Additional Year to Plot:\",\n ),\n dict(\n type=\"year\",\n name=\"year2\",\n optional=True,\n default=(thisyear - 1),\n label=\"Additional Year to Plot: (optional)\",\n ),\n dict(\n type=\"year\",\n name=\"year3\",\n optional=True,\n default=(thisyear - 2),\n label=\"Additional Year to Plot: (optional)\",\n ),\n dict(\n type=\"sday\",\n name=\"sdate\",\n default=\"0101\",\n label=\"Start Day of Year for Plot:\",\n ),\n dict(\n optional=True,\n type=\"sday\",\n name=\"edate\",\n default=f\"{today:%m%d}\",\n label=\"End Day of Year for Plot:\",\n ),\n dict(\n type=\"int\",\n default=\"3\",\n label=\"Number of missing days to allow before excluding year\",\n name=\"m\",\n ),\n ]\n return desc", "def GenBrief(self):\n\n brief = \"[{0}] {1} \".format(self.TimeStamp.strftime( \"%H:%M\"), ActionType.actionNames[self.Type])\n\n if self.Type in {ActionType.Feed, ActionType.Notes, ActionType.WakeUp, ActionType.SleepTime,\\\n ActionType.ComFood, ActionType.ErrStatus, ActionType.Pills, ActionType.Snacks}:\n brief += self.Detail\n elif self.Type == ActionType.Poop:\n pass\n elif self.Type == ActionType.AD:\n pass\n elif self.Type == ActionType.UnKnown:\n brief += \"可以尝试 '总结' 或 '一周总结' 查看萝卜成长状态。\"\n elif self.Type == ActionType.FallSleep:\n tnow = cn_utility.GetNowForUTC8()\n delta_minutes = int((tnow - self.TimeStamp).total_seconds()/60)\n #brief += \"已经睡了{0}小时{1}分钟.\\n天气干,记得涂唇膏。\".format(int(delta_minutes/60), delta_minutes%60)\n brief += \"已经睡了{0}小时{1}分钟.\".format(int(delta_minutes/60), delta_minutes%60)\n else:\n pass\n\n if len(brief) > 300:\n brief = brief[0:300]\n\n return brief" ]
[ "0.6358604", "0.61127687", "0.60675085", "0.6044572", "0.59665823", "0.592374", "0.59180605", "0.59102285", "0.5901158", "0.5849729", "0.5839126", "0.5819639", "0.5782906", "0.5768085", "0.5766401", "0.57455444", "0.5734563", "0.5732122", "0.57276", "0.57193375", "0.5670834", "0.56679654", "0.56610614", "0.56345844", "0.56201404", "0.5610995", "0.55990946", "0.55932605", "0.5580501", "0.55771893" ]
0.7623472
0
Generate a name for a file using the description and channel
def generate_file_name(well, channel, desc): return "bPLATE_w" + well + "_" + desc + "_c" + channel + ".png"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_raw_file_name(self, well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"", "def file_name(id, title, kind=\"src\"):\n fn_template = conf.template_source_file_name\n if kind == \"tst\":\n fn_template = conf.template_test_file_name\n\n return fn_template.format(id=id, title=title.replace(\"-\", \"_\"))", "def _generate_overlay_file_name(self, well, channel, desc):\n \n return \"c\" + channel + \"_w\" + well + \"_\" + desc + \".png\"", "def generate_file_name(entry):\n return str_for_file(u'{name}, {year}, {title}'.format(\n year=entry['year'],\n name=get_last_name(entry['author'][0]),\n title=entry['title']\n ))", "def create_file_name(self):\n # create a unique id for the file name\n index = self.helpers.alpha_uuid()\n\n filename = self.form['FieldStorage'][self.image_cid].filename\n extension = guess_extension(guess_type(filename)[0])\n return ( # concatenates the following data\n self.articleData.get('directory') + # directory\n '/' + # slash\n self.articleData.get('article_name') + # the article name\n '-' + # hyphen character\n index + # the id of the image\n extension\n )", "def create_final_name(fname, date, fc_id, sample_name):\n \n # Split the file name according to CASAVA convention\n m = re.match(r'(\\S+?)_(?:[ACGTN\\-]+|NoIndex|Undetermined)_L0*(\\d+)_R(\\d)_\\d+\\.fastq(.*)', fname)\n if m is not None:\n lane = m.group(2)\n read = m.group(3)\n ext = m.group(4)\n else:\n # Split the file name according to bcbb convention\n m = re.match(r'(\\d+)_(\\d+)_([^_]+)_(\\d+)_(?:nophix_)?(\\d+)_fastq.txt(.*)', fname)\n if m is None:\n raise ValueError(\"Could not parse file name {:s} correctly!\".format(fname))\n lane = m.group(1)\n read = m.group(5)\n ext = m.group(6)\n \n dest_file_name = \"{:s}.fastq{:s}\".format(\"_\".join([lane,\n date,\n fc_id,\n sample_name,\n read]),\n ext.replace('..','.'))\n return dest_file_name", "def collected_filename(cfg, collect_dir, i=None):\n if i is not None:\n file = cfg[\"files\"][i]\n else:\n file = cfg[\"file\"]\n ext = path.splitext(file)[1]\n name = cfg[\"id\"]\n if i is not None:\n name += \"_\" + str(i)\n return path.join(collect_dir, name + ext)", "def _generate_filename(doc_type, login, *args):\n filename = []\n filename.append(doc_type)\n filename.append(login)\n for item in args:\n filename.append(item)\n filename.append(datetime.datetime.now().isoformat(timespec='microseconds'))\n filename = '_'.join(filename)\n return filename", "def content_file_name(instance, filename):\r\n return '/'.join([str(instance.app.publisher.id), str(instance.app.id), filename])", "def construct_name_file(size_sample, randomness, pos_equal_neg, kernel):\n if randomness:\n randomness = \"rand\"\n else:\n randomness = \"nrand\"\n\n if pos_equal_neg:\n pos_equal_neg = \"pos-neg-eq\"\n else:\n pos_equal_neg = \"pos-neg-neq\"\n\n return \"{}_{}_{}_{}.json\".format(size_sample, randomness, pos_equal_neg, kernel)", "def generate_filename(player_name):\n name = player_name.split()\n filename = '_'.join(name).lower()\n return filename", "def get_nomenclature_channel_fname(czi_fname, nomenclature_file, channel_name, ext='.inr.gz'):\n # - Read NOMENCLATURE file defining naming conventions:\n n_names = get_nomenclature_name(nomenclature_file)\n return n_names[czi_fname]+\"/\", n_names[czi_fname] + \"_\" + channel_name + ext", "def generate_glider_filename(description):\n filename = (\n \"{glider}-{year:d}-{day:03d}-{mission:d}-{segment}.{type}\".format(**description)\n )\n return os.path.join(description['path'], 
filename)", "def generate_namefile(pathfolder, methodvalues):\n datestr = datetime.datetime.now().date().strftime('%F')\n paramsstr = str(hash(str(methodvalues)))\n namefile = datestr + '-' + methodvalues['codename'] + '_' + paramsstr\n namefile = os.path.join(pathfolder, namefile)\n return namefile", "def generate_filename(filename: str) -> str:\n return f\"{str(uuid.uuid4())}.{get_extension(filename)}\"", "def channel_name(radio_id: int, channel_id: int) -> str:\n return f\"COMM{radio_id} Ch {channel_id}\"", "async def filename_generator(self):\n chars = list(string.ascii_letters+string.digits)\n name = ''\n for i in range(random.randint(9, 25)):\n name += random.choice(chars)\n\n if name not in self.player['audio_files']:\n return name\n\n return await self.filename_generator()", "def gen_file_name(filename, path=UPLOAD_FOLDER):\n\n i = 1\n while os.path.exists(os.path.join(path, filename)):\n name, extension = os.path.splitext(filename)\n filename = '%s_%s%s' % (name, str(i), extension)\n i += 1\n\n return filename", "def generate_filename(playlist_or_album_name, user_id_or_artist_id=None):\n filename = ''\n if user_id_or_artist_id:\n filename += user_id_or_artist_id + '_'\n filename += playlist_or_album_name + '_' + str(time_ns())\n return filename", "def _get_file_name(name: types.TSeedName) -> str:\n return f\"{name}.yml\"", "def get_file_name(self):\n\n return \"%s - %s\" % (self.get_tags()[\"artist\"], self.get_tags()[\"title\"])", "def generateFilename(self, name):\n return self.context.generateUniqueId(type_name='Module')", "def filename(i):\n rand_name = os.path.join(os.getcwd(), \"input-%d.txt\" % i)\n ref_name = os.path.join(os.getcwd(), \"input-%d.ref\" % i)\n return rand_name, ref_name", "def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))", "def filename(self):\n return '%s%s' % (self.identifier, self.extension)", "def generate_report_file_name(args: Dict[str, Any]) -> str:\n return (\n f\"{args.get('report_type', '').lower().replace(' ', '_')}_fireeye_\"\n f\"{datetime.now().strftime('%Y-%m-%d_%H:%M:%S')}.\"\n f\"{args.get('type', REPORT_TYPE_ALLOWED_FORMAT[args.get('report_type', '')][0])}\"\n )", "def generate_file_name(hour, minute):\n hour = str(hour)\n if len(hour) == 1:\n hour = \"0\" + hour\n minute = str(minute)\n if len(minute) == 1:\n minute = \"0\" + minute\n file_name = date + \"--\" + hour + special_char + minute + special_char + \"00,00.mvol\"\n\n # print \"filename: \",file_name\n return file_name", "def filename(self):\n translator = {ord(\" \"): \"_\", ord(\",\"): None}\n return f'{self._full_name.translate(translator)}.txt'", "def generate_file_filename(instance, filename):\n return _generate_filename(instance, filename, 'photos')", "def temp_name(self, filename):\n if self.params.get('nopart', False) or filename == '-' or \\\n (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):\n return filename\n return filename + '.part'" ]
[ "0.75195915", "0.7167804", "0.71012443", "0.70670754", "0.69943404", "0.67369324", "0.6670187", "0.66342044", "0.66167426", "0.6494215", "0.64797497", "0.6479064", "0.6476831", "0.64566636", "0.6450938", "0.64458376", "0.6431859", "0.6416797", "0.6390564", "0.63887256", "0.6379263", "0.63707864", "0.6364709", "0.6351363", "0.63499355", "0.6347175", "0.634275", "0.63143575", "0.63116884", "0.62697697" ]
0.80092597
0
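For illustration, a minimal, self-contained sketch of the filename convention captured by the row above (the query paired with the `generate_file_name` document). The example well, channel, and description values are assumptions chosen for the demo and are not part of the dataset.

```python
# Minimal sketch of the bPLATE naming convention from the row above.
# The example argument values ("B02", "1", "DAPI") are illustrative assumptions.
def generate_file_name(well, channel, desc):
    """Build a plate-image filename from the well, channel, and description."""
    return "bPLATE_w" + well + "_" + desc + "_c" + channel + ".png"

print(generate_file_name("B02", "1", "DAPI"))
# -> bPLATE_wB02_DAPI_c1.png
```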
Constructor that takes the config and the well we are generating images for.
def __init__(self, config, well, directory): self.config = config self.well = well self.directory = directory
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, config):\n logging.info(\"Creating footprint\")\n # self.infra = yaml.load(config)\n self.infra = config\n self.footprint_name = self.infra.get(\"footprint\", \"ehw\")\n self.images = self.infra.get(\"images\")\n self.old_images = self.infra.get(\"old_images\", [])\n self.container_name = \"%s-metadata\" % self.footprint_name\n \n self.admin_password = self.infra.get('admin-password')\n self.savefile = self.infra.get(\"footprint\", \"outfile\") + \"-save.yaml\"\n if os.path.exists(self.savefile):\n self.saved_images = yaml.load(open(self.savefile))\n self.footprint_status=self.infra.get(\"footprint_status\", None)\n logging.debug(\"Loaded saved images: %s\" % self.saved_images)\n # sys.exit(0) ", "def __init__(self, generate_image_pyramid: bool = True):\n if not os.path.exists(MANIFEST_OUTPUT_DIR):\n os.makedirs(MANIFEST_OUTPUT_DIR)\n self._manifest_factory = ManifestFactory()\n self._manifest_factory.set_base_prezi_uri(MANIFEST_BASE_URL)\n self._manifest_factory.set_base_prezi_dir(MANIFEST_OUTPUT_DIR)\n self._manifest_factory.set_base_image_uri(IMAGE_BASE_URL)\n self._manifest_factory.set_iiif_image_info(2.0, 1) # Version, ComplianceLevel\n\n self._image_reader = ImageReader(IMAGE_SOURCE_DIR)\n\n if generate_image_pyramid:\n self._tile_generator = IIIFStatic(dst=IMAGE_FILE_OUTPUT_DIR, prefix=IMAGE_BASE_URL)\n\n self._generate_images = generate_image_pyramid", "def __init__(self, train_path='train/image', label_path='train/label', merge_path='train/merge', aug_merge_path='train/aug_merge', aug_train_path='train/aug_train', aug_label_path='train/aug_label', img_type=\"nii\"):\n\n\t\tself.train_imgs = glob.glob(\"/*.\"+img_type)\n\t\tself.label_imgs = glob.glob(\"/*.\"+img_type)\n\t\tself.train_path = train_path\n\t\tself.label_path = label_path\n\t\tself.merge_path = merge_path\n\t\tself.img_type = img_type\n\t\tself.aug_merge_path = aug_merge_path\n\t\tself.aug_train_path = aug_train_path\n\t\tself.aug_label_path = aug_label_path\n\t\tself.slices = len(self.train_imgs)\n\t\tself.datagen = ImageDataGenerator(\n\t\t\t\t\t\t\t rotation_range=0.2,\n\t\t\t\t\t\t\t width_shift_range=0.05,\n\t\t\t\t\t\t\t height_shift_range=0.05,\n\t\t\t\t\t\t\t shear_range=0.05,\n\t\t\t\t\t\t\t zoom_range=0.05,\n\t\t\t\t\t\t\t horizontal_flip=True,\n\t\t\t\t\t\t\t fill_mode='nearest')", "def __init__(self, img):\n self.img = img", "def __init__(self, train_path=\"./data/train/image\", label_path=\"./data/train/label\",\n\t\t\t\t merge_path=\"./data/train/merge\", aug_merge_path=\"./data/train/aug_merge\", \n\t\t\t\t aug_train_path=\"./data/train/aug_images\", \n\t\t\t\t aug_label_path=\"./data/train/aug_masks\", img_type=\"tif\"):\n\n\t\tself.train_imgs = glob.glob(train_path+\"/*.\"+img_type)\n\t\tself.label_imgs = glob.glob(label_path+\"/*.\"+img_type)\n\t\tself.train_path = train_path\n\t\tself.label_path = label_path\n\t\tself.merge_path = merge_path\n\t\tself.img_type = img_type\n\t\tself.aug_merge_path = aug_merge_path\n\t\tself.aug_train_path = aug_train_path\n\t\tself.aug_label_path = aug_label_path\n\n\t\tif not os.path.exists(merge_path):\n\t\t\tos.mkdir(merge_path)\n\t\t\tos.mkdir(aug_merge_path)\n\t\t\tos.mkdir(aug_train_path)\n\t\t\tos.mkdir(aug_label_path)\n\n\t\tself.slices = len(self.train_imgs)\n\t\tself.datagen = 
ImageDataGenerator(\n\t\t\t\t\t\t\t\t\tpreprocessing_function=self.preprocess,\n\t\t\t\t\t\t\t\t\trotation_range=0.2,\n\t\t\t\t\t\t\t\t\twidth_shift_range=0.1,\n\t\t\t\t\t\t\t\t\theight_shift_range=0.1,\n\t\t\t\t\t\t\t\t\tshear_range=0.05,\n\t\t\t\t\t\t\t\t\tzoom_range=0.05,\n\t\t\t\t\t\t\t\t\thorizontal_flip=True,\n\t\t\t\t\t\t\t\t\tfill_mode='nearest')", "def build_filler_images(self):", "def __init__ (self):\r\n\r\n self.path = 'c:\\\\python22\\\\work\\\\'\r\n self.bfh_vals = (BM, 0, 0, 0, 0)\r\n self.bih_vals = (BIH_SIZE, 0, 0, 1, 32, 0, 0, 0, 0, 0, 0)\r\n self.the_file = None\r\n self.image = []\r\n self.colourmap = []", "def __init__(self, config, set_name, preprocess_image):\n\t\t\tself.data_dir = config['data_dir']\n\t\t\tself.set_name = set_name\n\t\t\tself.coco = COCO(os.path.join(self.data_dir, 'annotations', 'instances_' + set_name + '.json'))\n\t\t\tself.image_ids = self.coco.getImgIds()\n\t\t\tself.mask = config['mask']\n\n\t\t\tself.load_classes()\n\n\t\t\tsuper(CocoGenerator, self).__from_config__(config, preprocess_image=preprocess_image)", "def __init__(self, gdmix_config_obj, jar_path=\"\", namespace=\"\", secret_name=\"\", image=\"\", service_account=\"\", job_suffix=\"\", *, prev_model_name):\n super().__init__(gdmix_config_obj, jar_path, namespace, secret_name, image, service_account, job_suffix)\n self.prev_model_name = prev_model_name\n self.root_output_dir = gdmix_config_obj.output_dir", "def __init__(self, images, loader):\n super().__init__()\n self._images = images\n self._loader = loader", "def initImages(self):\n pass", "def initImages(self):\n pass", "def initImages(self):\n pass", "def __init__(self, cfg):\r\n\r\n\t\tself.image_size = cfg.MODEL.INPUT.IMAGE_SIZE\r\n\t\tanchor_config = cfg.MODEL.ANCHORS\r\n\t\tself.feature_maps = anchor_config.FEATURE_MAPS\r\n\t\tself.min_sizes = anchor_config.MIN_SIZES\r\n\t\tself.max_sizes = anchor_config.MAX_SIZES \r\n\t\tself.aspect_ratios = anchor_config.ASPECT_RATIOS\r\n\t\tself.clip = anchor_config.CLIP", "def generate_image(self):\n pass", "def __init__(self, image):\n self.image = image", "def create_base_image(self, builder, template, parameters):", "def __init__(self, path, margin=25, folder=\"output\"):\n self.image = Image.open(path)\n self.width, self.height = self.image.size\n self.path = path\n self.margin = margin\n self.output_path = os.path.join(os.path.dirname(self.path),\n folder,\n os.path.basename(self.path))", "def __build__(self,data_index=0):\n \n super(Image,self).__build__()\n # -- How to read the image\n self._build_properties = dict(\n data_index = data_index,\n header_exptime = \"EXPTIME\",\n dataslice0=\"undefined\",\n dataslice1=\"undefined\",\n bkgdbox={\"bh\":100,\"bw\":100,\"fh\":3,\"fw\":3},\n )", "def __init__(self, **kwargs):\n super(ImageExporter, self).__init__(**kwargs)", "def configure(self):\n self.data_batch_file = self.get_value_from_config('data_batch_file')\n self.batch_meta_file = self.get_value_from_config('batch_meta_file')\n self.has_background = self.get_value_from_config('has_background')\n self.num_classes = self.get_value_from_config('num_classes')\n self.converted_images_dir = self.get_value_from_config('converted_images_dir')\n if not self.converted_images_dir:\n self.converted_images_dir = self.data_batch_file.parent / 'converted_images'\n self.convert_images = self.get_value_from_config('convert_images')\n # create directory for storing images if it is necessary\n if self.convert_images and not self.converted_images_dir.exists():\n 
self.converted_images_dir.mkdir(parents=True)\n self.dataset_meta = self.get_value_from_config('dataset_meta_file')", "def __init__(self, boardSize, width = 750, height = 750, borderSize = 75, boardTexture = BOARD_TEXTURE, stoneBlackTexture = STONE_BLACK_TEXTURE, stoneWhiteTexture = STONE_WHITE_TEXTURE, markerBlackTexture = MARKER_BLACK_TEXTURE, markerWhiteTexture = MARKER_WHITE_TEXTURE):\n\n if (boardSize == 19): self.settings = BOARD_SETTINGS_19\n elif (boardSize == 13):\n self.settings = BOARD_SETTINGS_13\n borderSize += 10\n elif (boardSize == 9):\n self.settings = BOARD_SETTINGS_9\n borderSize += 25\n\n self.width = width\n self.height = height\n self.innerWidth = width - borderSize * 2\n self.innerHeight = height - borderSize * 2\n self.borderSize = borderSize\n self.baseImage = Image.new(\"RGBA\", (width, height), (255, 255, 255, 255))\n\n (boardSize, _, _) = self.settings\n stoneWidth = int(self.innerWidth / (boardSize - 1))\n stoneHeight = int(self.innerWidth / (boardSize - 1))\n markerWidth = int(stoneWidth / 2)\n markerHeight = int(stoneHeight / 2)\n\n self.stoneBlackTexture = stoneBlackTexture.resize((stoneWidth, stoneHeight), Image.BICUBIC)\n self.stoneWhiteTexture = stoneWhiteTexture.resize((stoneWidth, stoneHeight), Image.BICUBIC)\n self.markerBlackTexture = markerBlackTexture.resize((markerWidth, markerHeight), Image.BICUBIC)\n self.markerWhiteTexture = markerWhiteTexture.resize((markerWidth, markerHeight), Image.BICUBIC)\n\n self.__draw_board_texture(boardTexture)\n self.__draw_board()", "def __init__(self, img, settings):\r\n self.img_orig = img\r\n self.settings = settings", "def __init__(self):\n self.index = 'r11_07_06c'\n self.parameters = {'run_index': 'r11_07_06c',\n 'h_1': 0.25,\n 'rho_0': 1.150,\n 'rho_1': 1.100,\n 'rho_2': 1.000,\n 'alpha': 0.5,\n 'D': 0.4,\n 'H': 0.25,\n 'sample': 1.0,\n 'perspective': 'old'}\n self.run_data = {'run_index': 'r11_07_06c',\n 'l0x': 2796,\n 'l0y': 1151,\n 'lsx': 2793,\n 'lsy': 716,\n 'j10x': 210,\n 'j10y': 1165,\n 'j1sx': 208,\n 'j1sy': 727,\n 'leakage': -76,\n 'odd_1': 'n',\n 'j20x': 2728,\n 'j20y': 1086,\n 'j2sx': 2730,\n 'j2sy': 670,\n 'r0x': 1097,\n 'r0y': 1095,\n 'rsx': 1093,\n 'rsy': 683,\n 'odd_2': 'n'}\n self.raw_image = 'tests/data/synced/r11_07_06c/cam1/img_0001.jpg'\n self.bc_image = 'tests/data/bc/r11_07_06c/cam1/img_0001.jpg'\n self.processed_path = 'tests/data/processed_ref/r11_07_06c/cam1/img_0001.jpg'", "def __init__(self, image1, image2):\n self.image1 = image1\n self.image2 = image2", "def __init__(self, config_file = 'config.yaml'):\n\n self.name = ''\n self.img_dir = ''\n self.out_dir = ''\n self.cam_file = ''\n self.options_file = ''\n self.output_xml_file = ''\n\n # If there is an options file, it will overwrite the defaults \n if config_file is not None:\n self.load(config_file)", "def __init__(self, opt):\r\n super().__init__(opt)\r\n\r\n self.image_color = []\r\n for folder in self.annotations.keys():\r\n for image in self.annotations[folder].keys():\r\n img_path = os.path.join(self.root_dir, folder, image)\r\n camera, spec, n = image.split('_')\r\n if camera == 'BB':\r\n continue\r\n else:\r\n if spec == 'color':\r\n self.image_color.append(img_path)\r\n\r\n def sort_priority(x):\r\n *_, folder, name = x.split('/')\r\n folder_n = int(folder[1])\r\n folder_t = folder[2]\r\n name = int(name[0:-4].split('_')[-1])\r\n return folder_n, folder_t, name\r\n\r\n self.image_source, self.image_target = self._get_src_tgt(\r\n opt.augmentation_ratio, self.image_color, sort_priority)", "def __init__(self, metadata, 
environment, component, image, version, s3_bucket, exec_env):\n self.environment = environment\n self.component = component\n self.s3_bucket = s3_bucket\n self.exec_env = exec_env\n self.image = image\n self.version = version\n self.metadata = metadata\n\n # generate Terragrunt config as part of object initialisation\n self.config()", "def __init__(self, *args):\n super().__init__(*args)\n\n self.output_dir = os.path.join(self.config.results_dir, \"cowinner\")\n self.merged_dir = os.path.join(self.output_dir, \"merged\")", "def __init__(self, data_dir, pairs_filepath, img_ext):\n self.data_dir = data_dir\n self.pairs_filepath = pairs_filepath\n self.img_ext = img_ext" ]
[ "0.66241646", "0.6582035", "0.6469281", "0.6385457", "0.634032", "0.6314989", "0.62429136", "0.6220419", "0.6145161", "0.61328727", "0.61167324", "0.61167324", "0.61167324", "0.61101764", "0.6095435", "0.60907704", "0.60626227", "0.6061309", "0.60489887", "0.6046789", "0.60387987", "0.60380125", "0.6019408", "0.5997857", "0.5964925", "0.59556377", "0.59543175", "0.59528095", "0.59427613", "0.59380376" ]
0.7127226
0
Generate a name for a file using the description and channel
def _generate_raw_file_name(self, well, channel, desc): return "bPLATE_w" + well + "_" + desc + "_c" + channel + ".png"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_file_name(well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"", "def file_name(id, title, kind=\"src\"):\n fn_template = conf.template_source_file_name\n if kind == \"tst\":\n fn_template = conf.template_test_file_name\n\n return fn_template.format(id=id, title=title.replace(\"-\", \"_\"))", "def _generate_overlay_file_name(self, well, channel, desc):\n \n return \"c\" + channel + \"_w\" + well + \"_\" + desc + \".png\"", "def generate_file_name(entry):\n return str_for_file(u'{name}, {year}, {title}'.format(\n year=entry['year'],\n name=get_last_name(entry['author'][0]),\n title=entry['title']\n ))", "def create_file_name(self):\n # create a unique id for the file name\n index = self.helpers.alpha_uuid()\n\n filename = self.form['FieldStorage'][self.image_cid].filename\n extension = guess_extension(guess_type(filename)[0])\n return ( # concatenates the following data\n self.articleData.get('directory') + # directory\n '/' + # slash\n self.articleData.get('article_name') + # the article name\n '-' + # hyphen character\n index + # the id of the image\n extension\n )", "def create_final_name(fname, date, fc_id, sample_name):\n \n # Split the file name according to CASAVA convention\n m = re.match(r'(\\S+?)_(?:[ACGTN\\-]+|NoIndex|Undetermined)_L0*(\\d+)_R(\\d)_\\d+\\.fastq(.*)', fname)\n if m is not None:\n lane = m.group(2)\n read = m.group(3)\n ext = m.group(4)\n else:\n # Split the file name according to bcbb convention\n m = re.match(r'(\\d+)_(\\d+)_([^_]+)_(\\d+)_(?:nophix_)?(\\d+)_fastq.txt(.*)', fname)\n if m is None:\n raise ValueError(\"Could not parse file name {:s} correctly!\".format(fname))\n lane = m.group(1)\n read = m.group(5)\n ext = m.group(6)\n \n dest_file_name = \"{:s}.fastq{:s}\".format(\"_\".join([lane,\n date,\n fc_id,\n sample_name,\n read]),\n ext.replace('..','.'))\n return dest_file_name", "def collected_filename(cfg, collect_dir, i=None):\n if i is not None:\n file = cfg[\"files\"][i]\n else:\n file = cfg[\"file\"]\n ext = path.splitext(file)[1]\n name = cfg[\"id\"]\n if i is not None:\n name += \"_\" + str(i)\n return path.join(collect_dir, name + ext)", "def _generate_filename(doc_type, login, *args):\n filename = []\n filename.append(doc_type)\n filename.append(login)\n for item in args:\n filename.append(item)\n filename.append(datetime.datetime.now().isoformat(timespec='microseconds'))\n filename = '_'.join(filename)\n return filename", "def content_file_name(instance, filename):\r\n return '/'.join([str(instance.app.publisher.id), str(instance.app.id), filename])", "def construct_name_file(size_sample, randomness, pos_equal_neg, kernel):\n if randomness:\n randomness = \"rand\"\n else:\n randomness = \"nrand\"\n\n if pos_equal_neg:\n pos_equal_neg = \"pos-neg-eq\"\n else:\n pos_equal_neg = \"pos-neg-neq\"\n\n return \"{}_{}_{}_{}.json\".format(size_sample, randomness, pos_equal_neg, kernel)", "def get_nomenclature_channel_fname(czi_fname, nomenclature_file, channel_name, ext='.inr.gz'):\n # - Read NOMENCLATURE file defining naming conventions:\n n_names = get_nomenclature_name(nomenclature_file)\n return n_names[czi_fname]+\"/\", n_names[czi_fname] + \"_\" + channel_name + ext", "def generate_filename(player_name):\n name = player_name.split()\n filename = '_'.join(name).lower()\n return filename", "def generate_glider_filename(description):\n filename = (\n \"{glider}-{year:d}-{day:03d}-{mission:d}-{segment}.{type}\".format(**description)\n )\n return os.path.join(description['path'], 
filename)", "def generate_namefile(pathfolder, methodvalues):\n datestr = datetime.datetime.now().date().strftime('%F')\n paramsstr = str(hash(str(methodvalues)))\n namefile = datestr + '-' + methodvalues['codename'] + '_' + paramsstr\n namefile = os.path.join(pathfolder, namefile)\n return namefile", "def channel_name(radio_id: int, channel_id: int) -> str:\n return f\"COMM{radio_id} Ch {channel_id}\"", "def generate_filename(filename: str) -> str:\n return f\"{str(uuid.uuid4())}.{get_extension(filename)}\"", "async def filename_generator(self):\n chars = list(string.ascii_letters+string.digits)\n name = ''\n for i in range(random.randint(9, 25)):\n name += random.choice(chars)\n\n if name not in self.player['audio_files']:\n return name\n\n return await self.filename_generator()", "def gen_file_name(filename, path=UPLOAD_FOLDER):\n\n i = 1\n while os.path.exists(os.path.join(path, filename)):\n name, extension = os.path.splitext(filename)\n filename = '%s_%s%s' % (name, str(i), extension)\n i += 1\n\n return filename", "def generate_filename(playlist_or_album_name, user_id_or_artist_id=None):\n filename = ''\n if user_id_or_artist_id:\n filename += user_id_or_artist_id + '_'\n filename += playlist_or_album_name + '_' + str(time_ns())\n return filename", "def _get_file_name(name: types.TSeedName) -> str:\n return f\"{name}.yml\"", "def get_file_name(self):\n\n return \"%s - %s\" % (self.get_tags()[\"artist\"], self.get_tags()[\"title\"])", "def generateFilename(self, name):\n return self.context.generateUniqueId(type_name='Module')", "def filename(i):\n rand_name = os.path.join(os.getcwd(), \"input-%d.txt\" % i)\n ref_name = os.path.join(os.getcwd(), \"input-%d.ref\" % i)\n return rand_name, ref_name", "def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))", "def filename(self):\n return '%s%s' % (self.identifier, self.extension)", "def generate_report_file_name(args: Dict[str, Any]) -> str:\n return (\n f\"{args.get('report_type', '').lower().replace(' ', '_')}_fireeye_\"\n f\"{datetime.now().strftime('%Y-%m-%d_%H:%M:%S')}.\"\n f\"{args.get('type', REPORT_TYPE_ALLOWED_FORMAT[args.get('report_type', '')][0])}\"\n )", "def generate_file_name(hour, minute):\n hour = str(hour)\n if len(hour) == 1:\n hour = \"0\" + hour\n minute = str(minute)\n if len(minute) == 1:\n minute = \"0\" + minute\n file_name = date + \"--\" + hour + special_char + minute + special_char + \"00,00.mvol\"\n\n # print \"filename: \",file_name\n return file_name", "def filename(self):\n translator = {ord(\" \"): \"_\", ord(\",\"): None}\n return f'{self._full_name.translate(translator)}.txt'", "def generate_file_filename(instance, filename):\n return _generate_filename(instance, filename, 'photos')", "def temp_name(self, filename):\n if self.params.get('nopart', False) or filename == '-' or \\\n (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):\n return filename\n return filename + '.part'" ]
[ "0.80091065", "0.7165928", "0.7101471", "0.7064337", "0.69931364", "0.67345184", "0.6668517", "0.66324764", "0.66144115", "0.64927953", "0.6479", "0.6476943", "0.64755803", "0.6454013", "0.6448165", "0.6447837", "0.6430102", "0.6413977", "0.63882077", "0.63864017", "0.63777316", "0.636905", "0.6362883", "0.6348021", "0.6347843", "0.63450694", "0.63408315", "0.6311075", "0.63088846", "0.6267646" ]
0.7519707
1
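Taken together, the constructor row above (storing config, well, and directory) and this row's `_generate_raw_file_name` document suggest a small image-naming helper class. A hedged sketch follows; the class name `WellImageWriter` and the example argument values are assumptions made for illustration, not names from the dataset.

```python
# Hypothetical sketch combining the constructor and raw-file-name method from the rows above.
# The class name and example values are assumptions, not part of the dataset.
class WellImageWriter:
    def __init__(self, config, well, directory):
        # Store the configuration, the well being imaged, and the output directory.
        self.config = config
        self.well = well
        self.directory = directory

    def _generate_raw_file_name(self, well, channel, desc):
        # Same bPLATE_w<well>_<desc>_c<channel>.png convention as the module-level helper.
        return "bPLATE_w" + well + "_" + desc + "_c" + channel + ".png"

# Example usage (values are illustrative):
writer = WellImageWriter(config={}, well="B02", directory="/tmp/out")
print(writer._generate_raw_file_name(writer.well, "1", "DAPI"))
# -> bPLATE_wB02_DAPI_c1.png
```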
The function generates and returns a combined table with stock names as columns and dates as the index. Per-stock return tables are taken from the stock_returns function.
def Generating_stock_daily_return_table(): #Getting Names list Profitfile='pap//CombProfit.csv' path='D://Doktorat Marek//dane//' ProfitsFilePath=path+Profitfile quarterly_profit=pd.read_csv(ProfitsFilePath,index_col=0,header=0,parse_dates=True) Names_list=quarterly_profit.columns.tolist() Stock_returns=pd.DataFrame(index=pd.date_range('19980101','20180918',freq='D'),columns=Names_list) for name in Names_list: Stock_returns[name]=1+stock_returns(name)['Return']/100 Stock_returns[name].fillna(1,inplace=True) WIG=pd.read_excel('D://Doktorat Marek//dane//notowania//Infostrefa//PL9999999995.xls') WIG['Date']=pd.to_datetime(WIG['Data']) WIG.set_index('Date',inplace=True) Stock_returns['WIG'] = 1+WIG['Zmiana']/100 Stock_returns['WIG'].fillna(1,inplace=True) Stock_returns['Average']=Stock_returns.mean(1) FileReturns='D://Doktorat Marek//dane//Notowania//Stock_returns.csv' Stock_returns.to_csv(FileReturns,encoding='UTF-8') return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setupStockTable(self):\n # Get the date\n # NOTE: This is probably un\n date = datetime.date()\n dateStr = date.month() + \"/\" + date.day() + \"/\" + date.year()\n\n stocks = (\"INTC\", \"AAPL\", \"GOOG\", \"YHOO\", \"SYK\", \"VZ\")\n\n for stock in stocks:\n stockObj = self.securityFactory(stock)\n stockObj.queryAPI()\n\n self.stockDB.query(\"INSERT INTO basic_info (ticker, price, daily_change, company, year_high, year_low, \\\n daily_percent, date, streak) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\", (stockObj.target, stockObj.curr, \\\n stockObj.daily_change, stockObj.company,\\\n stockObj.year_high, stockObj.year_low,\\\n stockObj.daily_percent, dateStr, 0))", "def stocks():\n mappings = {key: key for key in Schema.stock_columns}\n return Schema(mappings)", "def portfolio_table(self):\n idx = set(name.split('-')[0].split('.')[0] for name, etf in self.etfs.items() if not etf.sold())\n table = pd.DataFrame({'Invested': 0, 'Shares':0, 'Share Price':0, 'Present Value':0, 'P/L':0, 'P/L%':0},index=idx)\n for name, etf in self.etfs.items():\n if not etf.sold():\n table.loc[name.split('-')[0].split('.')[0], 'Invested'] += etf.initial_investment()\n table.loc[name.split('-')[0].split('.')[0], 'Shares'] += etf.n_shares\n table.loc[name.split('-')[0].split('.')[0], 'Share Price'] = etf.stock_price()\n table.loc[name.split('-')[0].split('.')[0], 'Present Value'] += etf.present_value()\n table.loc[name.split('-')[0].split('.')[0], 'P/L'] += etf.profit_loss()\n table.insert(1, 'PMA', round(table['Invested'] / table['Shares'], 2))\n table.insert(3, 'Initial Weight', round(table['Invested'] / table['Invested'].sum() * 100, 2))\n table.insert(4, 'Present Weight', round(table['Present Value'] / table['Present Value'].sum() * 100, 2))\n table['P/L%'] = round(table['P/L'] / table['Invested'] * 100, 2)\n table['P/L'] = round(table['P/L'], 2)\n table['Present Value'] = round(table['Present Value'], 2)\n return table.sort_values('Invested', 0, ascending=False)", "def investments_table(self):\n table = pd.DataFrame(index=[etf.buy_date for etf in self.etfs.values()])\n table['Ticker'] = [name.split('-')[0].split('.')[0] for name in self.etfs.keys()]\n table['Buying Price (€)'] = [etf.buy_price for etf in self.etfs.values()]\n table['Number of Shares'] = [etf.n_shares for etf in self.etfs.values()]\n table['Commissions (€)'] = [etf.total_commissions() for etf in self.etfs.values()]\n table['Invested (€)'] = [etf.initial_investment() for etf in self.etfs.values()]\n table['Share Price (€)'] = [etf.stock_price() for etf in self.etfs.values()]\n table['Value (€)'] = [etf.present_value() for etf in self.etfs.values()]\n table['P/L (€)'] = [etf.profit_loss() for etf in self.etfs.values()]\n table['P/L (%)'] = [etf.profit_loss(pct=True) for etf in self.etfs.values()]\n return table", "def getHTMLTable(self):\n stocks = self.getTrackedStocks()\n\n html = \" \\\n <html> \\\n <head></head> \\\n <body> \\\n <table> \\\n <tr> \\\n <th>Ticker</th> \\\n <th>Company</th> \\\n <th>Price</th> \\\n <th>Daily Change</th> \\\n <th>Daily Percent Change</th> \\\n <th>Year High</th> \\\n <th>Year Low</th> \\\n </tr>\"\n\n for stock in stocks:\n html += \"<tr>\"\n\n html += \"<td>\"\n html += str(stock.target)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.company)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.curr)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.daily_change)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.daily_percent)\n html += \"</td>\"\n\n html += 
\"<td>\"\n html += str(stock.year_high)\n html += \"</td>\"\n\n html += \"<td>\"\n html += str(stock.year_low)\n html += \"</td>\"\n\n html += \"</tr>\"\n\n html += \"</table></body></html>\"\n\n return html", "def get_stock_price_df(info, symbols):\n\n df_l = []\n\n for num, i in enumerate(info):\n df = pd.DataFrame.from_dict(i, orient='index')\n df['Symbol'] = symbols[num]\n df_l.append(df)\n\n df_full = pd.concat(df_l)\n df_full = df_full.rename(columns={'1. open': 'Open',\n '2. high': 'High',\n '3. low': 'Low',\n '4. close': 'Close',\n '5. volume': 'Volume'})\n\n return df_full", "def get_quarterly_report_tables(self, symbol, date):\n raw_data = self.sec_file_ops.get_raw_quarterly_financial_statement(symbol, date)\n\n cleared_tables = {}\n for title in raw_data:\n cleared_tables[title] = self._remove_unnecessary_tags(raw_data[title])\n # print(cleared_tables)\n\n tables = {}\n for title in cleared_tables:\n table = Table()\n table.read_tablecontent(cleared_tables[title])\n tables[title] = table\n\n for title in tables:\n tables[title].setup_linked_rows()\n # tables[title].print(linked=True)\n\n return tables", "def create_query(window,con,input_table_name,output_table_name,input_columns, stat_columns):\n sql = \"CREATE TABLE {} AS \".format(output_table_name)\n sql = sql + \"SELECT\" \n for input_column in input_columns:\n sql = sql + \" {},\".format(input_column)\n for stat_column in stat_columns:\n sql = sql + \" {},\".format(stat_column)\n for stat_column in stat_columns:\n sql = sql + \" AVG({}) OVER(PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS ma{:02.0f}_{},\".format(stat_column,window-1,window,stat_column)\n sql = sql + \" MIN({}) OVER(PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS min{:02.0f}_{},\".format(stat_column,window-1,window,stat_column)\n sql = sql + \" MAX({}) OVER(PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS max{:02.0f}_{},\".format(stat_column,window-1,window,stat_column)\n sql = sql + \" regr_slope({},year) OVER (PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS slope{:02.0f}_{},\".format(stat_column,window-1,window,stat_column)\n sql = sql + \" regr_intercept({},year) OVER (PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS intercept{:02.0f}_{},\".format(stat_column,window-1,window,stat_column)\n sql = sql + (\" regr_slope({},year) OVER (PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) * year \"\n \"+ regr_intercept({},year) OVER (PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS ols{:02.0f}_{},\".format(stat_column,window-1,stat_column,window-1,window,stat_column))\n \n \n sql = sql[:-1]\n sql = sql + \" FROM {}\".format(input_table_name)\n return sql", "def get_back_dataframe(self, end_date=None, stocks=None):\n if end_date is None:\n end_date = self.dates[-1]\n\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n if stocks is None:\n stocks = self.stocks\n\n info = {}\n for stock in stocks:\n info[stock] = 
self.stock_data[stock].to_stock_dataframe_range(start_date=None, end_date=end_date)\n\n return info", "def sql_create_big_table():\n return \"\"\"\n SELECT\n m.tube_assembly_id as 'tube_assembly_id'\n , m.quantity_1 as 'quantity_component'\n , c.component_id \n , c.component_type_id \n , c.type as component_type \n , c.connection_type_id\n , c.outside_shape\n , c.base_type\n , c.height_over_tube\n , c.bolt_pattern_long\n , c.bolt_pattern_wide\n , c.groove\n , c.base_diameter\n , c.shoulder_diameter\n , c.unique_feature\n , c.orientation\n , c.weight\n , p.supplier\n , p.quote_date\n , p.annual_usage\n , p.min_order_quantity\n , p.bracket_pricing\n , p.quantity\n , p.cost\n FROM\n stg_bill_of_materials m INNER JOIN stg_comp_boss c\n ON m.component_id_1 = c.component_id\n \n INNER JOIN stg_price_quote p\n ON m.tube_assembly_id = p.tube_assembly_id\n \"\"\"", "def get_stock_data(company, start_date_inc, stop_date_inc):\n\n api_key = 'Bo9P_cJnmf5EsQPp1Bdp'\n desired_cols = 'date,close'\n\n# ticker = 'FB'\n# start_date_inc = '20170801'\n# end_date_inc = '20170831'\n\n # format and send the request\n payload = {\n 'date.gte': start_date_inc,\n 'date.lte': stop_date_inc,\n 'ticker': company,\n 'qopts.columns': desired_cols,\n 'api_key': api_key\n }\n meta_url = r'https://www.quandl.com/api/v3/datatables/WIKI/PRICES'\n r = requests.get(meta_url, params=payload)\n\n # convert to a pandas dataframe\n df = pd.DataFrame(r.json()['datatable']['data'])\n if not df.empty:\n df.columns = ['date', 'price']\n df['date'] = pd.to_datetime(df['date'])\n\n return df", "def create_order(df_stock, df_signal, moneyness=('OTM', 'ITM'),\n cycle=0, strike=0, expire=(False, True)):\n symbol = df_stock.ix[df_stock.index.values[0]]['symbol']\n\n tb_closes = {\n stock.date.strftime('%Y-%m-%d'): np.float(stock.close) for stock in\n Stock.objects.filter(Q(symbol=symbol) & Q(source='thinkback'))\n }\n\n holding = df_signal['holding'].apply(\n lambda x: int(x / np.timedelta64(1, 'D'))\n ).astype(np.int).min()\n\n data = list()\n dates0, options0 = get_options_by_cycle_strike(\n symbol=symbol,\n name='CALL',\n dates0=df_signal['date0'],\n dte=holding,\n moneyness=moneyness,\n cycle=cycle,\n strike=strike\n )\n\n for date0, (index, signal) in zip(dates0, df_signal.iterrows()):\n date1 = signal['date1']\n\n if date0:\n option0 = options0.get(date=date0)\n\n option1 = None\n if option0 and option0.bid > 0:\n date1, option1 = get_option_by_contract_date(option0.contract, date1)\n\n if option0 and option1:\n stock0 = tb_closes[option0.date.strftime('%Y-%m-%d')]\n close0 = stock0 - np.float(option0.bid)\n\n ask1 = 0\n if int(expire):\n ask1 = np.float(\n tb_closes[option1.date.strftime('%Y-%m-%d')]\n - np.float(option0.contract.strike)\n )\n ask1 = ask1 if ask1 > 0 else 0.0\n\n date1 = option1.date\n stock1 = tb_closes[option1.date.strftime('%Y-%m-%d')]\n close1 = stock1 - np.float(ask1)\n else:\n date1 = option1.date\n stock1 = tb_closes[option1.date.strftime('%Y-%m-%d')]\n close1 = stock1 - np.float(option1.ask)\n\n data.append({\n 'date0': option0.date,\n 'date1': date1,\n 'signal0': 'BUY',\n 'signal1': 'SELL',\n 'stock0': stock0,\n 'stock1': stock1,\n 'option0': option0.bid,\n 'option1': ask1 if expire else option1.ask,\n 'close0': np.round(close0, 2), # buy using ask\n 'close1': np.round(close1, 2), # sell using bid\n 'option_code': option0.contract.option_code,\n 'strike': np.float(option0.contract.strike),\n 'dte0': np.int(option0.dte),\n 'dte1': np.int(option1.dte),\n 'intrinsic0': np.float(option0.intrinsic),\n 
'intrinsic1': np.float(option1.intrinsic)\n })\n\n df = DataFrame()\n if len(data):\n df = DataFrame(data, columns=[\n 'date0', 'date1', 'signal0', 'signal1',\n 'stock0', 'stock1', 'option0', 'option1', 'close0', 'close1',\n 'option_code', 'strike', 'dte0', 'dte1',\n 'intrinsic0', 'intrinsic1'\n ])\n\n df['holding'] = df['date1'] - df['date0']\n df['pct_chg'] = np.round((df['close1'] - df['close0']) / df['close0'], 2)\n\n f = lambda x: np.round(x['pct_chg'] * -1 if x['signal0'] == 'SELL' else x['pct_chg'], 2)\n df['pct_chg'] = df.apply(f, axis=1)\n\n df['sqm0'] = 100\n df['sqm1'] = -100\n df['oqm0'] = -1\n df['oqm1'] = 1\n\n return df", "def get_dataframes(symbols=(\"sne\", \"goog\", \"tsla\"), source='yahoo', refresh=False):\n symbols = util.make_symbols(list(symbols))\n if refresh:\n symbols_to_refresh = symbols\n else:\n symbols_to_refresh = [sym for sym in symbols if not Equity.objects.filter(symbol=sym).exists()]\n source = source.lower().strip()\n if source in ('yahoo', 'google'):\n source += '_finance'\n if source[:3] == 'fed':\n source = 'federal_reserve_economic_data'\n ccpanda = ccp.ConcurrentPandas()\n # set the data source\n getattr(ccpanda, \"set_source_\" + source)()\n if symbols_to_refresh:\n # tell concurrent pandas which keys/symbols to retrieve\n ccpanda.insert_keys(symbols_to_refresh)\n # start concurrentpandas threads\n ccpanda.consume_keys_asynchronous_threads()\n # FIXME: is there a better/faster iterator to use like `ccpanda.output_map` attribute?\n pseudodict = ccpanda.return_map()\n else:\n pseudodict = {}\n table = {}\n for sym in symbols:\n e, created = None, False\n if not sym in symbols_to_refresh:\n e, created = Equity.objects.get_or_create(symbol=sym)\n if created or not e or not e.time_series or sym in symbols_to_refresh:\n e, created = Equity.objects.get_or_create(\n symbol=sym,\n name=sym, # FIXME: use data source to find equity name!\n time_series=pseudodict[sym].to_json(),\n )\n table[sym] = pd.io.json.read_json(path_or_buf=e.time_series, orient='columns', typ='frame', convert_dates=True)\n return table", "def pack_for_create_table(tables: tuple):\n\n all_tables = {'product': \"\"\"CREATE TABLE IF NOT EXISTS product (\n ITEM_ID CHAR(50) NOT NULL UNIQUE,\n PRODUCT_ID CHAR(50),\n GLOBAL_ID CHAR(50),\n TITLE TEXT,\n COUNTRY CHAR(50),\n RETURNS_ACCEPTED CHAR(50),\n IS_MULTI_VARIATION_LISTING CHAR(50),\n CONDITION_ID CHAR(50),\n CONDITION_DISPLAY_NAME CHAR(50),\n CATEGORY_ID CHAR(50),\n CATEGORY_NAME CHAR(100),\n AUTO_PAY CHAR(50),\n LOCATION CHAR(50),\n PAYMENT_METHOD CHAR(50),\n TOP_RATED_LISTING CHAR(50),\n VIEW_ITEM_URL TEXT\n )\"\"\",\n 'listing_info': \"\"\"CREATE TABLE IF NOT EXISTS listing_info (\n ITEM_ID CHAR(50) NOT NULL UNIQUE,\n BEST_OFFER_ENABLED CHAR(20),\n END_TIME CHAR(50),\n GIFT CHAR(20),\n BUY_IT_NOW_AVAILABLE CHAR(20),\n START_TIME CHAR(50),\n FIXED_PRICE CHAR(20)\n )\"\"\",\n 'selling_status': \"\"\"CREATE TABLE IF NOT EXISTS selling_status (\n ITEM_ID CHAR(50) NOT NULL UNIQUE,\n CONVERTED_CURRENT_PRICE_CURRENCY_ID CHAR(20),\n CONVERTED_CURRENT_PRICE_VALUE CHAR(50),\n CURRENT_PRICE_CURRENCY_ID CHAR(20),\n CURRENT_PRICE_VALUE CHAR(50),\n TIME_LEFT CHAR(50),\n SELLING_STATE CHAR(20)\n )\"\"\",\n 'shipping_info': \"\"\"CREATE TABLE IF NOT EXISTS shipping_info (\n ITEM_ID CHAR(50) NOT NULL UNIQUE,\n SHIP_TO_LOCATIONS CHAR(20),\n SHIPPING_TYPE CHAR(100),\n ONE_DAY_SHIPPING_AVAILABLE CHAR(20),\n EXPEDITED_SHIPPING CHAR(20),\n SHIPPING_SERVICE_COST_CURRENCY_ID CHAR(50),\n SHIPPING_SERVICE_COST_VALUE CHAR(50),\n HANDLING_TIME CHAR(50)\n 
)\"\"\"\n }\n\n # If table was not specified, put all tables in line\n tables = all_tables.keys() if tables is None else tables\n\n return ';'.join([\"{}\".format(all_tables[i]) for i in tables])", "def make_historical_balances_and_prices_table(game_id: int, user_id: int, start_time: float = None,\n end_time: float = None) -> pd.DataFrame:\n start_time, end_time = get_time_defaults(game_id, start_time, end_time)\n balances_df = get_user_balance_history(game_id, user_id, start_time, end_time)\n df = add_bookends(balances_df, end_time=end_time)\n df = df.groupby(\"symbol\").apply(resample_values)\n df = df.reset_index().rename(columns={\"level_1\": \"timestamp\"})\n df = append_price_data_to_balance_histories(df) # price appends + resampling happen here\n df.sort_values([\"symbol\", \"timestamp\"])\n df[\"value\"] = df[\"balance\"] * df[\"price\"]\n df = filter_for_trade_time(df)\n df[[\"balance\", \"price\", \"value\"]] = df[[\"balance\", \"price\", \"value\"]].astype(float)\n apply_validation(df, balances_and_prices_table_schema, strict=True)\n return df.reset_index(drop=True).sort_values([\"timestamp\", \"symbol\"])", "def make_table(data, portfolio):\n table_data = [\n [\n # Header\n click.style('Rank', fg='white'),\n click.style('Coin', fg='white'),\n click.style('Price', fg='white'),\n click.style('Coins Owned', fg='white'),\n click.style('Net Worth CAD', fg='white'), \n click.style('Net Worth USD', fg='white'),\n click.style('24 Hour Volume', fg='white'),\n click.style('Market Cap', fg='white'),\n click.style('1 Hour', fg='white'),\n click.style('24 Hours', fg='white'),\n click.style('7 Days', fg='white'),\n click.style('Last Updated', fg='white')\n ]\n ]\n\n for row in data:\n id = row['id']\n table_row = [\n click.style(row['rank'], fg='white'),\n click.style(id, fg='cyan'),\n click.style('$' + add_commas(row['price_usd']), fg='green'),\n click.style(add_commas(portfolio[id]), fg='green'),\n click.style('$' + add_commas(str(int(round(float(portfolio[id]) * float(row['price_cad']),)))), fg='green'),\n click.style('$' + add_commas(str(int(round(float(portfolio[id]) * float(row['price_usd']),)))), fg='green'),\n click.style('$' + add_commas(format(float(row['24h_volume_usd']), '.2f')), fg='green'),\n click.style('$' + add_commas(format(float(row['market_cap_usd']), '.2f')), fg='green'),\n style_percent(row['percent_change_1h']),\n style_percent(row['percent_change_24h']),\n style_percent(row['percent_change_7d']),\n click.style(time_since(row['last_updated']), fg='white')\n ]\n table_data.append(table_row)\n \n table_instance = AsciiTable(table_data, 'Crypto Stats')\n cols = len(table_instance.table.split('\\n')[0])\n resize_terminal(total_rows(data), cols)\n\n return table_instance.table", "def create_stock_view_list_table(self):\n create_stock_view_list_sql = \"\"\"\n CREATE TABLE IF NOT EXISTS \"{0}\".\"{1}\" (\n stock_view_name text NOT NULL PRIMARY KEY,\n stock_view_relation text NOT NULL\n );\n \"\"\".format(Schemas.SCHEMA_META, Tables.TABLE_TICK_STOCK_VIEW_LIST)\n\n conn = self.db_engine.connect()\n try:\n conn.execute(create_stock_view_list_sql)\n except:\n self._logger.log_error(traceback.format_exc())\n return Error.ERROR_DB_EXECUTION_FAILED\n finally:\n conn.close()", "def join_data(df_order, df_stock, df_all):\n df_stock0 = df_stock.set_index('date')\n df_list = []\n for index, data in df_order.iterrows():\n df_both = df_all.query(\n '(option_code == %r | option_code == %r) & date >= %r & date <= %r' % (\n data['code0'], data['code1'], data['date0'], data['date1']\n )\n 
)[['date', 'option_code', 'dte', 'sell', 'buy', 'strike']]\n\n df0 = df_both.query('option_code == %r' % data['code0'])\n df1 = df_both.query('option_code == %r' % data['code1'])\n df_join = pd.merge(df0, df1, on='date', suffixes=(0, 1))\n\n if data['signal0'] == 'BUY':\n first = df_join['buy0'] - df_join['sell1']\n remain = df_join['sell0'] - df_join['buy1']\n df_join['signal'] = ['BUY'] + ['SELL'] * len(remain[1:])\n df_join['option'] = [first.iloc[0]] + list(remain[1:])\n else:\n first = -df_join['sell0'] + df_join['buy1']\n remain = -df_join['buy0'] + df_join['sell1']\n df_join['signal'] = ['SELL'] + ['BUY'] * len(remain[1:])\n df_join['option'] = [first.iloc[0]] + list(remain[1:])\n\n df_join['pos_net'] = df_join['option'] - df_join['option'].iloc[0]\n df_join['pct_chg'] = df_join['pos_net'] / np.abs(df_join['option'].iloc[0]) + 0\n\n df_join = df_join.drop([\n 'sell0', 'buy0', 'sell1', 'buy1', 'strike0', 'strike1', 'dte1'\n ], axis=1)\n df_close = df_stock0[data['date0']:data['date1']]\n df_close = df_close[['close']].reset_index()\n df_both = pd.merge(df_join, df_close, on='date')\n df_both.rename(index=str, columns={'close': 'stock', 'dte0': 'dte'}, inplace=True)\n\n # print df.to_string(line_width=1000)\n df_both = df_both.replace([np.inf, -np.inf], np.nan)\n df_both = df_both.fillna(0)\n\n df_both = df_both.round({\n 'pos_net': 2,\n 'pct_chg': 2,\n })\n\n df_list.append(df_both)\n\n return df_list", "def getStock(symbol, start, end):\n df = data.get_data_yahoo(symbol, start, end)\n\n df.columns.values[-1] = 'AdjClose'\n df.columns = df.columns + '_' + symbol\n df['Return_%s' % symbol] = df['AdjClose_%s' % symbol].pct_change()\n\n return df", "def getHistoricalData(stockName, startDate):\n conn = r.connect(db = db.DB)\n stockName = stockName.upper()\n startDate = dateToString(startDate)\n endDate = dateToString(datetime.datetime.now())\n\n if not stockName in db.STOCK_MAP.keys():\n return dict(\n error = 1,\n message = \"The info you want is not what I can give\"\n )\n\n stock = yf.StockInfo(stockName + db.IN_LONDON)\n cachedData = r.table(db.HISTORICAL_TABLE).get(stockName).run(conn)\n infoDict = dict()\n\n if cachedData == None:\n print \"\\n-- DB -- \" + stockName + \" == Inserting New Information ==\\n\"\n histList = stock.historical_prices(startDate, endDate)\n infoDict[\"history_list\"] = createHistoryDictList(histList)\n infoDict[\"index\"] = stockName\n infoDict[\"name\"] = db.STOCK_MAP[stockName]\n infoDict[\"timestamp\"] = getTime()\n r.table(db.HISTORICAL_TABLE).insert(infoDict).run(conn)\n else:\n elapsedTime = (\n getTime() -\n cachedData[\"timestamp\"]\n )\n if elapsedTime > db.HISTORICAL_INTERVAL:\n print \"\\n-- DB -- \" + stockName + \" == Updating Database ==\\n\"\n histList = stock.historical_prices(startDate, endDate)\n infoDict[\"history_list\"] = createHistoryDictList(histList)\n infoDict[\"index\"] = stockName\n infoDict[\"timestamp\"] = getTime()\n r.table(db.HISTORICAL_TABLE).get(stockName).update(\n infoDict\n ).run(conn)\n else:\n print \"\\n-- DB -- \" + stockName + \" == Using Cached Data ==\\n\"\n infoDict = cachedData\n\n infoDict[\"name\"] = db.STOCK_MAP[stockName]\n return infoDict", "def download_stock_price_hist(\n\ttickers = [ 'AAPL' ],\n\tprice_column = 'Adj Close',\t\t\t\t\t\t\t\t# assume it's the Adjusted Close price that are interested\n\tstart = datetime.date( 2009, 12, 31 ),\t\t\t\t# assume start is guaranteed to be a weekday\n\tend = datetime.date( 2015, 12, 31 ),\n\tcsv_file = \"stock_price_test.csv\",\n):\n\t# Check validity of 
inputs\n\tif len( tickers ) <= 0:\n\t\tprint \"Tickers must not be empty\";\n\t\treturn False;\n\tif start > end:\n\t\tprint \"Start date \" + start.isoformat() + \" can't be later than End date \" + end.isoformat();\n\n\tdf = pd.DataFrame();\t\t\t# data frame to return\n\tfor _i in range( len(tickers) ):\n\t\tticker = tickers[_i];\n\t\tprint \"Index\" + str(_i) + \"\\t\" + \"Ticker: \" + ticker;\n\n\t\tstart_str = start.isoformat();\n\t\tend_str = end.isoformat();\n\t\thist = ystockquote.get_historical_prices( ticker, start_str, end_str );\t# dictionary with date string as the key\n\n\t\t# Get time series of stock prices (Don't sort before forming the Series!!!)\n\t\tdate_index = [];\n\t\tprice_data = [];\n\t\tfor key, val in hist.iteritems():\n\t\t\tdate_index.append( datetime.datetime.strptime( key, \"%Y-%m-%d\" ).date() );\n\t\t\tprice_data.append( float( val[ price_column ] ) )\n\n\t\tif min( date_index ) > start:\t\t\t\t\t\t\t\t# Pass if the no stock price is available on Start\n\t\t\tcontinue;\n\t\tstock_ts = pd.Series( price_data, date_index );\n\t\tstock_ts = stock_ts.sort_index();\n\n\t\t# Add current stock TS to the DataFrame\n\t\tdf[ticker] = stock_ts;\n\t\n\tdf.to_csv( csv_file, index_label='Date' );\n\treturn True;", "def buildExposureTable(exposures, fields, instruments):\n name = []\n ra = []\n dec= []\n field= []\n inst = []\n airmass = []\n mjd = []\n exptime = []\n epoch = []\n apcorr = []\n index = 0\n for k,e in exposures.items():\n name.append(e.name)\n ra.append(getDegree(e.coords.ra))\n dec.append(getDegree(e.coords.dec))\n field.append(fields[e.field].index)\n if e.instrument in specialInstruments:\n inst.append(specialInstruments[e.instrument])\n else:\n inst.append(instruments[e.instrument].index)\n e.index = index\n index += 1\n\n airmass.append(e.airmass)\n mjd.append(e.mjd)\n exptime.append(e.exptime)\n epoch.append(e.epoch)\n apcorr.append(e.apcorr)\n hdu = pf.BinTableHDU.from_columns(\\\n pf.ColDefs( [pf.Column(name='NAME',format=py_to_fits(name),array=name),\n pf.Column(name='RA',format=py_to_fits(ra),array=ra),\n pf.Column(name='DEC',format=py_to_fits(dec),array=dec),\n pf.Column(name='FIELDNUMBER',format=py_to_fits(field),array=field),\n pf.Column(name='INSTRUMENTNUMBER',format=py_to_fits(inst),\\\n array=inst),\n pf.Column(name=\"MJD\",format=py_to_fits(mjd),array=mjd),\n pf.Column(name=\"AIRMASS\",format=py_to_fits(airmass),array=airmass),\n pf.Column(name=\"EXPTIME\",format=py_to_fits(exptime),array=exptime),\n pf.Column(name=\"EPOCH\",format=py_to_fits(epoch),array=epoch),\n pf.Column(name=\"APCORR\",format=py_to_fits(apcorr),array=apcorr)] ),\n name = 'Exposures')\n # hdu.header['EXTNAME'] = 'Exposures'\n return hdu", "def construct_table(self):\n table_str = self.header_row\n row_lbls, col_lbls = self.get_idxvals()\n for r,rlbl in enumerate(row_lbls):\n row_data = [self.data[rlbl,clbl] for clbl in col_lbls]\n table_str += self.construct_row(r, row_data)\n \n return table_str", "def getStock(symbol, start, end):\n df = pd.io.data.get_data_yahoo(symbol, start, end)\n\n df.columns.values[-1] = 'AdjClose'\n df.columns = df.columns + '_' + symbol\n df['Return_%s' % symbol] = df['AdjClose_%s' % symbol].pct_change()\n\n return df", "def create_tables (cls, env=os.environ):\n\n cur = cls.pri_table_read_cursor (env=env)\n cur.execute ('SPECIALCASE gettablelist')\n ret = cur.fetchall ()\n \n existingtables = set ([x[0].lower() for x in ret])\n\n for tabname in (set (cls.table_desc.keys ()) - existingtables):\n sql, lsd = cls.table_desc[tabname]\n epls, desls, 
sqlprefix = lsd.get_create_labeling (savels=True)\n\n conn = get_labeled_conn (epls, desls)\n cur = conn.cursor ()\n cur.execute (sql)\n conn.close ()\n lsd.pop_labelset ()\n\n \n import psycopg2\n for sql in cls.sql_createindex:\n conn = get_labeled_conn ()\n cur = conn.cursor ()\n # XXX It would be better to check which indices exist as we do for tables.\n try:\n cur.execute (sql)\n except psycopg2.ProgrammingError, e: \n pass\n conn.close ()", "def create_stock_view_table(self, stock_view_name, stock_relation):\n from Core.DAO.ComplicatedTables.StockViewTable import StockViewTable\n create_tick_sql = \"\"\"\n {4};\n CREATE INDEX IF NOT EXISTS stock_view_{3}_date ON \"{0}\".\"{1}{2}\"(\"date\");\n CREATE INDEX IF NOT EXISTS stock_view_{3}_datetime ON \"{0}\".\"{1}{2}\"(\"datetime\");\n \"\"\".format(Schemas.SCHEMA_STOCK_VIEW_DATA, Tables.TABLE_TICK_STOCK_VIEW_PREFIX,\n stock_view_name, stock_view_name.split(\".\")[0],\n StockViewTable(stock_relation).get_table_define_sql(Schemas.SCHEMA_STOCK_VIEW_DATA,\n Tables.TABLE_TICK_STOCK_VIEW_PREFIX +\n stock_view_name))\n\n conn = self.db_engine.connect()\n try:\n conn.execute(create_tick_sql)\n except:\n self._logger.log_error(traceback.format_exc())\n return Error.ERROR_DB_EXECUTION_FAILED\n finally:\n conn.close()\n\n return Error.SUCCESS", "def index():\n\n rows = db.execute(\n 'SELECT symbol, SUM(CASE WHEN operation = \"SELL\" THEN -shares ELSE shares END) shares FROM transactions WHERE id = :id GROUP BY symbol;', id=session['user_id'])\n\n cash = db.execute('SELECT cash FROM users WHERE id = :id', id=session['user_id'])[0]['cash']\n\n grand_total = cash\n\n for row in rows:\n stock = lookup(row['symbol'])\n\n row['name'] = stock['name']\n row['price'] = stock['price']\n row['total'] = row['shares'] * stock['price']\n\n grand_total += row['shares'] * stock['price']\n\n rows.append({\n 'symbol': 'CASH',\n 'cash': cash,\n 'total': grand_total\n })\n\n return render_template('index.html', stocks=rows)", "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n if \"SPY\" not in symbols:\n symbols.insert(0, \"SPY\")\n for symbol in symbols:\n temp = pd.read_csv(symbol_to_path(symbol, base_dir=\"data\"), \n index_col=\"Date\", \n parse_dates=True, \n usecols=[\"Date\", \"Adj Close\"])\n \n temp = temp.rename(columns={\"Adj Close\": symbol})\n \n df = df.join(temp, how=\"inner\")\n df = df.sort_index(axis=0, ascending=[1])\n \n return df", "def get_dividends(self, stock_list, start_date=None, end_date=None):\n df_dict = {}\n df_list = []\n file_in_path = [year.replace(\".csv\", \"\") for year in self.get_csv_in_path(self.dividend_eps_path)]\n if not start_date:\n start_date = file_in_path[0]\n if not end_date:\n end_date = file_in_path[-1]\n if start_date > end_date:\n return df_dict\n for year in range(int(start_date), int(end_date)+1):\n target_path = \"{}/{}.csv\".format(self.dividend_eps_path, year)\n df = pd.read_csv(target_path, index_col=\"名稱\")\n self.replace_nan_to_other(df, \"\")\n for stock in stock_list:\n pd_index = df.index.to_list()\n old_list = []\n if stock in pd_index:\n data = df.loc[stock]\n\n # print(\"日期 = {}\".format(data.get(\"除息交易日\")))\n if df_dict.get(stock):\n old_list = df_dict.get(stock)\n\n # check data is available\n dict = {}\n if data.get(\"現金股利\") != \"\":\n dict.update({\"除息交易日\": \"{}{}\".format(year, data.get(\"除息交易日\").split(\"'\")[1].replace(\"/\", \"\")) if data.get('除息交易日') else \"\",\n \"現金股利\": data.get(\"現金股利\"),\n })\n if data.get(\"股票股利\") != \"\":\n dict.update({\"除權交易日\": 
\"{}{}\".format(year, data.get(\"除權交易日\").split(\"'\")[1].replace(\"/\", \"\")) if data.get('除權交易日') else \"\",\n \"股票股利\": data.get(\"股票股利\"),\n })\n if dict:\n old_list.append(dict)\n df_dict.update({stock: old_list})\n\n return df_dict", "def index():\n\n stocks_owned = db.execute(\"SELECT DISTINCT stock FROM transaction WHERE id = :id;\", id=session['user_id'])\n\n number_of_rows= len(stocks_owned) - 1\n\n i = 0\n\n total_value=0\n\n for stock in stocks_owned:\n\n stock_list=[]\n stock_list[i]=stock\n\n value = db.execute(\"SELECT SUM(total_amount) FROM transaction WHERE id = :id GROUP BY stock HAVING stock=:stock\", id=session['usestockr_id'], stock=stocks_owned[\"stock\"])\n value_list=[]\n value_list[i] = value\n\n amount_owned = db.execute(\"SELECT SUM(amount) FROM transaction WHERE id = :id GROUP BY stock HAVING stock=:stock\", id=session['user_id'], stock = stocks_owned[\"stock\"])\n amount_list=[]\n amount_list[i]= amount_owned\n\n quote_input = stocks_owned[i]\n quote_info = lookup(quote_input)\n price = quote_info['price']\n price_list=[]\n price_list[i] = price\n\n\n total_value+=value\n\n i+=1\n\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id;\", id=session['user_id'])\n\n grand_total = total_value + cash\n\n ###(\"SELECT stock, SUM(total_amount) FROM transaction WHERE id = :id;, id=session['user_id'] GROUP BY stock\")####\n\n\n return render_template(\"index.html\", number_of_rows=number_of_rows, stock_list=stock_list, amount_list=amount_list, value_list=value_list, price_list=price_list, total_value=total_value, grand_total=grand_total)" ]
[ "0.63580585", "0.61449856", "0.60916585", "0.59879607", "0.586941", "0.5752481", "0.574493", "0.56445813", "0.5596955", "0.5551755", "0.5538056", "0.55360204", "0.55317575", "0.55244833", "0.5521858", "0.55173093", "0.55147207", "0.54893124", "0.54601693", "0.54540384", "0.5437737", "0.54161805", "0.541107", "0.5409818", "0.54051584", "0.53736913", "0.537251", "0.53687346", "0.5362668", "0.5362655" ]
0.6858316
0
General method for costing belt filter press. Capital cost is a function of flow in gal/hr.
def cost_filter_press(blk): t0 = blk.flowsheet().time.first() # Add cost variable and constraint blk.capital_cost = pyo.Var( initialize=1, units=blk.config.flowsheet_costing_block.base_currency, bounds=(0, None), doc="Capital cost of unit operation", ) Q = pyo.units.convert( blk.unit_model.properties_in[t0].flow_vol, to_units=pyo.units.gal / pyo.units.hr, ) # Get parameter dict from database parameter_dict = blk.unit_model.config.database.get_unit_operation_parameters( blk.unit_model._tech_type, subtype=blk.unit_model.config.process_subtype ) # Get costing parameter sub-block for this technology A, B = blk.unit_model._get_tech_parameters( blk, parameter_dict, blk.unit_model.config.process_subtype, ["capital_a_parameter", "capital_b_parameter"], ) # Determine if a costing factor is required factor = parameter_dict["capital_cost"]["cost_factor"] expr = pyo.units.convert( A * Q + B, to_units=blk.config.flowsheet_costing_block.base_currency ) blk.capital_cost_constraint = pyo.Constraint(expr=blk.capital_cost == expr) # Register flows blk.config.flowsheet_costing_block.cost_flow( blk.unit_model.electricity[t0], "electricity" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cond_boiler_op_cost(Q_therm_W, Q_design_W, T_return_to_boiler_K):\n if Q_therm_W > 0.0:\n\n # boiler efficiency\n eta_boiler = cond_boiler_operation(Q_therm_W, Q_design_W, T_return_to_boiler_K)\n\n E_aux_Boiler_req_W = BOILER_P_AUX * Q_therm_W\n\n Q_primary_W = Q_therm_W / eta_boiler\n else:\n Q_primary_W = 0.0\n E_aux_Boiler_req_W = 0.0\n\n return Q_primary_W, E_aux_Boiler_req_W", "def calc_capital_costs (self):\n self.capital_costs = self.max_boiler_output * \\\n self.comp_specs[\"cost per btu/hrs\"]\n #~ print self.capital_costs", "def cost(self) -> float:", "def get_PP_costing(self, cost_accounts, scaled_param, units, tech, ccs=\"B\"):\n # ------------------------ Power Plant Cost ------------------------\n\n # check to see if a costing block already exists\n if hasattr(self, \"costing\"):\n raise AttributeError(\n \"{} already has an attribute costing. \"\n \"Check that you are not calling get_costing\"\n \" twice on the same model\".format(self.name)\n )\n\n # create a costing Block\n self.costing = Block()\n self.costing.library = \"PP\"\n\n # find flowsheet block to create global costing parameters\n try:\n fs = self.flowsheet()\n except AttributeError:\n fs = self.parent_block()\n\n # build flowsheet level parameters CE_index = year\n if not hasattr(fs, \"costing\"):\n fs.get_costing(year=\"2018\")\n\n CE_index = fs.costing.CE_index\n\n # define preloaded accounts\n PC_preloaded_accounts = {\n \"Coal Handling\": [\"1.1\", \"1.2\", \"1.3\", \"1.4\", \"1.9a\"],\n \"Sorbent Handling\": [\"1.5\", \"1.6\", \"1.7\", \"1.8\", \"1.9b\"],\n \"Coal Feed\": [\"2.1\", \"2.2\", \"2.9a\"],\n \"Sorbent Feed\": [\"2.5\", \"2.6\", \"2.9b\"],\n \"Feedwater System\": [\"3.1\", \"3.3\"],\n \"PC Boiler\": [\"4.9\"],\n \"Steam Turbine\": [\"8.1\"],\n \"Condenser\": [\"8.3\"],\n \"Cooling Tower\": [\"9.1\"],\n \"Circulating Water System\": [\"9.2\", \"9.3\", \"9.4\", \"9.6\", \"9.7\"],\n \"Ash Handling\": [\"10.6\", \"10.7\", \"10.9\"],\n }\n\n IGCC_preloaded_accounts = {\n \"Coal Handling\": [\"1.1\", \"1.2\", \"1.3\", \"1.4\", \"1.9\"],\n \"Coal Feed\": [\"2.1\", \"2.2\", \"2.3\", \"2.4\", \"2.9\"],\n \"Feedwater System\": [\"3.1\", \"3.3\"],\n \"Gasifier\": [\"4.1\"],\n \"Syngas Cooler\": [\"4.2\"],\n \"ASU\": [\"4.3a\"],\n \"ASU Oxidant Compression\": [\"4.3b\"],\n \"Combustion Turbine\": [\"6.1\", \"6.3\"],\n \"Syngas Expander\": [\"6.2\"],\n \"HRSG\": [\"7.1\", \"7.2\"],\n \"Steam Turbine\": [\"8.1\"],\n \"Condenser\": [\"8.3\"],\n \"Cooling Tower\": [\"9.1\"],\n \"Circulating Water System\": [\"9.2\", \"9.3\", \"9.4\", \"9.6\", \"9.7\"],\n \"Slag Handling\": [\"10.1\", \"10.2\", \"10.3\", \"10.6\", \"10.7\", \"10.8\", \"10.9\"],\n }\n\n NGCC_preloaded_accounts = {\n \"Feedwater System\": [\"3.1\", \"3.3\"],\n \"Combustion Turbine\": [\"6.1\", \"6.3\"],\n \"HRSG\": [\"7.1\", \"7.2\"],\n \"Steam Turbine\": [\"8.1\"],\n \"Condenser\": [\"8.3\"],\n \"Cooling Tower\": [\"9.1\"],\n \"Circulating Water System\": [\"9.2\", \"9.3\", \"9.4\", \"9.6\", \"9.7\"],\n }\n\n AUSC_preloaded_accounts = {\n \"PC Boiler\": [\"4.9\"],\n \"Steam Turbine\": [\"8.1\"],\n \"Steam Piping\": [\"8.4\"],\n }\n\n # preloaded account handling\n if type(cost_accounts) == str:\n if tech in [1, 2]:\n cost_accounts = PC_preloaded_accounts[cost_accounts]\n elif tech in [3, 4, 5]:\n cost_accounts = IGCC_preloaded_accounts[cost_accounts]\n elif tech == 6:\n cost_accounts = NGCC_preloaded_accounts[cost_accounts]\n elif tech == 7:\n cost_accounts = AUSC_preloaded_accounts[cost_accounts]\n else:\n AttributeError(\"{} technology 
not supported\".format(self.name))\n\n # pull data for each account into dictionaries\n process_params = {}\n reference_units = {}\n account_names = {}\n exponents = {}\n reference_costs = {}\n reference_params = {}\n engineering_fees = {}\n process_contingencies = {}\n project_contingencies = {}\n\n for account in cost_accounts:\n try: # first look for data in json file info\n process_params[account] = BB_costing_exponents[str(tech)][account][\n \"Process Parameter\"\n ]\n reference_units[account] = BB_costing_params[str(tech)][ccs][\n cost_accounts[0]\n ][\"Units\"]\n account_names[account] = BB_costing_exponents[str(tech)][account][\n \"Account Name\"\n ]\n exponents[account] = float(\n BB_costing_exponents[str(tech)][account][\"Exponent\"]\n )\n reference_costs[account] = BB_costing_params[str(tech)][ccs][account][\"BEC\"]\n reference_params[account] = BB_costing_params[str(tech)][ccs][account][\n \"RP Value\"\n ]\n engineering_fees[account] = BB_costing_params[str(tech)][ccs][account][\n \"Eng Fee\"\n ]\n process_contingencies[account] = BB_costing_params[str(tech)][ccs][account][\n \"Process Contingency\"\n ]\n project_contingencies[account] = BB_costing_params[str(tech)][ccs][account][\n \"Project Contingency\"\n ]\n except KeyError:\n try: # next look for data in custom dictionaries\n process_params[account] = custom_costing_exponents[str(tech)][account][\n \"Process Parameter\"\n ]\n reference_units[account] = custom_costing_params[str(tech)][ccs][\n cost_accounts[0]\n ][\"Units\"]\n account_names[account] = custom_costing_exponents[str(tech)][account][\n \"Account Name\"\n ]\n exponents[account] = float(\n custom_costing_exponents[str(tech)][account][\"Exponent\"]\n )\n reference_costs[account] = custom_costing_params[str(tech)][ccs][\n account\n ][\"BEC\"]\n reference_params[account] = custom_costing_params[str(tech)][ccs][\n account\n ][\"RP Value\"]\n engineering_fees[account] = custom_costing_params[str(tech)][ccs][\n account\n ][\"Eng Fee\"]\n process_contingencies[account] = custom_costing_params[str(tech)][ccs][\n account\n ][\"Process Contingency\"]\n project_contingencies[account] = custom_costing_params[str(tech)][ccs][\n account\n ][\"Project Contingency\"]\n except KeyError:\n print(\n \"KeyError: Account {} could not be found in the \"\n \"dictionary for technology {} with CCS {}\".format(\n account, str(tech), ccs\n )\n )\n\n # check that all accounts use the same process parameter\n param_check = None\n for account in cost_accounts:\n param = process_params[account]\n if param_check is None:\n param_check = param\n elif param != param_check:\n raise ValueError(\n \"{} cost accounts selected do not use \"\n \"the same process parameter\".format(self.name)\n )\n\n # check that the user passed the correct units\n for account in cost_accounts:\n ref_units = reference_units[account]\n if units != ref_units:\n raise ValueError(\n \"Account %s uses units of %s. 
\"\n \"Units of %s were passed.\" % (cost_accounts[0], ref_units, units)\n )\n\n # Used by other functions for reporting results\n self.costing.account_names = account_names\n\n # define parameters\n self.costing.exp = Param(\n cost_accounts,\n mutable=True,\n initialize=exponents,\n doc=\"exponential parameter for account\",\n )\n\n self.costing.ref_cost = Param(\n cost_accounts,\n mutable=True,\n initialize=reference_costs,\n doc=\"reference cost for account\",\n )\n\n self.costing.ref_param = Param(\n cost_accounts,\n mutable=True,\n initialize=reference_params,\n doc=\"reference parameter for account\",\n )\n\n self.costing.eng_fee = Param(\n cost_accounts,\n mutable=True,\n initialize=engineering_fees,\n doc=\"engineering fee percentage\",\n )\n\n self.costing.process_conting = Param(\n cost_accounts,\n mutable=True,\n initialize=process_contingencies,\n doc=\"process contingency percentage\",\n )\n\n self.costing.project_conting = Param(\n cost_accounts,\n mutable=True,\n initialize=project_contingencies,\n doc=\"project contingency percentage\",\n )\n\n # define variables\n self.costing.bare_erected_cost = Var(\n cost_accounts,\n initialize=reference_costs,\n bounds=(0, 1e4),\n doc=\"scaled bare erected cost in $MM\",\n )\n\n self.costing.total_plant_cost = Var(\n cost_accounts,\n initialize=reference_costs,\n bounds=(0, 1e4),\n doc=\"total plant cost in $MM\",\n )\n\n # rule for scaling BEC\n # reference cost is in 2018 dollars, 671.1 is CE index for 2018\n def bare_erected_cost_rule(costing, i):\n return (\n costing.bare_erected_cost[i] * 1e3\n == (CE_index / 671.1)\n * costing.ref_cost[i]\n * (scaled_param / costing.ref_param[i]) ** costing.exp[i]\n )\n\n self.costing.bare_erected_cost_eq = Constraint(\n cost_accounts, rule=bare_erected_cost_rule\n )\n\n # rule for calculating TPC\n def total_plant_cost_rule(costing, i):\n return costing.total_plant_cost[i] == costing.bare_erected_cost[i] * (\n 1 + costing.eng_fee[i] + costing.process_conting[i]\n ) * (1 + costing.project_conting[i])\n\n self.costing.total_plant_cost_eq = Constraint(\n cost_accounts, rule=total_plant_cost_rule\n )\n\n # rule for sum of BEC\n def BEC_sum_rule(costing):\n return sum(costing.bare_erected_cost[i] for i in cost_accounts)\n\n self.costing.bare_erected_cost_sum = Expression(rule=BEC_sum_rule)\n\n # rule for sum of TPC\n def TPC_sum_rule(costing):\n return sum(costing.total_plant_cost[i] for i in cost_accounts)\n\n self.costing.total_plant_cost_sum = Expression(rule=TPC_sum_rule)\n\n # add variable and constraint scaling\n for i in cost_accounts:\n iscale.set_scaling_factor(self.costing.bare_erected_cost[i], 1)\n iscale.set_scaling_factor(self.costing.total_plant_cost[i], 1)\n iscale.constraint_scaling_transform(\n self.costing.bare_erected_cost_eq[i], 1e-3, overwrite=False\n )\n iscale.constraint_scaling_transform(\n self.costing.total_plant_cost_eq[i], 1, overwrite=False\n )", "def _cost(self, action):\n raise NotImplementedError", "def c(x):\n cost = per_widget_cost * x + fixed_cost\n return cost", "def adjust_cost(self) -> None:\n\n n_iterations = self.array.shape[-1]\n n_year = len(self.array.year.values)\n\n # If uncertainty is not considered, the cost factor equals 1.\n # Otherwise, a variability of +/-30% is added.\n\n if n_iterations == 1:\n cost_factor = 1\n else:\n if \"reference\" in self.array.value.values.tolist():\n cost_factor = np.ones((n_iterations, 1))\n else:\n cost_factor = np.random.triangular(0.7, 1, 1.3, (n_iterations, 1))\n\n # Correction of hydrogen tank cost, per kg\n # 
Correction of fuel cell stack cost, per kW\n if \"FCEV\" in self.array.powertrain:\n self.array.loc[\n dict(powertrain=\"FCEV\", parameter=\"fuel tank cost per kg\")\n ] = np.reshape(\n (1.078e58 * np.exp(-6.32e-2 * self.array.year.values) + 3.43e2)\n * cost_factor,\n (1, n_year, n_iterations),\n )\n\n self.array.loc[\n dict(powertrain=\"FCEV\", parameter=\"fuel tank cost per kg\")\n ] = np.reshape(\n (3.15e66 * np.exp(-7.35e-2 * self.array.year.values) + 2.39e1)\n * cost_factor,\n (1, n_year, n_iterations),\n )\n\n # Correction of energy battery system cost, per kWh\n list_batt = [\n i\n for i in [\"BEV\", \"PHEV-e\", \"PHEV-c-p\", \"PHEV-c-d\"]\n if i in self.array.powertrain\n ]\n if len(list_batt) > 0:\n self.array.loc[\n dict(powertrain=list_batt, parameter=\"energy battery cost per kWh\")\n ] = np.reshape(\n (2.75e86 * np.exp(-9.61e-2 * self.array.year.values) + 5.059e1)\n * cost_factor,\n (1, 1, n_year, n_iterations),\n )\n\n # Correction of power battery system cost, per kW\n list_pwt = [\n i\n for i in [\n \"ICEV-p\",\n \"ICEV-d\",\n \"ICEV-g\",\n \"PHEV-c-p\",\n \"PHEV-c-d\",\n \"FCEV\",\n \"HEV-p\",\n \"HEV-d\",\n ]\n if i in self.array.powertrain\n ]\n\n if len(list_pwt) > 0:\n self.array.loc[\n dict(powertrain=list_pwt, parameter=\"power battery cost per kW\")\n ] = np.reshape(\n (8.337e40 * np.exp(-4.49e-2 * self.array.year.values) + 11.17)\n * cost_factor,\n (1, 1, n_year, n_iterations),\n )\n\n # Correction of combustion powertrain cost for ICEV-g\n if \"ICEV-g\" in self.array.powertrain:\n self.array.loc[\n dict(powertrain=\"ICEV-g\", parameter=\"combustion powertrain cost per kW\")\n ] = np.clip(\n np.reshape(\n (5.92e160 * np.exp(-0.1819 * self.array.year.values) + 26.76)\n * cost_factor,\n (1, n_year, n_iterations),\n ),\n None,\n 100,\n )", "def cond_boiler_operation(Q_load_W, Q_design_W, T_return_to_boiler_K):\n\n x = [0, 15.5, 21, 26.7, 32.2, 37.7, 43.3, 49, 54.4, 60, 65.6, 71.1, 100] # Return Temperature Dependency\n y = [96.8, 96.8, 96.2, 95.5, 94.7, 93.2, 91.2, 88.9, 87.3, 86.3, 86.0, 85.9, 85.8] # Return Temperature Dependency\n x1 = [0, 0.05, 0.25, 0.5, 0.75, 1] # Load Point dependency\n y1 = [99.5, 99.3, 98.3, 97.6, 97.1, 96.8] # Load Point Dependency\n\n # do the interpolation\n eff_of_T_return = interp1d(x, y, kind='linear')\n eff_of_phi = interp1d(x1, y1, kind='cubic')\n\n # get input variables\n if Q_design_W > 0:\n phi = float(Q_load_W) / float(Q_design_W)\n else:\n phi = 0\n\n if T_return_to_boiler_K == 0: # accounting with times with no flow\n T_return = 0\n else:\n T_return = T_return_to_boiler_K - 273\n eff_score = eff_of_phi(phi) / eff_of_phi(1)\n boiler_eff = (eff_score * eff_of_T_return(T_return)) / 100.0\n\n return boiler_eff", "def calc_Cinv_boiler(Q_design_W, technology_type, boiler_cost_data):\n Capex_a_Boiler_USD = 0.0\n Opex_a_fix_Boiler_USD = 0.0\n Capex_Boiler_USD = 0.0\n\n if Q_design_W > 0.0:\n boiler_cost_data = boiler_cost_data[boiler_cost_data['code'] == technology_type]\n # if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least\n # capacity for the corresponding technology from the database\n if Q_design_W < boiler_cost_data.iloc[0]['cap_min']:\n Q_design_W = boiler_cost_data.iloc[0]['cap_min']\n max_boiler_size = boiler_cost_data.iloc[0]['cap_max']\n\n if Q_design_W <= max_boiler_size:\n\n boiler_cost_data = boiler_cost_data[\n (boiler_cost_data['cap_min'] <= Q_design_W) & (boiler_cost_data['cap_max'] > Q_design_W)]\n\n Inv_a = boiler_cost_data.iloc[0]['a']\n Inv_b = 
boiler_cost_data.iloc[0]['b']\n Inv_c = boiler_cost_data.iloc[0]['c']\n Inv_d = boiler_cost_data.iloc[0]['d']\n Inv_e = boiler_cost_data.iloc[0]['e']\n Inv_IR = boiler_cost_data.iloc[0]['IR_%']\n Inv_LT = boiler_cost_data.iloc[0]['LT_yr']\n Inv_OM = boiler_cost_data.iloc[0]['O&M_%'] / 100.0\n\n InvC = Inv_a + Inv_b * (Q_design_W) ** Inv_c + (Inv_d + Inv_e * Q_design_W) * log(Q_design_W)\n\n Capex_a_Boiler_USD = calc_capex_annualized(InvC, Inv_IR, Inv_LT)\n Opex_a_fix_Boiler_USD = InvC * Inv_OM\n Capex_Boiler_USD = InvC\n\n else:\n number_of_boilers = int(ceil(Q_design_W / max_boiler_size))\n Q_nom_W = Q_design_W / number_of_boilers\n\n boiler_cost_data = boiler_cost_data[\n (boiler_cost_data['cap_min'] <= Q_nom_W) & (boiler_cost_data['cap_max'] > Q_nom_W)]\n\n Inv_a = boiler_cost_data.iloc[0]['a']\n Inv_b = boiler_cost_data.iloc[0]['b']\n Inv_c = boiler_cost_data.iloc[0]['c']\n Inv_d = boiler_cost_data.iloc[0]['d']\n Inv_e = boiler_cost_data.iloc[0]['e']\n Inv_IR = boiler_cost_data.iloc[0]['IR_%']\n Inv_LT = boiler_cost_data.iloc[0]['LT_yr']\n Inv_OM = boiler_cost_data.iloc[0]['O&M_%'] / 100.0\n\n InvC = (Inv_a + Inv_b * (Q_nom_W) ** Inv_c + (Inv_d + Inv_e * Q_nom_W) * log(Q_nom_W)) * number_of_boilers\n\n Capex_a_Boiler_USD = calc_capex_annualized(InvC, Inv_IR, Inv_LT)\n Opex_a_fix_Boiler_USD = InvC * Inv_OM\n Capex_Boiler_USD = InvC\n\n return Capex_a_Boiler_USD, Opex_a_fix_Boiler_USD, Capex_Boiler_USD", "def cost_b(self):\n return self._cost_b", "def circuitSat(C):", "def compute_cost(AL,Y,cost_function_name):\n cost_functions = {\n \"cost_func_1\": cf.cost_function_1\n } \n\n activ_func = cost_functions.get(cost_function_name,lambda : \"Invalid Cost Function Name !\")\n\n cost,dAL = activ_func(AL,Y)\n\n return cost, dAL", "def calc_Cop_boiler(q_load_Wh, Q_nom_W, T_return_to_boiler_K):\n\n if (Q_nom_W > 0.0) and (q_load_Wh > 0.0):\n\n # calculate efficiency according to partload\n phi = float(q_load_Wh) / float(Q_nom_W)\n if phi >=1.0: # avoid rounding error\n phi = 0.98\n T_return_C = np.float(T_return_to_boiler_K - 273.15)\n eff_score = eff_of_phi(phi) / eff_of_phi(1)\n boiler_eff = (eff_score * eff_of_T_return([T_return_C]))[0] / 100.0\n else:\n boiler_eff = 0.0\n\n return boiler_eff", "def bc_flower(teff):\n lteff=np.log10(teff)\n if (lteff<3.7):\n bcflow=-0.190537291496456e+05+0.155144866764412e+05*lteff-0.421278819301717e+04*(lteff*lteff)+0.381476328422343e+03*(lteff*lteff*lteff)\n if (lteff>=3.7 and lteff<3.9):\n bcflow=-0.370510203809015e+05+0.385672629965804e+05*lteff-0.150651486316025e+05*(lteff*lteff)+0.261724637119416e+04*(lteff*lteff*lteff)-0.170623810323864e+03*(lteff*lteff*lteff*lteff)\n if (lteff>=3.9):\n bcflow=-0.370510203809015e+05+0.385672629965804e+05*lteff-0.150651486316025e+05*(lteff*lteff)+0.261724637119416e+04*(lteff*lteff*lteff)-0.170623810323864e+03*(lteff*lteff*lteff*lteff)\n return bcflow", "def compute_cost(AL, Y):\n pass", "def __call__(self, rate:'kW'):\n self.rate = rate\n self.cost = self.price * rate", "def obs_cost_fn(self, state):\n # Weights for different terms\n W_PUSHER = 1\n W_GOAL = 2\n W_DIFF = 5\n\n length = state.shape[0]\n # pusher_x, pusher_y = state[:, 0], state[:, 1]\n box_x, box_y = state[:, 2], state[:, 3]\n # goal_x, goal_y = np.tile(self.goal[0], (length, 1)), np.tile(self.goal[1], (length, 1))\n\n pusher = state[:, 0:2]\n box = state[:, 2:4]\n goal = np.tile(self.goal, (length, 1))\n goal_x, goal_y = goal[:, 0], goal[:, 1]\n\n d_box = np.linalg.norm(pusher - box, axis=1, ord=2)\n d_goal = np.linalg.norm(box - goal, axis=1, 
ord=2)\n\n\n # pusher_box = np.array([box_x - pusher_x, box_y - pusher_y])\n # box_goal = np.array([goal_x - box_x, goal_y - box_y])\n # d_box = np.sqrt(np.dot(pusher_box, pusher_box))\n # d_goal = np.sqrt(np.dot(box_goal, box_goal))\n diff_coord = np.abs(box_x / (box_y + EPSILON) - goal_x / (goal_y + EPSILON))\n # the -0.4 is to adjust for the radius of the box and pusher\n return W_PUSHER * np.max([d_box - 0.4, np.zeros(len(d_box))], axis=0) + W_GOAL * d_goal + W_DIFF * diff_coord", "def compute_cost(AL, Y):\n pass", "def compute_cost(self, chrome):\n return 1", "def taumBday(n_b, n_w, cost_b, cost_w, z):\n # 3 cases for how much it would cost to buy black or white present\n case1 = n_w * cost_w + n_b * cost_b\n case2 = n_w * (cost_b + z) + n_b * cost_b\n case3 = n_w * cost_w + n_b * (cost_w + z)\n return min(case1, case2, case3)", "def cost_mabr(blk):\n t0 = blk.flowsheet().time.first()\n\n # Get parameter dict from database\n parameter_dict = blk.unit_model.config.database.get_unit_operation_parameters(\n blk.unit_model._tech_type, subtype=blk.unit_model.config.process_subtype\n )\n\n # Get costing parameter sub-block for this technology\n A, B = blk.unit_model._get_tech_parameters(\n blk,\n parameter_dict,\n blk.unit_model.config.process_subtype,\n [\"reactor_cost\", \"blower_cost\"],\n )\n\n # Add cost variable and constraint\n blk.capital_cost = pyo.Var(\n initialize=1,\n units=blk.config.flowsheet_costing_block.base_currency,\n bounds=(0, None),\n doc=\"Capital cost of unit operation\",\n )\n\n DCC_reactor = pyo.units.convert(\n blk.unit_model.properties_treated[t0].flow_mass_comp[\"ammonium_as_nitrogen\"]\n / blk.unit_model.nitrogen_removal_rate\n * A,\n to_units=blk.config.flowsheet_costing_block.base_currency,\n )\n\n DCC_blower = pyo.units.convert(\n blk.unit_model.reactor_area * blk.unit_model.air_flow_rate[t0] * B,\n to_units=blk.config.flowsheet_costing_block.base_currency,\n )\n\n expr = DCC_reactor + DCC_blower\n\n blk.unit_model._add_cost_factor(\n blk, parameter_dict[\"capital_cost\"][\"cost_factor\"]\n )\n\n blk.capital_cost_constraint = pyo.Constraint(\n expr=blk.capital_cost == blk.cost_factor * expr\n )\n\n # Register flows\n blk.config.flowsheet_costing_block.cost_flow(\n blk.unit_model.electricity[t0], \"electricity\"\n )", "def cost_b_v(self):\n return self._cost_b_v", "def cost(self,e1,e2):\n pass", "def costFun(self, S, x):", "def get_prodcost(_craftable, tab):\n # print(f'{tab}{_craftable.name} ({_craftable.value}):')\n if _craftable.prod_cost == 0:\n prodcost = 0\n\n tab = tab + ' '\n\n # If there are Craftable Items as required materials ....\n if len(_craftable.craftables_list) > 0:\n for craft, amount in _craftable.craftables_list:\n # partial_cost = round(float(craftables[craft].value) * amount, 2)\n for loop in range(amount):\n partial_cost = get_prodcost(craftables[craft], tab)\n # If the prod cost of the item is more expensive than the buy cost\n if partial_cost >= float(craftables[craft].value):\n partial_cost = float(craftables[craft].value)\n craftables[craft].buy_or_craft = 'buy'\n # print(f'{tab}Buy item {craft}')\n else:\n craftables[craft].buy_or_craft = 'craft'\n # print(f'{tab}Craft item {craft}')\n prodcost = prodcost + partial_cost\n\n # If the Total PROD Cost (prod_cost + res_totalcost) is cheaper the the market value:\n prodcost = prodcost + _craftable.res_totalcost\n if prodcost < float(_craftable.value):\n for resource, amount in _craftable.resources_list:\n partial_cost = round(float(resources[resource].unit_price) * amount, 2)\n # 
print(f'{tab}{resource} Cost: {partial_cost}')\n\n # print(f'{tab}Total Prod C ost: {round(prodcost, 2)}')\n if prodcost < float(_craftable.value):\n # print(f'{tab}Craft item {_craftable.name}')\n _craftable.buy_or_craft = 'craft'\n else:\n # print(f'{tab}Buy item {_craftable.name}')\n _craftable.buy_or_craft = 'buy'\n return prodcost\n else:\n return _craftable.prod_cost", "def __call__(self, x, u, k):\n first_time_through = True\n for cost, arg, weight in zip(self._costs, self._args, self._weights):\n if arg == \"x\":\n cost_input = x\n else:\n cost_input = u[arg]\n\n current_term = weight * cost(cost_input, k)\n if current_term > 1e8:\n print(\"Warning: cost %s is %f\" % (cost._name, current_term))\n print(\"Input is: \", cost_input)\n\n# if cost._name[:4] == \"bike\":\n# print(cost._name, \": \", current_term)\n\n if first_time_through:\n total_cost = current_term\n else:\n total_cost += current_term\n\n first_time_through = False\n\n return total_cost", "def set_costs(self) -> None:\n self[\"glider cost\"] = (\n self[\"glider base mass\"] * self[\"glider cost slope\"]\n + self[\"glider cost intercept\"]\n )\n self[\"lightweighting cost\"] = (\n self[\"glider base mass\"]\n * self[\"lightweighting\"]\n * self[\"glider lightweighting cost per kg\"]\n )\n self[\"electric powertrain cost\"] = (\n self[\"electric powertrain cost per kW\"] * self[\"electric power\"]\n )\n self[\"combustion powertrain cost\"] = (\n self[\"combustion power\"] * self[\"combustion powertrain cost per kW\"]\n )\n self[\"fuel cell cost\"] = self[\"fuel cell power\"] * self[\"fuel cell cost per kW\"]\n self[\"power battery cost\"] = (\n self[\"battery power\"] * self[\"power battery cost per kW\"]\n )\n self[\"energy battery cost\"] = (\n self[\"energy battery cost per kWh\"] * self[\"electric energy stored\"]\n )\n self[\"fuel tank cost\"] = self[\"fuel tank cost per kg\"] * self[\"fuel mass\"]\n # Per km\n self[\"energy cost\"] = self[\"energy cost per kWh\"] * self[\"TtW energy\"] / 3600\n\n # For battery, need to divide cost of electricity\n # at battery by efficiency of charging\n # to get costs at the \"wall socket\".\n\n _ = lambda x: np.where(x == 0, 1, x)\n self[\"energy cost\"] /= _(self[\"battery charge efficiency\"])\n\n self[\"component replacement cost\"] = (\n self[\"energy battery cost\"] * self[\"battery lifetime replacements\"]\n + self[\"fuel cell cost\"] * self[\"fuel cell lifetime replacements\"]\n )\n\n with open(DATA_DIR / \"purchase_cost_params.yaml\", \"r\") as stream:\n to_markup = yaml.safe_load(stream)[\"markup\"]\n\n self[to_markup] *= self[\"markup factor\"]\n\n # calculate costs per km:\n self[\"lifetime\"] = self[\"lifetime kilometers\"] / self[\"kilometers per year\"]\n\n with open(DATA_DIR / \"purchase_cost_params.yaml\", \"r\") as stream:\n purchase_cost_params = yaml.safe_load(stream)[\"purchase\"]\n\n self[\"purchase cost\"] = self[purchase_cost_params].sum(axis=2)\n # per km\n amortisation_factor = self[\"interest rate\"] + (\n self[\"interest rate\"]\n / (\n (np.array(1) + self[\"interest rate\"]) ** self[\"lifetime kilometers\"]\n - np.array(1)\n )\n )\n self[\"amortised purchase cost\"] = (\n self[\"purchase cost\"] * amortisation_factor / self[\"kilometers per year\"]\n )\n\n # per km\n self[\"maintenance cost\"] = (\n self[\"maintenance cost per glider cost\"]\n * self[\"glider cost\"]\n / self[\"kilometers per year\"]\n )\n\n # simple assumption that component replacement\n # occurs at half of life.\n self[\"amortised component replacement cost\"] = (\n (\n 
self[\"component replacement cost\"]\n * (\n (np.array(1) - self[\"interest rate\"]) ** self[\"lifetime kilometers\"]\n / 2\n )\n )\n * amortisation_factor\n / self[\"kilometers per year\"]\n )\n\n self[\"total cost per km\"] = (\n self[\"energy cost\"]\n + self[\"amortised purchase cost\"]\n + self[\"maintenance cost\"]\n + self[\"amortised component replacement cost\"]\n )", "def get_expected_cost(self):", "def return_running_cost_func(RunningCost='Minimize Input Energy'):\n if type(RunningCost)==str:\n assert RunningCost in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'.\"\n else:\n assert type(RunningCost)==list, \"RunningCost must be a list of cost types.\"\n for el in RunningCost:\n assert type(el)==str, \"Each element of RunningCost must be a string. Not \" + str(type(el)) + \".\"\n assert el in ['Minimize Input Energy',\n 'Minimize time away from target angle',\n 'Minimize time away from target angular velocity'],\\\n \"Each element of RunningCost must be either 'Minimize Input Energy','Minimize time away from target angle', or 'Minimize time away from target angular velocity'. '\" + el + \"' not accepted.\"\n\n if \"Minimize Input Energy\" in RunningCost:\n result1 = lambda X,U,dt: np.trapz((k3/2)*U**2,dx=dt)\n else:\n result1 = lambda X,U,dt: 0\n\n if \"Minimize time away from target angle\" in RunningCost:\n result2 = lambda X,U,dt: np.trapz(k1*(1/2)*(X[0,1:]-TargetAngle)**2,dx=dt)\n else:\n result2 = lambda X,U,dt: 0\n\n if \"Minimize time away from target angular velocity\" in RunningCost:\n result3 = lambda X,U,dt:\\\n np.trapz(k2*(1/2)*(X[1,1:]-TargetAngularVelocity)**2,dx=dt)\n else:\n result3 = lambda X,U,dt: 0\n\n result = lambda X,U,dt: result1(X,U,dt) \\\n + result2(X,U,dt) \\\n + result3(X,U,dt)\n return(result)", "def calculate_bandwidth(self, complist):\n\n self.type = complist[0]\n self.spot = complist[2]\n self.value = complist[1]\n self.fail = False\n\n if self.spot == \"1\" or self.spot == \"2\":\n # print \"Calculation if Loop 1st filter\"\n if self.type == \"C\":\n if self.c1 == 0:\n self.c1 = self.value\n else:\n self.fail = True\n else:\n if self.r1 == 0:\n self.r1 = self.value\n else:\n self.fail = True\n\n if self.spot == \"3\" or self.spot == \"4\":\n # print \"Calculation if loop 2nd filter\"\n if self.type == \"C\":\n if self.c2 == 0:\n self.c2 = self.value\n else:\n self.fail = True\n else: #self.type == \"R\"\n if self.r2 == 0:\n self.r2 = self.value\n else:\n self.fail = True\n\n if self.c1 != 0 and self.c2 != 0 and self.r1 != 0 and self.r2 != 0:\n cf_low = int((1/(2*pi*(self.r1)*(self.c1))/1000))\n #print \"hello!\"\n cf_low = str(cf_low)\n self.cutoff_freql = cf_low\n self.cutoff_freql_text = cf_low + \"K\"\n print self.cutoff_freql_text\n cf_high = int((1/(2*pi*(self.r2)*(self.c2))/1000))\n cf_high = str(cf_high)\n self.cutoff_freqh = cf_high\n self.cutoff_freqh_text = cf_high + \"K\"\n print self.cutoff_freqh_text\n voltage_change = float((self.r2)/self.r1)\n voltage = 5 + voltage_change\n self.voltage = str(voltage)\n print self.voltage" ]
[ "0.60161155", "0.5890601", "0.5859548", "0.5829997", "0.5820077", "0.58090657", "0.57660633", "0.57442236", "0.5743485", "0.5697327", "0.56755006", "0.56499743", "0.5622", "0.5602251", "0.55968297", "0.5590435", "0.55680066", "0.5539766", "0.5538298", "0.5469668", "0.5467324", "0.545357", "0.5451566", "0.5417329", "0.54080707", "0.54065144", "0.5398749", "0.53898406", "0.5372717", "0.5366814" ]
0.7558044
0
Test case for create_symlink_file
def test_create_symlink_file(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_symlink(self, source_path, main):\n main_file = os.path.realpath(os.path.join(source_path, main))\n if not os.path.isfile(main_file):\n main_file += '.js'\n if not os.path.isfile(main_file):\n print('\\tWARNING: Could not create symlink for {}, no such file.'.format(main_file))\n return\n main_file_name = os.path.basename(main_file)\n with change_working_directory(os.path.realpath(self.symlink_dir)) as cd:\n file_path = os.path.join(cd, main_file_name)\n self.created(file_path)\n if os.path.islink(file_path):\n os.remove(file_path)\n symlink(main_file, main_file_name)", "def _symlink(source, link_name):\n flags = 0\n\n if source is not None and os.path.isdir(source):\n flags = 1\n\n CreateSymbolicLinkW(link_name, source, flags)", "def testCreateSymlinkOutput(self): # pylint: disable=no-self-use\n gcs_client = mock.MagicMock(spec=storage.Client)\n blob = prow.create_symlink(gcs_client, \"gs://bucket/symlink\",\n \"gs://bucket/output\")\n\n blob.upload_from_string.assert_called_once_with(\"gs://bucket/output\")", "def create_symbolic_link(file, target):\n try:\n os.symlink(file, target)\n except NotImplementedError:\n logger.critical(\"Symbolic links not supported on this platform\")\n raise\n except OSError:\n logger.critical(\"Not sufficient permissions\")\n raise", "def _symlink_file_on_disk(source, link_name):\n link_dir = os.path.dirname(link_name)\n\n # create intermediate dirs if they do not already exist\n if not os.path.isdir(link_dir):\n try:\n os.makedirs(link_dir)\n except OSError as exc:\n logger.error(\"Error creating directory '%s': %s\", link_dir, exc)\n return False\n\n # create symbolic link\n try:\n os.symlink(source, link_name)\n except OSError as exc:\n logger.error(\"Error creating symlink '%s': %s\", link_name, exc)\n return False\n\n logger.debug(\"Created symlink '%s' to '%s'\", link_name, source)\n return True", "def testIsSymlink(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n existingValidSymlinkPath=P(self.existingValidSymlinkFilePathStr)\r\n existingInvalidSymlinkPath=P(self.existingInvalidSymlinkFilePathStr)\r\n nonExistingSymlinkPath=P(self.nonExistingSymlinkPathStr)\r\n\r\n # 1\r\n self.assertEquals(existingValidSymlinkPath.isSymlink(),True,\r\n 'Symlink %r exists'\r\n %str(existingValidSymlinkPath))\r\n\r\n # 2\r\n self.assertEquals(existingInvalidSymlinkPath.isSymlink(),True,\r\n 'Symlink %r exists'\r\n %str(existingInvalidSymlinkPath))\r\n\r\n # 3\r\n self.assertEquals(nonExistingSymlinkPath.isSymlink(),False,\r\n 'Symlink %r does not exist'\r\n %str(nonExistingSymlinkPath))", "def ensure_symlink_exists(symlink_path, file_path):\n\n if not (os.path.islink(symlink_path) or (os.path.realpath(symlink_path) != os.path.realpath(file_path))):\n # This is bad.\n raise CronException(\"Path {0} is not a symlink or does not point where expected.\".format(symlink_path))", "def _makeSymlink ( target, source, env ) :\n if len(target) != 1 :\n fail ( \"unexpected number of targets for symlink: \"+str(target) )\n if len(source) != 1 :\n fail ( \"unexpected number of sources for symlink: \"+str(source) )\n\n target = str(target[0])\n source = str(source[0].abspath)\n trace ( \"Executing symlink `%s' -> `%s'\" % ( target, source ), \"makeSymlink\", 3 )\n\n os.symlink ( source, target )", "def create_symlink(src: str, dst: str) -> bool:\n if exists(src):\n with suppress(Exception):\n if isfile(dst):\n remove(dst)\n else:\n rmtree(dst)\n\n try:\n\n symlink(src, dst)\n return True\n\n except PermissionError as err:\n printer(\n \"User without permission to 
create the symbolic link.\",\n str(err),\n foreground=FG().ERROR,\n )\n return False\n\n except FileExistsError:\n remove(dst)\n symlink(src, dst)\n return False", "def test_history_import_symlink():\n with HistoryArchive() as history_archive:\n history_archive.write_metafiles()\n history_archive.write_link('datasets/Pasted_Entry_1.txt', '../target.txt')\n history_archive.write_file('target.txt', 'insecure')\n _run_jihaw_cleanup_check_secure(history_archive, 'Symlink dataset in import archive allowed')", "def create_symlink(source_file, dest_file, sudo=True):\n LOG.info(\"Creating symlink to {} called {}\".format(source_file, dest_file))\n cmd = \"ln -sf {} {}\".format(source_file, dest_file)\n _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)", "def create_symlink(src, dest):\n sudo('ln -s {} {}'.format(src, dest))", "def test_diff_git_symlink_added(self):\n diff = (\n b'diff --git a/link b/link\\n'\n b'new file mode 120000\\n'\n b'index 0000000..100b938\\n'\n b'--- /dev/null\\n'\n b'+++ b/link\\n'\n b'@@ -0,0 +1 @@\\n'\n b'+README\\n'\n b'\\\\ No newline at end of file\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'link',\n orig_file_details=PRE_CREATION,\n modified_filename=b'link',\n modified_file_details=b'100b938',\n new_unix_mode='120000',\n is_symlink=True,\n new_symlink_target=b'README',\n insert_count=1,\n data=diff)", "def create_symlinks(target_dir: os.PathLike, symlinks_to_create: List[os.PathLike]):\n for src_path in symlinks_to_create:\n trg_path = os.path.join(target_dir, os.path.basename(src_path))\n\n if os.path.islink(src_path):\n # Let's not create symlinks to symlinks\n # Since dropping the current symlink will break the experiment\n os.symlink(os.readlink(src_path), trg_path)\n else:\n print(f'Creating a symlink to {src_path}, so try not to delete it occasionally!')\n os.symlink(src_path, trg_path)", "def attempt_symlink_to(path: str, to_path: str) -> None:\n try:\n Path(path).symlink_to(Path(to_path))\n except OSError:\n pytest.skip(\"could not create symbolic link\")", "def makeLinks(self, source, target):\n\n if os.path.exists(target): os.unlink(target)\n os.symlink(source, target)", "def _safe_setup_link(link_filename, real_filename):\r\n real_filename = os.path.relpath(real_filename, os.path.dirname(link_filename))\r\n\r\n if os.path.exists(link_filename):\r\n try:\r\n os.unlink(link_filename)\r\n except OSError:\r\n pass\r\n try:\r\n os.symlink(real_filename, link_filename)\r\n except OSError as e:\r\n # Typically permission denied.\r\n pass", "def _symlink_tar(self):\r\n outsidep = self.unsafe_common_dir / \"unsafe_file.txt\"\r\n symlinkp = self.unsafe_common_dir / \"symlink.txt\"\r\n symlink_tar = self.unsafe_common_dir / \"symlink.tar.gz\"\r\n outsidep.symlink(symlinkp)\r\n with tarfile.open(symlink_tar, \"w:gz\") as tar:\r\n tar.add(symlinkp)\r\n\r\n return symlink_tar", "def testGetSymlinkPath(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n existingValidFileSymlinkPath=P(self.existingValidSymlinkFilePathStr)\r\n existingValidDirSymlinkPath=P(self.existingValidSymlinkDirPathStr)\r\n existingFilePath=P(self.existingFilePathStr)\r\n existingDirPath=P(self.existingDirPathStr)\r\n nonExistingSymlinkPath=P(self.nonExistingSymlinkPathStr)\r\n\r\n # 1\r\n r1=str(existingValidFileSymlinkPath.getSymlinkPath())\r\n r2=self.existingValidFileSymlinkPath\r\n self.assertEquals(r1,r2,'Symlink path expected %r. 
Got %r'%(r2,r1))\r\n \r\n # 2\r\n r1=str(existingValidDirSymlinkPath.getSymlinkPath())\r\n r2=self.existingValidDirSymlinkPath\r\n self.assertEquals(r1,r2,'Symlink path expected %r. Got %r'%(r2,r1))\r\n\r\n # 3\r\n self.assertRaises(ufsi.NotASymlinkError,\r\n existingFilePath.getSymlinkPath)\r\n\r\n # 4\r\n self.assertRaises(ufsi.NotASymlinkError,\r\n existingDirPath.getSymlinkPath)\r\n\r\n # 5\r\n self.assertRaises(ufsi.NotASymlinkError,\r\n nonExistingSymlinkPath.getSymlinkPath)", "def make_symlink(dbconfig, targ):\n if \"latest\" in dbconfig and not dbconfig[\"latest\"]:\n return\n link = re.sub(r'[0-9]+', 'latest', targ)\n try:\n os.symlink(targ, link)\n info(\"create link \" + link + \" --> \" + targ)\n except OSError as e:\n if e.errno == errno.EEXIST:\n os.remove(link)\n os.symlink(targ, link)\n info(\"move link \" + link + \" --> \" + targ)", "def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)", "def symlink(self, filen, link):\n src = os.path.abspath(filen)\n cwd = self.getWorkingDirectory()\n dest = os.path.join(cwd, link)\n os.symlink(os.path.relpath(src, cwd), dest)", "def createLink(self):\n \n if( self.useLink ):\n trymakedir( self.parent.installPath + \"/\" + self.alias )\n\n os.chdir( self.parent.installPath + \"/\" + self.alias )\n \n # check for already existing symlinks or dirs \n if( os.path.islink( self.version )):\n os.unlink( self.version )\n elif( os.path.isdir( self.version )):\n self.abort( \"could not create link to [ \" + self.linkPath + \" ]\\nin [ \" \\\n + os.path.basename( self.installPath ) + \" ]!!!\" )\n\n os.symlink( self.linkPath , self.version )\n print \"+ Linking \" + self.parent.installPath + \"/\" + self.alias + \"/\" + self.version \\\n + \" -> \" + self.linkPath", "def _link(filename, existing_filename):\n CreateHardLinkW(filename, existing_filename, 0)", "def IsSymlink(info):\n return (info.external_attr >> 16) == 0120777", "def ln(src, dst):\n os.symlink(src, dst)", "def _sync_symlink(self, binary_name, link_to):\n\n # The symlink we are creating:\n link_path = os.path.join(self.bin_dir, binary_name)\n\n # The expected file we should be linking to:\n link_dest = os.path.join(self.bin_dir, link_to)\n\n if not os.path.exists(link_path) or \\\n not os.path.islink(link_path) or \\\n os.path.realpath(link_path) != os.path.realpath(link_dest):\n if os.path.exists(link_path):\n os.remove(link_path)\n os.symlink(link_to, os.path.join(self.bin_dir, binary_name))\n self.output.append(\"Symlinked %s to %s.\" % (link_path, link_dest))\n self.changed = True", "def test_symlink(self, mock_request):\n self.server.hook = UrlRequestHook('test_url', request_method='GET')\n linkpath = b'ugly'\n targetpath = b'ugliest'\n self.server.input_queue = sftpcmd(\n SSH2_FXP_SYMLINK, sftpstring(linkpath), sftpstring(targetpath),\n sftpint(0))\n self.server.process()\n mock_request.assert_called_once_with(\n 'GET', 'test_url/symlink', auth=None,\n data={\n 'method': 'symlink', 'linkpath': linkpath,\n 'targetpath': targetpath})", "def symlink(self, req, link, parent, name):\r\n self.reply_err(req, EROFS)", "def symlink(source, link_name):\n os_symlink = getattr(os, \"symlink\", None)\n if callable(os_symlink):\n os_symlink(source, link_name)\n else:\n import ctypes\n csl = ctypes.windll.kernel32.CreateSymbolicLinkW\n csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)\n csl.restype = ctypes.c_ubyte\n flags = 1 if os.path.isdir(source) 
else 0\n if csl(link_name, source, flags) == 0:\n raise ctypes.WinError()" ]
[ "0.7453823", "0.7333795", "0.7322523", "0.7264164", "0.72327024", "0.71856415", "0.7102871", "0.7052772", "0.70179003", "0.701752", "0.6931252", "0.6921266", "0.69030124", "0.6874251", "0.68668115", "0.682023", "0.67737055", "0.6681461", "0.66567737", "0.6653678", "0.6610112", "0.6610112", "0.6608762", "0.6590855", "0.657266", "0.65319246", "0.65244013", "0.65209776", "0.6516923", "0.6515994" ]
0.95351255
0
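The negatives above all center on creating or validating symbolic links. As a minimal illustration of the pattern several of them share (drop a stale link before re-creating it), here is a sketch using only the Python standard library; the helper name `refresh_symlink` is not from the dataset and is purely illustrative:

```python
import os

def refresh_symlink(source: str, link_name: str) -> None:
    # Remove whatever currently sits at link_name (link or plain file), then re-link.
    if os.path.islink(link_name) or os.path.isfile(link_name):
        os.remove(link_name)
    os.symlink(source, link_name)
```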
Test case for get_meta_range
def test_get_meta_range(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_range(self):\n pass", "def test_get_range_empty(self):\n\n queryset = mock.Mock()\n queryset.aggregate.return_value = None\n\n dimension = models.QuantitativeDimension(\n key='shares',\n name='Count of shares',\n description='Count of shares',\n field_name='shared_count',\n )\n\n min_val, max_val = dimension.get_range(queryset)\n\n self.assertIsNone(min_val)\n self.assertIsNone(max_val)", "def test_data_range(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n lenrange = random.randint(1, 10)\n nreps = random.randint(1, 10)\n\n ex.range = [\"i\", range(lenrange)]\n ex.nreps = nreps\n\n ex.vary[\"X\"][\"along\"] = 0\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n self.assertIn([\"smalloc\", \"X\", nreps * m * n + (nreps - 1) * m], cmds)\n rangeidx = random.randint(0, lenrange - 1)\n repidx = random.randint(0, nreps - 1)\n self.assertIn([\"soffset\", \"X\", repidx * m,\n \"X_%d_%d\" % (rangeidx, repidx)], cmds)", "def getRange(self, p_int): # real signature unknown; restored from __doc__\n pass", "def GetScalarRange(self):\n ...", "def GetTRange(self):\n ...", "def _in_range_op(spec):", "def range(self):\n\n return time_stat(self, stat=\"range\")", "def __dynamic_range_process(info):\n if 'range' in info:\n for i in range(len(info['range'])):\n if info['range'][i][1] == -1:\n info['range'][i][1] = None\n return info", "def test_002_range_columns(self):\n assert(len(\n self.range_transformer.fit_transform(\n self.data[self.range_col]\n ).columns\n ) == 1)", "def f_has_range(self):\n raise NotImplementedError(\"Should have implemented this.\")", "def testRangeQuery(self):\n\n rq = TermRangeQuery(\"sorter\", \"b\", \"d\", True, True)\n filteredquery = FilteredQuery(rq, self.filter)\n scoreDocs = self.searcher.search(filteredquery, 1000).scoreDocs\n self.assertEqual(2, len(scoreDocs))", "def getRange(self):\n return self.range", "def test_range__no_base_date(self):\n data = self._data()\n data.pop('base_date')\n response = self._get(get_kwargs=data)\n self._check_response(response, 104)", "def get_range_value(self, key):\n pass", "def f_get_range(self, copy=True):\n raise NotImplementedError(\"Should have implemented this.\")", "def _get_sight_range(self):\n raise NotImplementedError", "def test_get_pivot_in_correct_range(self):\n list = [5, 6, 7, 8, 9, 2]\n assert 0 <= get_pivot(list) <= 5 # between the first and last indices", "def test_data_with_range_view(self):\n\n self.create_model()\n self.create_machine()\n self.insert_data()\n\n date_literal = '%Y-%m-%d'\n start_date = dt.today()\n end_date = start_date + datetime.timedelta(days=1)\n\n self.create_user_account_and_login()\n query_url = self.range_url + '/' + self.data['mid'] + \\\n '/?s=' + dt.strftime(start_date, date_literal) + \\\n '&e=' + dt.strftime(end_date, date_literal)\n\n response = self.client.get(query_url)\n results = json.loads(response.content)\n\n self.assertEquals(len(results), 2)", "def test_range__no_end_date(self):\n data = self._data()\n data.pop('end_date')\n response = self._get(get_kwargs=data)\n self._check_response(response, 104)", "def provider_range_lookup(self, record):\n pass", "def start(self):\n return _uhd_swig.meta_range_t_start(self)", "def getSliderRange(*args):\n\n #get timeslider range start\n startF = cmds.playbackOptions(query=True, min=True)\n endF = cmds.playbackOptions(query=True, max=True)\n return(startF, endF)", "def test_range_query(self):\r\n start = datetime(*self.base_date.timetuple()[:3])\r\n end = start + 
timedelta(days=3)\r\n\r\n results = DateTimeQueryTestModel.filter(user=0, day__gte=start, day__lt=end)\r\n assert len(results) == 3", "def _hit_range_get(self):\n return (self.hit_start, self.hit_end)", "def ex_range(data):\n a, b, step = _cleanse_range_args(data)\n return list(range(a, b+sign(step), step))", "def _query_range_get(self):\n return (self.query_start, self.query_end)", "def looks_range(self) -> Optional[int]:\n return self._get_property(LOOKS_RANGE_PROP, int)", "def isRangeValid(self) -> bool:\n ...", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to" ]
[ "0.8197148", "0.6618275", "0.6484984", "0.64601654", "0.63697124", "0.6269415", "0.6174672", "0.6106884", "0.608165", "0.6080813", "0.60700256", "0.6020781", "0.60202074", "0.5981426", "0.59406525", "0.58997625", "0.5883565", "0.5879113", "0.5847939", "0.5835961", "0.5828512", "0.5803753", "0.5780891", "0.5761234", "0.5756502", "0.57524395", "0.57376945", "0.5732459", "0.5727585", "0.5718125" ]
0.9391207
0
Test case for get_range
def test_get_range(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_meta_range(self):\n pass", "def getRange(self, p_int): # real signature unknown; restored from __doc__\n pass", "def _in_range_op(spec):", "def f_get_range(self, copy=True):\n raise NotImplementedError(\"Should have implemented this.\")", "def GetTRange(self):\n ...", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to", "def ex_range(data):\n a, b, step = _cleanse_range_args(data)\n return list(range(a, b+sign(step), step))", "def getRange(self):\n return self.range", "def f_has_range(self):\n raise NotImplementedError(\"Should have implemented this.\")", "def _get_sight_range(self):\n raise NotImplementedError", "def range(self):\n\n return time_stat(self, stat=\"range\")", "def test_data_range(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n lenrange = random.randint(1, 10)\n nreps = random.randint(1, 10)\n\n ex.range = [\"i\", range(lenrange)]\n ex.nreps = nreps\n\n ex.vary[\"X\"][\"along\"] = 0\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n self.assertIn([\"smalloc\", \"X\", nreps * m * n + (nreps - 1) * m], cmds)\n rangeidx = random.randint(0, lenrange - 1)\n repidx = random.randint(0, nreps - 1)\n self.assertIn([\"soffset\", \"X\", repidx * m,\n \"X_%d_%d\" % (rangeidx, repidx)], cmds)", "def get_range(self):\n return time_to_range(self.get_time())", "def get_range_value(self, key):\n pass", "def get_range( value ):\n return list(range(value))", "def GetScalarRange(self):\n ...", "def get_range(value):\n return list(range(value))", "def range(self):\n return self.timerange()", "def range_function(num, start_range, end_range):\n if num > start_range and num < end_range:\n print(num, \"is in the range.\\n\")\n elif num < start_range or num > end_range:\n print(num, \"is not in the range.\\n\")", "def test_get_range_empty(self):\n\n queryset = mock.Mock()\n queryset.aggregate.return_value = None\n\n dimension = models.QuantitativeDimension(\n key='shares',\n name='Count of shares',\n description='Count of shares',\n field_name='shared_count',\n )\n\n min_val, max_val = dimension.get_range(queryset)\n\n self.assertIsNone(min_val)\n self.assertIsNone(max_val)", "def range(self):\n return self.range_array", "def range_inclusive(start, stop):\n return range(start, stop + 1)", "def range (self):\n return self._range", "def range (self):\n return self._range", "def getRange (start, stop, step=1):\r\n result = [n for n in range(start, stop, step)]\r\n return result", "def get_range(self, start=None, end=None):\n\n # handle the case of no data\n if self.data.shape[0] == 0 or self.source.data[\"index\"].shape[0] == 0:\n return None, None\n\n first_source_idx = self.source.data[\"index\"][0]\n last_source_idx = self.source.data[\"index\"][-1]\n\n # convert to timestamp if necessary\n if isinstance(self.data.index, pd.DatetimeIndex):\n start = pd.to_datetime(start, unit=\"ms\")\n end = pd.to_datetime(end, unit=\"ms\")\n first_source_idx = pd.to_datetime(first_source_idx, unit=\"ms\")\n last_source_idx = pd.to_datetime(last_source_idx, unit=\"ms\")\n\n # get new start and end\n if start is not None:\n if start < first_source_idx:\n start = max(self.data.index[0], start)\n elif start > last_source_idx:\n start = min(self.data.index[-1], start)\n elif start < self.data.index[0]:\n start = self.data.index[0]\n elif start > self.data.index[-1]:\n start = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n start = first_source_idx\n else:\n start = self.data.index[0]\n\n if end is not None:\n if 
end < first_source_idx:\n end = max(self.data.index[0], end)\n elif end > last_source_idx:\n end = min(self.data.index[-1], end)\n elif end < self.data.index[0]:\n end = self.data.index[0]\n elif end > self.data.index[-1]:\n end = self.data.index[-1]\n elif len(self.source.data[\"index\"]) > 0:\n end = last_source_idx\n else:\n end = self.data.index[-1]\n\n return start, end", "def new_range(r):\n if isinstance(r, list) or isinstance(r, tuple) and len(r) == 2:\n lower = r[0]\n upper = r[1]\n else:\n lower = r\n upper = r\n lower = int(lower)\n upper = int(upper)\n return range(lower, upper + 1)", "def get_range(start, stop):\n \n nums = []\n\n for num in range(start, stop):\n nums.append(num)\n\n return nums", "def range(self):\n \n return self._range", "def isRangeValid(self) -> bool:\n ..." ]
[ "0.80703026", "0.7837224", "0.7255473", "0.7147124", "0.7073319", "0.7031268", "0.70060104", "0.6990455", "0.6969976", "0.6872395", "0.68121374", "0.67698324", "0.6767907", "0.6744316", "0.67391896", "0.67225266", "0.6692281", "0.66836524", "0.6643143", "0.6600423", "0.659429", "0.65940017", "0.6591628", "0.6591628", "0.65800357", "0.6577428", "0.6565064", "0.65649545", "0.6540213", "0.65292394" ]
0.92907894
0
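Both test documents above are placeholder stubs with `pass` bodies. For orientation only, a hypothetical fleshed-out version — assuming an object exposing a `get_range()` accessor that returns an ordered `(start, end)` tuple, as several of the listed negatives do — could look like the sketch below; `self.obj` and the assertion are assumptions, not part of the dataset:

```python
def test_get_range(self):
    # Hypothetical: assumes self.obj.get_range() -> (start, end) with start <= end.
    start, end = self.obj.get_range()
    self.assertLessEqual(start, end)
```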
Compiles network for training
def compile_network(model, optimizer): compile_network_model(model, optimizer, categorical_crossentropy)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compile(self):\n logger.info('Define network with dnnet of version : %s'\\\n % dnnet.__version__)\n if self.layers.size == 0:\n msg = 'NeuralNetwork has no layer.\\n Add layers before compiling.'\n raise DNNetRuntimeError(msg)\n\n parent = self.layers[0]\n self.add(OutputLayer())\n\n for i, layer in enumerate(self.layers, 1):\n logger.debug('Add %s layer.' % layer.get_type())\n layer.set_parent(parent)\n parent = layer\n\n logger.debug('Defined network.')", "def build_network(self, inputs, targets, training=False):\n raise NotImplementedError", "def trainNet():", "def compile(self):\n m, n = self.input_shape[1], self.input_shape[2]\n\n inp = Input(shape=self.input_shape, traces=True)\n self.add_layer(inp, \"DoG\")\n\n s1 = LIFNodes(shape=(18, m, n), traces=True)\n self.add_layer(s1, \"conv_1\")\n c1 = LIFNodes(shape=(18, m // 2, n // 2), traces=True)\n self.add_layer(c1, \"pool_1\")\n\n s2 = LIFNodes(shape=(24, m // 2, n // 2), traces=True)\n self.add_layer(s2, \"conv_2\")\n c2 = LIFNodes(shape=(24, m // 4, n // 4), traces=True)\n self.add_layer(c2, \"pool_2\")\n\n s3 = LIFNodes(shape=(32, m // 4, n // 4), traces=True)\n self.add_layer(s3, \"conv_3\")\n f = LIFNodes(shape=(32, 1), traces=True)\n self.add_layer(f, \"global_pool\")\n\n conv1 = Conv2dConnection(inp, s1, 5, padding=2, weight_decay=0.01,\n nu=0.01, update_rule=PostPre, decay=0.5)\n self.add_connection(conv1, \"DoG\", \"conv_1\")\n pool1 = MaxPool2dConnection(s1, c1, 2, 2, decay=0.5)\n self.add_connection(pool1, \"conv_1\", \"pool_1\")\n\n conv2 = Conv2dConnection(c1, s2, 3, padding=1, weight_decay=0.01,\n nu=0.01, update_rule=PostPre, decay=0.5)\n self.add_connection(conv2, \"pool_1\", \"conv_2\")\n pool2 = MaxPool2dConnection(s2, c2, 2, 2, decay=0.5)\n self.add_connection(pool2, \"conv_2\", \"pool_2\")\n\n conv3 = Conv2dConnection(c2, s3, 3, padding=1, weight_decay=0.01,\n nu=0.01, update_rule=PostPre, decay=0.5)\n self.add_connection(conv3, \"pool_2\", \"conv_3\")\n global_pool = MaxPool2dConnection(s3, f, (m // 4, n // 4), decay=0.5)\n self.add_connection(global_pool, \"conv_3\", \"global_pool\")\n\n monitor = NetworkMonitor(self, layers=[\"DoG\", \"conv_1\", \"pool_1\",\n \"conv_2\", \"pool_2\",\n \"conv_3\", \"global_pool\"],\n connections=[(\"DoG\", \"conv_1\"),\n (\"pool_1\", \"conv_2\"),\n (\"pool_2\", \"conv_3\")],\n state_vars=[\"w\", \"s\"])\n self.add_monitor(monitor, \"network_monitor\")\n\n return self", "def compile(self):\n # create both networks\n self.q_network = self.create_model()\n # self.target_q_network = self.create_model()\n\n # set loss function in both \n adam = Adam(lr=1e-4)\n self.q_network.compile(loss=mean_huber_loss, optimizer=adam) \n # self.target_q_network.compile(loss=mean_huber_loss, optimizer=adam)\n \n # set the same weights for both initially\n # self.target_q_network.set_weights(self.q_network.get_weights())\n \n print self.q_network.summary()", "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n self._net_outputs = self.online_convnet(self.state_ph, training=True)\n self._q_argmax = tf.argmax(self._net_outputs.q_values, axis=1)[0]\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n training=True)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)", "def __compile_model(self,\n network,\n loss=lasagne.objectives.categorical_crossentropy,\n learning_rate=0.001,\n momentum=0.1):\n print('Compiling model...')\n 
self.report['network'] = inspect.getsource(network)\n self.report['loss_function'] = loss.__name__\n self.report['learning_rate'] = learning_rate\n self.report['learning_momentum'] = momentum\n start_time = time.time()\n self.__input_var = T.tensor4('inputs')\n self.__target_var = T.ivector('targets')\n self.__network = network(self.__input_var)\n self.__loss = lambda t: loss(get_output(self.__network,\n deterministic=t),\n self.__target_var).mean()\n self.__optimizer = lasagne.updates.nesterov_momentum(\n self.__loss(False), # enable dropout during training\n get_all_params(self.__network, trainable=True),\n learning_rate=learning_rate,\n momentum=momentum)\n predictions = T.argmax(\n get_output(self.__network, deterministic=True),\n axis=1)\n # number of correct predictions\n n_correct = T.sum(T.eq(predictions, self.__target_var))\n # number of relevant images in the sample\n n_relevant = T.sum(self.__target_var)\n # number of images predicted to be relevant\n n_selected = T.sum(predictions)\n # number of correct predictions of relevance\n n_correct_relevant = T.sum(predictions & self.__target_var)\n statistics = [n_correct, n_selected, n_relevant, n_correct_relevant]\n self.__train_fn = theano.function(\n [self.__input_var, self.__target_var],\n [self.__loss(False)] + statistics,\n updates=self.__optimizer)\n self.__val_fn = theano.function(\n [self.__input_var, self.__target_var],\n [self.__loss(True)] + statistics)\n elapsed_time = time.time() - start_time\n self.report['time_to_compile'] = elapsed_time", "def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 4, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def build_network(self, dimList, actType=\"Tanh\", verbose=True):\n self.Q_network = Model(dimList, actType, verbose=verbose)\n self.target_network = Model(dimList, actType)\n\n if self.device == torch.device(\"cuda\"):\n self.Q_network.cuda()\n self.target_network.cuda()\n\n self.build_optimizer()", "def train():\n if os.path.isfile(load_model):\n all_weights = np.load(load_model) \n else:\n print(\"Model file does not exist. 
Exiting....\")\n return\n\n print(\"Build up the network\")\n\n\n # Two different types of input\n image_input_var = T.tensor4('original_inputs')\n rotated_image_input_var = T.tensor4('rotated_image_input')\n target_var = T.ivector('targets')\n\n # Build teacher network\n cnn_model, cnn_mid_output, weight_decay_penalty = cifar10_merge.build_cnn(image_input_var)\n\n # Get the intermediate layer of the teacher network\n original_model_mid_output = lasagne.layers.get_output(cnn_mid_output, image_input_var, deterministic = True)\n\n # Get the softmax output of the teacher network.\n\n original_model_output_val = lasagne.layers.get_output(cnn_model, image_input_var, deterministic = True)\n \n # Build the student network\n \n rotated_cnn_model, rotated_model_mid, rotated_weight_penalty = \\\n cifar10_merge.build_cnn(rotated_image_input_var)\n \n # Get the softmax output of the student network. Since it need to be trained on, deterministic = False\n rotated_model_mid_output = lasagne.layers.get_output(rotated_model_mid, rotated_image_input_var, deterministic = False)\n\n # Get the model output of the studenet network.\n rotated_model_output = lasagne.layers.get_output(rotated_cnn_model, rotated_image_input_var, deterministic = True)\n\n # Set the weights for the teacher network\n lasagne.layers.set_all_param_values(cnn_model, all_weights)\n\n # Get the initialized weights below the intermediate layer\n rotated_net_weights_below_mid = lasagne.layers.get_all_param_values(rotated_model_mid)\n\n # Get the parameter of the student network that needs to be trained.\n rotated_net_training_param = lasagne.layers.get_all_params(rotated_model_mid, trainable=True)\n\n # Set the weights for the student network\n lasagne.layers.set_all_param_values(rotated_cnn_model, all_weights)\n\n lasagne.layers.set_all_param_values(rotated_model_mid,\n rotated_net_weights_below_mid)\n \n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(rotated_model_mid_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # L = T.mean(lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output), axis = 1)\n L = lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output).mean()\n # cost = T.mean(L)\n\n # cost = cross_entropy_loss_mean\n cost = L\n\n # updates = lasagne.updates.adagrad(cost, rotated_net_training_param, learning_rate=0.1)\n updates = lasagne.updates.adam(cost, rotated_net_training_param, learning_rate=0.001)\n\n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(model_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # loss = cross_entropy_loss_mean + weight_decay_penalty\n\n\n train_acc = T.mean(T.eq(T.argmax(rotated_model_output, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n original_model_acc = T.mean(T.eq(T.argmax(original_model_output_val, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n train_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_mid_output, rotated_model_mid_output, train_acc], updates = updates)\n\n # Return the accuracy for teacher network and student network, respectively\n val_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_acc, train_acc])\n\n if os.path.isfile(os.path.join(train_dir, 'latest_model.txt')):\n weight_file = \"\"\n with open(os.path.join(train_dir, 'latest_model.txt'), 'r') as checkpoint_file:\n 
weight_file = checkpoint_file.read().replace('\\n', '')\n print(\"Loading from: \", weight_file)\n model_weights = np.load(weight_file)\n lasagne.layers.set_all_param_values(rotated_cnn_model, model_weights)\n\n # Get images and labels for CIFAR-10.\n\n cifar10_data = cifar10_merge_input.load_cifar10()\n\n bkgimg = np.array([np.mean(cifar10_data.train.images[cifar10_data.train.labels==i], axis = 0) for i in range(10)])\n for epoch in xrange(max_steps):\n start_time = time.time()\n\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n total_t_net_for_original = 0\n total_s_net_for_original = 0\n total_t_net_for_rotation = 0\n total_s_net_for_rotation = 0\n total_count = 0\n\n print(\"Start Evaluating\")\n\n while(rotated_test_image is not None):\n t_net_for_original, s_net_for_original = val_fn(original_test_image, original_test_image, test_label)\n total_t_net_for_original += t_net_for_original * original_test_image.shape[0]\n total_s_net_for_original += s_net_for_original * original_test_image.shape[0]\n\n t_net_for_rotated, s_net_for_rotated = val_fn(rotated_test_image, rotated_test_image, test_label)\n total_t_net_for_rotation += t_net_for_rotated * rotated_test_image.shape[0]\n total_s_net_for_rotation += s_net_for_rotated * rotated_test_image.shape[0]\n\n total_count += rotated_test_image.shape[0]\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n \n print(\"Student Network Accuracy on Original Image: %.4f\" % (float(total_s_net_for_original / total_count)))\n print(\"Teacher Network Accuracy on Original Image: %.4f\" % (float(total_t_net_for_original / total_count)))\n\n print(\"Student Network Accuracy on Rotated Image: %.4f\" % (float(total_s_net_for_rotation / total_count)))\n print(\"Teacher Network Accuracy on Rotated Image: %.4f\" % (float(total_t_net_for_rotation / total_count)))\n\n\n print(\"Start Training...\")\n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n # rotated_train_image = random_rotated_image(original_train_image[::-1])\n rotated_train_image = random_rotated_image(original_train_image)\n\n end_time_1 = time.time() - start_time\n step = 1\n loss_total = 0\n original_start = start\n\n while(start != 0):\n #loss_value, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n \n ori_mid, rot_mid, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n # ori_mid, rot_mid, train_acc = train_fn(original_train_image, np.array(np.random.rand(batch_size, 3, 32, 32), dtype = np.float32), train_label)\n step += 1\n if start == original_start:\n print(ori_mid[0])\n print(rot_mid[0])\n print(train_label)\n \n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n rotated_train_image = random_rotated_image(original_train_image)\n # assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n # loss_total += loss_value\n if 1:\n if epoch % 100 == 0 or (step + 1) == max_steps:\n checkpoint_path = os.path.join(train_dir, 'model_step%d.npy' % epoch)\n weightsOfParams = lasagne.layers.get_all_param_values(rotated_cnn_model)\n np.save(checkpoint_path, weightsOfParams)\n latest_model_path = os.path.join(train_dir, 
'latest_model.txt')\n try:\n os.remove(latest_model_path)\n except OSError:\n pass\n latest_model_file = open(latest_model_path, \"w\")\n latest_model_file.write(checkpoint_path)\n latest_model_file.close()\n\n # print(\"Epoch Stop, loss_averge\", float(loss_total) / float(step))\n duration = time.time() - start_time\n print(\"Duration is\", duration)", "def _build_networks(self):\n # Calling online_convnet will generate a new graph as defined in\n # self._get_network_template using whatever input is passed, but will always\n # share the same weights.\n self.online_convnet = tf.make_template('Online', self._network_template)\n self.target_convnet = tf.make_template('Target', self._network_template)\n self._net_outputs = self.online_convnet(self.state_ph)\n\n self._replay_net_outputs = self.online_convnet(self._replay.states)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)\n\n if self.acting_policy == 'hyperbolic':\n self._q_argmax = tf.argmax(self._net_outputs.hyp_q_value, axis=1)[0]\n elif self.acting_policy == 'largest_gamma':\n self._q_argmax = tf.argmax(self._net_outputs.q_values[-1], axis=1)[0]\n else:\n raise NotImplementedError", "def create_network(outfname_train, outfname_deploy, N_conv_layers=3, N_fully_connected_layers=3, batch_size_train=100,batch_size_test=100, source_train='datatrain', source_test='datatest', num_output_conv=32, kernel_size=3, weight_std_conv=0.01, activation='relu', num_output_fully_connected=64, weight_std_fully_connected=0.01, do_batchnorm=1, do_last_batchnorm=1, scale=1,shift=0, weight_std_affine=0, use_softmax=0, num_classes=3, input_dim_1=1,input_dim_2=3, input_dim_3=32, input_dim_4=32, use_lowrank=1, T_dimension=None, softmax_weight=1, lowrank_weight=1, data_type='lmdb'):\n\n if T_dimension==None:\n T_dimension = num_classes\n \n train_txt = \"\"\n deploy_txt = \"\"\n\n train_txt += data_layer(name='data_layer', source_train=source_train, batch_size_train=batch_size_train, source_test=source_test, batch_size_test=batch_size_test, data_type=data_type)\n\n deploy_txt += deploy_data_layer(name='data_layer', input_dim_1=input_dim_1, input_dim_2=input_dim_2, input_dim_3=input_dim_3, input_dim_4=input_dim_4)\n\n last_name = 'data'\n\n ####### CONVOLUTIONAL LAYERS\n for i in range(N_conv_layers):\n conv_name = 'conv%i' % (i+1)\n top = conv_name\n\n conv_txt = convolution_layer(conv_name, last_name, num_output=num_output_conv, kernel_size=kernel_size, weight_std=weight_std_conv)\n\n train_txt += conv_txt\n deploy_txt += conv_txt\n \n if activation == 'pool':\n pool_name = 'pool%i' % (i+1)\n activation_txt = pooling_layer(pool_name, conv_name)\n last_name = pool_name\n elif activation == 'relu':\n relu_name = 'relu%i' % (i+1)\n activation_txt = relu_layer(relu_name, conv_name)\n last_name = conv_name\n else:\n raise Exception('Unknown activation')\n \n\n train_txt += activation_txt\n deploy_txt += activation_txt\n\n \n\n ####### FULLY CONNECTED LAYERS\n for i in range(N_fully_connected_layers):\n fully_connected_name = 'ip%i' % (i+1)\n\n fully_connected_txt = fully_connected_layer(fully_connected_name, last_name, num_output=num_output_fully_connected, weight_std=weight_std_fully_connected)\n\n relu_name = 'iprelu%i' % (i+1)\n relu_txt = relu_layer(relu_name, fully_connected_name)\n\n batchnorm_name = 'ipbn%i' % (i+1)\n\n if do_batchnorm and i<N_fully_connected_layers-1:\n batchnorm_txt_train = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=False, phase='TRAIN', deploy=False)\n batchnorm_txt_test 
= batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=True, phase='TEST', deploy=False)\n \n batchnorm_txt_deploy = batchnorm_layer(batchnorm_name, fully_connected_name, deploy=True)\n scale_txt = ''\n \n last_name = batchnorm_name\n \n elif do_last_batchnorm:\n batchnorm_txt_train = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=False, phase='TRAIN', deploy=False)\n batchnorm_txt_test = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=True, phase='TEST', deploy=False)\n \n batchnorm_txt_deploy = batchnorm_layer(batchnorm_name, fully_connected_name, deploy=True)\n scale_name = 'ipbnscaled%i' % (i+1)\n\n scale_txt = scale_layer(scale_name, batchnorm_name, scale=scale,shift=shift)\n \n last_name = scale_name\n else:\n batchnorm_txt_train = ''\n batchnorm_txt_test = ''\n batchnorm_txt_deploy = ''\n last_name = fully_connected_name\n scale_txt = ''\n \n train_txt += fully_connected_txt + relu_txt + batchnorm_txt_train + batchnorm_txt_test + scale_txt\n deploy_txt += fully_connected_txt + relu_txt + batchnorm_txt_deploy + scale_txt\n \n\n\n\n\n # add affine layer on top of funnel layer \n affine_name = 'affine' # (matrix T)\n affine_txt = fully_connected_layer(affine_name, last_name, num_output=T_dimension, weight_std=weight_std_affine)\n\n train_txt += affine_txt\n deploy_txt += affine_txt\n \n # apply lowrank loss to output of 'affine' layer [conv - fully_connected -\n # funnel - affine - lowrank] the lowrank output is located in affine. The\n # 'funnel' layer is used to allow softmax to separate between classes before\n # LRT\n if use_lowrank:\n lowrank_txt = lowrank_layer('lowrank_loss', affine_name, loss_weight=lowrank_weight)\n train_txt += lowrank_txt\n\n if use_softmax:\n # apply softmax loss to output of funnel layer [conv - fully_connected - funnel - softmax]\n # add one affine layer to reduce from num_output_fully_connected to num_classes\n\n # apr 4. 
trying on top of fully connected layer\n funnel_name = 'funnel'\n funnel_txt = fully_connected_layer(funnel_name, last_name, num_output=num_classes, weight_std=weight_std_fully_connected)\n\n train_txt += funnel_txt\n deploy_txt += funnel_txt\n\n softmax_txt = softmax_layer('softmax_loss', funnel_name, loss_weight=softmax_weight)\n train_txt += softmax_txt\n\n write_to_file(outfname_train, train_txt)\n write_to_file(outfname_deploy, deploy_txt)\n\n \n return train_txt, deploy_txt", "def __call__(self, inputs, training):\n\n\t\treturn self._build_network(inputs, training)", "def compile_model(network, nb_classes, input_shape):\r\n nb_layers = network['nb_layers']\r\n layer = network['layer']\r\n nb_neurons = network['nb_neurons']\r\n activation = network['activation']\r\n optimizer = network['optimizer']\r\n\r\n model = Sequential()\r\n\r\n for i in range(nb_layers):\r\n if i == 0:\r\n model.add(Conv2D(nb_neurons, activation=activation, input_shape=input_shape))\r\n else:\r\n model.add(layer(nb_neurons, activation=activation))\r\n \r\n model.add(Dropout(0.2))\r\n\r\n model.add(Dense(nb_classes, activation='softmax'))\r\n\r\n model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\r\n\r\n return model", "def compile_network_model(model, optimizer, loss_func):\n model.compile(optimizer=optimizer,\n loss=loss_func, metrics=['accuracy'])", "def build_network(config):\n network_cfg = config['network']\n\n network_name = network_cfg['name']\n\n network_params = list(inspect.signature(eval(network_name).__init__).parameters)[1:]\n\n args = [f'{param}={network_cfg[param]}' for param in network_params if network_cfg.get(param)]\n\n try:\n model = eval('{}({})'.format(network_name, ', '.join(args)))\n except:\n raise ValueError('Can\\'t load network.')\n\n return model.to(device='cuda')", "def compile(self, optimizer, loss_func):\n self.q_network.compile(optimizer=optimizer, loss=loss_func)\n self.target_network.compile(optimizer=optimizer, loss=loss_func)", "def _compile_networks(self):\n\n _header_ = self._header_ + '_compile_networks(): '\n\n if self.verbose:\n print(_header_ + 'Compiling all networks ...')\n\n networks = []\n\n all_nidx = set(self.nidx2lidx.keys())\n\n while all_nidx:\n\n nidx0 = [all_nidx.pop()]\n network = set(nidx0)\n\n while nidx0 and all_nidx:\n\n nidx = set()\n\n for l in nidx0:\n lidx = self.nidx2lidx[l]\n for n in lidx:\n nidx |= self.lidx2nidx[n]\n\n nidx -= network\n network |= nidx\n all_nidx -= nidx\n nidx0 = nidx.copy()\n\n networks.append(network)\n\n if self.verbose:\n print(_header_ + 'Found %d networks' % len(networks))\n for i, network in enumerate(networks):\n print(' Network %d - %s' % (i, ','.join([str(j) for j in network])))\n\n return networks", "def create_neural_network():\n network_input = keras.layers.Input((NETWORK_INPUT_SIZE,))\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_input)\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_layer)\n network_output = keras.layers.Dense(NETWORK_OUTPUT_SIZE, kernel_initializer='random_uniform', activation='linear')(network_layer)\n network = keras.models.Model(inputs=network_input, outputs=network_output)\n network.compile(loss=\"mse\", optimizer=\"Adam\")\n return network", "def compile(self):\n self.global_steps = tf.Variable(0, trainable=False, name='global_step')\n #self.training_phase = tf.placeholder(tf.bool, name='is_training')\n self.training_phase = 
tf.placeholder_with_default(False, shape=None, name='train')\n self.learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n self.build_graph()\n self.build_loss()\n self.build_summary()\n self.build_saver()\n self.get_params()\n\n# self.export_freeze_model()\n self.compiled = True\n\n return self", "def build_net(self, nodes, links, output_network, from_geometry=True, debug=False):\n _nodes = nodes.copy()\n _links = links.copy()\n\n if from_geometry:\n _nodes[['x', 'y']] = _nodes['geometry'].apply(lambda g: pd.Series([g.coords[0][0], g.coords[0][1]]))\n _nodes.drop(['geometry'], axis=1, errors='ignore', inplace=True)\n\n pandasdbf.write_dbf(_nodes, self.environment + r'\\temp_nodes_to_dbf.dbf', pre_process=False)\n pandasdbf.write_dbf(_links, self.environment + r'\\temp_links_to_dbf.dbf', pre_process=False)\n\n script_text = r\"\"\"\n\n RUN PGM=NETWORK PRNFILE=\"%s\\temp_net.prn\"\n FILEO NETO = \"%s\"\n FILEI LINKI[1] = \"%s\"\n FILEI NODEI[1] = \"%s\"\n ENDRUN\n\n \"\"\" % (\n self.environment,\n output_network,\n self.environment + r'\\temp_links_to_dbf.dbf',\n self.environment + r'\\temp_nodes_to_dbf.dbf'\n )\n\n # creating a cube script\n script = open(self.environment + r'\\build_net.s', 'w', encoding='latin')\n script.write(script_text)\n script.close()\n\n # runs the script with voyager.exe\n options = \"\"\"/Start /CloseWhenDone /Minimize /NoSplash\"\"\" if not debug else \"\"\n cmd = 'voyager.exe \"' + self.environment + r'\\build_net.s\" ' + options\n print(cmd)\n os.system(cmd)", "def _build_network(self):\n pass", "def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)", "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=500,num_hidden_units_2=300,num_hidden_units_3=200,num_code_units=50):\n \n print(\"Making the model...\")\n network = model((None,200),200,num_hidden_units,num_hidden_units_2,num_hidden_units_3,num_code_units)\n print(\"Done!\")\n\n\n for tetrode_number in [10]:\n\n print(\"Loading the model parameters from {}\".format(MODEL_FILENAME+str(tetrode_number)))\n f = open(MODEL_FILENAME+str(tetrode_number),'r')\n all_param_values = pickle.load(f)\n f.close()\n # print(all_param_values)\n lasagne.layers.set_all_param_values(network, all_param_values)\n\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(dataset['data'].shape)\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n for i in range(NUM_EPOCHS):\n costs = []\n\n for start, end in zip(range(0, dataset['data'].shape[0], BATCH_SIZE), range(BATCH_SIZE, dataset['data'].shape[0], BATCH_SIZE)):\n cost = training['train'](dataset['data'][start:end],dataset['data'][start:end])\n costs.append(cost)\n\n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n # accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Training cost: {}\".format(i+1,meanTrainCost))\n # NUM_POINTS = 5000\n codes = training['code'](dataset['data'][0:NUM_POINTS])\n\n \n\n # y = set(list(d.predict(dataset['data'][0:NUM_POINTS])))\n\n # print(y)\n\n # activations_1 = training['activations_1'](dataset['data'][0:NUM_POINTS])\n # activations_2 = training['activations_2'](dataset['data'][0:NUM_POINTS])\n # codes = training['code'](dataset['data'][0:NUM_POINTS])\n # # 
print(codes.shape)\n # # codes_2d = bh_sne(codes)\n\n # for k in range(3):\n # print(k)\n\n # codes_2d = bh_sne(np.asarray(codes[:(k+1)*12000],dtype=np.float64))\n\n # # d = DPGMM(n_components=10, covariance_type='full')\n # d = DPGMM(n_components=15,n_iter=100)\n\n # d.fit(codes_2d[:(k+1)*12000])\n\n # hdp = d.predict_proba(codes_2d[:(k+1)*12000])\n\n # hdp_1d = [np.argmax(z) for z in hdp]\n\n # print(set(list(hdp_1d)))\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/hdp_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # m = TSNE(n_components=2, random_state=0)\n \n # # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # # activations_1_2d = bh_sne(activations_1)\n # # activations_2_2d = bh_sne(activations_2)\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS][:(k+1)*12000],alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # This is where the code for the video will go\n # ##############################################################################\n # # Compute DBSCAN\n # db = None\n # core_samples_mask = None\n # labels = None\n\n # num_labels = 0\n # eps=1.0\n # while(num_labels < 10):\n # db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n # core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n # core_samples_mask[db.core_sample_indices_] = True\n # labels = db.labels_\n # num_labels = np.amax(labels)\n # eps -= 0.1\n\n # print(\"Num learned labels: {}\".format(num_labels))\n\n # plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS][:(k+1)*12000],lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # # pickle.dump(labels, f)\n # # f.close()\n\n codes_2d = bh_sne(np.asarray(codes,dtype=np.float64),theta=0.4)\n\n # d = DPGMM(n_components=10, covariance_type='full')\n d = DPGMM(n_components=15,n_iter=1000)\n\n d.fit(codes_2d)\n\n hdp = d.predict_proba(codes_2d)\n\n hdp_1d = [np.argmax(z) for z in hdp]\n\n print(set(list(hdp_1d)))\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/hdp_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # m = TSNE(n_components=2, random_state=0)\n \n # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # activations_1_2d = bh_sne(activations_1)\n # activations_2_2d = bh_sne(activations_2)\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS],alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # This is where the code for the video will go\n ##############################################################################\n # Compute DBSCAN\n db = None\n core_samples_mask = None\n labels = None\n\n num_labels = 0\n eps=1.0\n while(num_labels < 10):\n db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n num_labels = np.amax(labels)\n eps -= 0.1\n\n print(\"Num learned labels: {}\".format(num_labels))\n\n plt.title('Estimated number of clusters: 
{}'.format(np.amax(labels)))\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS],lw=0)\n plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # pickle.dump(labels, f)\n # f.close()", "def compile(self):\n self.train = self._make_train()\n self.loss_test = self._make_loss_test()\n self.predict = self._make_predict()", "def main():\n X_train, Y_train, y_train = load_batch(\"data_batch_1\")\n X_test, Y_test, y_test = load_batch(\"test_batch\")\n X_val, Y_val, y_val = load_batch((\"data_batch_2\"))\n\n X_train, X_train_mean, X_train_std = normalize(X_train)\n X_test = normalize_mean_std(X_test, X_train_mean, X_train_std)\n X_val = normalize_mean_std(X_val, X_train_mean, X_train_std)\n\n data = {\n \"X_train\": X_train,\n \"Y_train\": Y_train,\n \"y_train\": y_train,\n \"X_test\": X_test,\n \"Y_test\": Y_test,\n \"y_test\": y_test,\n \"X_val\": X_val,\n \"Y_val\": Y_val,\n \"y_val\": y_val,\n }\n\n network = Network(data)", "def build_net(self, role, chk=None, chk_optimizer=None):\n\t\tlog.info('Building net')", "def TrainNetwork(self):\n\n self.logger.info('Train Network')\n self.netWork.TrainGenerator()\n\n # # train NetworkLSTM\n self.logger.info('Train NetworkLSTM')\n self.netWork.TrainLSTM()", "def compile(self):\n\n target_values = T.fmatrix('target_values')\n\n train_output = lasagne.layers.get_output(self.l_out)\n\n pred_output = lasagne.layers.get_output(self.l_out, deterministic=True)\n\n reg = lasagne.regularization.regularize_layer_params(self.l_out, lasagne.regularization.l2)\n\n train_cost = self.cost_function(train_output,target_values).mean() + reg * 0.01\n \n real_cost = self.cost_function(pred_output, target_values).mean() + reg * 0.01\n\n # Retrieve all parameters from the network\n all_params = lasagne.layers.get_all_params(self.l_out,trainable=True)\n\n self.logger.info(\"Computing updates...\")\n updates = self.update_function(train_cost, all_params, LEARNING_RATE)\n\n \n self.logger.info(\"Compiling functions ...\")\n\n # compile Theano GPU functions for training and computing train_cost\n self._train = theano.function([self.in_layer.input_var, self.i_mask.input_var, target_values], train_cost, \n updates=updates, allow_input_downcast=True)\n\n self._compute_cost = theano.function([self.in_layer.input_var, self.i_mask.input_var, target_values], real_cost, \n allow_input_downcast=True)\n\n self.label = theano.function([self.in_layer.input_var, self.i_mask.input_var],pred_output,allow_input_downcast=True)\n \n self._compiled = True", "def macro_network():\n # fmt: off\n tpm = np.array([\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 1.0, 1.0],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 1.0, 1.0],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 0.3, 0.3],\n [0.3, 0.3, 1.0, 1.0],\n [1.0, 1.0, 0.3, 0.3],\n [1.0, 1.0, 0.3, 0.3],\n [1.0, 1.0, 0.3, 0.3],\n [1.0, 1.0, 1.0, 1.0],\n ])\n # fmt: on\n return Network(tpm, node_labels=LABELS[:tpm.shape[1]])" ]
[ "0.7539172", "0.7513481", "0.7444233", "0.72000104", "0.719475", "0.71787214", "0.7108279", "0.70971787", "0.70245636", "0.699772", "0.699507", "0.68741465", "0.6854814", "0.68306136", "0.68187803", "0.67940164", "0.67512035", "0.6746178", "0.67435986", "0.6729193", "0.6706089", "0.6696578", "0.6694789", "0.66884124", "0.6685887", "0.66828984", "0.66625994", "0.6659912", "0.6649545", "0.66384256" ]
0.78212905
0
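The positive document above delegates to a `compile_network_model` helper, and one of its negatives shows that helper wrapping Keras `model.compile(...)` with a fixed accuracy metric. A minimal end-to-end sketch under that assumption (the imports and the example optimizer are illustrative; only the two function bodies are taken from the row itself):

```python
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.optimizers import Adam

def compile_network_model(model, optimizer, loss_func):
    # Helper as shown in the negatives: compile with a fixed accuracy metric.
    model.compile(optimizer=optimizer, loss=loss_func, metrics=['accuracy'])

def compile_network(model, optimizer):
    # Positive document: always trains with categorical cross-entropy.
    compile_network_model(model, optimizer, categorical_crossentropy)

# Usage (model construction is assumed): compile_network(model, Adam(learning_rate=1e-3))
```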
Randomly rotate the point clouds to augment the dataset; rotation is per shape, along the up direction
def rotate_point_cloud(batch_data):
    rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in np.arange(batch_data.shape[0]):
        rotation_angle = np.random.uniform() * 2 * np.pi
        cosval = np.cos(rotation_angle)
        sinval = np.sin(rotation_angle)
        rotation_matrix = np.array([[cosval, 0, sinval],
                                    [0, 1, 0],
                                    [-sinval, 0, cosval]])
        shape_pc = batch_data[k, ...]
        rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
    return rotated_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotate_point_cloud(data):\n rotated_data = np.zeros(data.shape, dtype=np.float32)\n for k in xrange(data.shape[0]):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n shape_pc = data[k, ...]\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data", "def rotate_point_cloud(batch_data):\r\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\r\n for k in range(batch_data.shape[0]):\r\n rotation_angle = np.random.uniform() * 2 * np.pi\r\n cosval = np.cos(rotation_angle)\r\n sinval = np.sin(rotation_angle)\r\n rotation_matrix = np.array([[cosval, 0, sinval],\r\n [0, 1, 0],\r\n [-sinval, 0, cosval]])\r\n shape_pc = batch_data[k, ...]\r\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\r\n return rotated_data", "def rotate_point_cloud(batch_data):\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data", "def rotate_point_cloud(batch_data):\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data", "def rotate_point_cloud(batch_data):\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data", "def random_rotate(self):\r\n rotation = rand.randrange(0, 4, 1) # 0, 1, 2, 3\r\n flip = rand.randrange(0, 2, 1) # 0, 1\r\n new_seed = copy.deepcopy(self)\r\n # rotate by 90 degrees * rotation (0, 90, 180 270)\r\n new_seed.cells = np.rot90(new_seed.cells, rotation) \r\n if (flip == 1):\r\n # flip upside down\r\n new_seed.cells = np.flipud(new_seed.cells)\r\n new_seed.xspan = new_seed.cells.shape[0]\r\n new_seed.yspan = new_seed.cells.shape[1]\r\n return new_seed", "def rotate_point_cloud_z(batch_data):\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, sinval, 0],\n [-sinval, cosval, 0],\n [0, 0, 1]])\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] 
= np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data", "def rotate_point_cloud_z(batch_data):\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, sinval, 0],\n [-sinval, cosval, 0],\n [0, 0, 1]])\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data", "def rotate_point_cloud_by_angle(batch_data, rotation_angle):\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n #rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n shape_pc = batch_data[k,:,0:3]\n rotated_data[k,:,0:3] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data", "def RotatePointCloud(batch_data):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, sinval, 0],\n [-sinval, cosval, 0],\n [0, 0, 1],])\n return np.dot(batch_data, rotation_matrix)", "def rotate_point_cloud_in_plane(batch_data):\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n #rotated_label = np.zeros(batch_label.shape, dtype=np.float32)\n #rotation_angle = np.random.uniform() * 2 * np.pi\n #cosval = np.cos(rotation_angle)\n #sinval = np.sin(rotation_angle)\n for k in range(batch_data.shape[0]):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, -sinval, 0],\n [sinval, cosval, 0],\n [0, 0, 1]])\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data", "def rotate_point_cloud_by_angle(batch_data, rotation_angle):\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n #rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data", "def rotate_point_cloud_by_angle(batch_data, rotation_angle):\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n #rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data", "def rotate_point_cloud_by_angle(batch_data, rotation_angle):\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n #rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] 
= np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n return rotated_data", "def rotate_points(self, pointcloud_model, DEGREES = 0, query_points = False, use_rotation_tensor = False, save_rotation_tensor = False):\n ## https://en.wikipedia.org/wiki/Rotation_matrix\n if(use_rotation_tensor != True):\n angle_range = DEGREES\n x_angle = radians(random.random() * angle_range)\n y_angle = radians(random.random() * angle_range)\n z_angle = radians(random.random() * angle_range)\n\n rot_x = torch.Tensor([[1,0,0,0],[0, cos(x_angle),-sin(x_angle),0], [0, sin(x_angle), cos(x_angle),0], [0,0,0,1]])\n rot_y = torch.Tensor([[cos(y_angle),0,sin(y_angle), 0],[0, 1, 0,0], [-sin(y_angle),0,cos(y_angle),0], [0,0,0,1]])\n rot_z = torch.Tensor([[cos(z_angle), -sin(z_angle),0,0],[sin(z_angle), cos(z_angle),0,0],[0,0,1,0], [0,0,0,1]])\n rotation_matrix = torch.mm(rot_y, rot_z)\n rotation_matrix = torch.mm(rot_x,rotation_matrix) \n\n batch_size, point_cloud_size, _ = pointcloud_model.shape\n pointcloud_model = torch.cat([pointcloud_model, torch.ones(batch_size, point_cloud_size,1).to(self.device)], dim = 2)\n \n \n pointcloud_model_rotated = torch.matmul(pointcloud_model, rotation_matrix.to(self.device))\n self.rotation_matrix = rotation_matrix\n \n if(save_rotation_tensor):\n torch.save(rotation_matrix, 'rotation_matrix.pt') #used for plane prediction, change it at your will \n return pointcloud_model_rotated[:,:,0:3], (x_angle, y_angle, z_angle)\n else: \n batch_size, point_cloud_size, _ = pointcloud_model.shape\n pointcloud_model = pointcloud_model / sqrt(0.55**2 + 0.55**2 + 0.55**2)\n pointcloud_model = torch.cat([pointcloud_model, torch.ones(batch_size, point_cloud_size,1).to(self.device)], dim = 2)\n pointcloud_model_rotated =torch.matmul(pointcloud_model, self.rotation_matrix.to(self.device))\n return pointcloud_model_rotated[:,:,0:3]", "def rotate_point_cloud_by_angle(batch_data, rotation_angle):\r\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\r\n for k in range(batch_data.shape[0]):\r\n # rotation_angle = np.random.uniform() * 2 * np.pi\r\n cosval = np.cos(rotation_angle)\r\n sinval = np.sin(rotation_angle)\r\n rotation_matrix = np.array([[cosval, 0, sinval],\r\n [0, 1, 0],\r\n [-sinval, 0, cosval]])\r\n shape_pc = batch_data[k, ...]\r\n rotated_data[k, ...] 
= np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\r\n return rotated_data", "def rotate_point_cloud_by_angle_with_normal(batch_data, rotation_angle):\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n #rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]]) # rotate along y axis\n shape_pc = batch_data[k,:,0:3]\n shape_normal = batch_data[k,:,3:6]\n rotated_data[k,:,0:3] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)\n rotated_data[k,:,3:6] = np.dot(shape_normal.reshape((-1,3)), rotation_matrix)\n return rotated_data", "def rotate_point_cloud(batch_data, batch_label):\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n rotated_label = np.zeros(batch_label.shape, dtype=np.float32)\n #rotation_angle = np.random.uniform() * 2 * np.pi\n #cosval = np.cos(rotation_angle)\n #sinval = np.sin(rotation_angle)\n for k in range(batch_data.shape[0]):\n rotation_angle = np.random.uniform() * 2 * np.pi\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n shape_pc, shape_label = batch_data[k, ...], batch_label[k]\n rotated_data[k, ...], rotated_label[k] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix), np.dot(shape_label, rotation_matrix)\n return rotated_data, rotated_label", "def randomize_trajectory(self):\n self.angle = randint(-360, 360)\n self.speed = randint(1, 5)/2.5", "def random_rotation(self, img, p = 0.5):\n if self.decision(p):\n theta = random.randrange(-15, 15)\n phi = random.randrange(-15, 15)\n gamma = random.randrange(-15, 15)\n it = ImagePerspectiveTransformer(img, shape=(img.shape[0] + abs(gamma), img.shape[1]))\n roi = it.rotate_along_axis(theta=theta, phi=phi, gamma=gamma)\n # check if cut is ahve to much dark pixels, more then 20 %\n non_zeros = np.count_nonzero(roi)\n non_zeros_procent = non_zeros / roi.size\n if non_zeros_procent < 0.8:\n pass\n else:\n img = roi\n return img", "def _get_random_transform(self, img_shape, seed=None):\n img_row_axis = self.row_axis - 1\n img_col_axis = self.col_axis - 1\n\n if seed is not None:\n np.random.seed(seed)\n\n if self.rotation_range:\n theta = np.random.uniform(\n -self.rotation_range,\n self.rotation_range)\n else:\n theta = 0\n\n if self.height_shift_range:\n try: # 1-D array-like or int\n tx = np.random.choice(self.height_shift_range)\n tx *= np.random.choice([-1, 1])\n except ValueError: # floating point\n tx = np.random.uniform(-self.height_shift_range,\n self.height_shift_range)\n if np.max(self.height_shift_range) < 1:\n tx *= img_shape[img_row_axis]\n else:\n tx = 0\n\n if self.width_shift_range:\n try: # 1-D array-like or int\n ty = np.random.choice(self.width_shift_range)\n ty *= np.random.choice([-1, 1])\n except ValueError: # floating point\n ty = np.random.uniform(-self.width_shift_range,\n self.width_shift_range)\n if np.max(self.width_shift_range) < 1:\n ty *= img_shape[img_col_axis]\n else:\n ty = 0\n\n if self.shear_range:\n shear = np.random.uniform(\n -self.shear_range,\n self.shear_range)\n else:\n shear = 0\n\n if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(\n self.zoom_range[0],\n self.zoom_range[1],\n 2)\n\n flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip\n flip_vertical = (np.random.random() < 0.5) * 
self.vertical_flip\n\n channel_shift_intensity = None\n if self.channel_shift_range != 0:\n channel_shift_intensity = np.random.uniform(-self.channel_shift_range,\n self.channel_shift_range)\n\n brightness = None\n if self.brightness_range is not None:\n brightness = np.random.uniform(self.brightness_range[0],\n self.brightness_range[1])\n\n transform_parameters = {'theta': theta,\n 'tx': tx,\n 'ty': ty,\n 'shear': shear,\n 'zx': zx,\n 'zy': zy,\n 'flip_horizontal': flip_horizontal,\n 'flip_vertical': flip_vertical,\n 'channel_shift_intensity': channel_shift_intensity,\n 'brightness': brightness}\n\n return transform_parameters", "def random_rotation(image, gt, normal=False, random_state=None):\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n degree = int(random_state.normal(0, 10)) if normal else random_state.randint(-10, 10)\n rows, cols = image.shape[:2]\n\n mat = cv2.getRotationMatrix2D((cols / 2, rows / 2), degree, 1)\n result_im = cv2.warpAffine(image, mat, (cols, rows), flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_REFLECT)\n result_gt = cv2.warpAffine(gt, mat, (cols, rows), flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_REFLECT)\n\n return result_im, result_gt", "def rotate_perturbation_point_cloud(batch_data, angle_sigma=0.06, angle_clip=0.18):\n rotated_data = np.zeros(batch_data.shape, dtype=np.float32)\n for k in range(batch_data.shape[0]):\n angles = np.clip(angle_sigma*np.random.randn(3), -angle_clip, angle_clip)\n Rx = np.array([[1,0,0],\n [0,np.cos(angles[0]),-np.sin(angles[0])],\n [0,np.sin(angles[0]),np.cos(angles[0])]])\n Ry = np.array([[np.cos(angles[1]),0,np.sin(angles[1])],\n [0,1,0],\n [-np.sin(angles[1]),0,np.cos(angles[1])]])\n Rz = np.array([[np.cos(angles[2]),-np.sin(angles[2]),0],\n [np.sin(angles[2]),np.cos(angles[2]),0],\n [0,0,1]])\n R = np.dot(Rz, np.dot(Ry,Rx))\n shape_pc = batch_data[k, ...]\n rotated_data[k, ...] 
= np.dot(shape_pc.reshape((-1, 3)), R)\n return rotated_data", "def random_rotation(image ):\n random_degree = random.uniform(-25, 25)\n padding_mode = random.sample([\"constant\", \"edge\"], 1)[0]\n return rotate(image, random_degree, mode = padding_mode )", "def shuffle_points(mutated_genome,index):\n random.shuffle(mutated_genome[index][2])", "def random_rotate():\n u = np.random.uniform(size=3)\n\n # Random quaternion\n q = np.array([np.sqrt(1-u[0])*np.sin(2*np.pi*u[1]),\n np.sqrt(1-u[0])*np.cos(2*np.pi*u[1]),\n np.sqrt(u[0])*np.sin(2*np.pi*u[2]),\n np.sqrt(u[0])*np.cos(2*np.pi*u[2])])\n \n # Convert the quaternion into a rotation matrix \n rotMat = np.array([[q[0]*q[0] + q[1]*q[1] - q[2]*q[2] - q[3]*q[3],\n 2*q[1]*q[2] - 2*q[0]*q[3],\n 2*q[1]*q[3] + 2*q[0]*q[2]],\n [2*q[1]*q[2] + 2*q[0]*q[3],\n q[0]*q[0] - q[1]*q[1] + q[2]*q[2] - q[3]*q[3],\n 2*q[2]*q[3] - 2*q[0]*q[1]],\n [2*q[1]*q[3] - 2*q[0]*q[2],\n 2*q[2]*q[3] + 2*q[0]*q[1],\n q[0]*q[0] - q[1]*q[1] - q[2]*q[2] + q[3]*q[3]]])\n return rotMat", "def rotate_point_cloud_by_angle(self, data, rotation_angle):\n cosval = np.cos(rotation_angle)\n sinval = np.sin(rotation_angle)\n rotation_matrix = np.array([[cosval, 0, sinval],\n [0, 1, 0],\n [-sinval, 0, cosval]])\n rotated_data = np.dot(data, rotation_matrix)\n\n return rotated_data", "def test_d_3():\n rs = 20\n d = 3\n np.random.seed(rs)\n number_rotations = 3\n\n theta_1 = np.random.uniform(0, 2 * math.pi)\n rotation_1 = np.identity(d)\n pos_1 = np.random.randint(0, d - 1)\n pos_2 = np.random.randint(pos_1 + 1, d)\n rotation_1[pos_1, pos_1] = math.cos(theta_1)\n rotation_1[pos_1, pos_2] = - math.sin(theta_1)\n rotation_1[pos_2, pos_1] = math.sin(theta_1)\n rotation_1[pos_2, pos_2] = math.cos(theta_1)\n\n theta_2 = np.random.uniform(0, 2 * math.pi)\n rotation_2 = np.identity(d)\n pos_3 = np.random.randint(0, d - 1)\n pos_4 = np.random.randint(pos_3 + 1, d)\n rotation_2[pos_3, pos_3] = math.cos(theta_2)\n rotation_2[pos_3, pos_4] = - math.sin(theta_2)\n rotation_2[pos_4, pos_3] = math.sin(theta_2)\n rotation_2[pos_4, pos_4] = math.cos(theta_2)\n\n theta_3 = np.random.uniform(0, 2 * math.pi)\n rotation_3 = np.identity(d)\n pos_5 = np.random.randint(0, d - 1)\n pos_6 = np.random.randint(pos_5 + 1, d)\n rotation_3[pos_5, pos_5] = math.cos(theta_3)\n rotation_3[pos_5, pos_6] = - math.sin(theta_3)\n rotation_3[pos_6, pos_5] = math.sin(theta_3)\n rotation_3[pos_6, pos_6] = math.cos(theta_3)\n\n final_rotation = rotation_1 @ rotation_2 @ rotation_3\n np.random.seed(rs)\n rotation_function = (mt_obj.calculate_rotation_matrix\n (d, number_rotations))\n assert(np.all(final_rotation == rotation_function))", "def augument_data(data_dir, center, left, right, steering_angle):\n image, steering_angle = choose_random_image(data_dir, center, left, right, steering_angle)\n # Randomly flipt the image horizontally and adjust the steering angle.\n if np.random.rand() < 0.5:\n image = cv2.flip(image, 1)\n steering_angle = -steering_angle\n \n #translate the object with random distance in x and y direction and adjust the steering angle\n trans_x = np.random.uniform(0, 30)\n trans_y = np.random.uniform(0, 20)\n steering_angle += trans_x * 0.002\n trans_matrix = np.float32([[1, 0, trans_x], [0, 1, trans_y]])\n height= image.shape[0]\n width=image.shape[1]\n image = cv2.warpAffine(image, trans_matrix, (width, height))\n return image, steering_angle", "def random_rotation_matrix():\n\n x = np.random.uniform(size=3)\n theta = x[0]*2*math.pi\n phi = x[1]*2*math.pi\n z = x[2]*2\n\n r = math.sqrt(z)\n vx = 
math.sin(phi)*r\n vy = math.cos(phi)*r\n vz = math.sqrt(2.0-z)\n\n st = math.sin(theta)\n ct = math.cos(theta)\n\n sx = vx*ct-vy*st\n sy = vx*st+vy*ct\n\n return np.array([[vx*sx-ct, vx*sy-st, vx*vz],\n [vy*sx+st, vy*sy-ct, vy*vz],\n [vz*sx,vz*sy,1.0-z]])" ]
[ "0.7384044", "0.69522613", "0.6924791", "0.6924791", "0.6924791", "0.68159866", "0.6813395", "0.6813395", "0.6746108", "0.669056", "0.6641431", "0.65933377", "0.65933377", "0.65933377", "0.65219665", "0.64666146", "0.641492", "0.6368018", "0.62005866", "0.61693", "0.6102413", "0.59596556", "0.59272367", "0.5924191", "0.58644557", "0.58521456", "0.5792025", "0.5740868", "0.57052946", "0.5694572" ]
0.6964295
1
Get maximum depth of given tree by BFS
def max_depth(root): # basic case if root is None: return 0 # breadth-first traversal queue = collections.deque([root]) depth = 0 while queue: queue_size = len(queue) for i in range(queue_size): curr = queue.popleft() if curr.left is not None: queue.append(curr.left) if curr.right is not None: queue.append(curr.right) depth += 1 return depth
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _max_depth(self):\n max_depth = 0\n for node, data in self.traverse():\n max_depth = max(max_depth, data['level'])\n return max_depth", "def max_depth(node):\n if not node:\n return 0\n return max(max_depth(node.left), max_depth(node.right)) + 1", "def maxDepth(node):\n\tif node is None: \n\t\treturn 0 \n\telse: \n\t\tlDepth=maxDepth(node.left)\n\t\trDepth=maxDepth(node.right) \n\t\tif lDepth>rDepth: \n\t\t return lDepth+1\n\t\telse: \n\t\t return rDepth+1", "def get_max_depth(clf):\n tree =clf.tree_\n def get_node_depths_(current_node, current_depth, l, r, depths):\n depths += [current_depth]\n if l[current_node] != -1 and r[current_node] != -1:\n get_node_depths_(l[current_node], current_depth + 1, l, r, depths)\n get_node_depths_(r[current_node], current_depth + 1, l, r, depths)\n\n depths = []\n get_node_depths_(0, 0, tree.children_left, tree.children_right, depths) \n return max(depths)", "def get_max_depth_node(nodes):\n curr = nodes[0]\n for i in range(0, len(nodes)):\n if nodes[i].depth > curr.depth:\n curr = nodes[i]\n return curr", "def max_tree_depth(self):\n\n depths = np.array([leaf.tree_depth for leaf in self.leaves])\n\n return depths.max()", "def test_MaxDepth_SimpleTree(self):\n\n root = TreeNode(0)\n root.addLeft(1)\n root.addRight(5)\n root.left.addLeft(2)\n root.left.addRight(3)\n root.left.right.addRight(4)\n root.right.addRight(6)\n\n self.assertEqual(findMaxDepthDFS(root),3)", "def max_depth(self) -> int:\n return 0", "def get_max_depth(clade):\n depths = clade.depths()\n if not max(depths.values()):\n depths = clade.depths(unit_branch_lengths=True)\n return max(depths.values()) * tree_depth / actual_tree_depth", "def bfs(graph, root, max_depth):\n ###TODO\n pass", "def max_depth(self):\n if len(self.children) == 0:\n return 1\n else:\n child_depths = [c.max_depth() for c in self.children]\n return 1 + max(child_depths)", "def find_depth_tree(root):\n if root is not None:\n max_depth = 0\n if root.branches is None:\n return 1\n else:\n for value in root.branches.values():\n max_depth = max(max_depth, DecisionTree.find_depth_tree(value))\n return 1 + max_depth\n else:\n return 1", "def minDepth(self, root: TreeNode) -> int:\n return self.bfs(root)", "def get_max_depth(self):\n return self.MAX_DEPTH", "def bfs(self, root: TreeNode) -> int:\n if not root:\n return 0\n queue = deque([(root, 1)])\n while queue:\n node, level = queue.popleft()\n if not node.left and not node.right:\n return level\n if node.left:\n queue.append((node.left, level + 1))\n if node.right:\n queue.append((node.right, level + 1))\n return -1", "def depth(self, d=0):\n d1 = 0\n d2 = 0\n if self.leftChild:\n d1 = max(self.leftChild.depth(d + 1), d)\n if self.rightChild:\n d2 = max(self.rightChild.depth(d + 1), d)\n return max(d1, d2, d)", "def max_depth(self) -> int:\n return pulumi.get(self, \"max_depth\")", "def depth(self):\n\t\tdef helper(tree, d):\n\t\t\tif tree.isLeaf():\n\t\t\t\treturn d\n\t\t\telse:\n\t\t\t\td_left=helper(tree.left, d+1) if tree.hasLeftChild() else 0\n\t\t\t\td_right=helper(tree.right, d+1) if tree.hasRightChild() else 0\n\t\t\t\treturn max(d_left, d_right)\n\n\t\treturn helper(self.root, 1) if not self.isEmpty() else 0", "def get_depth(self, current, n):\n if current is not None:\n return max(self.get_depth(current.left, n + 1), self.get_depth(current.right, n + 1))\n else:\n return n", "def height(root:Node) -> int:\n current = root.left\n depth = 0\n maxdepth = [0]\n #track the value and whether it has a branchpoint or not (bool)\n seen = dict()\n\n #do the left 
side first, then the right\n\n while current is not None:\n if current.val not in seen:\n if (current.left is not None) and (current.right is not None):\n seen.update({current.val:True})\n else:\n seen.update({current.val:False})\n depth +=1\n maxdepth.append(depth)\n if current.left is not None:\n current = current.left\n elif current.right is not None:\n current = current.right\n else:\n current = None\n\n print(' maxdepth left so far is {}'.format(maxdepth))\n\n current = root.right\n depth = 0\n\n while current is not None:\n if current.val not in seen:\n if (current.left is not None) and (current.right is not None):\n seen.update({current.val: True})\n else:\n seen.update({current.val: False})\n depth +=1\n maxdepth.append(depth)\n if current.right is not None:\n current = current.right\n elif current.left is not None:\n current = current.left\n else:\n current = None\n print(' maxdepth right so far is {}'.format(maxdepth))\n\n return max(maxdepth)", "def score_max_depths(graph, max_depths):\n ###TODO\n pass", "def max_depth(self) -> int:\n if self.child_actions:\n return max(child_action.max_depth\n for child_action in self.child_actions)\n else:\n return self.depth", "def depth(self):\n return max(n.depth for n in self.iternodes())", "def depth(self):\n L, R = 0,0\n if self.left:\n L = self.left.depth()\n if self.right:\n R = self.right.depth()\n\n return 1 + max(L, R)", "def min_depth(t):\n if is_leaf(t):\n return 0\n h = float('inf')\n for b in branches(t):\n # Still works fine!\n h = min(h, 1 + min_depth(b))\n return h", "def deep_max(self):\r\n node = self\r\n while not node.is_leaf():\r\n node = node.children[-1]\r\n return node.keys[-1] if node.keys else None", "def max_depth(self):\r\n lvl = 1\r\n has_lvl_desc = True\r\n while has_lvl_desc:\r\n num_children = len(self.level_n_descendants(lvl))\r\n if num_children==0:\r\n has_lvl_desc = False\r\n else:\r\n lvl+=1\r\n return lvl-1", "def bfs_w_depth(tree):\n visited = []\n frontier = [(0, tree)]\n while frontier:\n depth, tree = frontier.pop(0)\n if tree is not None:\n visited.append((depth, tree[0]))\n frontier.append((depth + 1, tree[1]))\n frontier.append((depth + 1, tree[2]))\n return visited", "def depth(self):\n result = 0\n if self.val is None:\n return result\n return max(self.left.depth(), self.right.depth()) + 1", "def max_depth(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_depth\")" ]
[ "0.7669001", "0.7654562", "0.7653014", "0.7614319", "0.7599487", "0.74606186", "0.74326384", "0.73375744", "0.7196552", "0.718703", "0.7182534", "0.71651614", "0.7148998", "0.71120065", "0.7107149", "0.7074876", "0.706041", "0.7011375", "0.6983949", "0.6968935", "0.6909913", "0.69088864", "0.69061244", "0.68651354", "0.6857812", "0.68508536", "0.68388355", "0.68382144", "0.68303", "0.6828062" ]
0.7672864
0
Crawls each authors pages starting from allauthors main page stored in authors report
def start_requests(self): authors_pandas = conf.read_from_data('authors.json') author_link_list = list( map(lambda obj: (obj['keyUrl'], conf.gd_base_url + obj['article_url'], obj['article_url']), authors_pandas)) for link in author_link_list: yield Request(url=link[1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scrape_author(self, author_name, min_len=0, max_len=9999):\n search = sc.search_author(author_name)\n author = next(search)\n sc.fill(author, sections=['publications'])\n print(author.keys())\n with open(\n 'loadings\\\\authors_papers\\\\{}.txt'.format(author_name),\n 'w',\n encoding='utf-8'\n ) as file:\n for counter, pubblication in enumerate(author['publications']):\n\n if len(pubblication['bib']['title']) < min_len \\\n or len(pubblication['bib']['title']) > max_len:\n continue\n file.write(pubblication['bib']['title'])\n file.write('\\n')\n counter += 1\n if counter > self.hard_limit:\n break", "def get_authors_page(author_name):\n return 'http://techcrunch.com/author/' + slugify(author_name)", "def test_list_all_authors(self):\n response = self.client.get(reverse('authors') + '?page=2')\n self.assertEqual(response.status_code, 200)\n self.assertTrue('is_paginated' in response.context)\n self.assertTrue(response.context['is_paginated'] is True)\n self.assertTrue(len(response.context['author_list']) == 3)", "def scrape_the_athletic():\n\n r = requests.get(f'{url}/author/james-pearce/', headers=HEADERS).text\n soup = BeautifulSoup(r, 'lxml')\n\n latest_articles = soup.find_all(attrs={\"data-object-type\": \"article\", \"class\": \"col-sm-3\"})\n\n latest_article_links = [latest_article.a['href'] for latest_article in latest_articles]\n\n\n for link in latest_article_links:\n link = f\"{url}{link}\"\n r = requests.get(link, headers=HEADERS).text\n soup = BeautifulSoup(r, 'lxml')\n\n para = extract_paratext(soup)\n text = extract_text(para)\n\n if not text:\n continue\n\n yield f'{text} {link}'", "def authors_completion(self, terms):\n return self.db.execute(u'''SELECT * FROM \"authors\" WHERE name LIKE ? LIMIT 50''', (u\"%{}%\".format(terms),)).fetchall()", "def get_paper_authors(tree):\n\tpath = '//table/tr/th[text() = \"Glasgow Author(s) Enlighten ID:\"]/following-sibling::td/a'\n\t# Get list of <a> elements, each an author\n\tauthors = tree.xpath(path)\n\t# Make list of (author name, author url) pairs to return\n\tauthors = [(author.text, author.get(\"href\")) for author in authors]\n\n\treturn authors", "def author_articles(self):\n return ArticlePage.objects.live().filter(author=self).order_by('-date')", "def extract_author_from_page(self, parsed, author_idx=0):\n\n author = {}\n\n author_url = parsed.xpath('//a[@rel=\"author\"]')\n if author_url and len(author_url) > author_idx:\n author['profile'] = 'https://techcrunch.com'\n author['profile'] += author_url[author_idx].get('href')\n\n\n # Find the twitter handle associated with the i-th author\n twitter_handle = parsed.xpath('//span[@class=\"twitter-handle\"]/a')\n if twitter_handle and len(twitter_handle) > author_idx:\n author['twitter'] = twitter_handle[author_idx].get('href')\n\n\n # Parse the new author's page and send it back to `extract_author`.\n # In case the profile page found on article's page does not exist as\n # well, this will be going back and forth on an infinite basis. 
To exit\n # this loop, we added a flag on the author dict.\n author['flag'] = 'exit'\n\n parsed = self.parse(author['profile'], self.author_page_type)\n\n return self.extract_author(parsed, author_idx, author)", "def AuthorURLs(entry):\n a_URLs = ''\n for a in entry.getAuthors():\n url = a.get('homepage', ' ')\n a_URLs += \"%s and \" % url\n return a_URLs[:-5]", "def download_inst(self, authors_list=[], rows_max=200):\n self.staff = authors_list\n fl = ['id', 'bibcode', 'title', 'citation_count',\n 'aff', 'author', 'citation', 'pub', 'reference',\n 'metrics', 'year', 'read_count', 'pubdate']\n\n authors = []\n for auth in authors_list:\n print(f\"searching ADS for author: {auth}\")\n papers = list(ads.SearchQuery(author=auth, rows=rows_max, fl=fl))\n authors.append(papers)\n\n byauth = pd.DataFrame()\n byauth['authors'] = authors_list\n byauth['ppr_list'] = authors\n\n # cantidad de papers por autor:\n npprs = []\n for p in authors:\n npprs.append(len(p))\n byauth['n_papers'] = npprs\n\n # self.byauth = byauth\n\n return byauth", "def get_coauthors(self):\n # Get number of authors to search for\n res = download(url=self.coauthor_link, accept='json')\n data = loads(res.text)['search-results']\n N = int(data.get('opensearch:totalResults', 0))\n # Store information in namedtuples\n fields = 'surname given_name id areas affiliation_id name city country'\n coauth = namedtuple('Coauthor', fields)\n coauthors = []\n # Iterate over search results in chunks of 25 results\n count = 0\n while count < N:\n params = {'start': count, 'count': 25}\n res = download(url=self.coauthor_link, params=params, accept='json')\n data = loads(res.text)['search-results'].get('entry', [])\n # Extract information for each coauthor\n for entry in data:\n aff = entry.get('affiliation-current', {})\n try:\n areas = [a['$'] for a in entry.get('subject-area', [])]\n except TypeError: # Only one subject area given\n areas = [entry['subject-area']['$']]\n new = coauth(surname=entry['preferred-name']['surname'],\n given_name=entry['preferred-name'].get('given-name'),\n id=entry['dc:identifier'].split(':')[-1],\n areas='; '.join(areas),\n affiliation_id=aff.get('affiliation-id'),\n name=aff.get('affiliation-name'),\n city=aff.get('affiliation-city'),\n country=aff.get('affiliation-country'))\n coauthors.append(new)\n count += 25\n return coauthors", "def test_get_all_authors(self):\n self.register_user()\n token = self.login_user()\n response = self.client.get(self.user_author, format='json', HTTP_AUTHORIZATION='Token ' +token)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def get_author_name_urls(dept_name, dept_url):\n\t# Change to \"School of Humanities\" to match the name used in Enlighten\n\t# Done because the string obtained from http://www.gla.ac.uk/schools/ contains the Gaelic name as well\n\tif \"Humanities\" in dept_name:\n\t\tdept_name = \"School of Humanities\"\n\n\t# get list of names of researchers in department\n\tnames = get_names(dept_url)\n\n\twinning_name_urls = set()\n\n\t# loop through each name\n\tfor name in names:\n\t\tname = initialise_first_name(name)\n\t\t# Get Enlighten page on which author name will be found (page for the letter of author's last name)\n\t\tfull_url = author_list_base + \"index.\"+ name.split(\" \")[0][0] + \".html\"\n\t\ttree = get_tree(full_url)\n\t\t# Get all candidate authors which match the name\n\t\tname_urls = get_name_url_matches(name, tree)\n\t\t# If candidates were found\n\t\tif name_urls:\n\t\t\t# Filter out authors that have already been 
scraped\n\t\t\tname_urls = [name_url for name_url in name_urls if name_url not in winning_name_urls]\n\t\t\t# Get the first ranked (name, url) tuple for the target name from the remaining candidates\n\t\t\twinning_name_url = get_winning_url(name_urls, dept_name)\n\t\t\tif winning_name_url:\n\t\t\t\twinning_name_urls.add(winning_name_url)\n\n\treturn winning_name_urls", "def setUp(self):\n self.url = reverse(\"td_biblio:entry_list\")\n self.paginate_by = 20\n self.n_publications_per_year = 3\n self.start_year = 2000\n self.end_year = 2014\n self.n_publications = self.end_year - self.start_year\n self.n_publications *= self.n_publications_per_year\n self.n_authors = self.n_publications * 3\n self.publications_years = []\n self.max_page_num = self.n_publications / self.paginate_by\n if self.n_publications % self.paginate_by:\n self.max_page_num += 1\n\n # Entry (14 * 3 = 42)\n for y in range(self.start_year, self.end_year, 1):\n for i in range(1, 1 + self.n_publications_per_year):\n date = datetime.date(y, i, 1)\n EntryWithAuthorsFactory(publication_date=date)\n self.publications_years.append(y)", "def test_multiple_authors(self):\r\n args = self.page_kwargs.copy()\r\n content = Page(**args)\r\n assert content.authors == [content.author]\r\n args['metadata'].pop('author')\r\n args['metadata']['authors'] = ['First Author', 'Second Author']\r\n content = Page(**args)\r\n assert content.authors\r\n assert content.author == content.authors[0]", "def parse(self, response, **kwargs):\n title = response.xpath('//*[@id=\"wrap\"]/h1/text()').extract_first()\n if title:\n url_to_full_version = response._get_url()\n first_160 = ''.join(response.xpath('//*[@id=\"woe\"]/section/div/p/text()').extract())[:160]\n base_date = response.xpath('//*[@id=\"wrap\"]/div/div[2]/text()').extract_first()\n date_formatted = conf.exec_func_chain(base_date,\n [conf.clean_records_regex,\n lambda v: v[0:-2],\n lambda v: conf.parse_dtts(v, '%b %d, %Y')])\n\n tags = response.xpath('//*[@id=\"woe\"]/section[3]/div/div/a/text()').extract()\n authors_section = response.xpath('//*[@id=\"wrap\"]/div/div[1]/div/span/a')\n for row in authors_section:\n full_author_url = Selector(text=row.extract()).xpath('///@href') \\\n .extract_first()\n author_fullname = conf.clean_records_regex(\n Selector(text=row.extract()).xpath('///span/text()').extract_first())\n if date_formatted >= conf.crawl_date[0].get('LastExecutionDate'):\n conf.write_data_append('articles.json', json.dumps({'title': title,\n 'urlFullVersion': url_to_full_version,\n 'first160': first_160,\n 'dateFormatted': date_formatted,\n 'tags': tags,\n 'authorUrl': f\"{conf.gd_base_url}\"\n f\"{full_author_url}\",\n 'authorName': author_fullname,\n 'author_key': full_author_url.rsplit('/')[-2]\n }))", "def get_author_titles(author_url):\n\t# Get the html tree for the author page\n\tauthor_page_tree = get_tree(author_url)\n\t# Get the <a> elements for the papers on the author's page\n\ta_elems = get_a_elems_for_papers(author_page_tree)\n\n\tall_titles = []\n\tlinks = []\n\t# Loop through a elements and put associated href and text into respective lists\n\tfor a in a_elems:\n\t\tall_titles.append(a.text_content())\n\t\tlinks.append(a.get(\"href\"))\n\n\t# Create list of (title, url) tuples\n\ttitles_links = zip(all_titles, links)\n\t# Get the list of titles of papers that have been tagged with a subject\n\ttagged_titles = get_tagged_titles(titles_links)\n\t# Return the 2 lists in a tuple\n\treturn (all_titles, tagged_titles)", "def scrape_from_feltrinelli(books_for_page, timeout, 
path_output, name_file_out):\n total_books_catalog = 6460\n lst_titles = []\n lst_authors = []\n try:\n for i in range(1, round(total_books_catalog / books_for_page)):\n main_url = f\"https://www.lafeltrinelli.it/libri/c-1/0/{i}/?languageId=22&pblValue=%3C+20210000&type=1&cat1=1&sort=0&pageSize={books_for_page}\"\n response = requests.get(main_url, timeout)\n soup_main = BeautifulSoup(response.text, \"html.parser\")\n div_item = soup_main.find(\"div\", {\"class\": \"product-result\"})\n book_href = div_item.findAll(\"div\", {\"class\": \"cover\"})\n book_descriptions = div_item.findAll(\"div\", {\"class\": \"description\"})\n for book_link in book_href:\n link = book_link.a[\"href\"].strip()\n lst_titles.append(link)\n for book_desc in book_descriptions:\n author = book_desc.h4.text.strip()\n lst_authors.append(author)\n print(f\"Page {i} finished!\")\n except requests.ConnectionError as e:\n print(\n \"OOPS!! Connection Error. Make sure you are connected to Internet. Technical Details given below.\\n\"\n )\n print(str(e))\n except requests.Timeout as e:\n print(\"OOPS!! Timeout Error\")\n print(str(e))\n except requests.RequestException as e:\n print(\"OOPS!! General Error\")\n print(str(e))\n except KeyboardInterrupt:\n print(\"Someone closed the program\")\n finally:\n try:\n list_cols = [\"title\", \"author\"]\n rows = list(zip(lst_titles, lst_authors))\n final_df = pd.DataFrame(rows, columns=list_cols)\n # create output directory if not exists, otherwise skip\n os.makedirs(path_output, exist_ok=True)\n output_path = os.path.join(path_output, name_file_out)\n final_df.to_csv(output_path, index=False)\n except Exception as ex:\n print(\"Unable to store records in CSV file. Technical details below.\\n\")\n print(str(ex))", "def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))", "def parse(self, response, **kwargs):\n\n key_url = response._get_url().rsplit('/')[-2] # get the author nickname after the last slash\n name = response.xpath('//*[@id=\"woe\"]/div[2]/div/div[1]/div[2]/h3/text()').extract_first()\n job_title = response.xpath('//*[@id=\"woe\"]/div[2]/div/div[1]/div[2]/p/text()').extract_first()\n linkedin_url = response.xpath('//*[@id=\"woe\"]/div[2]/div/div[1]/div[1]/ul/li/a/@href').extract_first()\n date_title_part = response.xpath('//*[@id=\"woe\"]/div[2]/div/div[2]/div[position() > 1]')\n for row in date_title_part:\n row_extracted = row.extract()\n art_date = Selector(text=row_extracted).xpath('///span/text()').extract_first()\n date_formatted = conf.parse_dtts(art_date, '%B %d, %Y')\n article_title = Selector(text=row_extracted).xpath('///a/text()').extract_first()\n article_url = Selector(text=row_extracted).xpath('///a/@href').extract_first()\n\n if date_formatted >= conf.crawl_date[0].get('LastExecutionDate'):\n conf.write_data_append('authors.json', json.dumps({'keyUrl': key_url,\n 'name': name,\n 'jobTitle': job_title,\n 'linkedinUrl': linkedin_url,\n 'date': date_formatted,\n 'article_title': article_title,\n 'article_url': article_url}))", "def get_author_info(self, author: str):\n for writer_word in self._writer_words:\n data = json.loads(requests.get(WIKIDATA_SEARCH + \"&srsearch=\" + author + \" \" + writer_word).text)\n pages = data.get(\"query\").get(\"search\")\n if len(pages) >= 1:\n pageid = pages[0].get(\"title\")\n author_details = self._reference.author_map.get(author)\n if author_details:\n return author_details\n if pageid == -1:\n continue\n\n else:\n response = requests.get(WIKIDATA_PARSE + pageid + 
\".json\")\n data = json.loads(response.text)\n if author.lower() not in data.get(\"entities\").get(pageid).get(\"labels\").get(\"en\").get(\"value\").lower():\n continue\n else:\n try:\n id = data.get(\"entities\").get(pageid).get(\"claims\").get(\"P31\")[0].get(\"mainsnak\").get(\"datavalue\").get(\"value\").get(\"id\")\n if str(id) != \"Q5\": # the id for human\n continue\n except IndexError:\n continue\n properties = data.get(\"entities\").get(pageid).get(\"claims\")\n author_details = {\"id\": pageid, \"gender\": self.get_gender(properties)}\n country_details = self.get_country(properties)\n author_details[\"country\"] = country_details\n self._reference.author_map[author] = author_details\n return author_details\n return {\"id\": \"Unknown\", \"gender\": \"Unknown\", \"country\": [{\"name\": \"Unknown\", \"region\": \"Unknown\"}]}", "def _on_authors_list(self, evt):\n \n # raise authors management dialog\n dlg = AuthorsView(self, self._library)\n response = dlg.ShowModal()\n dlg.Destroy()\n \n # check response\n if response != wx.ID_OK:\n return\n \n # refresh collections view\n self._collections_view.UpdateCounts()\n \n # refresh articles view\n self._articles_view.ShowArticles()", "def get_papers_by_authors(authors_list, rows_max=999):\n fl = ['id', 'bibcode', 'title', 'citation_count',\n 'aff', 'author', 'citation', 'pub', 'reference',\n 'metrics', 'year', 'read_count', 'pubdate']\n\n authors = []\n for auth in authors_list:\n print(auth)\n papers = list(ads.SearchQuery(author=auth, rows=rows_max, fl=fl))\n authors.append(papers)\n\n byauth = pd.DataFrame()\n byauth['authors'] = authors_list\n byauth['ppr_list'] = authors\n\n # cantidad de papers por autor:\n npprs = []\n for p in authors:\n npprs.append(len(p))\n byauth['n_papers'] = npprs\n\n return byauth", "def crawl_all_by_year(year, verbose):\n _crawl_by_year_helper(year, verbose, True, True)", "def get_authors(self, blogid=1):\n return self.execute('wp.getAuthors', blogid, self.username, self.password)", "def extract_author(self, parsed_html, author_idx=0, author=None):\n\n if not author:\n author = {}\n\n\n # Find all links with this xpath and tries to classify them\n links = []\n xpath = '//div[@class=\"profile cf\"]/div/ul/li/a'\n\n for url in parsed_html.xpath(xpath):\n links += [url.get('href')]\n\n for social in self.classify_links(links):\n author[social[0]] = social[1]\n\n\n # Get description text provided by the author\n xpath = '//div[contains(@class, \"profile-text\")]/p'\n items = parsed_html.xpath(xpath)\n author['about'] = self.clear_text(items)\n\n\n # Get Crunchbase url profile\n xpath = '//div[contains(@class, \"profile-text\")]/a'\n website = parsed_html.xpath(xpath)\n\n if website:\n author['website'] = website[0].get('href')\n\n\n # Get his/her avatar url\n xpath = '//div[@class=\"profile cf\"]/div/img'\n avatar = parsed_html.xpath(xpath)\n\n if avatar:\n author['avatar'] = avatar[0].get('src')\n\n\n # Tries to get the author's profile url\n xpath = 'meta[@property=\"og:url\"]'\n author['profile'] = self.get_text_or_attr(\n parsed_html, xpath, 'content'\n )\n\n\n # If the author cannot be fetched from his generated url, we have to\n # check the article's page in order to find him/her.\n\n if 'twitter' not in author and 'flag' not in author:\n\n parsed = self.parse(self.article_url, self.article_page_type)\n return self.extract_author_from_page(parsed, author_idx)\n\n\n if 'flag' in author:\n del author['flag']\n\n\n return author", "def get_popular_authors():\n db = psycopg2.connect(database=DBNAME)\n c 
= db.cursor()\n query_popular_authors = \"\"\"\n SELECT aut.name, COUNT(lg.id) AS views\n FROM articles AS art\n JOIN log AS lg ON art.slug = SUBSTRING(lg.path,10)\n AND lg.status = '200 OK'\n JOIN authors AS aut ON aut.id = art.author\n GROUP BY aut.name\n ORDER BY views desc; \"\"\"\n c.execute(query_popular_authors)\n authors = from_db_cursor(c)\n db.close()\n return authors", "def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])", "def get_article_author(self, article_webpage):\n pass", "def menu_authors_homepage(self, event=None):\n self.link('http://www.stani.be')" ]
[ "0.66120636", "0.6457187", "0.6398303", "0.6192936", "0.61002207", "0.60938454", "0.6064179", "0.6056438", "0.60388", "0.5980902", "0.5950264", "0.59492725", "0.5898721", "0.5894304", "0.58883196", "0.5852367", "0.581795", "0.5807714", "0.58075255", "0.5806006", "0.5805787", "0.57818496", "0.5769066", "0.5721423", "0.5720553", "0.56596184", "0.56488866", "0.561552", "0.56127125", "0.5605279" ]
0.64995414
1
construct_network establishes all weight matrices and biases and connects them. The outputs may include parameters of the flow
def construct_network(self, n_units, n_samples=1, noise_dim=0, keep_p=1., nonlinearity=True, init_params=None, name=""): print "constructing network, n_units: ",n_units # TODO use kwargs for more elagant solutions to being called by this # base class assert keep_p ==1. and nonlinearity and noise_dim == 0 assert init_params is None # this is implemented only in the Bayesian flow version of this function ### Define parameters of the network self.weights, self.biases, KL = {}, {}, 0. self.layers = [] # Establish paramters of appromiate posterior over weights and # biases. for l in range(1, len(n_units)): with tf.variable_scope(name+'Layer_%d'%l): n_in, n_out = n_units[l-1], n_units[l] # use non neglidgible uncertainty if we are doing VI sigma_init = self.init_sigma_params w_prior_sigma, b_prior_sigma = self.w_prior_sigma, self.w_prior_sigma mu_init_sigma_w, mu_init_sigma_b = np.sqrt(1./(n_in)), 1. (w_mu, w_logstd), _, w_KL = utils.set_q(name+"w_%d"%l, sigma_prior=w_prior_sigma, mu_init_sigma=mu_init_sigma_w, sigma_init=sigma_init, n_samples=0, size=[n_in, n_out], save_summary=True) # We use same init_sigma for weights and biases. (b_mu, b_logstd), _, b_KL = utils.set_q(name+"b_%d"%l, sigma_prior=b_prior_sigma, mu_init_sigma=mu_init_sigma_b, sigma_init=sigma_init, n_samples=0, size=[n_out], save_summary=True) self.weights['w_%d_mu'%l], self.weights['w_%d_std'%l] = w_mu, tf.nn.softplus(w_logstd) self.biases['b_%d_mu'%l], self.biases['b_%d_std'%l] = b_mu, tf.nn.softplus(b_logstd) self.params += [w_mu, b_mu, w_logstd, b_logstd] KL += w_KL + b_KL # Add an extra dimension to correspond to samples. prev_layer = tf.stack([self.x]*n_samples) self.layers.append(prev_layer) # shape is [n_samples, ?, dim(x)] ### Define activations in each layer for l in range(1,len(n_units)): print "defining activations in layer %d"%l # Multiply with weight matrix and add bias prev_layer = tf.reshape(prev_layer, [-1, n_units[l-1]]) layer_pre_bias = tf.matmul(prev_layer, self.weights['w_%d_mu'%l]) layer_pre_bias = tf.reshape(layer_pre_bias, [n_samples, -1, n_units[l]]) # Shape of layer_pre_bias is [n_samples, ?, n_units[l]] # add mean bias term layer = tf.add(layer_pre_bias, self.biases['b_%d_mu'%l][None, None, :]) # Calculate the noise in each hidden unit. # must use absolute value of activation because final layer may # have negative values. layer_var = tf.matmul(tf.reshape(prev_layer**2,[-1, n_units[l-1]]), self.weights['w_%d_std'%l]**2) layer_var = tf.reshape(layer_var, [n_samples, -1, n_units[l]]) layer_var += self.biases['b_%d_std'%l]**2 # Now sample noise and add scaled noise. # This constitutes the local reparameterization trick. eps = tf.random_normal(name='eps_%d'%l, mean=0., stddev=1.0, shape=[n_samples, 1, n_units[l]]) layer_sigma = tf.sqrt(layer_var) layer += layer_sigma*eps with tf.name_scope(name+"Neural_Network_Activations_%d"%l): tf.summary.histogram(name+"Layer_%d_sigmas"%l, layer_sigma) tf.summary.histogram(name+"Layer_%d_activations_pre_tanh"%l, layer) # Add tanh nonlinearity if l != (len(n_units) - 1): layer = tf.nn.tanh(layer) with tf.name_scope(name+"Neural_Network_Activations_%d"%l): tf.summary.histogram(name+"Layer_%d_activations_post_tanh"%l,layer) prev_layer = layer self.layers.append(prev_layer) self.KL_BNN = KL return prev_layer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n self._net_outputs = self.online_convnet(self.state_ph, training=True)\n self._q_argmax = tf.argmax(self._net_outputs.q_values, axis=1)[0]\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n training=True)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)", "def _generate_network_initialization(self, graph, memory_manager):\n\n # TODO: To be changed if we want to support multiple outputs\n output_buffer_name = graph.outputs[0].name\n\n ops_to_ignore = ['Reshape', 'Mul']\n\n buffers_allocated = []\n\n buffer_declaration = \"\"\n buffer_declaration += \" pico_cnn::naive::Tensor **kernels;\\n\"\n buffer_declaration += \" pico_cnn::naive::Tensor **biases;\\n\"\n\n constructor_code = \"\"\n #constructor_code += \"Network::Network() {\\n\\n\"\n\n num_layers = 0\n num_kernels = 0\n num_biases = 0\n\n for node in graph.nodes:\n \"\"\"Do not count the reshape layers as the input tensor will only define the dimensions\"\"\"\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n num_layers += 1\n for num, input in enumerate(node.input_tensors):\n if input in buffers_allocated:\n continue\n else:\n tensor = node.input_tensors[input]\n buffers_allocated.append(input)\n if len(tensor.shape) == 1:\n num_biases += 1\n else:\n num_kernels += 1\n\n \"\"\"The arrays kernels and biases will be used to pass only two variables to read_binary_weights\"\"\"\n constructor_code += \" kernels = new pico_cnn::naive::Tensor*[{}]();\\n\".format(num_kernels)\n constructor_code += \" biases = new pico_cnn::naive::Tensor*[{}]();\\n\\n\".format(num_biases)\n\n pos = -1\n pos_kernel = -1\n pos_bias = -1\n\n buffers_allocated.clear()\n\n \"\"\"Iterate over all nodes in the graph and generate the corresponding allocation code.\"\"\"\n for node_id, node in enumerate(graph.nodes):\n\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n pos += 1\n\n buffer_declaration += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n constructor_code += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n\n # Allocate memory for kernels and biases\n buffer_declaration += \" // Inputs\\n\"\n constructor_code += \" // Inputs\\n\"\n for num, input in enumerate(node.input_tensors):\n\n if node.op_type in ops_to_ignore:\n continue\n\n if input in buffers_allocated:\n continue\n else:\n buffers_allocated.append(input)\n\n tensor = node.input_tensors[input]\n if len(tensor.shape) == 1:\n pos_bias += 1\n else:\n pos_kernel += 1\n\n buffer = memory_manager.get_buffer(graph, input)\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"KernelAllocation\")\n impl = functionality[0].create(buffer, pos, pos_kernel, pos_bias)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \" // Outputs\\n\"\n constructor_code += \" // Outputs\\n\"\n for num, output in enumerate(node.outputs):\n\n buffer = memory_manager.get_buffer(graph, output)\n\n if output == output_buffer_name:\n buffer_declaration += \" // Output tensor {} with shape {} of network 
provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n constructor_code += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n continue\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"OutputAllocation\")\n impl = functionality[0].create(buffer)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \"\\n\\n\"\n constructor_code += \"\\n\\n\"\n\n #constructor_code += \"}\\n\"\n\n self.buffer_declaration = buffer_declaration\n self.constructor_code = constructor_code", "def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 4, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def _build_networks(self):\n # Calling online_convnet will generate a new graph as defined in\n # self._get_network_template using whatever input is passed, but will always\n # share the same weights.\n self.online_convnet = tf.make_template('Online', self._network_template)\n self.target_convnet = tf.make_template('Target', self._network_template)\n self._net_outputs = self.online_convnet(self.state_ph)\n\n self._replay_net_outputs = self.online_convnet(self._replay.states)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)\n\n if self.acting_policy == 'hyperbolic':\n self._q_argmax = tf.argmax(self._net_outputs.hyp_q_value, axis=1)[0]\n elif self.acting_policy == 'largest_gamma':\n self._q_argmax = tf.argmax(self._net_outputs.q_values[-1], axis=1)[0]\n else:\n raise NotImplementedError", "def construct(self, weights):\n in_weights = weights # first to append zero column as the last output (no out-degree)\n weights = np.zeros((weights.shape[0], weights.shape[1]+1))\n weights[:,:-1] = in_weights\n din, dout, dhid = self.dim_in, self.dim_out, self.dim_hid # the max dim\n hid = weights.shape[0] - dout # this hidden dim\n if not (weights.shape[1]-din-dout==hid and (0<hid<=dhid)):\n raise self.ANNException('weight matrix hidden nodes not matching')\n if not (dout < weights.shape[0] <= hid+dout):\n raise self.ANNException('weight matrix row shape not 
matching')\n if not (din+dout < weights.shape[1] <= din+hid+dout):\n raise self.ANNException('weight matrix column shape not matching')\n\n self.weight[din:din+hid,:din+hid] = weights[:hid,:din+hid]\n self.weight[din:din+hid,din+dhid:] = weights[:hid,din+hid:]\n self.weight[din+dhid:,:din+hid] = weights[hid:,:din+hid]\n self.weight[din+dhid:,din+dhid:] = weights[hid:,din+hid:]\n\n for i in range(hid):\n self.connectivity[din+i,:din+i] = True\n self.connectivity[din+dhid:,:din+hid] = True\n for i in range(dout):\n self.connectivity[din+dhid+i,din+dhid:din+dhid+i] = True\n\n self.hidden[:hid] = True", "def build_model(self):\n for link in self.links:\n # if from neuron is input to graph, add it to input_neurons set\n if self.is_input_neuron(link.from_neuron_id):\n self.input_neurons.add(link.from_neuron_id)\n # add weight to neuron\n if link.to_neuron_id not in self.weights:\n self.weights[link.to_neuron_id] = []\n self.weights[link.to_neuron_id].append(link.weight)\n # add input to neuron\n if link.to_neuron_id not in self.connections:\n self.connections[link.to_neuron_id] = []\n self.connections[link.to_neuron_id].append(link.from_neuron_id)", "def create_network(outfname_train, outfname_deploy, N_conv_layers=3, N_fully_connected_layers=3, batch_size_train=100,batch_size_test=100, source_train='datatrain', source_test='datatest', num_output_conv=32, kernel_size=3, weight_std_conv=0.01, activation='relu', num_output_fully_connected=64, weight_std_fully_connected=0.01, do_batchnorm=1, do_last_batchnorm=1, scale=1,shift=0, weight_std_affine=0, use_softmax=0, num_classes=3, input_dim_1=1,input_dim_2=3, input_dim_3=32, input_dim_4=32, use_lowrank=1, T_dimension=None, softmax_weight=1, lowrank_weight=1, data_type='lmdb'):\n\n if T_dimension==None:\n T_dimension = num_classes\n \n train_txt = \"\"\n deploy_txt = \"\"\n\n train_txt += data_layer(name='data_layer', source_train=source_train, batch_size_train=batch_size_train, source_test=source_test, batch_size_test=batch_size_test, data_type=data_type)\n\n deploy_txt += deploy_data_layer(name='data_layer', input_dim_1=input_dim_1, input_dim_2=input_dim_2, input_dim_3=input_dim_3, input_dim_4=input_dim_4)\n\n last_name = 'data'\n\n ####### CONVOLUTIONAL LAYERS\n for i in range(N_conv_layers):\n conv_name = 'conv%i' % (i+1)\n top = conv_name\n\n conv_txt = convolution_layer(conv_name, last_name, num_output=num_output_conv, kernel_size=kernel_size, weight_std=weight_std_conv)\n\n train_txt += conv_txt\n deploy_txt += conv_txt\n \n if activation == 'pool':\n pool_name = 'pool%i' % (i+1)\n activation_txt = pooling_layer(pool_name, conv_name)\n last_name = pool_name\n elif activation == 'relu':\n relu_name = 'relu%i' % (i+1)\n activation_txt = relu_layer(relu_name, conv_name)\n last_name = conv_name\n else:\n raise Exception('Unknown activation')\n \n\n train_txt += activation_txt\n deploy_txt += activation_txt\n\n \n\n ####### FULLY CONNECTED LAYERS\n for i in range(N_fully_connected_layers):\n fully_connected_name = 'ip%i' % (i+1)\n\n fully_connected_txt = fully_connected_layer(fully_connected_name, last_name, num_output=num_output_fully_connected, weight_std=weight_std_fully_connected)\n\n relu_name = 'iprelu%i' % (i+1)\n relu_txt = relu_layer(relu_name, fully_connected_name)\n\n batchnorm_name = 'ipbn%i' % (i+1)\n\n if do_batchnorm and i<N_fully_connected_layers-1:\n batchnorm_txt_train = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=False, phase='TRAIN', deploy=False)\n batchnorm_txt_test = batchnorm_layer(batchnorm_name, 
fully_connected_name, use_global_stats=True, phase='TEST', deploy=False)\n \n batchnorm_txt_deploy = batchnorm_layer(batchnorm_name, fully_connected_name, deploy=True)\n scale_txt = ''\n \n last_name = batchnorm_name\n \n elif do_last_batchnorm:\n batchnorm_txt_train = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=False, phase='TRAIN', deploy=False)\n batchnorm_txt_test = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=True, phase='TEST', deploy=False)\n \n batchnorm_txt_deploy = batchnorm_layer(batchnorm_name, fully_connected_name, deploy=True)\n scale_name = 'ipbnscaled%i' % (i+1)\n\n scale_txt = scale_layer(scale_name, batchnorm_name, scale=scale,shift=shift)\n \n last_name = scale_name\n else:\n batchnorm_txt_train = ''\n batchnorm_txt_test = ''\n batchnorm_txt_deploy = ''\n last_name = fully_connected_name\n scale_txt = ''\n \n train_txt += fully_connected_txt + relu_txt + batchnorm_txt_train + batchnorm_txt_test + scale_txt\n deploy_txt += fully_connected_txt + relu_txt + batchnorm_txt_deploy + scale_txt\n \n\n\n\n\n # add affine layer on top of funnel layer \n affine_name = 'affine' # (matrix T)\n affine_txt = fully_connected_layer(affine_name, last_name, num_output=T_dimension, weight_std=weight_std_affine)\n\n train_txt += affine_txt\n deploy_txt += affine_txt\n \n # apply lowrank loss to output of 'affine' layer [conv - fully_connected -\n # funnel - affine - lowrank] the lowrank output is located in affine. The\n # 'funnel' layer is used to allow softmax to separate between classes before\n # LRT\n if use_lowrank:\n lowrank_txt = lowrank_layer('lowrank_loss', affine_name, loss_weight=lowrank_weight)\n train_txt += lowrank_txt\n\n if use_softmax:\n # apply softmax loss to output of funnel layer [conv - fully_connected - funnel - softmax]\n # add one affine layer to reduce from num_output_fully_connected to num_classes\n\n # apr 4. 
trying on top of fully connected layer\n funnel_name = 'funnel'\n funnel_txt = fully_connected_layer(funnel_name, last_name, num_output=num_classes, weight_std=weight_std_fully_connected)\n\n train_txt += funnel_txt\n deploy_txt += funnel_txt\n\n softmax_txt = softmax_layer('softmax_loss', funnel_name, loss_weight=softmax_weight)\n train_txt += softmax_txt\n\n write_to_file(outfname_train, train_txt)\n write_to_file(outfname_deploy, deploy_txt)\n\n \n return train_txt, deploy_txt", "def build_network(self, inputs, targets, training=False):\n raise NotImplementedError", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def build_transformation_network(n_styles, depthwise_separable_conv):\n\n image_input = Input((None, None, 3), name=\"image\")\n style_weights = Input((n_styles, ), name=\"style_weights\")\n\n net = conv_block(image_input,\n style_weights,\n filters=32,\n kernel_size=(9, 9),\n strides=(1, 1),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = conv_block(net,\n style_weights,\n filters=64,\n kernel_size=(3, 3),\n strides=(2, 2),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = conv_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(2, 2),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = upsampling_block(net,\n style_weights,\n interpolation_factor=2,\n filters=64,\n kernel_size=(3, 3),\n strides=(1, 1),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = upsampling_block(net,\n style_weights,\n interpolation_factor=2,\n filters=32,\n kernel_size=(3, 3),\n strides=(1, 1),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = conv_block(net,\n style_weights,\n filters=3,\n kernel_size=(9, 9),\n strides=(1, 1),\n activation=\"sigmoid\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = Lambda(lambda t: t * 255.0, name=\"output\")(net)\n\n return Model([image_input, style_weights], net, name=\"transform_net\")", "def __init__(self, network_path='.', logging=True,\n input_image_size=None, n_input_channels=None,\n n_output_classes=None,\n conv1_size=5, conv1_n_chan=32, conv1_n_pool=2,\n conv2_size=5, conv2_n_chan=64, conv2_n_pool=2,\n fc1_n_chan=1024, fc1_dropout=0.5, alpha=4e-4 ):\n self.logging = logging\n\n # If network path does not yet exists\n self.network_path = network_path\n if not os.path.isdir(self.network_path):\n # Make network directory\n os.mkdir(self.network_path)\n now = datetime.datetime.now()\n 
self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Creation of new network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log(\"\\nNetwork did not exist ... \")\n self.log(\"Created new network with supplied (or default) architecture\")\n\n # Set up new network\n self.y_res = input_image_size[0]\n self.x_res = input_image_size[1]\n self.n_input_channels = n_input_channels\n self.n_output_classes = n_output_classes\n self.conv1_size = conv1_size\n self.conv1_n_chan = conv1_n_chan\n self.conv1_n_pool = conv1_n_pool\n self.conv2_size = conv2_size\n self.conv2_n_chan = conv2_n_chan\n self.conv2_n_pool = conv2_n_pool\n self.fc1_y_size = int( np.ceil( np.ceil(\n self.y_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_x_size = int( np.ceil( np.ceil(\n self.x_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_n_chan = fc1_n_chan\n self.fc1_dropout = fc1_dropout\n self.alpha = alpha\n self.n_samples_trained = 0\n self.n_class_samples_trained = self.n_output_classes*[0]\n self.n_samples_list = []\n self.n_class_samples_list = [[] for _ in range(self.n_output_classes)]\n self.accuracy_list = [[] for _ in range(self.n_output_classes)]\n self.precision_list = [[] for _ in range(self.n_output_classes)]\n self.recall_list = [[] for _ in range(self.n_output_classes)]\n self.F1_list = [[] for _ in range(self.n_output_classes)]\n\n # Save network architecture\n self.save_network_architecture( network_path=self.network_path )\n\n else:\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Re-initialization of existing network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \" \")\n\n # Load network architecture from directory\n net_architecture = self.load_network_architecture(self.network_path)\n\n # Set up network variables from loaded architecture\n self.y_res = net_architecture['y_res']\n self.x_res = net_architecture['x_res']\n self.n_input_channels = net_architecture['n_input_channels']\n self.n_output_classes = net_architecture['n_output_classes']\n self.conv1_size = net_architecture['conv1_size']\n self.conv1_n_chan = net_architecture['conv1_n_chan']\n self.conv1_n_pool = net_architecture['conv1_n_pool']\n self.conv2_size = net_architecture['conv2_size']\n self.conv2_n_chan = net_architecture['conv2_n_chan']\n self.conv2_n_pool = net_architecture['conv2_n_pool']\n self.fc1_y_size = int( np.ceil( np.ceil(\n self.y_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_x_size = int( np.ceil( np.ceil(\n self.x_res/self.conv1_n_pool ) / self.conv2_n_pool ) )\n self.fc1_n_chan = net_architecture['fc1_n_chan']\n self.fc1_dropout = net_architecture['fc1_dropout']\n self.alpha = net_architecture['alpha']\n self.n_samples_trained = net_architecture['n_samples_trained']\n self.n_class_samples_trained = net_architecture['n_class_samples_trained']\n self.n_samples_list = net_architecture['n_samples_list']\n self.n_class_samples_list = net_architecture['n_class_samples_list']\n self.accuracy_list = net_architecture['accuracy_list']\n self.precision_list = net_architecture['precision_list']\n self.recall_list = net_architecture['recall_list']\n self.F1_list = net_architecture['F1_list']\n\n # 
Update values of alpha and dropout if supplied\n if self.alpha != alpha:\n self.alpha = alpha\n self.log(\"Updated learning rate 'alpha' to {}\".format(self.alpha))\n if self.fc1_dropout != fc1_dropout:\n self.fc1_dropout = fc1_dropout\n self.log(\"Updated dropout fraction to {}\".format(self.fc1_dropout))\n\n # Clear previous graphs\n tf.reset_default_graph()\n\n #########################################################\n # Input and target variable placeholders\n # x = [ m_samples x [channel_1_data, channel_2_data, etc.] ]\n self.x = tf.placeholder( tf.float32, shape = [None,\n self.n_input_channels * self.y_res * self.x_res] )\n self.y_trgt = tf.placeholder( tf.float32, \\\n shape = [None, self.n_output_classes] )\n\n # Convert input image to tensor with channel as last dimension\n # x_image = [-1 x im-height x im-width x n-input-channels]\n x_image_temp = tf.reshape(self.x, [-1,\n self.n_input_channels,self.y_res,self.x_res])\n x_image = tf.transpose(x_image_temp, [0,2,3,1])\n\n #########################################################\n # Set up convolutional layer 1\n # W = [im-height x im-width x n-input-channels x n-output-channels])\n self.conv1_shape = [self.conv1_size, self.conv1_size,\n self.n_input_channels, self.conv1_n_chan]\n self.W_conv1 = tf.Variable( tf.truncated_normal(\n shape=self.conv1_shape, stddev=0.1))\n self.b_conv1 = tf.Variable( tf.constant(0.1,\n shape=[self.conv1_n_chan] ))\n\n # Convolve x_image with the weight tensor\n self.conv1_lin = tf.nn.conv2d( x_image, self.W_conv1,\n strides=[1, 1, 1, 1], padding='SAME' )\n\n # Add bias and apply transfer function\n self.conv1_relu = tf.nn.relu( self.conv1_lin + self.b_conv1 )\n\n # Max pooling\n self.conv1_kernel = [1, self.conv1_n_pool, self.conv1_n_pool, 1]\n self.conv1_pool = tf.nn.max_pool( self.conv1_relu,\n ksize=self.conv1_kernel, strides=self.conv1_kernel, padding='SAME')\n\n #########################################################\n # Convolutional layer 2\n self.conv2_shape = [self.conv2_size, self.conv2_size,\n self.conv1_n_chan, self.conv2_n_chan]\n self.W_conv2 = tf.Variable( tf.truncated_normal(\n shape=self.conv2_shape, stddev=0.1 ) )\n self.b_conv2 = tf.Variable( tf.constant(0.1,\n shape=[self.conv2_n_chan] ))\n\n # Convolve x_image with the weight tensor\n self.conv2_lin = tf.nn.conv2d( self.conv1_pool, self.W_conv2,\n strides=[1, 1, 1, 1], padding='SAME' )\n\n # Add bias and apply transfer function\n self.conv2_relu = tf.nn.relu( self.conv2_lin + self.b_conv2 )\n\n # Max pooling\n self.conv2_kernel = [1, self.conv2_n_pool, self.conv2_n_pool, 1]\n self.conv2_pool = tf.nn.max_pool( self.conv2_relu,\n ksize=self.conv2_kernel, strides=self.conv2_kernel, padding='SAME')\n\n\n #########################################################\n # Densely Connected Layer\n # Weights and bias\n self.fc1_shape = [self.fc1_y_size * self.fc1_x_size * self.conv2_n_chan,\n self.fc1_n_chan]\n self.W_fc1 = tf.Variable( tf.truncated_normal(\n shape=self.fc1_shape, stddev=0.1 ) )\n self.b_fc1 = tf.Variable( tf.constant(0.1, shape=[self.fc1_n_chan] ))\n\n # Flatten output from conv2\n self.conv2_pool_flat = tf.reshape(\n self.conv2_pool, [-1, self.fc1_shape[0]] )\n\n # Calculate network step\n self.fc1_relu = tf.nn.relu( tf.matmul( self.conv2_pool_flat,\n self.W_fc1) + self.b_fc1 )\n\n # Set up dropout option for fc1\n self.fc1_keep_prob = tf.placeholder(tf.float32)\n self.fc1_relu_drop = tf.nn.dropout(self.fc1_relu, self.fc1_keep_prob)\n\n #########################################################\n # Readout layer\n # 
Weights and bias\n self.fc_out_shape = [self.fc1_n_chan, self.n_output_classes]\n self.W_fc_out = tf.Variable( tf.truncated_normal(\n shape=self.fc_out_shape, stddev=0.1 ) )\n self.b_fc_out = tf.Variable( tf.constant(0.1,\n shape=[self.fc_out_shape[1]] ))\n\n # Calculate network step\n self.fc_out_lin = tf.matmul( self.fc1_relu_drop,\n self.W_fc_out ) + self.b_fc_out\n\n #########################################################\n # Define cost function and optimizer algorithm\n self.cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n logits=self.fc_out_lin, labels=self.y_trgt ) )\n self.train_step = tf.train.AdamOptimizer(self.alpha).minimize(\n self.cross_entropy )\n\n #########################################################\n # Define how to test trained model\n self.network_prediction = tf.cast( tf.argmax(\n self.fc_out_lin, 1 ), tf.float32 )\n self.is_correct_prediction = tf.equal( tf.argmax( self.fc_out_lin, 1 ),\n tf.argmax( self.y_trgt, 1 ) )\n self.accuracy = tf.reduce_mean( tf.cast(\n self.is_correct_prediction, tf.float32 ) )\n\n #########################################################\n # Create save operation\n self.saver = tf.train.Saver()", "def build_network(self, dimList, actType=\"Tanh\", verbose=True):\n self.Q_network = Model(dimList, actType, verbose=verbose)\n self.target_network = Model(dimList, actType)\n\n if self.device == torch.device(\"cuda\"):\n self.Q_network.cuda()\n self.target_network.cuda()\n\n self.build_optimizer()", "def __init__(self, num_visible, num_hidden, act_func='logistic'):\n\n print('Initializing network... ', end='')\n sys.stdout.flush()\n\n self.num_visible = num_visible\n self.num_hidden = num_hidden\n \n #self.reconstructed = np.zeros((self.num_examples, self.num_visible))\n\n self.weights = 0.1 * np.random.randn(num_visible, num_hidden)\n self.v_bias = np.zeros((1, num_visible))\n self.h_bias = -4.0 * np.ones((1, num_hidden))\n\n self.w_inc = np.zeros((num_visible, num_hidden))\n self.v_inc = np.zeros((1, num_visible))\n self.h_inc = np.zeros((1, num_hidden))\n\n if act_func == 'chaotic':\n self.act_func = self.chaotic_logistic\n else:\n self.act_func = self.logistic\n\n print('Done!')\n return", "def build_and_display_network():\n bpn = NeuralNetwork.BackPropagationNetwork((input_nodes, hidden_nodes, output_nodes),[None, sigmoid, linear])\n DisplayNetwork.displayLayers(bpn.matrixDimension)\n\n return bpn", "def __init__(self, input_nodes, hidden_nodes, hidden_layers, output_nodes):\n # Class members:\n # num_input_nodes\n # num_hidden_nodes\n # num_hidden_layers\n # num_output_nodes\n # weights = [[num_hidden_nodes, num_input_nodes],[num_hidden_nodes, num_hidden_nodes],[]<- for each hl,\n # [num_output_nodes, num_hidden_nodes]]\n # biases\n\n self.num_input_nodes = input_nodes\n self.num_hidden_nodes = hidden_nodes\n self.num_hidden_layers = hidden_layers\n self.num_output_nodes = output_nodes\n\n self.weights = []\n for i in range(self.num_hidden_layers + 1):\n if i is 0:\n # first weights array is input to hidden\n self.weights.append(.5 * np.random.rand(self.num_hidden_nodes, self.num_input_nodes) - .25)\n\n elif i < self.num_hidden_layers:\n # next weight array is hidden nodes to hidden nodes\n self.weights.append(.5 * np.random.rand(self.num_hidden_nodes, self.num_hidden_nodes) - .25)\n else:\n # last weight array is hidden nodes to output nodes\n self.weights.append(.5 * np.random.rand(self.num_output_nodes, self.num_hidden_nodes) - .25)\n\n self.biases = []\n for i in range(self.num_hidden_layers + 1):\n 
if i < self.num_hidden_layers:\n # for every hidden node there is a bias\n self.biases.append(0.5 * np.random.rand(self.num_hidden_nodes) - .25)\n else:\n # for the output node there is a bias as well\n self.biases.append(0.5 * np.random.rand(self.num_output_nodes) - .25)\n\n self.activation = np.vectorize(self.tanh, otypes=[float])", "def get_initial_graph(n_in, code_spec, n_out):\n G = nx.MultiDiGraph()\n\n G.add_node(\"source\", label=\"SOURCE\", shape=\"cylinder\", color=\"gold\")\n G.add_node(\"sink\", label=\"SINK\", shape=\"cylinder\", color=\"gold\")\n\n for task in tasks:\n task_source_key = f\"{task.name}-source\"\n G.add_node(task_source_key, label=task_source_key, shape=\"cylinder\", color=\"gold\")\n G.add_edge(\"source\", task_source_key)\n task_sink_key = f\"{task.name}-sink\"\n G.add_node(task_sink_key, label=task_sink_key, shape=\"cylinder\", color=\"gold\")\n G.add_edge(task_sink_key, \"sink\")\n for i, input in enumerate(task.inputs):\n input_key = f\"{task.name}-input-{i}\"\n G.add_node(input_key, label=input_key, shape=\"circle\", color=\"blue\",\n node_type=\"input\", input=input)\n G.add_edge(task_source_key, input_key)\n encoder_key = f\"{input_key}-encoder\"\n G.add_node(encoder_key, label=encoder_key, shape=\"diamond\", color=\"black\",\n node_type=\"encoder\", input=input, output=CODE)\n G.add_edge(input_key, encoder_key)\n for box_number in range(initial_boxes):\n if box_number not in G.nodes():\n G.add_node(box_number, label=box_number, shape=\"square\", color=\"black\")\n G.add_edge(encoder_key, box_number)\n for o, output in enumerate(task.outputs):\n output_key = f\"{task.name}-output-{o}\"\n G.add_node(output_key, label=output_key, shape=\"triangle\", color=\"red\",\n node_type=\"output\", output=output)\n G.add_edge(output_key, task_sink_key)\n decoder_key = f\"{output_key}-decoder\"\n G.add_node(decoder_key, label=decoder_key, shape=\"diamond\", color=\"black\",\n node_type=\"decoder\", input=CODE, output=output)\n for box_number in range(initial_boxes):\n G.add_edge(box_number, decoder_key)\n G.add_edge(decoder_key, output_key)\n return G", "def build_network(num_actions: int) -> hk.Transformed:\n\n def q(obs):\n network = hk.Sequential(\n [hk.Flatten(),\n nets.MLP([FLAGS.hidden_units, num_actions])])\n return network(obs)\n\n return hk.without_apply_rng(hk.transform(q, apply_rng=True))", "def __init__(self, network_path='.', logging=True,\n input_image_size=None, n_input_channels=None,\n n_output_classes=None,\n fc1_n_chan=1024, fc1_dropout=0.5, alpha=4e-4 ):\n self.logging = logging\n\n # If network path does not yet exists\n self.network_path = network_path\n if not os.path.isdir(self.network_path):\n # Make network directory\n os.mkdir(self.network_path)\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Creation of new network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log(\"\\nNetwork did not exist ... 
\")\n self.log(\"Created new network with supplied (or default) architecture\")\n\n # Set up new network\n self.y_res = input_image_size[0]\n self.x_res = input_image_size[1]\n self.n_input_channels = n_input_channels\n self.n_output_classes = n_output_classes\n self.fc1_n_chan = fc1_n_chan\n self.fc1_dropout = fc1_dropout\n self.alpha = alpha\n self.n_samples_trained = 0\n self.n_class_samples_trained = self.n_output_classes*[0]\n self.n_samples_list = []\n self.n_class_samples_list = [[] for _ in range(self.n_output_classes)]\n self.accuracy_list = [[] for _ in range(self.n_output_classes)]\n self.precision_list = [[] for _ in range(self.n_output_classes)]\n self.recall_list = [[] for _ in range(self.n_output_classes)]\n self.F1_list = [[] for _ in range(self.n_output_classes)]\n\n # Save network architecture\n self.save_network_architecture( network_path=self.network_path )\n\n else:\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Re-initialization of existing network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \" \")\n\n # Load network architecture from directory\n net_architecture = self.load_network_architecture(self.network_path)\n\n # Set up network variables from loaded architecture\n self.y_res = net_architecture['y_res']\n self.x_res = net_architecture['x_res']\n self.n_input_channels = net_architecture['n_input_channels']\n self.n_output_classes = net_architecture['n_output_classes']\n self.fc1_n_chan = net_architecture['fc1_n_chan']\n self.fc1_dropout = net_architecture['fc1_dropout']\n self.alpha = net_architecture['alpha']\n self.n_samples_trained = net_architecture['n_samples_trained']\n self.n_class_samples_trained = net_architecture['n_class_samples_trained']\n self.n_samples_list = net_architecture['n_samples_list']\n self.n_class_samples_list = net_architecture['n_class_samples_list']\n self.accuracy_list = net_architecture['accuracy_list']\n self.precision_list = net_architecture['precision_list']\n self.recall_list = net_architecture['recall_list']\n self.F1_list = net_architecture['F1_list']\n\n # Update values of alpha and dropout if supplied\n if self.alpha != alpha:\n self.alpha = alpha\n self.log(\"Updated learning rate 'alpha' to {}\".format(self.alpha))\n if self.fc1_dropout != fc1_dropout:\n self.fc1_dropout = fc1_dropout\n self.log(\"Updated dropout fraction to {}\".format(self.fc1_dropout))\n\n # Clear previous graphs\n tf.reset_default_graph()\n\n #########################################################\n # Input and target variable placeholders\n # x = [ m_samples x [channel_1_data, channel_2_data, etc.] 
]\n self.x = tf.placeholder( tf.float32, shape = [None,\n self.n_input_channels * self.y_res * self.x_res] )\n self.y_trgt = tf.placeholder( tf.float32, \\\n shape = [None, self.n_output_classes] )\n\n #########################################################\n # Densely Connected Layer\n # Weights and bias\n self.fc1_shape = \\\n [self.y_res * self.x_res * self.n_input_channels,\n self.fc1_n_chan]\n self.W_fc1 = tf.Variable( tf.truncated_normal(\n shape=self.fc1_shape, stddev=0.1 ) )\n self.b_fc1 = tf.Variable( tf.constant(0.1, shape=[self.fc1_n_chan] ))\n\n # Calculate network step\n self.fc1_relu = tf.nn.relu( tf.matmul( self.x,\n self.W_fc1) + self.b_fc1 )\n\n # Set up dropout option for fc1\n self.fc1_keep_prob = tf.placeholder(tf.float32)\n self.fc1_relu_drop = tf.nn.dropout(self.fc1_relu, self.fc1_keep_prob)\n\n #########################################################\n # Readout layer\n # Weights and bias\n self.fc_out_shape = [self.fc1_n_chan, self.n_output_classes]\n self.W_fc_out = tf.Variable( tf.truncated_normal(\n shape=self.fc_out_shape, stddev=0.1 ) )\n self.b_fc_out = tf.Variable( tf.constant(0.1,\n shape=[self.fc_out_shape[1]] ))\n\n # Calculate network step\n self.fc_out_lin = tf.matmul( self.fc1_relu_drop,\n self.W_fc_out ) + self.b_fc_out\n\n #########################################################\n # Define cost function and optimizer algorithm\n self.cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n logits=self.fc_out_lin, labels=self.y_trgt ) )\n self.train_step = tf.train.AdamOptimizer(self.alpha).minimize(\n self.cross_entropy )\n\n #########################################################\n # Define how to test trained model\n self.network_prediction = tf.cast( tf.argmax(\n self.fc_out_lin, 1 ), tf.float32 )\n self.is_correct_prediction = tf.equal( tf.argmax( self.fc_out_lin, 1 ),\n tf.argmax( self.y_trgt, 1 ) )\n self.accuracy = tf.reduce_mean( tf.cast(\n self.is_correct_prediction, tf.float32 ) )\n\n #########################################################\n # Create save operation\n self.saver = tf.train.Saver()", "def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()", "def build(self):\n\n self.W = self.init([self.n_atom_input_feat, self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = self.W + self.b", "def _build_network(self):\n self.new_trainable_variable(\"w0_sin\", np.zeros(\n (config.somites * 2 - 2, HIDDEN_LAYER_UNITS), dtype=np.float64))\n self.new_trainable_variable(\"b0_sin\", np.zeros(HIDDEN_LAYER_UNITS, dtype=np.float64))\n self.new_trainable_variable(\"w1_sin\", np.zeros(\n (HIDDEN_LAYER_UNITS, config.oscillators), dtype=np.float64))\n self.new_trainable_variable(\"b1_sin\", np.zeros(config.oscillators, dtype=np.float64))\n\n self.new_trainable_variable(\"w0_cos\", np.zeros(\n (config.somites * 2 - 2, HIDDEN_LAYER_UNITS), dtype=np.float64))\n self.new_trainable_variable(\"b0_cos\", np.zeros(HIDDEN_LAYER_UNITS, dtype=np.float64))\n self.new_trainable_variable(\"w1_cos\", np.zeros(\n (HIDDEN_LAYER_UNITS, config.oscillators), dtype=np.float64))\n self.new_trainable_variable(\"b1_cos\", np.zeros(config.oscillators, dtype=np.float64))\n\n def action_infer(state: np.array) -> np.array:\n \"\"\"\n Get state and return feedback.\n\n state: [f_0, f_1, ..., phi_0, 
phi_1, ..., t_0, t_1, ...]\n return: [phase_feedback0, phase_feedback1, ..., angle_range0, angle_range1, ...]\n\n Discrepancy for torsion spring = alpha / 2 * k * range * T * sin(phi_i)\n \"\"\"\n forces = state[:config.somites]\n phis = state[config.somites:config.somites + config.oscillators]\n tensions = state[config.somites + config.oscillators:]\n\n f_sin, f_cos = self._calc_fs(np.concatenate((forces, tensions)))\n discrepancies = -0.5 * config.caterpillar_params[\"vertical_ts_k\"] * config.caterpillar_params[\"realtime_tunable_ts_rom\"] * tensions * np.sin(phis)\n return f_sin * np.sin(phis) + f_cos * np.cos(phis) - self.get_discrep_coeffs() * discrepancies, np.ones(config.oscillators) * config.caterpillar_params[\"realtime_tunable_ts_rom\"]\n\n return action_infer", "def __init__(self, num_inputs=3, hidden_layers=[3, 3], num_outputs=2):\n\n self.num_inputs = num_inputs\n self.hidden_layers = hidden_layers\n self.num_outputs = num_outputs\n\n # create a generic representation of the layers\n layers = [num_inputs] + hidden_layers + [num_outputs]\n\n # create random connection weights for the layers\n weights = []\n for i in range(len(layers) - 1):\n w = np.random.rand(layers[i], layers[i + 1])\n weights.append(w)\n self.weights = weights\n\n activations = []\n\n for i in range(len(layers)):\n a = np.zeros(layers[i])\n activations.append(a)\n self.activations = activations\n\n derivatives = []\n\n for i in range(len(layers) - 1):\n d = np.zeros(layers[i])\n derivatives.append(d)\n self.derivatives = derivatives", "def __init__(self, layerNeurons, numberOfLayers, initialWeights = None, lowerBound = None, upperBound = None):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons) > 1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)*numberOfLayers\r\n \r\n # Initialise the weights with the initialiser or random values\r\n if initialWeights is None:\r\n if lowerBound is None:\r\n lowerBound=-1/np.sqrt(layerNeurons[0])\r\n if upperBound is None:\r\n upperBound=1/np.sqrt(layerNeurons[0])\r\n self.weights = np.random.uniform(lowerBound, upperBound, totalWeightCount)\r\n else:\r\n assert initialWeights.size == totalWeightCount, (\"Length of initial weight matrix incorrect. 
You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = initialWeights.view()\r\n \r\n self.weights.shape = (numberOfLayers, -1)\r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n for layerInputDimention, layerOutputDimention in zip(layerNeurons, layerNeurons[1:]):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = batchNetworkLayer(layerInputDimention, layerOutputDimention, numberOfLayers, \r\n self.weights[..., :, layerBlockStart:layerBlockEnd], \r\n self.weights[..., :, layerBlockEnd:layerBiasEnd])\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd", "def __init__(self, network_path='.', logging=True,\n input_image_size=None, n_input_channels=None,\n n_output_classes=None,\n fc1_dropout=1.0, alpha=4e-4 ):\n self.logging = logging\n\n # If network path does not yet exists\n self.network_path = network_path\n if not os.path.isdir(self.network_path):\n # Make network directory\n os.mkdir(self.network_path)\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Creation of new network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log(\"\\nNetwork did not exist ... \")\n self.log(\"Created new network with supplied (or default) architecture\")\n\n # Set up new network\n self.y_res = input_image_size[0]\n self.x_res = input_image_size[1]\n self.n_input_channels = n_input_channels\n self.n_output_classes = n_output_classes\n self.fc1_dropout = fc1_dropout\n self.alpha = alpha\n self.n_samples_trained = 0\n self.n_class_samples_trained = self.n_output_classes*[0]\n self.n_samples_list = []\n self.n_class_samples_list = [[] for _ in range(self.n_output_classes)]\n self.accuracy_list = [[] for _ in range(self.n_output_classes)]\n self.precision_list = [[] for _ in range(self.n_output_classes)]\n self.recall_list = [[] for _ in range(self.n_output_classes)]\n self.F1_list = [[] for _ in range(self.n_output_classes)]\n\n # Save network architecture\n self.save_network_architecture( network_path=self.network_path )\n\n else:\n now = datetime.datetime.now()\n self.log(\"\\n\\n++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \"Re-initialization of existing network: \")\n self.log( \" {}\".format(self.network_path) )\n self.log( \" @ {}\".format(now.strftime(\"%Y-%m-%d %H:%M\")) )\n self.log( \"++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n self.log( \" \")\n\n # Load network architecture from directory\n net_architecture = self.load_network_architecture(self.network_path)\n\n # Set up network variables from loaded architecture\n self.y_res = net_architecture['y_res']\n self.x_res = net_architecture['x_res']\n self.n_input_channels = net_architecture['n_input_channels']\n self.n_output_classes = net_architecture['n_output_classes']\n self.fc1_dropout = net_architecture['fc1_dropout']\n self.alpha = net_architecture['alpha']\n self.n_samples_trained = net_architecture['n_samples_trained']\n self.n_class_samples_trained = net_architecture['n_class_samples_trained']\n self.n_samples_list = net_architecture['n_samples_list']\n self.n_class_samples_list = 
net_architecture['n_class_samples_list']\n self.accuracy_list = net_architecture['accuracy_list']\n self.precision_list = net_architecture['precision_list']\n self.recall_list = net_architecture['recall_list']\n self.F1_list = net_architecture['F1_list']\n\n # Update values of alpha and dropout if supplied\n if self.alpha != alpha:\n self.alpha = alpha\n self.log(\"Updated learning rate 'alpha' to {}\".format(self.alpha))\n if self.fc1_dropout != fc1_dropout:\n self.fc1_dropout = fc1_dropout\n self.log(\"Updated dropout fraction to {}\".format(self.fc1_dropout))\n\n # Clear previous graphs\n tf.reset_default_graph()\n\n #########################################################\n # Input and target variable placeholders\n # x = [ m_samples x [channel_1_data, channel_2_data, etc.] ]\n self.x = tf.placeholder( tf.float32, shape = [None,\n self.n_input_channels * self.y_res * self.x_res] )\n self.y_trgt = tf.placeholder( tf.float32, \\\n shape = [None, self.n_output_classes] )\n\n # Set up dropout option for inputs\n self.fc1_keep_prob = tf.placeholder(tf.float32)\n self.x_drop = tf.nn.dropout(self.x, self.fc1_keep_prob)\n\n #########################################################\n # Readout layer\n # Weights and bias\n self.fc_out_shape = \\\n [self.y_res * self.x_res * self.n_input_channels,\n self.n_output_classes]\n self.W_fc_out = tf.Variable( tf.truncated_normal(\n shape=self.fc_out_shape, stddev=0.1 ) )\n self.b_fc_out = tf.Variable( tf.constant(0.1,\n shape=[self.fc_out_shape[1]] ))\n\n # Calculate network step\n self.fc_out_lin = tf.matmul( self.x_drop,\n self.W_fc_out ) + self.b_fc_out\n\n #########################################################\n # Define cost function and optimizer algorithm\n self.cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n logits=self.fc_out_lin, labels=self.y_trgt ) )\n self.train_step = tf.train.AdamOptimizer(self.alpha).minimize(\n self.cross_entropy )\n\n #########################################################\n # Define how to test trained model\n self.network_prediction = tf.cast( tf.argmax(\n self.fc_out_lin, 1 ), tf.float32 )\n self.is_correct_prediction = tf.equal( tf.argmax( self.fc_out_lin, 1 ),\n tf.argmax( self.y_trgt, 1 ) )\n self.accuracy = tf.reduce_mean( tf.cast(\n self.is_correct_prediction, tf.float32 ) )\n\n #########################################################\n # Create save operation\n self.saver = tf.train.Saver()", "def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n super().__init__(inputnodes, hiddennodes, outputnodes, learningrate)\n\n # link weight matrices, wih and who\n # weights inside the arrays are w_i_j, where link is from node i to node i to j in the next layer\n # w11 w21\n # w12 w22 etc\n self.wih = cupy.random.normal(\n 0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))\n self.who = cupy.random.normal(\n 0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))\n\n #activation function is the sigmoid function\n self.activation_function = lambda x: 1 / (1 + cupy.exp(x) ** (-1))", "def build_graph(self):\n n_classes = self.n_classes\n\n (self.feed('data')\n .conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)\n .conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool1')\n .conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)\n .conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool2')\n .conv(3, 3, 256, 1, 1, name='conv3_1')\n .conv(3, 3, 256, 1, 1, 
name='conv3_2')\n .conv(3, 3, 256, 1, 1, name='conv3_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool3')\n .conv(3, 3, 512, 1, 1, name='conv4_1')\n .conv(3, 3, 512, 1, 1, name='conv4_2')\n .conv(3, 3, 512, 1, 1, name='conv4_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool4')\n .conv(3, 3, 512, 1, 1, name='conv5_1')\n .conv(3, 3, 512, 1, 1, name='conv5_2')\n .conv(3, 3, 512, 1, 1, name='conv5_3'))\n\n self.compute_rDeRF() # dummy\n\n # Classification\n (self.feed('conv5_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool6')\n .reshape(shape=(-1, 7, 7, 512), name='pool6_reshape')\n .fc(4096, name='fc6')\n .dropout(0.5, name='drop6')\n .fc(4096, name='fc7')\n .dropout(0.5, name='drop7')\n # .make_time(name='drop7_reduced')\n .fc(n_classes, relu=False, name='cls_score')\n .softmax(name='cls_prob'))\n pass", "def __init__(self, input_dim=(1, 28, 28), num_classes=10):\n self.params = {}\n\n #######################################################################\n # TODO: Initialize weights and biases for the convolutional neural #\n # network. Weights should be initialized from a Gaussian distribution;#\n # biases should be initialized to zero. All weights and biases should #\n # be stored in the dictionary self.params. #\n #######################################################################\n\n filter_size = 5\n weight_scale = 1e-2\n num_filters = 6\n hidden_dim = 784\n\n #****** THIS WAS TO TEST OUT FASTER NETWORKS *******\n\n self.params['W1'] = np.random.normal(scale=weight_scale, size=(num_filters, input_dim[0], filter_size, filter_size))\n # self.params['W2'] = np.random.normal(scale=weight_scale, size=(num_filters, 6, filter_size, filter_size))\n self.params['W3'] = np.random.normal(scale=weight_scale, size=(864, num_classes))\n\n # self.params['W3'] = np.random.normal(scale=weight_scale, size=(hidden_dim, num_classes))\n # self.params['W4'] = np.random.normal(scale=weight_scale, size=(hidden_dim, num_classes))\n\n self.params['b1'] = np.zeros(num_filters)\n # self.params['b2'] = np.zeros(num_filters)\n self.params['b3'] = np.zeros(num_classes)\n\n # self.params['b3'] = np.zeros(num_classes)\n # self.params['b4'] = np.zeros(num_classes)", "def build_network(self):\n # Position the node centers\n self.set_node_centers()\n\n # Set the nodes\n self.nodes = []\n for i in range(self.n_states):\n node = Node(\n self.node_centers[i],\n self.node_radius,\n self.labels[i]\n )\n self.nodes.append(node)", "def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):\n \n super().__init__(inputnodes, hiddennodes, outputnodes, learningrate)\n\n # link weight matrices, wih and who\n # weights inside the arrays are w_i_j, where link is from node i to node i to j in the next layer\n # w11 w21\n # w12 w22 etc\n self.wih = numpy.random.normal(\n 0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))\n self.who = numpy.random.normal(\n 0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))\n\n #activation function is the sigmoid function\n self.activation_function = lambda x: scipy.special.expit(x)\n\n pass", "def init_network() -> dict:\n network = {}\n network['W1'] = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])\n network['b1'] = np.array([0.1, 0.2, 0.3])\n network['W2'] = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])\n network['b2'] = np.array([0.1, 0.2])\n network['W3'] = np.array([[0.1, 0.3], [0.2, 0.4]])\n network['b3'] = np.array([0.1, 0.2])\n return network" ]
[ "0.7192482", "0.7097717", "0.70156425", "0.69921356", "0.69625026", "0.68106323", "0.67981684", "0.6779482", "0.67386734", "0.67300445", "0.6718721", "0.6694646", "0.6689944", "0.6686015", "0.66773814", "0.66746324", "0.6673208", "0.6619003", "0.66150516", "0.6595705", "0.65637094", "0.65623355", "0.6554509", "0.65524644", "0.65327805", "0.65178853", "0.65145856", "0.65095437", "0.6503738", "0.649654" ]
0.71916044
1
Add transaction to the history.
def add_history(self): # add separator, if there already are history entries if self.parentApp.History != '': self.parentApp.History += ( '\n\n--- --- --- --- --- --- --- --- --- --- --- ---\n\n' ) # add the transaction to it self.parentApp.History += self.parentApp.tmpTransC.to_str()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addTransaction(self, transaction):\n self.transactions.append(transaction)\n self.transactionIDs.add(transaction.id)", "def add(self, transaction):\n if isinstance(transaction, Transaction):\n # If the transaction already exists\n if(transaction.hash in self.transaction_index):\n print(\"Debug: The transaction already exists in the list\")\n return None\n\n self.transaction_list.append(transaction)\n size = len(self.transaction_list)-1\n self.transaction_index[transaction.hash] = size\n else:\n raise Exception(\"Error: not a transaction\")", "def _push_history(self):\n self._history.append(self._state)", "def add_task_history(self, task_name):\n self._task_history.append(task_name)", "def add_transaction_to_user_coin_history(user, amount, transaction=0, purchase=False):\n if purchase and isinstance(purchase, Suggestion):\n transaction = UserCoinHistory(user=user, coins_change=amount, suggestion=purchase, transaction=transaction)\n else:\n transaction = UserCoinHistory(user=user, coins_change=amount, transaction=transaction)\n\n transaction.save()", "def append_history_record(self, history_record):\n self.history.append(history_record)\n self._increment_history_pointer()", "def AddTransaction(self, tx):\n if BC.Default() is None:\n return False\n\n if tx.Hash.ToBytes() in self.MemPool.keys():\n return False\n\n if BC.Default().ContainsTransaction(tx.Hash):\n return False\n\n if not tx.Verify(self.MemPool.values()):\n logger.error(\"Verifying tx result... failed\")\n return False\n\n self.MemPool[tx.Hash.ToBytes()] = tx\n\n return True", "def insert(self, *args):\n return _ida_hexrays.qvector_history_t_insert(self, *args)", "def add_transaction(self, tx_json):\n recv_tx = Transaction.from_json(tx_json)\n if not recv_tx.verify():\n raise Exception(\"New transaction failed signature verification.\")\n with self.all_tx_lock:\n if tx_json in self._all_transactions:\n print(f\"{self.name} - Transaction already exist in pool.\")\n return\n self._all_transactions.add(tx_json)", "def do(self, market_data):\r\n self.data.history = self.data.history + market_data", "def addHistory(self):\r\n \r\n data = self.get()\r\n \r\n if data == '':\r\n return\r\n elif len(self.history) != 0 and self.history[0] == data:\r\n return\r\n \r\n if len(self.history) == self.historySize:\r\n self.history.pop()\r\n \r\n self.history.insert(0, data)", "def add_history(self,date,what,note):\r\n note = '.'.join(note.split(','))\r\n self.history.append([date,what,note])", "def append(self, new):\n new = HistoryItem(new)\n list.append(self, new)\n new.idx = len(self)", "def add(self, record):\n self._hist_records[record.uid] = record", "def add_transaction(table, id, store_id, hr_id, crm_id, quantity):\n record = [id, store_id, hr_id, crm_id, quantity]\n table.append(record)\n\n return table", "def _AddSnapshot(self, snapshot):\n if self._history.count(snapshot) == 0:\n self._history.append(snapshot)", "def append(self, search):\n self._search_history.append(search)", "def save_transaction(**kwargs):\n if not 'user_id' in kwargs:\n raise AttributeError(\"Cannot create a transaction without user_id\")\n\n\n return History.create(\n user_id=kwargs['user_id'],\n from_curr=kwargs['currencyFrom'],\n to_curr=kwargs['currencyTo'],\n amount=kwargs['amountTo'],\n address_in=kwargs['payinAddress'],\n address_out=kwargs['payoutAddress'],\n extraid=kwargs['payinExtraId'],\n transaction_id=kwargs['id'],\n exchange_status=kwargs['status'],\n )", "def insert_in_history(cls, keyword, user_id):\n\n search_query = \"\"\"SELECT keyword 
FROM search_history WHERE user_id='{user_id}' AND keyword='{keyword}'\"\"\"\n cls.db.query(search_query, {'user_id': user_id, 'keyword': keyword})\n search_results = cls.db.cursor.fetchall()\n if not search_results:\n query = \"\"\"INSERT INTO search_history (keyword, user_id) VALUES('{keyword}', '{user_id}')\"\"\"\n cls.db.query(query, {'user_id': user_id, 'keyword': keyword})\n cls.db.commit() # do this in processor on exception handling and rollback", "def push(self, value):\n self.history.append(value)", "def enter_transaction():\n _state.transactions = get_transactions() + 1", "def add_history_to_address(address):\n \n result = add_transaction_observation_to_address(address)\n \n # if successfully stored in observation list, return a plain 200\n if \"error\" in result:\n return make_response(jsonify(build_error(result[\"error\"])), result[\"status\"])\n else:\n return \"\"", "def history(self, history):\n self._history = history", "def add_transaction(self,transaction):\n if type(transaction) != PoWGenericTransaction:\n raise Exception('TYPEERROR','transaction should be type of \"PoWGenericTransaction\" but got {}'.format(type(transaction)))\n if not transaction.is_validation_passed():\n print 'The transaction is not valid. Skipped...'\n return\n self.transactions.append(transaction)", "def add_history(self, author, string, data_var=None):\n\n attr = 'history'\n self.add_string(attr, author, string, data_var=data_var)", "def history(self, history):\n\n self._history = history", "def add_to_game_history(self, tile, flag=False):\n move = {\n 'tile': tile,\n 'flag':flag,\n 'coordinate': self.stack[tile]['coordinate'],\n 'value': self.stack[tile]['value']}\n self.history.append(move)", "def add(self, node):\n if str(node.getPosition()) in self._history:\n # duplicate entry\n return\n self._history[str(node.getPosition())] = True\n self._insort(node)", "def recTrans(self,NoSh,BorS,Price,TS):\n self.TL.append(Transaction(NoSh,BorS,Price,TS))\n self.Price=Price", "def new_transaction(self, sender, recipient, amount):\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount\n })\n return self.last_block['index']+1" ]
[ "0.73509127", "0.71942204", "0.7029798", "0.6894006", "0.68311554", "0.6707649", "0.6418446", "0.6413464", "0.6399687", "0.6394123", "0.6381334", "0.63626176", "0.6356362", "0.63558763", "0.63207406", "0.62853384", "0.6273537", "0.6218804", "0.61947453", "0.6156782", "0.6138252", "0.6048631", "0.6035331", "0.6031381", "0.602358", "0.59834623", "0.5981603", "0.59106123", "0.59103155", "0.5879934" ]
0.730184
1
Press cancel to go back.
def on_cancel(self, keypress=None): self.parentApp.switchFormPrevious()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def press_back_button(self):\n self.driver.back()", "def skip(self):\n self.click_back_button()", "def back(self):\n self.input_key_event(InputActions.BACK)", "def back( self ):\n super( ConfirmationScreen, self ).back()\n\n self._current_option = self._current_option - 1\n print( \"Current option is \" +str( self._current_option ) )\n \n if self._current_option < 0:\n self._current_option = len( self._options ) - 1", "def click_back_button(driver):\n driver.back()\n return PASSED", "def back_clicked(self):\n self.close()", "def cancelButton(self):\n \n self.answer=-1\n self.top.destroy()", "def call_q(self, _):\n return MENU_GO_BACK", "def call_q(self, _):\n return MENU_GO_BACK", "def call_q(self, _):\n return MENU_GO_BACK", "def go_back(self):\n self.hide()", "def go_back(self):\n self.hide()", "def goBack(self):\n self.hide()", "def back(self):", "def cancelButton(self):\n \n self.answer=\"cancel\"\n self.top.destroy()", "def back(self):\n self.log_info(f\"Browser.back: Telling browser to return to previous page\")\n self.CORE.back()\n return", "def pressCancel(self):\n self.close()", "def on_cancel(self, *args):\n self.response(Gtk.ResponseType.CANCEL)", "def go_back(self):\n app = App.get_running_app()\n app.sm.current = 'menu'", "def back(self):\n self.cursor.back()", "def back(self):\n\n\t\tself.controller.showFrame(self.prevFrame)", "def back(self, _event=None):\n self.on_closingWindow()", "def back(self,**kwargs):\n self.mm.loadPreviousMenu()", "def presscancel(self):\n self.mode.resetMode()", "def continue_shopping(self):\n self._back_btn.click()", "def back_button(self):\r\n self.update_settings()\r\n self.is_action = True\r\n if self.back_call is not None:\r\n self.back_call()", "def go_back(self):\n self.master.switch_frame(MainView)", "def go_back(self):\n self.master.switch_frame(MainView)", "def i_go_back(self):\n if not world.using_selenium:\n assert False, (\"this step needs to be implemented for the \"\n + \"django test client\")\n world.browser.back()", "def click_cancel(self):\n self.click_element(self.cancel_button_selector)" ]
[ "0.7686845", "0.76652855", "0.7510146", "0.73989546", "0.7366367", "0.735423", "0.7283691", "0.7280198", "0.7280198", "0.7280198", "0.72527945", "0.72527945", "0.7245617", "0.7226324", "0.7215707", "0.7212924", "0.72069484", "0.70282036", "0.7014023", "0.6985043", "0.6983128", "0.6909127", "0.68980265", "0.6893787", "0.6892065", "0.687678", "0.6863445", "0.6863445", "0.6855849", "0.6833607" ]
0.80343944
0
Parse or generate classification (e.g. public health, education, etc).
def _parse_classification(self, item): full_name = item.css('td[headers=Name]::text').extract_first() if "Metra" in full_name and "Board Meeting" in full_name: return BOARD elif "Citizens Advisory" in full_name: return ADVISORY_COMMITTEE elif "Committee Meeting" in full_name: return COMMITTEE else: return NOT_CLASSIFIED
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_classification(self, links):\n for link in links:\n if \"hearing\" in link[\"title\"].lower():\n return FORUM\n return COMMISSION", "def classification(self) -> 'outputs.CaseClassificationResponse':\n return pulumi.get(self, \"classification\")", "def _parse_classification(self, title):\n if \"advisory\" in title.lower():\n return ADVISORY_COMMITTEE\n if \"committee\" in title.lower():\n return COMMITTEE\n return COMMISSION", "def classify():\n excerpt = request.form.get('data')\n if excerpt is None or not excerpt:\n return jsonify(message='Bad request'), 400\n return jsonify(clf.predict(excerpt))", "def _parse_classification(self, meeting_type):\n if 'committee' in meeting_type:\n return COMMITTEE\n elif 'board' in meeting_type:\n return BOARD\n return NOT_CLASSIFIED", "def classify(self, data):\n abstract", "def test(name, data, classifier):\n classification = classifier.classify(data)\n print('Item ' + name + ' is a ' + classification)", "def classify(strings: List[str], params: Any) -> List[str]:\n \n # ############################ REPLACE THIS WITH YOUR CODE #############################\n def predict_one_sample(sample, train_dict, ngram_lvl=1):\n ngrams = [sample[i:i+ngram_lvl] for i in", "def classification(request):\n\n ctx = {\n 'classification_text': getattr(settings, 'CLASSIFICATION_TEXT', 'UNCLASSIFIED'),\n 'classification_text_color': getattr(settings, 'CLASSIFICATION_TEXT_COLOR', 'white'),\n 'classification_background_color': getattr(settings, 'CLASSIFICATION_BACKGROUND_COLOR', 'green'),\n 'classification_banner_enabled': getattr(settings, 'CLASSIFICATION_BANNER_ENABLED', True),\n 'classification_link': getattr(settings, 'CLASSIFICATION_LINK', None)\n }\n\n return ctx", "def classify(self, example):\n raise NotImplementedError()", "def classificationgroup(self):\n clg = self._json['author-profile'].get('classificationgroup', {}).get('classifications', {})\n out = []\n items = clg.get('classification', [])\n if not isinstance(items, list):\n items = [items]\n for item in items:\n out.append((item['$'], item['@frequency']))\n return out", "def parse_classification(classification_string):\n return {\n 'CONNECTED': True,\n 'DISCONNECTED': False\n }.get(classification_string)", "def parse_for_classification(sentence):\n for token in sentence:\n if token.deprel == \"nsubj\":\n try:\n label = next(iter(token.feats[\"Number\"]))\n text = sentence.text\n return {\"text\": text, \"label\": label}\n except (StopIteration, KeyError):\n return {}\n return {}", "def classify_new_email(filename,probabilities_by_category,prior_by_category):\n ### TODO: Write your code here\n spam_distribution = 0\n ham_distribution = 0\n word_frequency = util.get_word_freq([filename])\n for w in word_frequency:\n if w in probabilities_by_category[0]:\n spam_distribution += word_frequency[w] * np.log(probabilities_by_category[0][w])\n if w in probabilities_by_category[1]:\n ham_distribution += word_frequency[w] * np.log(probabilities_by_category[1][w])\n spam_distribution += np.log(prior_by_category[0])\n ham_distribution += np.log(prior_by_category[1])\n\n predict = \"\"\n if(spam_distribution > ham_distribution):\n predict = \"spam\"\n else:\n predict = \"ham\"\n\n word_distribution = [spam_distribution, ham_distribution]\n\n classify_result = (predict, word_distribution)\n\n return classify_result", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_classifier.predict(data)", "def classification(original_training_data):\n\n ''' Storing the dataframe as numpy array 
'''\n original_training_data_values = original_training_data.values\n\n ''' Storing the values of target attribute for finding out the counts of each recipetype'''\n target_column = original_training_data_values[:, -1]\n\n ''' Recipe_type stores the unique values of target attribute in the form of a list [Muffin Cupcake] \n cupcake_muffin_count stores the count of muffin and cupcakes in the form of a list [451 451]'''\n recipe_type, cupcake_muffin_count = np.unique(target_column, return_counts=True)\n\n ''' cupcake_muffin_count.argmax() returns the index of the highest value. In this case, it will return the index of \n muffin or cupcake count. '''\n majority_class = recipe_type[cupcake_muffin_count.argmax()]\n\n return majority_class", "def get_class(summary):\n\n if re.search(r\"unlikely\", summary.lower()) > 0:\n var_class = \"2\"\n elif re.search(r\"not\\sclinically\\simportant|benign|polymorphism\", summary.lower()) > 0:\n var_class = \"1\"\n elif re.search(r\"no\\sevidence|no\\sapparent\\sevidence|normal|neg|-ve|no\\smut|no\\sother\\svariants\", summary.lower()) > 0:\n var_class = \"N\"\n elif re.search(r\"likely\\spathogenic|consistent\", summary.lower()) > 0:\n var_class = \"4\"\n elif re.search(r\"pathogenic|out-of-frame\", summary.lower()) > 0:\n var_class = \"5\"\n elif re.search(r\"uv|uncertain|missense\\svariant|unclassified|unknown|variant|in-frame|heterozygous\\s(deletion|duplication)\", summary.lower()) > 0:\n var_class = \"3\"\n elif re.search(r\"pathogenic|confirm|frameshift|nonsense|splice\\ssite\\smutation|deletion|splicesite|mutation\",\n summary.lower()) > 0:\n var_class = \"5\"\n elif re.search(r\"missense\", summary.lower()) > 0:\n var_class = \"3\"\n else:\n var_class = \"U\"\n\n return var_class", "def classify(indicator):\n # this function prints the spam classification\n if indicator > SPAMTHRESHOLD:\n return \"SPAM\"\n else:\n return \"HAM\"", "def convertclasstoemotion(pred):\n \n label_conversion = {'0': 'neutral',\n '1': 'calm',\n '2': 'happy',\n '3': 'sad',\n '4': 'angry',\n '5': 'fearful',\n '6': 'disgust',\n '7': 'surprised'}\n\n for key, value in label_conversion.items():\n if int(key) == pred:\n label = value\n return label", "def getClassifier(self):\n return self.classify", "def classify_string(self, s, **kwargs):\n\n token = GoldTagPOSToken(s, goldlabel=\"NONE\")\n\n sio = StringIO()\n\n # TODO: Fix the behavior of write_gram such that we can just do it from a string.\n intent.igt.grams.write_gram(token, type='classifier', output=sio, **kwargs)\n\n c_token = sio.getvalue().strip()\n sio.close()\n\n result = self.classify(c_token)\n return result", "def GetClassification(self, *args, **kwargs):\n pass", "def check_classifier():\n content = []\n labels = []\n file = 'COMP3074-CW1-Dataset.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'name.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'Small_talk.csv'\n content, labels = get_tag(file, \"small_talk\", content, labels, )\n x_train, x_test, y_train, y_test = train_test_split(content, # Sample feature set to be divided\n labels, # The sample result to be divided (label)\n stratify=labels, # Keep the category proportions\n # the same in training and testing\n test_size=0.25, # Refers to the proportion of\n # samples reserved for testing\n random_state=22) # Random seed\n count_vect = CountVectorizer(stop_words=stopwords.words('english'))\n x_train_counts = count_vect.fit_transform(x_train)\n tfidf_transformer = 
TfidfTransformer(use_idf=True, # Tf_idf\n sublinear_tf=True).fit(x_train_counts)\n x_train_tf = tfidf_transformer.transform(x_train_counts) # Standardize the inherent attributes of the training set,\n # reduce dimensionality and normalize\n classify = LogisticRegression(random_state=0).fit(x_train_tf, y_train) # Logistic regression\n return classify, tfidf_transformer, count_vect", "def classify(cls, title):\n\t\tpairs_list = TitleClassifier.reduce(TitleClassifier.classify_words(title))\n\t\treduced_list = [ cl for (cl, w) in pairs_list ]\n\t\tword_list = [ w for (cl, w) in pairs_list ]\n\t\tbrand_classes = [ ['russian', 'braced_ru'],\n\t\t\t['latin', 'braced_ru'],\n\t\t\t['latin', 'braced_lat'],\n\t\t\t['russian', 'braced_lat'],\n\t\t\t['latin', 'dash', 'russian', 'braced_ru'],\n\t\t\t['russian', 'dash', 'latin', 'braced_ru']\n\t\t]\n\t\tif reduced_list in brand_classes:\n\t\t\t#TODO: brand name extraction here\n\t\t\tif 'russian' in reduced_list:\n\t\t\t\tk = 'russian'\n\t\t\telif 'braced_ru' in reduced_list:\n\t\t\t\tk = 'braced_ru'\n\t\t\tname = ''\n\t\t\tfor cl, val in pairs_list:\n\t\t\t\tif cl == k:\n\t\t\t\t\tname += val\n\t\t\treturn (True, name.strip(\" ()\"))\n\t\telse:\n\t\t\treturn (False, None)", "def load_classification_dataset(step, do_lower_case,data_type,data_subtype,use_syntetic_data):\n assert step in ['train', 'test']\n binary = False \n undersample_majority = False\n\n paths = ['~/Github/Data/Patient/NIRADS/PET_CT_NIRADS.xlsx', '~/Github/Data/Patient/NIRADS/MR_NIRADS_2018.xlsx','~/Github/Data/Patient/NIRADS/MR_NIRADS.xlsx']\n if data_type == 'ct':\n data_r = pd.read_excel(paths[0])\n else:\n data_r = pd.read_excel(paths[1])\n data_r.append(pd.read_excel(paths[2]), ignore_index = True, sort=False)\n\n data_p,data_n, y_p, y_n = tc.text_cleaning(data_r, None, data_target='section') \n\n if data_subtype == 'primary':\n data = data_p\n y = y_p -1\n else:\n data = data_n\n y = y_n -1\n\n if binary:\n y[y<2]=0\n y[y>0]=1\n\n y_dist = [np.sum(y==x) for x in np.unique(y)]\n print(\"Distribution of all labels: \", y_dist, \"\\n\\n\")\n\n train_text, test_text, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=1)\n\n y_dist = [np.sum(y_train==x) for x in np.unique(y_train)]\n print(\"Distribution of training labels: \", y_dist, \"\\n\\n\")\n\n if step =='train':\n if use_syntetic_data:\n data_syntetic = pd.read_csv('~/Github/Data/Patient/NIRADS/PET_CT_NIRADS_syntetic.csv')\n train_text = np.concatenate((train_text,data_syntetic['syntetic_data'].values))\n y_train = np.concatenate((y_train,data_syntetic['syntetic_label'].values-1))\n\n train_text, test_text, y_train, y_test = train_test_split(train_text, y_train, test_size=0.5, random_state=1)\n train_text = np.concatenate((train_text,test_text))\n y_train = np.concatenate((y_train,y_test))\n y_dist = [np.sum(y_train==x) for x in np.unique(y_train)]\n print(\"Distribution of training labels after inserting syntetic data: \", y_dist, \"\\n\\n\")\n\n if not undersample_majority:\n data_to_use = train_text.copy()\n y_to_use = y_train.copy()\n else:\n max_label1 = 1000\n data_to_use = []\n y_to_use = []\n y1=0\n for x in range(len(y_train)):\n if y_train[x] !=1:\n data_to_use.append(train_text[x])\n y_to_use.append(y_train[x])\n else:\n if y1 <max_label1:\n data_to_use.append(train_text[x])\n y_to_use.append(y_train[x])\n y1+=1\n\n else:\n data_to_use = test_text.copy()\n y_to_use = y_test.copy()\n\n basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n examples = []\n\n for i, tokens in 
tqdm(enumerate(data_to_use)):\n label = y_to_use[i]\n examples.append(\n ClassificationExample(\n id=i,\n tokens_a=basic_tokenizer.tokenize(tokens),\n tokens_b=None,\n label=label,\n )\n )\n logging.info('Number of `%s` examples: %d', step, len(examples))\n \n return examples", "def _parse_classification(self, item):\n return BOARD", "def classify_text(classifier, sentence):\n\n sentence = Sentence(sentence)\n classifier.predict(sentence, multi_class_prob=True)\n return sentence.labels", "def classify(self, dataSet):\n\n return nltk.classify.apply_features(self.extrairFrase, dataSet)", "def _classification(text_path_list, id_list, label_list):\n textnum = len(text_path_list)\n batched_num = ((textnum - 1) // classify.BATCH_SIZE + 1) * classify.BATCH_SIZE\n for i in range(batched_num - textnum):\n text_path_list.append(text_path_list[0])\n id_list.append(id_list[0])\n annotations = classify_obj.inference(text_path_list, id_list, label_list) #\n return annotations[0:textnum]", "def classifier(self):\n return self.config.get('classifier', \"general\")" ]
[ "0.707388", "0.6647457", "0.6626518", "0.6521891", "0.6510014", "0.64428645", "0.636594", "0.6330406", "0.6225585", "0.61786884", "0.61213285", "0.6062574", "0.6061338", "0.60570234", "0.6047572", "0.60411346", "0.5997528", "0.5993576", "0.5987054", "0.5979351", "0.5978588", "0.59748095", "0.5956325", "0.5945186", "0.5878724", "0.5861608", "0.58532906", "0.58512735", "0.58489835", "0.5832133" ]
0.68485194
1
Cache the response if this request qualifies and has not been cached yet, or for rest-based and rest-and-time-based evict the record from the cache if the request method is POST/PATCH/PUT or DELETE
def process_response(self, req, resp, resource, req_succeeded): # Step 1: for 'rest-based' and 'rest&time-based' eviction strategies the # POST/PATCH/PUT/DELETE calls are never cached and even more they # invalidate the record cached by the GET method if self.cache_config['CACHE_EVICTION_STRATEGY'] in [CacheEvictionStrategy.rest_based, CacheEvictionStrategy.rest_and_time_based] \ and req.method.upper() in [HttpMethods.POST, HttpMethods.PATCH, HttpMethods.PUT, HttpMethods.DELETE]: # get the cache key created by the GET method (assuming there was one) key = self.generate_cache_key(req, method='GET') self.cache.delete(key) return # Step 2: if it is marked to be cached, but has not yet been cached # then we cache it if hasattr(req.context, 'cache') and req.context.cache \ and (not hasattr(req.context, 'cached') or not req.context.cached): key = self.generate_cache_key(req) value = self.serialize(req, resp, resource) # for the REST-based strategy there is no timeout, the cached record never expires if self.cache_config['CACHE_EVICTION_STRATEGY'] in [CacheEvictionStrategy.rest_based]: # timeout 0 - never expires timeout = 0 else: # for the time-based and rest-and-time-based eviction strategy the # cached record expires timeout = req.context.cache_timeout if hasattr(req.context, 'cache_timeout') else 600 self.cache.set(key, value, timeout=timeout)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_resource(self, req, resp, resource, params):\n\n # Step 1: for 'rest-based' and 'rest&time-based' eviction strategies the\n # POST/PATCH/PUT/DELETE calls are never cached, they should never be\n # loaded from cache as they must always execute,\n # so for those we don't need to try to search the cache\n if self.cache_config['CACHE_EVICTION_STRATEGY'] in [CacheEvictionStrategy.rest_based,\n CacheEvictionStrategy.rest_and_time_based] \\\n and req.method.upper() in [HttpMethods.POST,\n HttpMethods.PATCH,\n HttpMethods.PUT,\n HttpMethods.DELETE]:\n return\n\n # Step 2: determine whether the given responder has caching setup\n # and if not then short-circuit to save on the lookup of request in the cache\n # as anyhow this request was not marked to be cached\n\n # find out which responder (\"on_...\" method) is going to be used to process this request\n responder = None\n for _method in dir(resource):\n if _DECORABLE_METHOD_NAME.match(_method) and _method[3:].upper() == req.method.upper():\n responder = _method\n break\n\n if responder:\n # get the name of the responder wrapper, which for cached objects is 'cache_wrap'\n # see the \"Cache.cache\" decorator in cache.py\n responder_wrapper_name = getattr(getattr(resource, responder), '__name__')\n\n # is the given method (or its class) decorated by the cache_wrap being the topmost decorator?\n if responder_wrapper_name == 'cache_wrap':\n logger.debug(\" This endpoint is decorated by 'cache' being the topmost decorator.\")\n else:\n # 'cache_wrap' is not the topmost decorator - let's check whether 'cache' is\n # any of the other decorator on this method (not the topmost):\n # this requires the use of @register(decor1, decor2) as the decorator\n if hasattr(getattr(resource, responder), '_decorators') and \\\n 'cache' in [d._decorator_name for d in getattr(resource, responder)._decorators\n if hasattr(d, '_decorator_name')]:\n logger.debug(\" This endpoint is decorated by 'cache', but it is NOT the topmost decorator.\")\n else:\n # no cache was requested on this responder as no decorator at all\n logger.debug(\" No 'cache' was requested for this endpoint.\")\n return\n\n # Step 3: look up the record in the cache\n key = self.generate_cache_key(req)\n data = self.cache.get(key)\n\n if data:\n # if the CACHE_CONTENT_TYPE_JSON_ONLY = True, then we are NOT\n # caching the response's Content-Type, only its body\n if self.cache_config['CACHE_CONTENT_TYPE_JSON_ONLY']:\n if FALCONVERSION_MAIN < 3:\n resp.body = self.deserialize(data)\n else:\n resp.text = self.deserialize(data)\n else:\n if FALCONVERSION_MAIN < 3:\n resp.content_type, resp.body = self.deserialize(data)\n else:\n resp.content_type, resp.text = self.deserialize(data)\n resp.status = HTTP_200\n req.context.cached = True\n\n # Short-circuit any further processing to skip any remaining\n # 'process_request' and 'process_resource' methods, as well as\n # the 'responder' method that the request would have been routed to.\n # However, any 'process_response' middleware methods will still be called.\n resp.complete = True", "def cache():\n is_conditional = request.headers.get(\"If-Modified-Since\") or request.headers.get(\n \"If-None-Match\"\n )\n\n if is_conditional is None:\n response = view_get()\n response.headers[\"Last-Modified\"] = http_date()\n response.headers[\"ETag\"] = uuid.uuid4().hex\n return response\n else:\n return status_code(304)", "def disable_caching(self):\n\n def after_request(r: flask.Response):\n if 'Cache-Control' not in r.headers:\n r.headers['Cache-Control'] = 
'no-store'\n return r\n\n self.after_request(after_request)", "def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)", "def cache_response(self, request, response, body=None):\r\n # From httplib2: Don't cache 206's since we aren't going to\r\n # handle byte range requests\r\n if response.status not in [200, 203]:\r\n return\r\n\r\n response_headers = CaseInsensitiveDict(response.headers)\r\n\r\n cc_req = self.parse_cache_control(request.headers)\r\n cc = self.parse_cache_control(response_headers)\r\n\r\n cache_url = self.cache_url(request.url)\r\n\r\n # Delete it from the cache if we happen to have it stored there\r\n no_store = cc.get('no-store') or cc_req.get('no-store')\r\n if no_store and self.cache.get(cache_url):\r\n self.cache.delete(cache_url)\r\n\r\n # If we've been given an etag, then keep the response\r\n if self.cache_etags and 'etag' in response_headers:\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, response, body=body),\r\n )\r\n\r\n # Add to the cache if the response headers demand it. If there\r\n # is no date header then we can't do anything about expiring\r\n # the cache.\r\n elif 'date' in response_headers:\r\n # cache when there is a max-age > 0\r\n if cc and cc.get('max-age'):\r\n if int(cc['max-age']) > 0:\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, response, body=body),\r\n )\r\n\r\n # If the request can expire, it means we should cache it\r\n # in the meantime.\r\n elif 'expires' in response_headers:\r\n if response_headers['expires']:\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, response, body=body),\r\n )", "def send(self, request, **kw):\r\n if request.method == 'GET':\r\n cached_response = self.controller.cached_request(request)\r\n if cached_response:\r\n return self.build_response(request, cached_response, from_cache=True)\r\n\r\n # check for etags and add headers if appropriate\r\n request.headers.update(self.controller.conditional_headers(request))\r\n\r\n resp = super(CacheControlAdapter, self).send(request, **kw)\r\n\r\n return resp", "def never_cache_preview(self, response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response", "def set_cached_response(self) -> None:\n if self.get_caching_duration() > 0: # if caching is enabled for this request\n json_response = self._request_result.json()\n with open(self.cache_file_name, 'w') as json_file:\n json.dump(json_response, json_file, indent=4)", "def process_response(self, request, response):\n #if not self._should_update_cache(request, response):\n # # We don't need to update the cache, just return.\n # return response\n\n if response.streaming or response.status_code != 200:\n return response\n \n # Don't cache responses that set a user-specific (and maybe security\n # sensitive) cookie in response to a cookie-less request.\n if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):\n return response\n\n # Try to get the timeout from the \"max-age\" section of the \"Cache-\n # Control\" header before reverting to using the default cache_timeout\n # length.\n timeout = get_max_age(response)\n if timeout == None:\n timeout = self.cache_timeout\n elif timeout == 0:\n # max-age was set to 0, don't bother caching.\n return response\n 
patch_response_headers(response, timeout)\n if timeout:\n cache_key = \"%s-%s\" % (self.key_prefix, request.get_full_path())\n #raise ValueError(cache_key)\n if hasattr(response, 'render') and isinstance(response.render, collections.Callable):\n response.add_post_render_callback(\n lambda r: cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(r.content, 9), timeout)\n )\n else:\n # we use the highest compression level, because since it is cached we hope for it to pay off\n cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(response.content, 9), timeout)\n return response", "def update_cached_response(self, request, response):\r\n cache_url = self.cache_url(request.url)\r\n\r\n cached_response = self.serializer.loads(request, self.cache.get(cache_url))\r\n\r\n if not cached_response:\r\n # we didn't have a cached response\r\n return response\r\n\r\n # Lets update our headers with the headers from the new request:\r\n # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1\r\n #\r\n # The server isn't supposed to send headers that would make\r\n # the cached body invalid. But... just in case, we'll be sure\r\n # to strip out ones we know that might be problmatic due to\r\n # typical assumptions.\r\n excluded_headers = [\r\n \"content-length\",\r\n ]\r\n\r\n cached_response.headers.update(\r\n dict((k, v) for k, v in response.headers.items()\r\n if k.lower() not in excluded_headers)\r\n )\r\n\r\n # we want a 200 b/c we have content via the cache\r\n cached_response.status = 200\r\n\r\n # update our cache\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, cached_response),\r\n )\r\n\r\n return cached_response", "def _invalidate_http_cache(self):\n self._requests_cache = {}", "def nocache(response):\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, max-age=0'\n return response", "def build_response(self, request, response, from_cache=False):\r\n if not from_cache and request.method == 'GET':\r\n if response.status == 304:\r\n # We must have sent an ETag request. This could mean\r\n # that we've been expired already or that we simply\r\n # have an etag. 
In either case, we want to try and\r\n # update the cache if that is the case.\r\n cached_response = self.controller.update_cached_response(\r\n request, response\r\n )\r\n\r\n if cached_response is not response:\r\n from_cache = True\r\n\r\n response = cached_response\r\n else:\r\n # Wrap the response file with a wrapper that will cache the\r\n # response when the stream has been consumed.\r\n response._fp = CallbackFileWrapper(\r\n response._fp,\r\n functools.partial(\r\n self.controller.cache_response,\r\n request,\r\n response,\r\n )\r\n )\r\n\r\n resp = super(CacheControlAdapter, self).build_response(\r\n request, response\r\n )\r\n\r\n # See if we should invalidate the cache.\r\n if request.method in self.invalidating_methods and resp.ok:\r\n cache_url = self.controller.cache_url(request.url)\r\n self.cache.delete(cache_url)\r\n\r\n # Give the request a from_cache attr to let people use it\r\n resp.from_cache = from_cache\r\n\r\n return resp", "def never_cache_preview(response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response", "def test_client_max_age_0(self, sess):\r\n print('first request')\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n print('second request')\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=0'})\r\n\r\n # don't remove from the cache\r\n assert self.cache.get(self.url)\r\n assert not r.from_cache", "def cached():\n ##from pprint import pprint\n # let's restrict this to the api server, to avoid shenanigans\n root_relative_url = request.env.request_uri.split('/cached/')[-1]\n ##pprint('ROOT-RELATIVE URL: ')\n ##pprint(root_relative_url)\n fetch_url = '%s://%s/%s' % (request.env.wsgi_url_scheme, request.env.http_host, root_relative_url)\n ##pprint('PROXYING TO SIMPLE URL: ')\n ##pprint(fetch_url)\n\n # permissive CORS handling of requests from another domain (e.g. tree.opentreeoflife.org)\n if request.env.request_method == 'OPTIONS':\n if request.env.http_access_control_request_method:\n response.headers['Access-Control-Allow-Methods'] = request.env.http_access_control_request_method\n if request.env.http_access_control_request_headers:\n response.headers['Access-Control-Allow-Headers'] = request.env.http_access_control_request_headers\n ##pprint('RESPONDING TO OPTIONS')\n raise HTTP(200, **(response.headers))\n\n # N.B. This try/except block means we'll cache errors. For now, the fix is to clear the entire cache.\n try:\n # fetch the latest IDs as JSON from remote site\n import simplejson\n\n if fetch_url.startswith('//'):\n # Prepend scheme to a scheme-relative URL\n fetch_url = \"http:%s\" % fetch_url\n\n fetch_args = request.vars # {'startingTaxonOTTId': \"\"}\n\n # TODO: For more flexibility, we should examine and mimic the original request (HTTP verb, headers, etc)\n\n # this needs to be a POST (pass fetch_args or ''); if GET, it just describes the API\n # N.B. 
that gluon.tools.fetch() can't be used here, since it won't send \"raw\" JSON data as treemachine expects\n req = urllib2.Request(url=fetch_url, data=simplejson.dumps(fetch_args), headers={\"Content-Type\": \"application/json\"}) \n the_response = urllib2.urlopen(req).read()\n ##pprint('RESPONSE:')\n ##pprint(the_response)\n return the_response\n\n except Exception, e:\n # throw 403 or 500 or just leave it\n return ('ERROR', e.message)", "def getFromCache(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def addToCache(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def clear_response_cache():\n global __response_cache\n __response_cache = {}", "def request_cache(self):\n return self._request_cache", "def cached_api(*args, **kwargs):\n def decorator(func):\n kwargs['request_gatekeeper'] = lambda request: not getattr(cached_view, 'never_cache', False)\n kwargs['response_gatekeeper'] = _response_gatekeeper\n\n def response_wrapper(ret):\n ret = loads(ret)\n ret['success'] = True\n ret = client_dumps(ret)\n return HttpResponse(ret, 'application/json')\n\n cache_func = cached_view(*args,\n cached_response_wrapper=response_wrapper,\n serializer=client_dumps,\n **kwargs)(func)\n cache_func.arg_spec = ArgSpec(func)\n\n return cache_func\n return decorator", "def handle_response(self, response):\n\n self._tmp_request_args = {}\n self.cache_response(response)", "def test_must_revalidate(self):\n content = self.unique()\n self.assertViewBehavior(\n {\"cache_control_must_revalidate\": True, \"get\": content},\n status_code=200,\n content=content,\n headers_exact={\"Cache-Control\": \"must-revalidate\"})", "def is_cacheable(self, response: Union[AnyResponse, None]) -> bool:\n if not response:\n return False\n cache_criteria = {\n 'allowed method': response.method in self.allowed_methods,\n 'allowed status': response.status in self.allowed_codes,\n 'not disabled': not self.disabled,\n 'not expired': not getattr(response, 'is_expired', False),\n 'not filtered': self.filter_fn(response),\n }\n logger.debug(f'Pre-cache checks for response from {response.url}: {cache_criteria}') # type: ignore\n return all(cache_criteria.values())", "def dispatch(self, *args, **kwargs):\n cache_allowed = self.is_cache_allowed()\n logging.debug('%s: caching is %s', self.request.path, 'allowed' if cache_allowed else 'NOT allowed', )\n\n response = None\n cache_hit = False\n if cache_allowed: # get from cache\n response = yield self.get_cached()\n cache_hit = True if response is not None else False\n logging.debug('%s: cache %s', self.request.uri, 'HIT' if cache_hit else 'MISS')\n\n if response is None: # get actual\n response = yield self.proxy_async_request()\n\n if cache_allowed:\n if 200 <= response.code <= 299: # store into cache\n yield self.set_cache(response)\n logging.debug('%s: status %d - stored in cache', self.request.uri, response.code)\n else:\n logging.debug('%s: error status %d', self.request.uri, response.code)\n\n # output proxied response\n self.process_response(response)\n self.finish()\n\n if cache_allowed:\n if cache_hit: # renew cache if cache hit\n yield self.renew_cache(self.proxy_async_request)\n logging.debug('%s: slow endpoint, cache %s', self.request.path, 'updated' if cache_hit else 'NOT updated')", "def __cached(self):\n # already 
cached stuff\n if self._cached is None:\n self._cached = Cached(self.resource)\n return self._cached", "async def refresh_cache(request: Request) -> Response:\n await request.state.canvas.sync_cache(request.state.db_conn, skip_check=True)\n\n return Response(status_code=204)", "def removeFromCache(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def disable_cache(response):\n\n response.headers['Cache-Control'] = 'max-age=0, no-cache, no-store, must-revalidate, private'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '0'\n return response", "def render_cached(self, cache_key, render_cls, max_age, cache_time=0, *args, **kwargs):\r\n\r\n # Default the cache to be the same as our max age if not\r\n # supplied.\r\n cache_time = cache_time or max_age\r\n\r\n # Postfix the cache key with the subreddit name\r\n # This scopes all the caches by subreddit\r\n cache_key = cache_key + '-' + c.site.name\r\n\r\n # Get the etag and content from the cache.\r\n hit = g.rendercache.get(cache_key)\r\n if hit:\r\n etag, content = hit\r\n else:\r\n # Generate and cache the content along with an etag.\r\n content = render_cls(*args, **kwargs).render()\r\n etag = '\"%s\"' % datetime.utcnow().isoformat()\r\n g.rendercache.set(cache_key, (etag, content), time=cache_time)\r\n\r\n # Check if the client already has the correct content and\r\n # throw 304 if so. Note that we want to set the max age in the\r\n # 304 response, we can only do this by using the\r\n # pylons.response object just like the etag_cache fn does\r\n # within pylons (it sets the etag header). Setting it on the\r\n # c.response won't work as c.response isn't used when an\r\n # exception is thrown. Note also that setting it on the\r\n # pylons.response will send the max age in the 200 response\r\n # (just like the etag header is sent in the response).\r\n response.headers['Cache-Control'] = 'max-age=%d' % max_age\r\n etag_cache(etag)\r\n\r\n # Return full response using our cached info.\r\n c.response.content = content\r\n return c.response" ]
[ "0.7296144", "0.7083652", "0.701264", "0.68986917", "0.6891372", "0.6742869", "0.67302907", "0.66479725", "0.6640116", "0.6626159", "0.653558", "0.6509554", "0.65028024", "0.6493731", "0.64882547", "0.6464503", "0.61653435", "0.6115088", "0.6103556", "0.6070408", "0.6057066", "0.60444826", "0.60345244", "0.60216266", "0.59961843", "0.5983902", "0.59778184", "0.5975631", "0.5970544", "0.5953773" ]
0.77516645
0
Generate the cache key from the request using the path and the method
def generate_cache_key(req, method: str = None) -> str:
    path = req.path
    if path.endswith('/'):
        path = path[:-1]
    if not method:
        method = req.method
    return f'{path}:{method.upper()}'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_view_response_cache_key( # pylint: disable=unused-argument\n handler: Callable[..., Awaitable[StreamResponse]],\n request: Request,\n *args,\n **kwargs,\n) -> str:\n get_params = request.query\n\n hash_ = sha1(request.path.encode('utf-8'))\n\n for param in sorted(get_params):\n hash_.update(param.encode('utf-8'))\n\n for value in sorted(get_params.getall(param)):\n hash_.update(str(value).encode('utf-8'))\n\n return f'{_URI_BASED_CACHE_KEY_PREFIX}:{request.method}:{hash_.hexdigest()}'", "def _get_cache_key(self):\n\n return '__CACHED__{method}__'.format(\n method=function_utils.get_fully_qualified_name(self.fget).upper())", "def cache_key(self):", "def _make_cache_key(key_prefix):\n if callable(key_prefix):\n cache_key = key_prefix()\n elif '%s' in key_prefix:\n cache_key = key_prefix % request.path\n else:\n cache_key = key_prefix\n\n cache_key = cache_key.encode('utf-8')\n\n return cache_key", "def generate_cache_key(self, *args, **kwargs):\n #smooshed = [\"%s=%s\" % (key, value) for key, value in kwargs.items()]\n smooshed = urlencode(kwargs)\n\n # Use a list plus a ``.join()`` because it's faster than concatenation.\n return \"%s:%s:%s:%s\" % (self._meta.api_name, self._meta.resource_name, ':'.join(args), smooshed)", "def get_cache_key(self, *args, **kwargs):\n key = self._key(*self._inject_obj(args), **kwargs)\n\n if self._hash_algorithm:\n key = self._hash_algorithm(key).hexdigest()\n\n return key", "def cache_key(self):\n return self.__class__.create_cache_key(self.key, **self.get_kwargs())", "def get_key_for_path(cls, path):\n key = hashlib.sha224(path).hexdigest()\n return 'flexible_page_url_{}'.format(key)", "def _memcache_key(method, email, scopes, key_id=None):\n blob = utils.encode_to_json({\n 'method': method,\n 'email': email,\n 'scopes': scopes,\n 'key_id': key_id,\n })\n return hashlib.sha256(blob).hexdigest()", "def cache_key(self):\n\n return \"{}.json\".format(self.path)", "def cachepath(self, *args, **kw):\n cachename = self.cachefunc(*args, **kw)\n ret = os.path.join(self.cachedir, cachename)+'.'+self.serializer\n return ret", "def get_cache_key(prefix):\n return '%s' % (prefix)", "def generate_cache_key(cached, **kwargs):\r\n\r\n if isinstance(cached, QuerySet):\r\n key = str(cached.query)\r\n\r\n elif isinstance(cached, (Model, ModelBase)):\r\n key = '%s.%s:%s' % (cached._meta.app_label,\r\n cached._meta.module_name,\r\n ','.join('%s=%s' % item for item in kwargs.iteritems()))\r\n\r\n else:\r\n raise AttributeError(\"Objects must be queryset or model.\")\r\n\r\n if not key:\r\n raise Exception('Cache key cannot be empty.')\r\n\r\n key = clean_cache_key(key)\r\n return key", "def cache_key(cls) -> str:\n return cls._cache_key", "def memoize_key(prefix, *args, **kwargs):\n key = hashlib.md5()\n for arg in itertools.chain(args, sorted(kwargs.items())):\n key.update(str(arg))\n return '%s:memoize:%s:%s' % (settings.CACHE_PREFIX,\n prefix, key.hexdigest())", "def _get_cache_key(self, source, language):\n key_source = u'%s:%s:%s' % (self.CACHE_SALT, source, language)\n return hashlib.md5(key_source.encode('utf8')).hexdigest()", "def create_key(\n request: PreparedRequest,\n ignored_params: Iterable[str] = None,\n include_get_headers: bool = False,\n **kwargs,\n) -> str:\n key = hashlib.sha256()\n key.update(encode((request.method or '').upper()))\n url = remove_ignored_url_params(request, ignored_params)\n url = url_normalize(url)\n key.update(encode(url))\n key.update(encode(kwargs.get('verify', True)))\n\n body = remove_ignored_body_params(request, 
ignored_params)\n if body:\n key.update(body)\n if include_get_headers and request.headers != DEFAULT_HEADERS:\n for name, value in normalize_dict(request.headers).items(): # type: ignore\n key.update(encode(f'{name}={value}'))\n\n return key.hexdigest()", "def get_cache_key(self, extra_args='', version=None):\r\n query, params = self.query.get_compiler(using=self.db).as_sql()\r\n query_string = (query % params).strip().encode(\"utf-8\")\r\n base_key = md5_constructor('.'.join((query_string, extra_args))).hexdigest()\r\n return cache.make_key('.'.join((self.model._meta.db_table, 'cachebot.results', base_key)), version=version)", "def _get_cache_key(self, inputs: Dict[str, Tensor], prefix: str) -> Optional[str]:\n if self.time_varying_kwargs is not None:\n if len(set(inputs).intersection(self.time_varying_kwargs)) > 0:\n return None\n return f'{prefix}_static'", "def _generate_cache_key(self, address):\n\n return re.sub(r'[^a-z0-9]', '', str(address).lower())", "def cache_key(self):\r\n statinfo = os.stat(self.pathname)\r\n return (self.filename + str(statinfo.st_mtime)).encode('ascii', 'ignore')", "def create_key(\n self,\n method: str,\n url: StrOrURL,\n params: dict = None,\n data: dict = None,\n headers: dict = None,\n **kwargs,\n ) -> str:\n if self.ignored_params:\n url, params, body = self._remove_ignored_parameters(url, params, data)\n\n key = hashlib.sha256()\n key.update(method.upper().encode())\n key.update(str(url).encode())\n key.update(_encode_dict(params))\n key.update(_encode_dict(data))\n\n if (\n self.include_headers\n and headers is not None\n and headers != ClientRequest.DEFAULT_HEADERS\n ):\n for name, value in sorted(headers.items()):\n key.update(name.encode())\n key.update(value.encode())\n return key.hexdigest()", "def get_cache_key(self):\n return get_cache_key(\n self.__class__.__name__, settings=(self.pk, ))", "def get_cache_path(self):", "def get_cache_path(self):", "def generate_request_session_key(self):\n uuid = uuid4().hex\n return self.compute_request_session_key(uuid), uuid", "def create_cache_tag(cls, key):\n hash_engine = hashlib.sha1()\n if isinstance(key, str):\n hash_engine.update(key)\n if isinstance(key, dict):\n hash_key = str(u'__'.join(sorted(key.values()))).encode('utf-8')\n hash_engine.update(hash_key)\n return hash_engine.hexdigest()", "def cache_path(self):", "def cache_path(self):", "def create_cache_key(cls, setting_key, **kwargs):\n\n key = f\"{str(cls.__name__)}:{setting_key}\"\n\n for k, v in kwargs.items():\n key += f\"_{k}:{v}\"\n\n return key.replace(\" \", \"\")" ]
[ "0.7671069", "0.73947465", "0.69811064", "0.69450694", "0.6926922", "0.6827275", "0.66507095", "0.66111356", "0.6543088", "0.65096855", "0.6467866", "0.64333147", "0.6348608", "0.6315374", "0.6295006", "0.6290446", "0.61842054", "0.6159068", "0.6157648", "0.6108428", "0.6080078", "0.6071984", "0.60184103", "0.5997907", "0.5997907", "0.59877014", "0.59627426", "0.5917681", "0.5917681", "0.586218" ]
0.8796167
0
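For illustration only, a minimal sketch of how the cache-key function in the record above could be exercised; the SimpleNamespace stub is an assumption standing in for a real request object and is not part of the original code.

# Illustration only: SimpleNamespace is a stand-in for the real request object.
from types import SimpleNamespace

def generate_cache_key(req, method: str = None) -> str:
    path = req.path
    if path.endswith('/'):
        path = path[:-1]
    if not method:
        method = req.method
    return f'{path}:{method.upper()}'

req = SimpleNamespace(path='/things/42/', method='get')
print(generate_cache_key(req))            # /things/42:GET
print(generate_cache_key(req, 'delete'))  # /things/42:DELETE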
Serializes the response so it can be cached. If CACHE_CONTENT_TYPE_JSON_ONLY = False (the default), then we need to keep the response Content-Type header, so the response body is serialized together with the content type using msgpack, which costs some performance. For this reason the user can set CACHE_CONTENT_TYPE_JSON_ONLY = True, in which case the response Content-Type is NOT cached, so it falls back to the default, which is application/json. That should be fine for most REST APIs and brings a nice performance bump by avoiding the msgpack serialization.
def serialize(self, req, resp, resource) -> bytes:
    if self.cache_config['CACHE_CONTENT_TYPE_JSON_ONLY']:
        if FALCONVERSION_MAIN < 3:
            return resp.body
        else:
            return resp.text
    else:
        if FALCONVERSION_MAIN < 3:
            return msgpack.packb([resp.content_type, resp.body], use_bin_type=True)
        else:
            return msgpack.packb([resp.content_type, resp.text], use_bin_type=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_http_response(self) -> HttpResponse:\n response = (\n JsonResponse(self.body)\n if (self.headers or {}).get(\"Content-Type\") == \"application/json\"\n else HttpResponse(self.body)\n )\n response.headers = self.headers\n return response", "def render_response(self, context):\n\n # if object is a string just return as is\n if isinstance(context, basestring):\n self.response.write(context)\n # else attempt to serialise and return\n else:\n context = json.dumps(context)\n self.response.write(context)\n # set the right content-type header\n self.response.headers['Content-Type'] = 'application/json'", "def serialize(self, request, content_type, default_serializers=None):\n\n if self.serializer:\n serializer = self.serializer\n else:\n _mtype, _serializer = self.get_serializer(content_type,\n default_serializers)\n serializer = _serializer()\n\n response = webob.Response()\n response.status_int = self.code\n for hdr, value in self._headers.items():\n response.headers[hdr] = str(value)\n response.headers['Content-Type'] = content_type\n if self.obj is not None:\n response.body = serializer.serialize(self.obj)\n\n return response", "def serialize_response(self, response):\n raise NotImplementedError()", "def serialize_response(response_data, content_type=None):\n content_type = content_type or get_best_mimetype()\n\n if not content_type:\n abort(406)\n\n rv = current_app.blueprints[request.blueprint]\\\n .response_mimetypes[content_type](response_data)\n\n response = make_response(rv)\n\n if isinstance(response_data, HTTPException):\n response.status_code = response_data.code\n\n response.headers['Content-type'] = content_type\n return response", "def json_response(self, request, *args, **kwargs):\n\n return HttpResponse(self.construct_json(),\n content_type='application/json',\n mimetype='application/json', status=self.status)", "def set_cached_response(self) -> None:\n if self.get_caching_duration() > 0: # if caching is enabled for this request\n json_response = self._request_result.json()\n with open(self.cache_file_name, 'w') as json_file:\n json.dump(json_response, json_file, indent=4)", "def json_response(func):\n\t@wraps(func)\n\tdef decorated_view(*args, **kwargs):\n\t\tdata = func(*args, **kwargs)\n\t\tdata = json.dumps(data)\n\t\tresponse = make_response(data)\n\t\tresponse.headers['Content-Type'] = 'application/json'\n\t\treturn response\n\treturn decorated_view", "def get_json_response(self, content, **httpresponse_kwargs):\n\t\treturn HttpResponse(content,\n\t\t\t\t\t\t\t\t content_type='application/json',\n\t\t\t\t\t\t\t\t **httpresponse_kwargs)", "def dispatch(self, request, *args, **kwargs):\n # Wrap the dispatch method, so that we autoencode JSON\n response = super(JSONRestView, self).dispatch(request, *args, **kwargs)\n # If this is not an HTTPResponseBase object (Base class for responses) \n if not isinstance(response, HttpResponseBase):\n response = json_response(response)\n\n return response", "def json_response(self, response_data):\n resp_obj = json_dumps(response_data, default=encode_ion_object, indent=None if request.is_xhr else 2)\n resp = self.response_class(resp_obj, mimetype=CONT_TYPE_JSON)\n if self.develop_mode and (self.set_cors_headers or (\"api_key\" in request.args and request.args[\"api_key\"])):\n self._add_cors_headers(resp)\n self._log_request_response(CONT_TYPE_JSON, resp_obj, len(resp_obj))\n return resp", "def render_to_response(self, context, **kwargs):\n kwargs = {}\n additional_headers = {}\n #create response headers\n if 'header' in 
context:\n for key in context['header']:\n if key == 'Content-Type':\n kwargs['content_type'] = context['header'][key]\n elif key.lower() == 'status':\n kwargs['status'] = context['header'][key]\n else:\n additional_headers[key] = context['header'][key]\n del context['header']\n \n #return json if not header\n if not 'content_type' in kwargs:\n kwargs['content_type'] = 'application/json'\n \n if 'pointer' in context: #return file\n context['pointer'].seek(0)\n kwargs['content'] = context['pointer'].read()\n context['volume'].close(context['pointer'], context['info']['hash'])\n elif 'raw' in context and context['raw'] and 'error' in context and context['error']: #raw error, return only the error list\n kwargs['content'] = context['error']\n elif kwargs['content_type'] == 'application/json': #return json\n kwargs['content'] = json.dumps(context)\n else: #return context as is!\n kwargs['content'] = context\n \n response = HttpResponse(**kwargs)\n for key, value in additional_headers.items():\n response[key] = value\n\n return response", "def get_json_response(self, content, **httpresponse_kwargs):\n response = HttpResponse(content,\n content_type='application/json',\n **httpresponse_kwargs)\n return response", "def render_to_json_response(self, context, **response_kwargs):\n return HttpResponse(\n self.convert_context_to_json(context),\n content_type='application/json',\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return HttpResponse(\n self.convert_context_to_json(context),\n content_type='application/json',\n **response_kwargs\n )", "def json(self, **kwargs):\n\t\ttry:\n\t\t\treturn self.response.json(**kwargs)\n\t\texcept ValueError:\n\t\t\t# No valid JSON encoding\n\t\t\treturn None", "def render_to_response(self, context, **response_kwargs):\n return JsonResponse(context)", "def update_cached_response(self, request, response):\r\n cache_url = self.cache_url(request.url)\r\n\r\n cached_response = self.serializer.loads(request, self.cache.get(cache_url))\r\n\r\n if not cached_response:\r\n # we didn't have a cached response\r\n return response\r\n\r\n # Lets update our headers with the headers from the new request:\r\n # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1\r\n #\r\n # The server isn't supposed to send headers that would make\r\n # the cached body invalid. But... 
just in case, we'll be sure\r\n # to strip out ones we know that might be problmatic due to\r\n # typical assumptions.\r\n excluded_headers = [\r\n \"content-length\",\r\n ]\r\n\r\n cached_response.headers.update(\r\n dict((k, v) for k, v in response.headers.items()\r\n if k.lower() not in excluded_headers)\r\n )\r\n\r\n # we want a 200 b/c we have content via the cache\r\n cached_response.status = 200\r\n\r\n # update our cache\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, cached_response),\r\n )\r\n\r\n return cached_response", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(self.get_data(context))", "def _respond(self, request, response):\n request.respond(200, {\"Content-Type\": \"application/json\"}, JSON.stringify(response))", "def _output(content):\n serve = True\n # check modifications and etag\n if 'If-Modified-Since' in request.headers:\n last_seen = datetime.datetime.strptime(\n request.headers['If-Modified-Since'], HTTP_DATE_FMT)\n if last_seen >= content.modified.replace(microsecond=0):\n serve = False\n if 'If-None-Match' in request.headers:\n etags = [x.strip('\" ')\n for x in request.headers['If-None-Match'].split(',')]\n if content.etag in etags:\n serve = False\n\n headers = {}\n if content.content_type:\n headers['Content-Type'] = content.content_type\n last_modified = content.modified.strftime(HTTP_DATE_FMT)\n headers['Last-Modified'] = last_modified\n headers['ETag']= '\"%s\"' % (content.etag,)\n for header in content.headers:\n key, value = header.split(':', 1)\n headers[key] = value.strip()\n if serve:\n response.body = content.body\n for key, value in headers.iteritems():\n response.set_header(key, value)\n response.content_type=content.content_type\n response.status=int(content.status)\n else:\n response.status=304\n return response", "def process_response(self, request, response):\n #if not self._should_update_cache(request, response):\n # # We don't need to update the cache, just return.\n # return response\n\n if response.streaming or response.status_code != 200:\n return response\n \n # Don't cache responses that set a user-specific (and maybe security\n # sensitive) cookie in response to a cookie-less request.\n if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):\n return response\n\n # Try to get the timeout from the \"max-age\" section of the \"Cache-\n # Control\" header before reverting to using the default cache_timeout\n # length.\n timeout = get_max_age(response)\n if timeout == None:\n timeout = self.cache_timeout\n elif timeout == 0:\n # max-age was set to 0, don't bother caching.\n return response\n patch_response_headers(response, timeout)\n if timeout:\n cache_key = \"%s-%s\" % (self.key_prefix, request.get_full_path())\n #raise ValueError(cache_key)\n if hasattr(response, 'render') and isinstance(response.render, collections.Callable):\n response.add_post_render_callback(\n lambda r: cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(r.content, 9), timeout)\n )\n else:\n # we use the highest compression level, because since it is cached we hope for it to pay off\n cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(response.content, 9), timeout)\n return response", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", 
"def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )", "def getResponseContentType(self):\n return self.JSON_APPLICATION", "def __call__(self, rv):\n if isinstance(rv, ResponseBase):\n return rv\n data, status, headers = unpack(rv)\n resp = flask.make_response(self._encoder(data, **self.json_settings),\n status, {'Content-Type': self.content_type})\n resp.headers.extend(headers)\n return resp", "async def json(self, encoding=\"utf-8\", content_type=None, loads=json_loads):\n return loads(self.response.decode(encoding))" ]
[ "0.64963675", "0.64194614", "0.636542", "0.6278377", "0.61988986", "0.6171429", "0.6129956", "0.60481405", "0.6038928", "0.6000924", "0.60004413", "0.59598386", "0.5909832", "0.5895042", "0.5895042", "0.58946943", "0.58941776", "0.5894121", "0.58709216", "0.5853939", "0.58444303", "0.5801462", "0.5782089", "0.5782089", "0.5782089", "0.5782089", "0.5782089", "0.57308155", "0.5717896", "0.5699395" ]
0.72587997
0
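As a hedged aside, a tiny round-trip sketch of the msgpack branch used by the serialize record above (it assumes the msgpack package is installed); the JSON-only mode would simply cache resp.text or resp.body unchanged.

import msgpack

content_type, body = 'application/json', '{"id": 1}'
packed = msgpack.packb([content_type, body], use_bin_type=True)  # bytes stored in the cache
restored = msgpack.unpackb(packed, raw=False)                    # ['application/json', '{"id": 1}']
assert restored == [content_type, body]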
Deserializes the cached record into either the response body alone, or the Content-Type and body.
def deserialize(self, data: bytes) -> Tuple[str, Any]:
    if self.cache_config['CACHE_CONTENT_TYPE_JSON_ONLY']:
        return data
    else:
        return msgpack.unpackb(data, raw=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deserialize(self, resp):\r\n return self.serializer.deserialize(resp.content, format=resp['Content-Type'])", "def deserialize(self, data, caches=None):\n return data", "def decode(self) -> D:\n if self.has_cached_data():\n return self._data\n\n # Dispatch decoding\n data = lookup_serializer(self.encoding).loads(self.blob)\n\n self._cache_data(data)\n return data", "def cache_response(self, request, response, body=None):\r\n # From httplib2: Don't cache 206's since we aren't going to\r\n # handle byte range requests\r\n if response.status not in [200, 203]:\r\n return\r\n\r\n response_headers = CaseInsensitiveDict(response.headers)\r\n\r\n cc_req = self.parse_cache_control(request.headers)\r\n cc = self.parse_cache_control(response_headers)\r\n\r\n cache_url = self.cache_url(request.url)\r\n\r\n # Delete it from the cache if we happen to have it stored there\r\n no_store = cc.get('no-store') or cc_req.get('no-store')\r\n if no_store and self.cache.get(cache_url):\r\n self.cache.delete(cache_url)\r\n\r\n # If we've been given an etag, then keep the response\r\n if self.cache_etags and 'etag' in response_headers:\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, response, body=body),\r\n )\r\n\r\n # Add to the cache if the response headers demand it. If there\r\n # is no date header then we can't do anything about expiring\r\n # the cache.\r\n elif 'date' in response_headers:\r\n # cache when there is a max-age > 0\r\n if cc and cc.get('max-age'):\r\n if int(cc['max-age']) > 0:\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, response, body=body),\r\n )\r\n\r\n # If the request can expire, it means we should cache it\r\n # in the meantime.\r\n elif 'expires' in response_headers:\r\n if response_headers['expires']:\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, response, body=body),\r\n )", "def serialize(self, req, resp, resource) -> bytes:\n if self.cache_config['CACHE_CONTENT_TYPE_JSON_ONLY']:\n if FALCONVERSION_MAIN < 3:\n return resp.body\n else:\n return resp.text\n else:\n if FALCONVERSION_MAIN < 3:\n return msgpack.packb([resp.content_type, resp.body], use_bin_type=True)\n else:\n return msgpack.packb([resp.content_type, resp.text], use_bin_type=True)", "def deserialize_response(self, serialized_response):\n raise NotImplementedError()", "def deserialize(self, blob):\n pass", "def update_cached_response(self, request, response):\r\n cache_url = self.cache_url(request.url)\r\n\r\n cached_response = self.serializer.loads(request, self.cache.get(cache_url))\r\n\r\n if not cached_response:\r\n # we didn't have a cached response\r\n return response\r\n\r\n # Lets update our headers with the headers from the new request:\r\n # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1\r\n #\r\n # The server isn't supposed to send headers that would make\r\n # the cached body invalid. But... 
just in case, we'll be sure\r\n # to strip out ones we know that might be problmatic due to\r\n # typical assumptions.\r\n excluded_headers = [\r\n \"content-length\",\r\n ]\r\n\r\n cached_response.headers.update(\r\n dict((k, v) for k, v in response.headers.items()\r\n if k.lower() not in excluded_headers)\r\n )\r\n\r\n # we want a 200 b/c we have content via the cache\r\n cached_response.status = 200\r\n\r\n # update our cache\r\n self.cache.set(\r\n cache_url,\r\n self.serializer.dumps(request, cached_response),\r\n )\r\n\r\n return cached_response", "def process_response(self, request, response):\n #if not self._should_update_cache(request, response):\n # # We don't need to update the cache, just return.\n # return response\n\n if response.streaming or response.status_code != 200:\n return response\n \n # Don't cache responses that set a user-specific (and maybe security\n # sensitive) cookie in response to a cookie-less request.\n if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):\n return response\n\n # Try to get the timeout from the \"max-age\" section of the \"Cache-\n # Control\" header before reverting to using the default cache_timeout\n # length.\n timeout = get_max_age(response)\n if timeout == None:\n timeout = self.cache_timeout\n elif timeout == 0:\n # max-age was set to 0, don't bother caching.\n return response\n patch_response_headers(response, timeout)\n if timeout:\n cache_key = \"%s-%s\" % (self.key_prefix, request.get_full_path())\n #raise ValueError(cache_key)\n if hasattr(response, 'render') and isinstance(response.render, collections.Callable):\n response.add_post_render_callback(\n lambda r: cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(r.content, 9), timeout)\n )\n else:\n # we use the highest compression level, because since it is cached we hope for it to pay off\n cache._cache.set(cache_key.encode(\"utf-8\"), zlib.compress(response.content, 9), timeout)\n return response", "def deserialize(self, blob):\n return json.loads(blob)", "def deserialize(self, data):\n payload = self._unpack(data)\n return decode(payload['body'], content_type=payload['content_type'],\n content_encoding=payload['content_encoding'], force=True)", "def _post_deserialize (self):\n pass", "def cache_body(self):\n with open(self.path, \"rb\") as fh:\n fh.seek(fh.tell(), os.SEEK_END)\n fh.seek(max(0, fh.tell()-LEN_CACHE_BYTES), os.SEEK_SET)\n return fh.read(LEN_CACHE_BYTES).decode('utf-8') #.split(\"\\n\")", "def process_response(self, req, resp, resource, req_succeeded):\n\n # Step 1: for 'rest-based' and 'rest&time-based' eviction strategies the\n # POST/PATCH/PUT/DELETE calls are never cached and even more they\n # invalidate the record cached by the GET method\n if self.cache_config['CACHE_EVICTION_STRATEGY'] in [CacheEvictionStrategy.rest_based,\n CacheEvictionStrategy.rest_and_time_based] \\\n and req.method.upper() in [HttpMethods.POST,\n HttpMethods.PATCH,\n HttpMethods.PUT,\n HttpMethods.DELETE]:\n # get the cache key created by the GET method (assuming there was one)\n key = self.generate_cache_key(req, method='GET')\n self.cache.delete(key)\n return\n\n # Step 2: if it is marked to be cached, but has not yet been cached\n # then we cache it\n if hasattr(req.context, 'cache') and req.context.cache \\\n and (not hasattr(req.context, 'cached') or not req.context.cached):\n key = self.generate_cache_key(req)\n value = self.serialize(req, resp, resource)\n\n # for the REST-based strategy there is no timeout, the cached record never 
expires\n if self.cache_config['CACHE_EVICTION_STRATEGY'] in [CacheEvictionStrategy.rest_based]:\n # timeout 0 - never expires\n timeout = 0\n else:\n # for the time-based and rest-and-time-based eviction strategy the\n # cached record expires\n timeout = req.context.cache_timeout if hasattr(req.context, 'cache_timeout') else 600\n\n self.cache.set(key, value, timeout=timeout)", "def deserialize(self, data):", "def deserialize(self, data, status_code):\r\n if status_code == 204:\r\n return data\r\n return serializer.Serializer(self.get_attr_metadata()).deserialize(\r\n data, self.content_type())['body']", "def set_cached_response(self) -> None:\n if self.get_caching_duration() > 0: # if caching is enabled for this request\n json_response = self._request_result.json()\n with open(self.cache_file_name, 'w') as json_file:\n json.dump(json_response, json_file, indent=4)", "def deserialize(self, obj):\n raise NotImplementedError", "def read_cache(self):\n with open(self.get_cache_filename(), 'rb') as f:\n data = pickle.loads(f.read())\n self.timestamp = data['timestamp']\n self.cache = data['cache']", "def load_from_cache(self):\n try:\n with open(self.cache_filename, 'r') as cache:\n json_data = cache.read()\n data = json.loads(json_data)\n except IOError:\n data = {'data': {}, 'inventory': {}}\n\n self.data = data['data']\n self.inventory = data['inventory']", "def decode_response(\n res_model: Type[T],\n resp: Response,\n) -> T:\n if resp.headers.get(HEADER_CONTENT_TYPE) == MSGPACK_CONTENT_TYPE:\n return msgpack.decode(resp.content, type=res_model)\n return parse_raw_as(res_model, resp.text)", "def _deserialize_response(self, response):\n text = response.content.decode(errors='replace')\n text = _remove_control_characters(text)\n doc = json.loads(text, cls=_TransmissionJSONDecoder)\n\n if doc['result'] != 'success':\n raise TransmissionError(\"Request failed: '%s'\" % doc['result'])\n\n if doc['tag'] != self.tag:\n raise TransmissionError(\"Tag mismatch: (got %d, expected %d)\" % (doc['tag'], self.tag))\n else:\n self.tag += 1\n\n if 'arguments' in doc:\n return doc['arguments'] or None\n\n return None", "def _read_body(self, data):\n resp = self.current_response\n if resp._decompressor:\n resp.body = resp._decompressor.decompress(data)\n resp.body += resp._decompressor.flush()\n del resp._decompressor\n else:\n resp.body = data\n self._on_response()", "def build_response(self, request, response, from_cache=False):\r\n if not from_cache and request.method == 'GET':\r\n if response.status == 304:\r\n # We must have sent an ETag request. This could mean\r\n # that we've been expired already or that we simply\r\n # have an etag. 
In either case, we want to try and\r\n # update the cache if that is the case.\r\n cached_response = self.controller.update_cached_response(\r\n request, response\r\n )\r\n\r\n if cached_response is not response:\r\n from_cache = True\r\n\r\n response = cached_response\r\n else:\r\n # Wrap the response file with a wrapper that will cache the\r\n # response when the stream has been consumed.\r\n response._fp = CallbackFileWrapper(\r\n response._fp,\r\n functools.partial(\r\n self.controller.cache_response,\r\n request,\r\n response,\r\n )\r\n )\r\n\r\n resp = super(CacheControlAdapter, self).build_response(\r\n request, response\r\n )\r\n\r\n # See if we should invalidate the cache.\r\n if request.method in self.invalidating_methods and resp.ok:\r\n cache_url = self.controller.cache_url(request.url)\r\n self.cache.delete(cache_url)\r\n\r\n # Give the request a from_cache attr to let people use it\r\n resp.from_cache = from_cache\r\n\r\n return resp", "def obj_from_response(self, response):\n\n obj = self.model()\n serializer = self.get_serializer()\n field_data = serializer.deserialize(to_unicode(response.content))\n obj.update_fields(field_data)\n obj._full_url = response.url\n\n return obj", "def deserialize(cls, record):\n return cls(\n source=record.get(\"source\", \"\"),\n category=record.get(\"category\", \"\"),\n name=record.get(\"name\", \"\"),\n message=record.get(\"message\", \"\"),\n timestamp=record.get(\"timestamp\", \"\"),\n **record[\"data\"],\n )", "def translate_response(self, r):\n mime = r.headers['Content-Type']\n if 'json' in mime:\n return r.json()\n elif self.url.endswith('parquet') and 'octet-stream' in mime or 'parquet' in mime:\n stream = io.BytesIO(r.content)\n df = pd.read_parquet(stream)\n return df\n else:\n return r.content", "def _load_object(self, cid):\n object_data = unixfs_pb2.Data()\n object_data.ParseFromString(self.client.object.data(\n cid,\n **self.client_request_kwargs,\n ))\n\n self.cid_type_cache[cid] = object_data.Type\n self.path_size_cache[cid] = object_data.filesize\n self.block_cache[cid] = object_data.Data\n self.subblock_sizes_cache[cid] = object_data.blocksizes\n\n return object_data", "def handle_get_response(self, response):\n\n content_str = to_unicode(response.content)\n resource_data = self.deserialize(content_str)\n\n self._raw_response_content = resource_data\n self.handle_response(response)", "def get_object_contents(key):\n if key is None or key == \"\":\n r = jsonify(message=\"Not all required params are present\", success=False, status_code=400)\n r.status_code = 400\n return r\n\n contents = cache_utils.get(key)\n return Response(contents)" ]
[ "0.66872615", "0.628563", "0.6097179", "0.60517174", "0.6006489", "0.60002464", "0.59839606", "0.59279156", "0.5876185", "0.5767751", "0.57473266", "0.5706114", "0.56609535", "0.56507987", "0.562745", "0.5612654", "0.5603545", "0.5589274", "0.5583386", "0.5578948", "0.5569624", "0.55694634", "0.55369365", "0.5536535", "0.55347705", "0.5518634", "0.55118704", "0.5439805", "0.5430998", "0.53877413" ]
0.6360597
1
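A minimal sketch of the two deserialize branches from the record above, using a plain boolean in place of the middleware's cache_config lookup (an assumption made only for this example).

import msgpack

def deserialize(data, json_only: bool):
    # JSON-only caching stores just the body, so the cached value is returned untouched;
    # otherwise the cached bytes hold a msgpack-packed [content_type, body] pair.
    if json_only:
        return data
    return msgpack.unpackb(data, raw=False)

packed = msgpack.packb(['text/plain', 'hello'], use_bin_type=True)
print(deserialize('{"id": 1}', json_only=True))   # {"id": 1}
print(deserialize(packed, json_only=False))       # ['text/plain', 'hello']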
Takes an incoming socket and either stores the command in the command queue or performs another action based on the command.
def receive_and_store(self, socket, addr):
    # Create the incoming connection
    conn = IncomingConnection(addr, socket)

    # Receive the data from the connection
    data = conn.recv_data()
    if not data:
        logger.warning("Invalid data received")
        return

    # Get the type of data
    transfer_type = data[0]

    # Do an appropriate action based on the type of data
    if transfer_type == COMMAND_TRANSFER:
        command_string = data[1]
        command = data[1].split(" ")[0]
        command_object = Command(command_string, "00:00:00", self.command_targets[command])
        self.command_queue.put(command_object)
        logger.debug("LightCommandInput: Command added to queue -> " + data[1])
    elif transfer_type == FILE_TRANSFER:
        logger.warning("File transfer started")
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perform_command(self, command_str):\n print \"Received command: {}\".format(command_str)\n commands = command_str.split('#')\n if len(commands) == 2:\n data = None\n elif len(commands) > 2:\n data = commands[2:]\n elif command_str == '':\n # Gets received when the server socket is closed (should be -1)\n print \"Command suggests socket is being closed, ignore this command and close the socket\"\n self.stop()\n else:\n print \"Not a command, must be of form COMMAND#ACTION: {}\".format(command_str)\n\n if len(commands) >= 2:\n command = commands[0]\n action = commands[1]\n if command in COMMANDS and action in ACTIONS:\n self.request_queue.put((command, action, data))\n else:\n print \"Corrupt action\"", "def command_callback(self, command):\n while not self.socket_available: # wait for socket to be available\n pass\n self.socket_available = False # block socket from being used in other processes\n if self.robot.is_in_error():\n self.robot.ResetError()\n self.robot.ResumeMotion()\n reply = self.robot.exchange_msg(command.data, decode=False)\n self.socket_available = True # Release socket so other processes can use it\n if reply is not None:\n self.reply_publisher.publish(reply)", "def handle(self):\r\n # self.request is the TCP socket connected to the client\r\n # read the incoming command\r\n request = self.request.recv(1024).strip()\r\n # write to the queue waiting to be processed by the server\r\n INPUT_QUEUE.put(request)\r\n # wait for the server answer in the output queue\r\n response = OUTPUT_QUEUE.get(timeout=5.0)\r\n # send back the answer\r\n self.request.send(response)", "def handle(self):\n socket = self.request[1]\n data = self.request[0].strip()\n logger.info(\"Address {} at {} wrote: '{}'\".format(self.client_address[1], self.client_address[0], data))\n cmd_strn, ret = self.command_service(data)\n print(ret)\n self.command_response(cmd_strn, ret, self.request[1], self.client_address[0],\n self.mapInterface.router[cmd_strn])", "def server_cmd():\n print_banner()\n print_menu()\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n # main server command loop\n while True:\n try:\n user_input = raw_input('\\033[1m' + 'khala_botnet$ ' + '\\033[0m')\n except KeyboardInterrupt: # kill process if kb interrupt\n sock.close()\n os.kill(os.getpid(), 9)\n\n tokens = user_input.split(' ')\n command = tokens[0].lower()\n\n if command == 'roll':\n roll_command(sock)\n\n elif command == 'attack':\n if len(tokens) == 3:\n attack_command(sock, tokens[1], tokens[2])\n else:\n print(\"Invalid command. Enter 'help' for options.\")\n\n elif command == 'stop':\n stop_command(sock)\n\n elif command == 'list':\n list_bots()\n\n elif command == 'help':\n print_help()\n\n elif command == 'exit':\n sock.close()\n os.kill(os.getpid(), 9)\n else:\n print(\"Invalid command. 
Enter 'help' for options.\")", "def command(s_socket):\r\n command = raw_input(\"#> \")\r\n bytes_value = to_bytes(len(command) + 5, 4, 'little')\r\n s_socket.send('c' + bytes_value + command)\r\n\r\n print(s_socket.recv(MAX_BUFFER_LENGTH))", "def do_socket_logic():\n pass", "def run_command(command, sender_socket, ip, port):\n command_bytes = bytes(command, \"UTF-8\")\n sender_socket.sendto(command_bytes, (ip, port))\n has_data = True\n while has_data:\n try:\n byte_reply = sender_socket.recv(BUFFER_SIZE)\n str_reply = byte_reply.decode(\"UTF-8\")\n print(str_reply)\n if \"|-- Transfer \" in str_reply:\n handle_keylog_transfer(str_reply, sender_socket)\n except socket.timeout:\n has_data = False", "def runner(socket,id):\n socket.send('proceed')\n \n while True:\n data = socket.recv()\n print(\"id(\",id,\")=\",data)\n if not data: break\n \n elif data == \"info\":\n run_time = time.ctime(start_time)\n statusMessage = \"SERVER STATUS: Running...\\nInterface id:\"+str(id)+\"\\nBeen running since: \"+str(run_time)+\"\\n\"\n socket.send(statusMessage)\n \n elif data == \"plug\": #talk to plugin? aka. other commands \n pass\n \n else: #not valid command.\n socket.send(\"invalid command\")\n \n socket.close() \n print(\"closed connection\") #means the thread is also quitting", "def cmd_handler():\n context = zmq.Context()\n\n # socket to receive commands (a subscription to ELECTION_CODE channel)\n cmd_socket = context.socket(zmq.SUB)\n cmd_socket.connect (\"tcp://%s:5556\" % SERVER_HOST)\n topicfilter = \"politiche2013\"\n cmd_socket.setsockopt(zmq.SUBSCRIBE, topicfilter)\n\n # socket to send replies\n reply_sender = context.socket(zmq.PUSH)\n reply_sender.connect(\"tcp://%s:5557\" % SERVER_HOST)\n\n # main loop\n while True:\n print \"Aye sir, unit {0} ready for your commands ...\".format(computer_id)\n # wait for a command\n string = cmd_socket.recv()\n\n # action\n print \"Message received: '%s'\" % (string,)\n\n # send reply to server\n print \"Sending reply to server\"\n reply = { 'unit' : computer_id, 'status' : 'configured'}\n reply_sender.send_json(reply)", "def run(self):\n #setsockopt() is used to specify options on the socket.\n #Here, we set the option SO_REUSEADDR, which indicates that the system can reuse this socket\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n try:\n #associate the socket with the server address and port\n self.sock.bind((self.host, self.port))\n\n except socket.error as e:\n print \"Bind Error : \", e\n\n #puts the socket into server mode, The number you give to listen()\n #specifies how many connections can be queued for this socket\n self.sock.listen(1)\n\n #print socket listening state\n print('Starting socket server (host {}, port {})'.format(self.host, self.port))\n\n #loop to wait for connection\n while True:\n\n #wait for connection\n print(\"Wating for connection ... \")\n\n try:\n #accept waits for an incoming connection, returning the open connection between\n #the server and client and the address of the client\n #The connection is actually a different socket on another port (assigned by the kernel)\n self.connection, self.client_address = self.sock.accept()\n #print client connected\n print('Client {} connected'.format(self.client_address))\n\n except Exception, e:\n\t\t\t\tprint \"sock closed! 
Error: \",e\n\n #if connection successful, enter second loop where data exchange is done\n while True:\n #receive data\n try:\n data = self.connection.recv(self.buf_size).decode('utf-8')\n #close if exeception\n except Exception, e:\n print \"error\", e\n\n #if not data, continue receiving data\n if not data:\n print('no data')\n break\n #split data by \">\" to get commands\n data_array = data.split(\">\")\n #act depending on command received\n for data_command in data_array:\n if data_command == \"\":\n continue\n\n #GO BACKWARDS\n if Commands.CMD_FORWARD[1:] in data_command:\n print data_command + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #move forward\n self.c.forward()\n\n #GO FORWARD\n elif Commands.CMD_BACKWARD[1:] in data_command:\n print data_command + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #set the direction in which motors will spin\n self.c.writeBlock(self.c.MOTOR_LEFT_DIR,1)\n self.c.writeBlock(self.c.MOTOR_RIGHT_DIR,1)\n #increase power (PWM) supplied to the motor\n for i in range(0,500,10):\n self.c.writeBlock(self.c.MOTOR_LEFT,i)\n self.c.writeBlock(self.c.MOTOR_RIGHT,i)\n time.sleep(0.005)\n\n #TURN RIGHT\n elif Commands.CMD_TURN_RIGHT[1:] in data_command:\n #log the info\n print data_command + \" \" + str(self.c.WHEELS_ORIENTATION) + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn to the right the direction\n self.c.turn_right()\n #update the UI\n self.emit( SIGNAL('update_wheel_orientation_lcd(QString)'), str(self.c.WHEELS_ORIENTATION))\n\n #TURN LEFT\n elif Commands.CMD_TURN_LEFT[1:] in data_command:\n #log the info\n print data_command + \" \" + str(self.c.WHEELS_ORIENTATION) + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn to the right the direction\n self.c.turn_left()\n #update the UI\n self.emit( SIGNAL('update_wheel_orientation_lcd(QString)'), str(self.c.WHEELS_ORIENTATION))\n\n #STOP\n elif Commands.CMD_STOP[1:] in data_command:\n\n #print command and timestamp\n print data_command + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #stop\n self.c.stop()\n\n #ULTRASONIC TURN RIGHT\n elif Commands.CMD_ULTRASONIC_TURN_RIGHT[1:] in data_command:\n #log the info\n print data_command + \" \" + str(self.c.ULTRASONIC_ORIENTATION) + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn to the right the direction\n self.c.ultrasonic_right()\n #update the UI\n self.emit( SIGNAL('update_ultrasonic_orientation_lcd(QString)'), str(self.c.ULTRASONIC_ORIENTATION))\n\n #ULTRASONIC TURN RIGHT\n elif Commands.CMD_ULTRASONIC_TURN_LEFT[1:] in data_command:\n #log the info\n print data_command + \" \" + str(self.c.ULTRASONIC_ORIENTATION) + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn to the right the direction\n self.c.ultrasonic_left()\n #update the UI\n self.emit( SIGNAL('update_ultrasonic_orientation_lcd(QString)'), str(self.c.ULTRASONIC_ORIENTATION))\n\n #RED LED\n elif Commands.CMD_RGB_R[1:] in data_command:\n #print command and timestamp\n print data_command + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn red led ON\n self.c.turn_red_led_on()\n #update server UI\n self.emit( SIGNAL('update_led_label(QString, QString)'), \"red\", \"background-color: red\")\n\n #GREEN LED\n elif Commands.CMD_RGB_G[1:] in data_command:\n #print command and timestamp\n print data_command + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn green led ON\n self.c.turn_green_led_on()\n #update server UI\n self.emit( SIGNAL('update_led_label(QString, 
QString)'), \"green\" ,\"background-color: green\")\n\n #BLUE LED\n elif Commands.CMD_RGB_B[1:] in data_command:\n #print command and timestamp\n print data_command + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn blue led ON\n self.c.turn_blue_led_on()\n #update server UI\n self.emit( SIGNAL('update_led_label(QString, QString)'), \"blue\" ,\"background-color: blue\")\n\n #OFF LED\n elif Commands.CMD_RGB_OFF[1:] in data_command:\n #print command and timestamp\n print data_command + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn blue led ON\n self.c.turn_led_off()\n #update server UI\n self.emit( SIGNAL('update_led_label(QString, QString)'), \"off\", \"background-color: white\")\n self.connection.close()\n break", "def command(self, cmd, recvSize=0, callback=None):\n if not self.isConnected:\n # If we're shutting down, break the chain of polling callbacks...\n return\n\n if callback and recvSize:\n self.lock.acquire()\n self.recv.queue.put((recvSize, callback))\n self.send.queue.put(cmd)\n self.lock.release()\n elif cmd:\n self.send.queue.put(cmd)", "def handleCommand(self,message):\n command = message[0]\n pcaId = None\n if len(message) > 1:\n pcaId = message[1].decode()\n if command == codes.ping:\n self.commandSocket.send(codes.ok)\n elif command == codes.pcaAsksForDetectorStatus:\n pcaId = message[1].decode()\n if pcaId and pcaId in self.PCAs:\n if pcaId in self.pcaConfigTag:\n self.commandSocket.send_multipart([self.StateMachineForPca[pcaId].currentState.encode(),self.pcaConfigTag[pcaId].encode()])\n else:\n self.commandSocket.send_multipart([self.StateMachineForPca[pcaId].currentState.encode()])\n elif command == codes.addPartition:\n data = partitionDataObject(json.loads(message[1].decode()))\n self.addPartition(data)\n self.commandSocket.send(codes.ok)\n elif command == codes.deletePartition:\n pcaId = message[1].decode()\n self.deletePartition(pcaId)\n self.commandSocket.send(codes.ok)\n elif command == codes.remapDetector:\n detectorId = message[2].decode()\n if message[1] == codes.removed:\n self.abortFunction(self.detectorMapping[detectorId])\n del self.detectorMapping[detectorId]\n else:\n pcaId = message[1].decode()\n self.abortFunction(pcaId)\n if detectorId in self.detectorMapping:\n self.abortFunction(self.detectorMapping[detectorId])\n self.detectorMapping[detectorId] = pcaId\n self.commandSocket.send(codes.ok)\n #transitions\n elif command.decode() == GlobalSystemTransitions.configure:\n conf = None\n if len(message) > 2:\n conf = configObject(json.loads(message[2].decode()))\n if self.isPCAinTransition[pcaId]:\n self.commandSocket.send(codes.busy)\n elif not self.StateMachineForPca[pcaId].checkIfPossible(GlobalSystemTransitions.configure) or not conf:\n self.commandSocket.send(codes.error)\n print(\"error\")\n else:\n self.commandSocket.send(codes.ok)\n self.isPCAinTransition[pcaId] = True\n workThread = threading.Thread(name=\"worker\", target=self.configure, args=(pcaId,conf))\n workThread.start()\n elif command.decode() == GlobalSystemTransitions.abort:\n if pcaId and pcaId in self.PCAs:\n self.abortFunction(pcaId)\n self.commandSocket.send(codes.ok)\n else:\n self.commandSocket.send(codes.error)\n elif command.decode() == GlobalSystemTransitions.reset:\n self.reset(pcaId)\n self.commandSocket.send(codes.ok)\n else:\n #command unknown\n return False\n return True", "def process_command(self, command):\n if not (type(command) is tuple and len(command) == 2):\n raise ValueError(\"Expected command to be a tuple of a string and a 
list\")\n\n action, channels = command\n self.logger.info(\"Received command %s (%s)\" % (action, ','.join(channels)))\n if action == 'join':\n for channel in channels:\n self.conn.join(channel)\n self.channels += channels\n elif action == 'part':\n for channel in channels:\n self.conn.part(channel)\n self.channels = [c for c in self.channels if c not in channels]", "def handle_input(sock):\n\tprint(\"Type message, enter to send. 'q' to quit\")\n\twhile True:\n\t\tmsg = input() #Blocks\n\t\tif msg == 'q':\n\t\t\tprint('Shut Down Client')\n\t\t\tsock.shutdown(socket.SHUT_RDWR)\n\t\t\tsock.close()\n\t\t\tbreak\n\t\ttry:\n\t\t\ttincanchat.send_msg(sock,msg) #Blocks until sent\n\t\texcept(BrokenPipeError,ConnectionError):\n\t\t\tbreak", "def transmit_command(command, socket, guit):\n if command == \"get_map_update\":\n send_data(socket, \"SEND_MAP\")\n ack = receive_data(socket)\n robot_map_data = json.loads(receive_data(socket)) # [[robot_x, robot_y], [map..]\n guit.receive_command([\"update_map\", robot_map_data[0], robot_map_data[1]])\n elif command == \"sync_mode\":\n send_data(socket, \"SYNC_MODE\") #0 is autonomous, 1 is manual\n ack = receive_data(socket)\n current_mode_integer = receive_data(socket)\n guit.receive_command([\"update_mode\", current_mode_integer]) \n elif len(command) > 3 and command[:4] == \"key_\": #Fulhack that will save us many rows.\n send_data(socket, \"KEY_EVENT\")\n ack = receive_data(socket)\n send_data(socket, command[4:])\n elif len(command) > 4 and command [:5] == \"mode_\":\n send_data(socket, \"TOGGLE_MODE\")\n ack = receive_data(socket)\n send_data(socket, command[5:])\n elif command == \"get_motor_data\":\n send_data(socket, \"FORWARD_MOTOR_INFO\")\n ack = receive_data(socket)\n motor_data = json.loads(receive_data(socket))\n dir_mod_left = 1 if motor_data[\"LEFT_SIDE_DIRECTION\"] else -1\n dir_mod_right = 1 if motor_data[\"RIGHT_SIDE_DIRECTION\"] else -1\n speed_left = motor_data[\"LEFT_SIDE_SPEED\"]*dir_mod_left\n speed_right = motor_data[\"RIGHT_SIDE_SPEED\"]*dir_mod_right\n guit.receive_command([\"set_motors\", speed_left, speed_right])\n guit.receive_command([\"set_servo\", motor_data[\"SERVO_ANGLE\"]])\n elif command == \"get_sensor_data\":\n send_data(socket, \"FORWARD_SENSOR_INFO\")\n ack = receive_data(socket)\n sensor_data = json.loads(receive_data(socket))\n guit.receive_command([\"set_sensors\", sensor_data])", "def run_command(self, command, joy_state):\n cmd = self.command_list[command]\n if cmd['type'] == 'topic':\n self.run_topic(command, joy_state)\n elif cmd['type'] == 'action':\n if cmd['action_name'] in self.offline_actions:\n self.get_logger().error('command {} was not played because the action '\n 'server was unavailable. Trying to reconnect...'\n .format(cmd['action_name']))\n self.register_action(command, self.command_list[command])\n else:\n if joy_state.buttons != self.old_buttons:\n self.run_action(command, joy_state)\n elif cmd['type'] == 'service':\n if cmd['service_name'] in self.offline_services:\n self.get_logger().error('command {} was not played because the service '\n 'server was unavailable. 
Trying to reconnect...'\n .format(cmd['service_name']))\n self.register_service(command, self.command_list[command])\n else:\n if joy_state.buttons != self.old_buttons:\n self.run_service(command, joy_state)\n else:\n raise JoyTeleopException(\n 'command {} is neither a topic publisher nor an action or service client'\n .format(command))", "def handle_parsed(parsed, sock, queue, nick, channel, operator):\n if parsed.type == 'PING':\n sock.send('PONG :{0}\\r\\n'.format(parsed.msg))\n if parsed.type == 'PRIVMSG' and parsed.msg.startswith(operator):\n cmd = parsed.msg[1:].split()[0]\n args = ' '.join(parsed.msg[1:].split()[1:])\n if cmd == 'id':\n temp_msg = '{#type} {#recipient} :%s' % parsed.msg[4:]\n queue.put(messages.Message(temp_msg, recipient=channel))\n if cmd == 'build':\n if len(args.split('/')) == 2:\n owner, repo = args.split('/')\n slight.execute_shell_script({\"name\": owner, \"repo_name\": repo})\n else:\n temp_msg = '{#type} {#recipient} :Invalid argument! Usage: %sbuild Owner/Repo' % operator\n queue.put(messages.Message(temp_msg, recipient=channel))", "def _process(self):\n\n while True:\n try:\n sockets = [self.master_fd]\n if self.sock:\n sockets.append(self.sock)\n # Don't handle user input while a side command is running.\n if len(self.filter) == 1:\n sockets.append(pty.STDIN_FILENO)\n rfds, _, _ = select.select(sockets, [], [], 0.25)\n except select.error as ex:\n if ex[0] == errno.EAGAIN: # Interrupted system call.\n continue\n raise\n\n if not rfds:\n self._timeout()\n else:\n # Handle one packet at a time to mitigate the side channel\n # breaking into user input.\n if self.master_fd in rfds:\n data = os.read(self.master_fd, 1024)\n self.master_read(data)\n elif pty.STDIN_FILENO in rfds:\n data = os.read(pty.STDIN_FILENO, 1024)\n self.stdin_read(data)\n elif self.sock in rfds:\n data, self.last_addr = self.sock.recvfrom(65536)\n if data[-1] == b'\\n':\n self.log(\"WARNING: the command ending with <nl>. 
\"\n \"The StreamProxy filter known to fail.\")\n self.log(\"Got command '%s'\" % data.decode('utf-8'))\n command = self.filter_command(data)\n self.log(\"Translated command '{}'\"\n .format(command.decode('utf-8')))\n if command:\n self.write_master(command)\n self.write_master(b'\\n')", "def add_socket_switch_operation_to_queue(self, address, unit, state):\n self.__operations.append([int(address), int(unit), int(state)])", "def usingHandler(self, cmd):\n self.command_handler.handle_command(cmd)\n while msg_queue.empty() is False:\n self.writeresponse(msg_queue.get())", "def connect(command):\r\n global CurrentState\r\n global CurrentInput\r\n global RESPONSEOPTIONS\r\n stateNum = \"\"\r\n for ltr in command:\r\n if ltr.isnumeric():\r\n stateNum += ltr\r\n try:\r\n target_state = getState(int(stateNum))\r\n if target_state != None:\r\n if RESPONSEOPTIONS != []:\r\n RESPONSEOPTIONS[0] = target_state\r\n else:\r\n RESPONSEOPTIONS.append(target_state)\r\n else:\r\n print(\"Could not find state\")\r\n except Exception as e:\r\n print(\"<<<Error: Connecting state failed>>>\",e)", "def do_cmd(cmd,sock):\n\n buffer = ''\n \n # Write the command and wait one second.\n print 'writing command '+cmd \n sock.send(cmd+SBE37_NEWLINE)\n time.sleep(1)\n \n # Block to receive all data.\n # Continue reading if the received data does not include a prompt.\n # Break out when the received data ends in a prompt.\n while True:\n try:\n data = ''\n data = sock.recv(1024)\n buffer += data\n except:\n raise\n else:\n #print 'received '+str(len(data))+' bytes' \n if buffer.endswith(SBE37Prompt.COMMAND):\n break\n elif buffer.endswith(SBE37Prompt.AUTOSAMPLE):\n break\n elif buffer.endswith(SBE37Prompt.BAD_COMMAND):\n break\n\n return buffer", "def handle_msg(s: socket, inputs: list[socket], server: socket):\n buff = bytes()\n # Put the socket in a list to pass it to select with timeout\n s_list = [s]\n\n while True:\n readable, writable, exceptional = select(s_list, [], [], 10)\n if len(readable) == 0:\n print(\"[x] Invalid petition\")\n return\n\n data = s.recv(1024)\n\n if len(data) == 0:\n return\n\n buff += data\n slice_obj = slice(-1, -5, -1)\n\n last_chars = buff[slice_obj]\n\n if last_chars.decode() == \"\\n\\r\\n\\r\":\n break\n\n petition = Petition(data.decode())\n\n if len(petition.method) != 0:\n if petition.method == 'GET':\n handle_get(s, petition)\n\n elif petition.method == 'POST':\n handle_post(s, petition)\n\n elif petition.method == 'DELETE':\n handle_delete(s, petition)\n\n if not petition.keep_alive:\n addr, port = s.getpeername()\n print(f' closing {addr}:{port}', file=stderr)\n # Stop listening for input on the connection\n inputs.remove(s)\n s.close()", "def _process(connection, process):\n try:\n command = connection.recv()\n except IOError as e:\n return \"Connection receive error: %s\" %(str(e))\n\n if command == __quit_command:\n try:\n connection.send(\"Exited server.\")\n finally:\n connection.close()\n return __quit_command\n\n #print \"Processing command\", command\n data = process(command)\n\n try:\n connection.send(data)\n except IOError as e:\n return \"Connection send error: %s\" %(str(e))\n\n connection.close()", "def _dispatch(self, msg):\n self.debug(\"Dispatching message CMD %r %s\", msg.cmd, msg)\n if msg.seqno in self.listeners:\n # self.debug(\"Dispatching sequence number %d\", msg.seqno)\n sem = self.listeners[msg.seqno]\n if isinstance(sem, asyncio.Semaphore):\n self.listeners[msg.seqno] = msg\n sem.release()\n else:\n self.debug(\"Got additional message without request - 
skipping: %s\", sem)\n elif msg.cmd == HEART_BEAT:\n self.debug(\"Got heartbeat response\")\n if self.HEARTBEAT_SEQNO in self.listeners:\n sem = self.listeners[self.HEARTBEAT_SEQNO]\n self.listeners[self.HEARTBEAT_SEQNO] = msg\n sem.release()\n elif msg.cmd == UPDATEDPS:\n self.debug(\"Got normal updatedps response\")\n if self.RESET_SEQNO in self.listeners:\n sem = self.listeners[self.RESET_SEQNO]\n self.listeners[self.RESET_SEQNO] = msg\n sem.release()\n elif msg.cmd == SESS_KEY_NEG_RESP:\n self.debug(\"Got key negotiation response\")\n if self.SESS_KEY_SEQNO in self.listeners:\n sem = self.listeners[self.SESS_KEY_SEQNO]\n self.listeners[self.SESS_KEY_SEQNO] = msg\n sem.release()\n elif msg.cmd == STATUS:\n if self.RESET_SEQNO in self.listeners:\n self.debug(\"Got reset status update\")\n sem = self.listeners[self.RESET_SEQNO]\n self.listeners[self.RESET_SEQNO] = msg\n sem.release()\n else:\n self.debug(\"Got status update\")\n self.listener(msg)\n else:\n if msg.cmd == CONTROL_NEW:\n self.debug(\"Got ACK message for command %d: will ignore it\", msg.cmd)\n else:\n self.debug(\n \"Got message type %d for unknown listener %d: %s\",\n msg.cmd,\n msg.seqno,\n msg,\n )", "async def handle_command(server, process, command):\n process.stdout.write(f'Hello {server.username}\\r\\n')\n if server.listeners:\n forwarding(server, process)\n return\n\n if command is None:\n if config.ENABLE_SHELL:\n await shell(server, process)\n\n else:\n process.stderr.write('This server does not support'\n ' interactive sessions.\\r\\n')\n logging.warning('Interactive shell disabled')\n process.exit(1)\n\n elif command not in supported_commands:\n process.stderr.write('Unsupported command\\n')\n process.exit(1)\n\n else:\n eval(f'{command}(server, process)')\n process.exit(0)", "def server_do(self,input, connstream):\r\n pass", "def _execute_impl(self, commands):\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.connect((self.host, self.port))\n for c in commands:\n conn.sendall(c)\n conn.recv(4096)\n conn.close()", "def interact(self):\n print('Ready to interact on socket connected with {}.'.format(self.remote_addr))\n try:\n # get initial input from user\n print('Enter input or press CTRL-D for no input.')\n data = sys.stdin.readline()\n self.remote_socket.sendall(data.encode())\n while True:\n if data.startswith('exit'):\n print('[*] Closing remote shell.')\n self.close()\n break\n # wait for response from target host\n recv_len = 1\n response = ''\n while recv_len:\n data = self.remote_socket.recv(4096)\n recv_len = len(data)\n response += data.decode()\n if recv_len < 4096:\n break\n print(response)\n # get further input from user\n print('Enter further input or press CTRL-D for no input.')\n data = sys.stdin.readline()\n self.remote_socket.sendall(data.encode())\n except Exception as e:\n print(e)\n print('[*] Closing remote shell.')\n self.close()" ]
[ "0.6953094", "0.6832541", "0.6550946", "0.6328179", "0.6293573", "0.6292742", "0.6161187", "0.6027672", "0.5915629", "0.5909542", "0.58832407", "0.5841701", "0.58089155", "0.58026075", "0.5779208", "0.5778535", "0.5763915", "0.5758714", "0.5742956", "0.57220924", "0.5711825", "0.56895906", "0.5686557", "0.5649239", "0.5603905", "0.5601284", "0.55879396", "0.5570548", "0.5570009", "0.55673414" ]
0.7060211
0
get base64 string repr of object or np image
def getbase64(nparr,): if type(nparr) == type({}): nparr = nparr['img'] im = Image.fromarray(nparr) buf = BytesIO() im.save(buf,format="JPEG") return base64.b64encode(buf.getvalue()).decode('ascii')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def base64(self):\n image = self.png.getvalue()\n return base64.encodestring(image).decode('utf-8')", "def data64(self) -> str:\n return Image.encode64(self.data)", "def base64_string(self) -> global___Expression:", "def _get_image(x):\n return b64encode(x).decode('ascii')", "def data_2_base64(data: np.ndarray) -> str:\n bytes_io = io.BytesIO()\n np.save(bytes_io, data, allow_pickle=False)\n return base64.b64encode(zlib.compress(bytes_io.getvalue())).decode('utf-8')", "def get_image_base64_str(self, message: ImageMessage) -> str:\n return ImageContentProcessor.binary_img_to_base64_str(self._core.get_message_content(str(message.id)).content)", "def np_to_base64(img_np):\n img = Image.fromarray(img_np.astype('uint8'), 'RGB')\n buffered = BytesIO()\n img.save(buffered, format=\"PNG\")\n return u\"data:image/png;base64,\" + base64.b64encode(buffered.getvalue()).decode(\"ascii\")", "def b64_image(self) -> bytes:\n buffer = BytesIO()\n self.image.save(buffer, \"PNG\") \n im_b64 = base64.b64encode(buffer.getvalue())\n im_b64 = b\"data:image/png;base64,\" + im_b64\n return im_b64", "def base64_encode_image(inArray):\n imgDat = [base64_encode_array(inArray).decode(\"utf-8\")]\n imgType = str(inArray.dtype)\n imgShape = inArray.shape\n return json.dumps([ imgDat, imgType, imgShape ])", "def image_to_base64str(image):\n file_bytes = image.file.read()\n base64_img_str = 'data:image;base64, '\n base64_img_str += str(base64.b64encode(file_bytes), 'utf-8')\n return base64_img_str", "def __repr__(self) -> str:\n d = self.image_data\n shape = \"x\".join(map(str, d.shape)) if d is not None else \"<no data>\"\n return f\"{self.widget_type}({shape}, name={self.name!r})\"", "def figure_to_base64str(fig: matplotlib.figure.Figure) -> str:\n buf = io.BytesIO()\n fig.savefig(buf, bbox_inches='tight', format='png')\n return base64.b64encode(buf.getbuffer().tobytes()).decode('ascii')", "def obimg():\n # The client might make a call to get a pic for an object which might\n # not have one. 
Better to return a blank than an error in that case.\n imgdat = B64ENCTRANSPARENT4X4PNG\n try:\n dsType = dbacc.reqarg(\"dt\", \"string\", required=True)\n dsId = dbacc.reqarg(\"di\", \"string\", required=True)\n inst = dbacc.cfbk(dsType, \"dsId\", dsId)\n if inst:\n picfldmap = {\"Point\": \"pic\"}\n imgdat = inst[picfldmap[dsType]]\n imgdat = base64.b64decode(imgdat)\n except ValueError as e:\n return util.serve_value_error(e)\n return util.respond(imgdat, mimetype=\"image/png\")", "def as_str(self) -> str:\n return dumps(self.as_dict(), cls=NumpyEncoder)", "def get_body(self):\n from matplotlib.backends.backend_agg import \\\n FigureCanvasAgg as FigureCanvas\n\n canvas = FigureCanvas(self._body)\n png_output = BytesIO()\n canvas.print_png(png_output)\n data = png_output.getvalue()\n\n data_uri = base64.b64encode(data).decode('utf-8')\n return '<img title=\"{}\" src=\"data:image/png;base64,{}\">'.format(\n self.key, data_uri)", "def b64raster(self):\n r = self.craster()\n if r:\n if len(r) == 1:\n return r\n return b64encode(r)\n else:\n return None", "def serialize(obj):\n result = base64.urlsafe_b64encode(obj)\n # this workaround is needed because in case of python 3 the\n # urlsafe_b64encode method returns string of 'bytes' class.\n result = result.decode()\n return result", "def dumps(self) -> str:\n bits = dill.dumps(self)\n return base64.b64encode(bits).decode(\"ascii\")", "def __repr__(self) -> str:\n\n thresh = np.get_printoptions()[\"threshold\"]\n np.set_printoptions(threshold=20)\n extra_chars = len(self.__class__.__name__)\n arr_str = \"data=\" + str(self.data).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 6))\n shape_str = (\n \" \" * extra_chars\n + \" shape=\"\n + str(self.shape).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 7))\n )\n dtype_str = \" \" * extra_chars + \" dtype=\" + str(self.dtype)\n np.set_printoptions(threshold=thresh)\n return \"{klass}({data},\\n{shape},\\n{dtype})\".format(\n klass=self.__class__.__name__,\n data=arr_str,\n shape=shape_str,\n dtype=dtype_str,\n )", "def save_img_base64(_preds):\n img = Image.fromarray(_preds)\n buff = BytesIO()\n img.save(buff, format=\"JPEG\")\n return base64.b64encode(buff.getvalue())", "def picture_base64(self) -> str:\n return self.properties.get(MessageField.PICTURE.value)", "def __repr__(self) -> str:\n if self.long_repr:\n attr_repr = 'arr_shape={}, arr_dtype={}, metadata={}'.format(self.data.shape,\n self.data.dtype,\n self.metadata)\n else:\n attr_repr = 'arr_shape={}, arr_dtype={}, metadata_type={}'.format(self.data.shape,\n self.data.dtype,\n type(self.metadata))\n repr_str = '{}({})'.format(\n self.__class__.__name__,\n attr_repr\n )\n return repr_str", "def get_image(self, data_base):\n cursor = data_base.cursor(dictionary=True)\n cursor.execute(f\"SELECT data FROM image WHERE id = '{self.image_id}'\")\n image = cursor.fetchone()\n cursor.close()\n return b64encode(image['data']).decode('utf-8')", "def _encode_value(value):\n # leave numbers alone\n if isinstance(value, (int, long, float)):\n return value\n\n # leave Nones alone (they turn into null in JSON)\n if value is None:\n return value\n\n # convert datetime to str\n if isinstance(value, datetime.datetime):\n # return, don't go through truncation\n return str(value)\n\n # represent image as base64-encoded bytes\n import binascii\n from graphlab.data_structures.image import Image\n if isinstance(value, Image):\n image_format = None\n if value._format_enum == 0:\n image_format = 'jpeg'\n elif value._format_enum == 1:\n image_format = 'png'\n 
elif value._format_enum == 2:\n image_format = 'raw'\n if image_format is not None:\n ret = {\n 'type': 'image',\n 'width': value._width,\n 'height': value._height,\n 'channels': value._channels,\n 'format': image_format,\n 'id': id(value)\n }\n if image_format in ('jpeg', 'png'):\n ret.update({\n 'value': 'image/%s;base64,%s' % (image_format, binascii.b2a_base64(value._image_data))\n })\n elif image_format == 'raw':\n ret.update({\n 'value': list(value._image_data)\n })\n return ret\n\n # fallback case for images the browser does not know how to display\n # just convert to str and treat like any other type\n value = str(value)\n\n # convert strings to unicode (assumes utf-8 encoding, replaces invalid\n # characters with ?\n if isinstance(value, str) and sys.version_info.major == 2:\n value = unicode(value, encoding='utf-8', errors='replace')\n\n # get the array into a list so it is JSON serializable\n if isinstance(value, array.array):\n value = value.tolist()\n\n # truncate to 10 elements first\n if isinstance(value, (array.array, list)):\n value = value[:10]\n elif isinstance(value, dict):\n keys = value.keys()[:10]\n truncated = {}\n for key in keys:\n truncated[key] = value[key]\n value = truncated\n\n # get dict/list values properly encoded inside before dumping to str\n if isinstance(value, list):\n value = [_encode_value(v) for v in value]\n elif isinstance(value, dict):\n value = {_encode_value(k): _encode_value(v) for (k,v) in six.iteritems(value)}\n\n # json serialize dict/list types to convert to string\n if isinstance(value, (dict, list)):\n value = _to_json(value)\n\n # truncate via textwrap (will break on word boundaries if possible)\n wrapped = textwrap.wrap(value, 18)\n if len(wrapped) == 0:\n return ''\n\n return '%s%s' % (\n wrapped[0],\n '' if len(wrapped) == 1 else ' ...'\n )", "def prepare_output(image: np.ndarray) -> str:\n response_image = Image.fromarray(np.uint8(image * 255))\n buffer = BytesIO()\n response_image.save(buffer, \"PNG\")\n encoded = base64.b64encode(buffer.getvalue())\n return \"data:image/png;base64,\" + str(encoded)[2:-1]", "def _repr_html_(self):\n\n import numpy as np\n import matplotlib.pyplot as plt\n from .._tier9 import imshow\n\n\n size_in_pixels = np.prod(self.shape)\n size_in_bytes = size_in_pixels * self.dtype.itemsize\n\n labels = (self.dtype == np.uint32)\n\n # In case the image is 2D, 3D and larger than 100 pixels, turn on fancy view\n if len(self.shape) in (2, 3) and size_in_pixels >= 100:\n import matplotlib.pyplot as plt\n imshow(self,\n labels=labels,\n continue_drawing=True,\n colorbar=not labels)\n image = self._png_to_html(self._plt_to_png())\n else:\n return \"<pre>cle.array(\" + str(np.asarray(self)) + \", dtype=\" + str(self.dtype) + \")</pre>\"\n\n\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n size = \"{:.1f}\".format(size_in_bytes) + \" GB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" MB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" kB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" B\"\n\n histogram = \"\"\n\n if size_in_bytes < 100 * 1024 * 1024:\n if not labels:\n\n import numpy as np\n from .._tier2 import minimum_of_all_pixels, maximum_of_all_pixels\n from .._tier3 import histogram\n\n num_bins = 32\n\n h = np.asarray(histogram(self, num_bins=num_bins))\n\n plt.figure(figsize=(1.8, 1.2))\n plt.bar(range(0, len(h)), h)\n\n # hide axis 
text\n # https://stackoverflow.com/questions/2176424/hiding-axis-text-in-matplotlib-plots\n # https://pythonguides.com/matplotlib-remove-tick-labels\n frame1 = plt.gca()\n frame1.axes.xaxis.set_ticklabels([])\n frame1.axes.yaxis.set_ticklabels([])\n plt.tick_params(left=False, bottom=False)\n\n histogram = self._png_to_html(self._plt_to_png())\n\n min_max = \"<tr><td>min</td><td>\" + str(self.min()) + \"</td></tr>\" + \\\n \"<tr><td>max</td><td>\" + str(self.max()) + \"</td></tr>\"\n\n else:\n\n min_max = \"\"\n\n all = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n image,\n \"</td>\",\n \"<td style=\\\"text-align: center; vertical-align: top;\\\">\",\n \"<b><a href=\\\"https://github.com/clEsperanto/pyclesperanto_prototype\\\" target=\\\"_blank\\\">cle._</a> image</b><br/>\",\n \"<table>\",\n \"<tr><td>shape</td><td>\" + str(self.shape).replace(\" \", \"&nbsp;\") + \"</td></tr>\",\n \"<tr><td>dtype</td><td>\" + str(self.dtype) + \"</td></tr>\",\n \"<tr><td>size</td><td>\" + size + \"</td></tr>\",\n min_max,\n \"</table>\",\n histogram,\n \"</td>\",\n \"</tr>\",\n \"</table>\",\n ]\n\n return \"\\n\".join(all)", "def as_bytes(array_or_image,mimetype='image/png'):\n buf = StringIO()\n fmt = mimetype2format(mimetype)\n im = as_pil(array_or_image).save(buf,fmt)\n return buf.getvalue()", "def imageset_to_string(obj, compact=False):\n if compact:\n return json.dumps(\n imageset_to_dict(obj), separators=(\",\", \":\"), ensure_ascii=True\n )\n else:\n return json.dumps(imageset_to_dict(obj), indent=2, ensure_ascii=True)", "def get_Base64(self):\n\n return base64_with_linebreaks(self.get_DER())", "def _repr_png_(self):\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoimg(\n mol, size, self.aix, \"\", returnPNG=True, drawOptions=opts,\n kekulize=keku, highlightBonds=self.bix\n )" ]
[ "0.72159815", "0.6906734", "0.68322635", "0.682055", "0.6737309", "0.67277545", "0.6667643", "0.66208005", "0.66028064", "0.6586784", "0.65735114", "0.6563396", "0.65275586", "0.6448807", "0.63930434", "0.63762724", "0.63067997", "0.63018715", "0.62593025", "0.6230752", "0.6191719", "0.6155181", "0.61424905", "0.6131876", "0.612612", "0.6110699", "0.6069267", "0.6067608", "0.606382", "0.605048" ]
0.6916091
1
make a plot of each object and put its image next to it. func defines the type of plot and anything that is done to each image, obj pair
def _dump_plotly(objs, images, func): l = len(objs) #print(l) titles = [] for i,x in enumerate(objs): if 'id' in x: titles.append('shape id %d' % x.id) else: titles.append('item %d' % i) fig = tools.make_subplots(rows=l, cols=1, subplot_titles = titles,print_grid=False ) #print('figure attmpt: ') #fig['layout']['xaxis1'].update(title='monkeybar') #for x in fig['layout']['xaxis1']: #print(x) fig.layout.showlegend = False for i,x in enumerate(objs): traces,annotations,title = func(x,images[i]) im = { "source": 'data:image/png;base64, ' + getbase64(images[i]), "x": 1, "y": 1 - i/(l-.5), "sizex": .5, "sizey": .5, } fig.layout.images.append(im) for t in traces: fig.append_trace(t,i+1,1) if title is not None: fig.layout['xaxis%d' % (i+1)].update(title=title) if annotations is not None: for a in annotations: a['xref'] = 'x%d' % (i+1) a['yref'] = 'y%d' % (i+1) fig.layout.annotations += annotations fig['layout'].update(height=400*l, width=1100, margin={ 'l':80, 'r':330, 't':100, 'b':80, 'pad':0, 'autoexpand':True, },title='plots') return fig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_single(potential_func, obstacles, filename, xlim=(-400, 400), ylim=(-400, 400)):\n print \"Generating\", filename\n fig = plt.figure()\n plot = plt.subplot(111)\n show_arrows(plot, potential_func, xlim=xlim, ylim=ylim)\n for obstacle in obstacles:\n show_obstacle(plot, obstacle)\n fig.savefig(filename, format='png')", "def _plot_objects(self):\n\n # Remove Existing Annotations\n for annotation in self._objectAnnotations:\n annotation.remove()\n self._objectAnnotations[:] = []\n\n # Set Offsets\n self._objectPoints.set_offsets(np.c_[self._objectX, self._objectZ])\n\n # Update Annotations\n for name, x, z in zip(self._objectNames, self._objectX, self._objectZ):\n self._objectAnnotations.append(self._scatPlot.annotate(name, (x, z)))", "def plot_obj_func():\n X1 = [i for i in range(-63, 65, 1)]\n Y1 = [8 * math.sin(0.06 * x) + 8 * math.cos(0.14 * x) + 8 * math.exp(math.cos(0.2*x)) for x in X1]\n plt.plot(X1, Y1)\n plt.show()", "def PlotGraph(obj):\n\n generated_text = \"\\n\\n\\nclass PlotGraph():\"\n\n # get the parameters needed from the object\n expression = obj[\"expression\"]\n title = obj[\"name\"] + \" Graph\"\n graphColor = \"b\"\n scatter = False\n\n # optional parameters\n if obj[\"title\"]:\n title = obj[\"title\"] # should be written more concisely in python 3.8\n\n if obj[\"graphColor\"]:\n graphColor = obj[\"graphColor\"] # should be written more concisely in python 3.8\n\n if obj[\"scatter\"]:\n scatter = obj[\"scatter\"] # should be written more concisely in python 3.8\n\n # CONSTRUCTOR\n # def __init__(self, start, stop, num_samples, title=\"example\"):\n generated_text += \"\\n\\tdef __init__(self, start, stop, num_samples, title=\\\"{}\\\"): \".format(title)\n generated_text += \"\\n\\t\\tself.function = \\\"\\\"\"\n generated_text += \"\\n\\t\\tself.title = title\"\n generated_text += \"\\n\\t\\tself.X = np.linspace(start, stop, num_samples)\"\n generated_text += \"\\n\\t\\tself.Y = []\"\n\n # f()\n generated_text += \"\\n\\n\\tdef f(self):\"\n generated_text += \"\\n\\t\\tself.Y = [self.compute(x) for x in self.X]\"\n\n # compute()\n generated_text += \"\\n\\n\\tdef compute(self, x):\"\n generated_text += \"\\n\\t\\treturn np.sin(x)\"\n\n # plot()\n generated_text += \"\\n\\n\\tdef plot(self, scatter=False, color='{}'):\".format(graphColor)\n generated_text += \"\\n\\t\\tplt.figure(1)\\n\\t\\tplt.title(self.title)\"\n generated_text += \"\\n\\t\\tif scatter:\"\n generated_text += \"\\n\\t\\t\\tplt.scatter(self.X, self.Y, c=color)\\n\\t\\t\\treturn\"\n generated_text += \"\\n\\t\\tplt.plot(self.X, self.Y, c=color)\"\n\n # show()\n generated_text += \"\\n\\n\\tdef show(self):\"\n generated_text += \"\\n\\t\\tplt.show()\"\n\n # call()\n generated_text += \"\\n\\n\\tdef call(self):\"\n generated_text += \"\\n\\t\\tself.f()\"\n generated_text += \"\\n\\t\\tself.plot()\"\n generated_text += \"\\n\\t\\tself.show()\"\n\n #print(generated_text)\n return generated_text", "def plot_object(fits_file, object_getter=asteroid):\n data = data_from_fits(fits_file)\n data = object_getter(data)\n plt.figure()\n plt.title(fits_file)\n plt.imshow(np.log10(data))", "def plot_images(data_obj, fs):\n\n fig = new_pdf_page(data_obj.pdf_obj) # Create a new page\n plt.suptitle('Cropped and Rotated Images')\n\n for i, file in enumerate(data_obj.files):\n\n # Specify the plot parameters\n ax2 = fig.add_subplot(3, 2, i + 1)\n plt.tick_params(axis='both', which='both', bottom='off', left='off',\n top='off', right='off', labelbottom='off', labelleft='off')\n\n # Plot the image and the image 
title\n plt.imshow(data_obj.img_data[i].img, cmap=plt.get_cmap('gray'))\n plt.title('Orientation ' + str(data_obj.img_data[i].orientation), fontsize=fs)\n\n # Add the ROI rectangles to the plot\n data_obj.img_data[i].step_wedge_ROI.add_rect_to_plot(edgecolor='orange')\n data_obj.img_data[i].lead_foil_ROI.add_rect_to_plot(edgecolor='blue')\n data_obj.img_data[i].POM_piece_ROI.add_rect_to_plot(edgecolor='red')", "def main(self, args):\n for plot in args.plots:\n if plot == 'no_plot':\n break\n print \"plotting\", plot\n\n fig = self.plot_figure(plot)\n\n fformat = '{plot}_{index}.{ext}'\n fname = fformat.format(plot=plot, index=self.index, ext='png')\n fpath = os.path.join(plot_dir, fname)\n fig.savefig(fpath)\n\n if args.distributions == 'all':\n distributions = ['Uf', 'Wf', 'uf_abs',\n 'vorticity', 'vertical_shear']\n else:\n distributions = args.distributions\n for dist in distributions:\n range = self.properties[dist]['range']\n name = self.properties[dist]['name']\n print \"plotting distribution\", dist, name\n fig = self.plot_distribution(getattr(self, dist), range, name)\n\n fformat = 'distribution_{q}_{index}.{ext}'\n fname = fformat.format(q=dist, index=self.index, ext='png')\n fpath = os.path.join(plot_dir, fname)\n fig.savefig(fpath)\n\n if args.funcs:\n for func in args.funcs:\n print \"multiprocessing\", func\n f = getattr(self, 'plot_' + func)\n f()", "def make_plot(x,y):", "def plot_ppplot(obj1,sheet1,variable1,obj2,sheet2,variable2,title,opath):\n p1 = np.percentile(obj1.me[sheet1][variable1],range(0,101,1))\n p2 = np.percentile(obj2.me[sheet2][variable2],range(0,101,1))\n p1c = np.cumsum(np.array(p1))/np.cumsum(np.array(p1)).max()\n p2c = np.cumsum(np.array(p2))/np.cumsum(np.array(p2)).max()\n fig = plt.figure(figsize=(8,8),dpi=120)\n plt.scatter(p1c,p2c,color='#566c73',s=30)\n plt.plot([0,1],[0,1],color='red',alpha=0.3)\n plt.xlim(0,1)\n plt.ylim(0,1)\n plt.grid()\n plt.xlabel(sheet1+'_'+variable1)\n plt.ylabel(sheet2+'_'+variable2)\n plt.title(title)\n plt.savefig(opath+'.png')\n plt.close()", "def map_plot(self, iter_no):\n \n m = self._m\n n = self._n\n plt.figure()\n label=np.zeros(m*n)\n self._trained = True\n mapped = self.map_vects(datanorm)\n mapped=tuple(map(tuple, mapped))\n c=Counter(mapped)\n \n c= sorted(c.items(), key=itemgetter(1))\n a=[m*n]\n for i in range(0,len(c)):\n x=(((c[i])[0])[0])\n y=(((c[i])[0])[1])\n z=((c[i])[1])\n plt.plot(x, y, 'ro', markersize= z/(2*m*n)) \n plt.savefig('exoplanet{}.png'.format(iter_no))\n p=plt.imread('exoplanet{}.png'.format(iter_no))\n imgs.append(p)\n plt.show()\n plt.close()\n print(c)\n self._trained = False", "def visualize(imgobjs, cols=4, collated=True, size=None):\n\n ## Separate into list of single instance image objects\n imgs = []\n if isinstance(imgobjs, list):\n for io in imgobjs:\n imgs += images._create_img_list(io)\n else:\n imgs = images._create_img_list(imgobjs)\n\n ## Grid layout settings. 
Sets N, N_rows, N_cols\n N = len(imgs)\n assert N > 0\n if not size:\n size = [0, 0] # H, W\n for img in imgs:\n _, _, H, W = get_dimensions(img)\n size[0] += H\n size[1] += W\n size = [int(d/len(imgs)) for d in size]\n else:\n assert len(size) == 2\n\n N_cols = cols if cols else 4\n if N < 4:\n N_cols = N\n N_rows = math.ceil(N/N_cols)\n print(f\"Cols: {N_cols}, Rows: {N_rows}\")\n\n ## Display Figure\n figure = plt.figure(figsize=(15, 10))\n for i in range(N):\n dims = images.get_dimensions(imgs[i])[1:]\n title = f\"[Image {i+1}/{N}]\"\n if isinstance(imgs[i], str):\n title = f\"[Image {i+1}/{N}] {files.get_filename(imgs[i])}\"\n title += f\"\\n shape{dims}\"\n img = images.to_np(imgs[i], size=size, color='rgb')\n subplt = figure.add_subplot(N_rows, N_cols, i+1)\n subplt.set_title(title, fontsize=10)\n subplt.axis('off')\n plt.imshow(img)\n figure.tight_layout()\n # plt.subplots_adjust(wspace=.25, hspace=.5)\n plt.show()", "def plot():\n pass", "def visualise(self, obj):\n self.clear()\n self.draw(obj)\n self.show()", "def plot_3d_object(object_):\n \n # Initialize renderer instance\n r = Renderer()\n\n # Add surfaces and goal regions to the renderer instance\n for surf in object_:\n r.add((object_[surf][0],'b',1))\n if len(object_[surf])>2:\n r.add((object_[surf][2],'r',1))\n r.add((gPoint(-15,-15,-15),'k',1))\n r.show()", "def plot_sample_imgs(get_imgs_fun, img_shape, plot_side=5, savepath=None, cmap='gray'):\n f, axarr = plt.subplots(plot_side, plot_side)\n samples = get_imgs_fun(plot_side*plot_side)\n for row in range(plot_side):\n for col in range(plot_side):\n axarr[row, col].imshow(samples[plot_side*row+col].reshape(img_shape), cmap=cmap)\n axarr[row, col].set_title('')\n axarr[row, col].axis('off')\n if savepath:\n f.savefig(savepath)\n plt.close()\n else:\n plt.show()", "def __init__(self, MRIObj, pRFModelObj = None, FAModelObj = None,\n pRF_data = [], FA_data = [],\n prf_dm = [], max_ecc_ext = 5.5,\n pysub = 'hcp_999999', flatmap_height = 2048, full_figsize = (12, 8)):\n\n # set data object to use later on\n self.MRIObj = MRIObj\n\n # Load pRF and model object\n self.pRFModelObj = pRFModelObj\n self.FAModelObj = FAModelObj\n\n ## data to be plotted \n self.pRF_data = pRF_data\n self.FA_data = FA_data\n\n ## figure settings\n self.flatmap_height = flatmap_height\n self.full_figsize = full_figsize\n self.images = {}\n \n ## create pycortex vars\n self.mask, extents = cortex.quickflat.utils.get_flatmask(pysub, height = self.flatmap_height)\n self.vc = cortex.quickflat.utils._make_vertex_cache(pysub, height = self.flatmap_height)\n\n self.mask_index = np.zeros(self.mask.shape)\n self.mask_index[self.mask] = np.arange(self.mask.sum())\n\n # set prf dm\n self.prf_dm = prf_dm\n\n ## set grid of possible points in downsampled space\n self.point_grid_2D = np.array(np.meshgrid(np.linspace(-1, 1, prf_dm.shape[0]) * max_ecc_ext,\n np.linspace(1, -1, prf_dm.shape[0]) * max_ecc_ext))", "def visualize(self):\n colors = {'outline': (220, 220, 220),\n 'inlier': (0, 255, 0),\n 'outlier': (0, 0, 255),\n 'lines': (128, 220, 128)}\n # Create output image for visualization\n gap = 5\n h1, w1 = self.target.image.shape[:2]\n h2, w2 = self.image.shape[:2]\n vis = np.zeros((max(h1, h2), w1 + w2 + gap, 3), np.uint8)\n vis[:h1, :w1, :] = self.target.image\n w1 += gap\n vis[:h2, w1:w1+w2, :] = self.image\n \n # Draw the located object \n quad = np.float32(self.quad) + np.float32([w1, 0])\n self.draw(vis, colors['outline'], 2, quad)\n \n # draw point details\n inliers = [(x0, y0, x1 + w1, y1) for (x0, y0), 
(x1, y1) in self.inliers]\n outliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.outliers]\n if colors['outlier'] is not None: # draw x on each point\n r = 2 # radius\n thickness = 2\n for x0, y0, x1, y1 in outliers:\n cv2.line(vis, (x0 - r, y0 - r), (x0 + r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x0 + r, y0 - r), (x0 - r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 + r, y1 - r), (x1 - r, y1 + r), colors['outlier'], thickness)\n if colors['lines'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.line(vis, (x0, y0), (x1, y1), colors['lines'], 1)\n if colors['inlier'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.circle(vis, (x0, y0), 2, colors['inlier'], -1)\n cv2.circle(vis, (x1, y1), 2, colors['inlier'], -1)\n return vis", "def plot_sample_imgs(get_imgs_fun, img_shape, plot_side=5, savepath=None):\n f, axarr = plt.subplots(plot_side, plot_side)\n samples = get_imgs_fun(plot_side*plot_side)\n for row in range(plot_side):\n for col in range(plot_side):\n axarr[row, col].imshow(samples[plot_side*row+col].reshape(img_shape))\n axarr[row, col].set_title('')\n axarr[row, col].axis('off')\n if savepath:\n f.savefig(savepath)\n plt.close()\n else:\n plt.show()", "def chooseObject(self, x, y, z):\r\n\r\n \"\"\"deletes the editPoints list. This must be reset every time a new object\r\n is clicked on so that the prevous editpoints from another segmentation do not\r\n interfere with the current object's segmentation\"\"\"\r\n \r\n del self.editPoints[:]\r\n \r\n self.dispedge = to_rgb(self.img[self.z_stack])\r\n \r\n #print \"SENDER LABEL: \" + str(sender.text()\r\n self.center= np.array((z*self.zinterp, x, y))\r\n\r\n xpix=self.img.shape[2]-1\r\n ypix= self.img.shape[1]-1\r\n zpix= self.img.shape[0]*self.zinterp-1\r\n \r\n\r\n #currently padding all sides by 50 for now.\r\n\r\n self.radius= self.curRadius\r\n self.count+=1\r\n \r\n self.padList= np.array([xpix-x, ypix-y, zpix-z*self.zinterp, x, y,z*self.zinterp])\r\n self.padList= self.radius-self.padList\r\n self.padList=(self.padList>0)*self.padList\r\n\r\n \"\"\"perform graphcut and display on interface\"\"\"\r\n self.temp, self.edge= graphCut(interp_img(self.img, self.zinterp), self.center, self.radius,self.temp, self.edge, self.count, self.editPoints, self.padList, self.theta_div, self.phi_div)\r\n self.shrink= self.edge[0:interp_img(self.img, self.zinterp).shape[0]:self.zinterp]!=0\r\n \r\n \"\"\"display the object on the xy plane\"\"\"\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n \r\n \"\"\"display the object on the xz plane\"\"\"\r\n self.pixmap4=self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n \r\n \"\"\"display the object on the yz plane\"\"\"\r\n self.pixmap6=self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)", "def plot(self, ax: Axes):\n\n plotted_objects = Element.plot(self, ax)\n plotted_objects += plotting.plot_aperture(ax, self)\n\n if plot_blockers:\n plotted_objects += plotting.plot_blocker(ax, self, self.blocker_diameter)\n\n return plotted_objects", "def plot_individual(xdict, ydict, xprop, yprop, documents, spline):\n figure_array = {}\n for item in documents:\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n x = xdict[item[\"path_id\"]]\n y = ydict[item[\"path_id\"]]\n # fig_title = item[\"path_id\"] + \"(\" + item[\"pretty_formula\"] + \")\" # Individual traces\n # 
fig_title = yprop + item[\"cation_type\"] # Plot by cation\n fig_title = yprop # All together\n figure_array[item[\"path_id\"]] = plt.figure(fig_title, figsize=(6,6), dpi=plotting_dpi)\n ax = figure_array[item[\"path_id\"]].add_subplot(111) \n ax.scatter(x,y, s=70, zorder=2, color=color_dict[item[\"cation_type\"]], linewidths=2.5, edgecolors='black')\n if spline:\n tck = interpolate.splrep(x, y, s=0)\n xnew = np.arange(0, 100, 0.1)\n splfit = interpolate.splev(xnew, tck, der=0)\n x = xnew\n y = splfit\n if item[\"path_id\"][-3:] == \"002\":\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]], linestyle='dashed')\n elif item[\"path_id\"][-3:] == \"003\":\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]], linestyle='dotted')\n else:\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]])\n ax.set_xlabel(xlabel, fontsize=24)\n # ax.set_ylim([0,1200])\n # ax.set_xlim([0,100])\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size': 14})\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.tight_layout()\n plt.show()", "def show_dprime(sim_attr_generator):\n#TODO description\n dprime_fnc_list = [\n (sim_attr.id_name,sim_attr.dprime_fnc) for sim_attr in sim_attr_generator\n ]\n\n if Args.mat_file_out != None:\n save_dict = dict()\n else:\n x_axis = int(math.ceil(math.sqrt(len(dprime_fnc_list))))\n y_axis = int(math.ceil(float(len(dprime_fnc_list)) / x_axis))\n fig, axes = plt.subplots(nrows=y_axis,ncols=x_axis)\n\n#? Code duplication\n if len(dprime_fnc_list) == 1:\n id_name, dprime_fnc = dprime_fnc_list[0]\n mesh_X, mesh_Y, mesh_Z = dprime_fnc_to_mesh_grid(\n dprime_fnc, linspace=Args.grid_size\n )\n im = show_plot_imshow_from_mesh(\n axes, mesh_X, mesh_Y, mesh_Z, title=id_name, vmax=Args.upper_bound\n )\n fig.colorbar(im,shrink=0.8)\n plt.show()\n# End code duplication\n return\n\n for i, (id_name, dprime_fnc) in enumerate(dprime_fnc_list):\n mesh_X, mesh_Y, mesh_Z = dprime_fnc_to_mesh_grid(\n dprime_fnc, linspace=Args.grid_size\n )\n if Args.mat_file_out != None:\n dprime_fnc[id_name] = {'X':mesh_X, 'Y':mesh_Y, 'Z':mesh_Z}\n else:\n im = show_plot_imshow_from_mesh(\n axes.flat[i], mesh_X, mesh_Y, mesh_Z, title=id_name, vmax=Args.upper_bound\n )\n if Args.mat_file_out != None:\n scipy.io.savemat(Args.mat_file_out, save_dict)\n else:\n fig.colorbar(im,ax=axes.ravel().tolist(),shrink=0.8)\n plt.show()", "def show(image, label, weights, prediction, ax):\n global img_objects\n if len(img_objects)==0:\n for i in range(10):\n _img = ax[0, i].imshow(weights[i].reshape(28,28), cmap='gray')\n img_objects.append(_img)\n _img = ax[1, 5].imshow(image.reshape(28,28), cmap='gray')\n img_objects.append(_img)\n else:\n for i in range(10):\n img_objects[i].set_data(weights[i].reshape(28,28))\n img_objects[i].set_clim(vmin=0, vmax=np.max(weights[i]))\n img_objects[10].set_data(image.reshape(28,28))\n ax[0,5].set_title('truth: %d, predict: %d'%(np.argmax(label), prediction))", "def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n temp = self.x[counter,:]\n counter += 1\n img = self.x[counter,:]\n axarr[i][j].imshow(img)\n #######################################################################\n # #\n # #\n # TODO: 
YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def plot_rgb(r_fits, g_fits, b_fits, object_getter=asteroid):\n data = [None, None, None]\n for ii, fits_file in enumerate([r_fits, g_fits, b_fits]):\n data[ii] = data_from_fits(fits_file)\n data[ii] = object_getter(data[ii])\n plt.figure()\n plt.imshow(data[ii])\n\n data = np.dstack(data)\n plt.figure()\n plt.imshow(data)", "def plot(path, subjects):\n transformToXYZmm = np.array([[-3.125, 0, 0, 81.250], [0, 3.125, 0, -115.625], [0, 0, 6, -54.000], [0, 0, 0, 1.000]])\n data = data_load.load_data(path, subjects)\n dimx = int(data[0][\"meta\"][\"dimx\"][0])\n dimy = int(data[0][\"meta\"][\"dimy\"][0])\n dimz = int(data[0][\"meta\"][\"dimz\"][0])\n coordToCol = data[0][\"meta\"][\"coordToCol\"][0][0]\n images = {}\n max_val = 0\n voxels = np.load(\"data/general_selected_500_1.npy\")\n directory = os.listdir(\"data/input/\")\n bar = pyprind.ProgBar(len(directory), title='Info extraction and Image Building')\n bar2 = pyprind.ProgBar(len(images.keys()), title='Saving Pictures')\n for file in directory:\n file_name = \"data/input/{}\".format(file)\n fh = open(file_name)\n activation_values = np.asarray(list(map(lambda x: float(x), filter(lambda x: x != '', fh.read().split(\",\")))))\n fh.close()\n plot_matrix = np.zeros((dimx, dimy, dimz))\n for x in range(dimx):\n for y in range(dimy):\n for z in range(dimz):\n indice = coordToCol[x][y][z]\n if indice != 0:\n if indice in list(voxels):\n voxel_indice = list(voxels).index(indice)\n value = activation_values[voxel_indice]\n if abs(value) > max_val:\n max_val = abs(value)\n plot_matrix[x][y][z] = value\n image = nib.Nifti1Image(plot_matrix, transformToXYZmm)\n images[file_name] = image\n bar.update(force_flush=True)\n print(bar)\n for image in images:\n plotting.plot_glass_brain(images[image], display_mode='ortho', vmax=max_val, plot_abs=False, threshold=None, colorbar=True, output_file=\"{}-wom1.png\".format(image))\n bar2.update(force_flush=True)\n print(bar2)", "def plot(self, *names):\r\n for name in names:\r\n if name in self.__obs.keys():\r\n list_obs = self.__obs[name]\r\n if not isinstance(list_obs[0], matrix):\r\n fig = plt.figure()\r\n plt.plot(self.__obs[name])\r\n else:\r\n fig = plt.figure()\r\n for i in range(list_obs[0].size):\r\n plt.plot([float(obs[i]) for obs in list_obs], label=\"Dimension {0}\".format(i))\r\n plt.legend()\r\n plt.ylabel(name)\r\n plt.show()\r\n else:\r\n for sous_objet in self.__sous_objets:\r\n if re.match((sous_objet+\"?\").encode('string-escape'), name.lower()):\r\n self.__dict__[sous_objet].plot(name)", "def gen_img_list(self, fig, **kwargs):\n cond = dict(self.cond_ex, **self.cond_cal, **self.plot_param)\n dic_dat = {\"x\": self.x,\n \"t_history\": self.t_history,\n \"r_history\": self.r_history,\n \"Pc_history\": self.Pc_history,\n \"Vox_history\": self.Vox_history,\n \"mf_history\": self.mf_history,\n \"mox_history\": self.mox_history,\n \"rdot_history\": self.rdot_history,\n \"cstr_history\": self.cstr_history,\n \"of_history\": self.of_history,\n \"Vf_history\": self.Vf_history\n }\n self.img_list = mod_plot.gen_img_list(fig, self.img_list, dic_dat, **cond)\n return self.img_list", "def plot_sample(x):\n plt.imshow(x[:,:,0])\n plt.title(\"gasf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,1])\n plt.title(\"gadf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,2])\n plt.title(\"mtf\")\n plt.colorbar()\n plt.show()", "def plot_some(*arr, **kwargs):\n title_list = 
kwargs.pop('title_list',None)\n pmin = kwargs.pop('pmin',0)\n pmax = kwargs.pop('pmax',100)\n cmap = kwargs.pop('cmap','magma')\n imshow_kwargs = kwargs\n return _plot_some(arr=arr, title_list=title_list, pmin=pmin, pmax=pmax, cmap=cmap, **imshow_kwargs)" ]
[ "0.6392929", "0.6309091", "0.62998956", "0.62422526", "0.62251675", "0.59421015", "0.59245807", "0.590018", "0.58759063", "0.5816621", "0.5809763", "0.5801232", "0.57604563", "0.57517", "0.5740631", "0.57335705", "0.5718802", "0.5717321", "0.56906945", "0.56868666", "0.56854093", "0.56494015", "0.5642458", "0.5640442", "0.5626862", "0.56254816", "0.55999374", "0.557818", "0.5575727", "0.5573572" ]
0.6703569
0
Parses pidgin's html-formatted logfiles. HTML within messages is converted to normal text, so messages about HTML code will get lost
def parse_html(root, filename): root_filename = os.path.join(root, filename) match_date = regex_date.findall(filename) if not match_date: raise Exception(root_filename, 'r') year = int(match_date[0][0]) month = int(match_date[0][1]) day = int(match_date[0][2]) file = open(root_filename) lines = file.readlines() for line in lines[1:]: match_time = regex_html_time.match(line) if match_time: hour = int(match_time.group(1)) minute = int(match_time.group(2)) second = int(match_time.group(3)) time = datetime.datetime(year, month, day, hour, minute, second) timestamp = calendar.timegm(time.utctimetuple()) match_html = regex_html.match(line) if match_html: name = match_html.group(1) message_text = html2text.html2text(match_html.group(2)).replace("\\n", "\n").strip() add_message(name, timestamp, message_text, root) else: match_rest = regex_html_rest.match(line) message_text = None if match_rest: message_text = html2text.html2text(match_rest.group(1)).replace("\\n", "\n").strip() add_message(None, timestamp, message_text, root)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strip_logfile_html(text):\n out_text = \"\"\n buff = \"\"\n start_tag = \"\"\n end_tag = \"\"\n context = \"none\"\n for i in range(len(text)):\n c = text[i]\n # print \"c = \"+str(c)+\" context = \"+str(context)\n if c == \"<\":\n if context == \"none\":\n # Possible start of a tag, depending on\n # next character\n context = \"putative_tag\"\n buff = c\n else:\n # Everything up to this needs to\n # be dumped directly to output\n out_text = out_text + escape_xml_characters(buff)\n elif context == \"putative_tag\":\n buff = buff + c\n if c.isalpha():\n context = \"start_tag\"\n elif c == \"/\":\n context = \"end_tag\"\n elif c == \"!\":\n context = \"comment_tag\"\n else:\n # Not a tag so dump it\n context = \"none\"\n out_text = out_text + escape_xml_characters(buff)\n elif context == \"start_tag\" or context == \"end_tag\" or context == \"comment_tag\":\n buff = buff + c\n if c == \">\":\n if context == \"start_tag\":\n # End of a start tag\n # Process it and see if we can\n # salvage something\n salvage_text = salvage_tag_data(buff)\n if salvage_text != \"\":\n out_text = out_text + escape_xml_characters(salvage_text)\n # Reset the buffer\n context = \"none\"\n buff = \"\"\n elif context == \"end_tag\":\n # End of an end tag\n # Throw this away (for now)\n context = \"none\"\n buff = \"\"\n elif context == \"comment_tag\":\n # End of a comment\n # Throw this away (for now)\n context = \"none\"\n buff = \"\"\n else:\n # Nothing special about this\n # Add to the output\n out_text = out_text + escape_xml_characters(c)\n # Finished - append the remaining buffer\n out_text = out_text + escape_xml_characters(buff)\n return remove_blank_lines(out_text)", "def parse(message):\n html = render(message['text'])\n\n return html", "def _html(self, text):\r\n html = URL_REGEX.sub(self._parse_urls, text)\r\n html = USERNAME_REGEX.sub(self._parse_users, html)\r\n html = LIST_REGEX.sub(self._parse_lists, html)\r\n return HASHTAG_REGEX.sub(self._parse_tags, html)", "def clean_html(message):\n all_lines = []\n started_html = False\n finished_with_html_tag = False\n html_part = []\n for idx, line in enumerate(message.split(\"\\n\")):\n if re.search(r\"<.*?html.*?>\", line):\n started_html = True\n html_part.append(line)\n else:\n if started_html:\n html_part.append(line)\n else:\n all_lines.append(line)\n if \"</html>\" in line:\n finished_with_html_tag = True\n if finished_with_html_tag:\n all_lines.append(clean_text_from_html_tags(\"\\n\".join(html_part)))\n html_part = []\n finished_with_html_tag = False\n started_html = False\n if len(html_part) > 0:\n all_lines.extend(html_part)\n return delete_empty_lines(\"\\n\".join(all_lines))", "def clean_text_from_html_tags(message):\n regex_style_tag = re.compile('<style.*?>[\\\\s\\\\S]*?</style>')\n message = re.sub(regex_style_tag, \" \", message)\n regex_script_tag = re.compile('<script.*?>[\\\\s\\\\S]*?</script>')\n message = re.sub(regex_script_tag, \" \", message)\n regex_html_tags = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\n message = re.sub(regex_html_tags, \" \", message)\n return message", "def process_uploaded_logs():\n os_type = request.forms.get('os_type')\n log_file = request.files.get('log_file')\n raw_text = request.forms.get('message')\n return_val = {\n 'page': 'upload_logs',\n 'raw_logs': raw_text,\n 'log_entries': Markup(''),\n }\n\n if log_file:\n message = str(log_file.file.read(), 'utf-8')\n elif raw_text.strip():\n message = raw_text\n else:\n return_val['flash'] = {\n 'content': 'Please upload a file or paste 
logs.',\n 'cls': 'error',\n }\n return return_val\n\n try:\n parsed_message = parsing_lib.LogParser.parse(message, os_type)\n log_entries = parsing_lib.LogParser.convert_to_html(parsed_message)\n return_val['log_entries'] = Markup(log_entries)\n except Exception as e:\n return_val['flash'] = {\n 'content': 'Log format error: %s' % str(e),\n 'cls': 'error',\n }\n finally:\n return return_val", "def parse_html(self):\n if self.file_extension == '.czm': # Caso de fichero comprimido czm.\n folder_path = extract_file(self.input_file) # Descomprime el archivo de entrada.\n self.html_path = find_extension(folder_path, '.html') # Busca el html en el directorio de extracción.\n else: # Caso de html proporcionado directamente.\n self.html_path.append(self.input_file)\n if not self.html_path: # En caso de que no exista ningún html.\n raise IOError('html file not found.')\n for path in self.html_path: # Almacena cada uno de los html parseados en un diccionario.\n html_file = open(path, encoding=\"utf8\") # Almacena los datos del html.\n parsed_html = BeautifulSoup(html_file, \"lxml\") # Hay que instalar lxml.\n self.parsed_html_dic.update({os.path.splitext(os.path.basename(path))[0]:parsed_html})", "def htmlParsePage(page):\n if 'parsedHtml' not in page:\n logging.debug('Parsing HTML')\n html = page['data']\n html = html.replace(' xmlns=\"http://www.w3.org/1999/xhtml\"', '')\n html = removeThreeByteUtf(html)\n page['parsedHtml'] = BeautifulSoup(html)", "def FilterRawEmail(raw_msg):\r\n links = []\r\n soup = BeautifulSoup(raw_msg, features=\"lxml\")\r\n for a_tag in soup.find_all(\"a\", href=True):\r\n link = a_tag[\"href\"]\r\n if (len(link) < 10):\r\n continue\r\n else:\r\n print(\"Before Cleaning: \", link, end=\"\\n\\n\")\r\n clean_link = parse.unquote_plus(quopri.decodestring(link).decode('utf-8'))\r\n print(\"Link: \", clean_link, end = \"\\n\\n\")\r\n links.append(clean_link)\r\n return links\r\n\r\n\r\n# =============================================================================\r\n# =============================================================================\r\n\r\n\r\n def WriteToFile(msg, file_name):\r\n \"\"\"Write out a message to a file for debugging purposes.\r\n Args:\r\n msg: a message object\r\n file_name: the output file name\r\n Returns:\r\n None\r\n \"\"\"\r\n out_msg = str(msg)\r\n file = open(file_name, \"w\")\r\n file.write(str(decoded_msg))", "def GetErrorFromHtml(data):\n pattern = re.compile('\\n')\n data = pattern.sub('', data)\n # Fetch error message.\n pattern = re.compile('<title>(.*)</title>|<TITLE>(.*)</TITLE>')\n msg = pattern.findall(data)\n if msg:\n for item in msg[0]:\n if item:\n msg = item\n # Cut unnecessary wording.\n pattern = re.compile('Error: |ERROR: ')\n msg = pattern.sub('', msg, count=1)\n # Fetch detail for the error message\n pattern = re.compile(('<blockquote><H1>.*</H1>(.*)<p></blockquote>|'\n '<H1>.*</H1>(.*)</BODY>'))\n msg_detail = pattern.findall(data)\n if isinstance(msg_detail, list) and msg_detail:\n for item in msg_detail[0]:\n if item:\n msg_detail = item\n msg_detail = msg_detail.strip('.')\n # Cut any HTML tags that appear in the message.\n pattern = re.compile('<.?H2>|<.?p>|<.?A.*>|<.?P.*>|<.?HR.*>')\n msg_detail = pattern.sub(' ', msg_detail).strip(' ')\n if msg_detail == msg:\n msg_detail = ''\n else:\n msg_detail = ''\n\n if msg:\n if not msg_detail:\n return '%s.' % msg\n return '%s. %s.' 
% (msg, msg_detail)\n else:\n # Check for non standard HTML content, with just the <body>.\n pattern = re.compile('<body>(.*)</body>')\n msg = pattern.findall(data)\n if msg:\n return msg[0]\n return ''", "def parsingconvtext(retrievedtext,customtextlist):\r\n if not retrievedtext: #in case empty text \r\n retrievedtext=changenonetostr(retrievedtext)\r\n newtext=BeautifulSoup(retrievedtext).get_text() \r\n #newtext=changenonetostr(retrievedtext)\r\n #newtext=BeautifulSoup(newtext).get_text() \r\n #remove http links\r\n newtext=re.sub(r'http\\S+', '', newtext)\r\n newtext=re.sub(r'\\r\\r\\r\\n', ' ', newtext)\r\n #remove LL specific text\r\n if customtextlist:\r\n for i in customtextlist:\r\n newtext=re.sub(i, '', newtext)\r\n return newtext", "def _parse_message(self, soup):\n kind, = soup.attrs[u'class']\n title = soup.findChild().text\n body = ''.join(t.text for t in soup.findChildren()[1:])\n message = dict(kind=kind, title=title, body=body)\n for val in message.values():\n assert type(val) == str\n return message", "def process_doc_html(self, doc_in):\n self.feed(doc_in) #SGMLParser call\n self.close() #SGMLParser call\n self.hand_off_temp_pieces('to_doc_pieces')\n self.all_pieces = self.all_pieces[:-16] # drop </body></html>\n return self.all_pieces", "def convert_html():\n return", "def parse_pocket_html_export(html_file):\n\n html_file.seek(0)\n pattern = re.compile(\"^\\\\s*<li><a href=\\\"(.+)\\\" time_added=\\\"(\\\\d+)\\\" tags=\\\"(.*)\\\">(.+)</a></li>\", re.UNICODE)\n for line in html_file:\n # example line\n # <li><a href=\"http://example.com/ time_added=\"1478739709\" tags=\"tag1,tag2\">example title</a></li>\n match = pattern.search(line)\n if match:\n url = match.group(1).replace('http://www.readability.com/read?url=', '') # remove old readability prefixes to get original url\n time = datetime.fromtimestamp(float(match.group(2)))\n tags = match.group(3)\n title = match.group(4).replace(' — Readability', '').replace('http://www.readability.com/read?url=', '')\n \n yield {\n 'url': url,\n 'timestamp': str(time.timestamp()),\n 'title': title or None,\n 'tags': tags or '',\n 'sources': [html_file.name],\n }", "def convert(md_text):\n # separate by line\n md_text = md_text.split('\\n')\n\n # save the html content for return\n html_text = ''\n\n # begin looping from the first line\n index = -1\n while index < len(md_text) - 1:\n index += 1\n line = md_text[index]\n\n # code segment\n if len(line) >= 3 and line[:3] == '```':\n html_line = \"\"\n language = line[3:].replace(' ', '')\n if len(language) == 0:\n language = False\n order_index = index + 1\n find_end = False\n while order_index < len(md_text):\n if md_text[order_index][:3] == '```':\n find_end = True\n break\n else:\n temp_line = md_text[order_index]\n temp_line = temp_line.replace('<', '&lt;')\n temp_line = temp_line.replace('>', '&gt;')\n temp_line = temp_line.replace(' ', '&nbsp;')\n html_line += temp_line + '<br />'\n order_index += 1\n\n if find_end:\n # if language is not False:\n # html_text += ('<pre><code class=\"' + language + '\">' + html_line + '</code></pre>')\n # else:\n html_text += ('<code>' + html_line + '</code>')\n # print(language)\n index = order_index\n continue\n\n # inline code\n\n\n # header\n is_header, html_line = check_header(line)\n if is_header:\n html_text = html_text + html_line\n continue\n\n # horizontal rule\n is_horizontal_rule, html_line = check_horizontal_rule(line)\n if is_horizontal_rule:\n html_text = html_text + html_line\n continue\n\n # paragraph\n line = 
check_paragraph(line)\n\n # deal with ordered list\n if len(line.split('.')) != 0 and '1.' == line[:2]:\n html_line = '<ol>'\n order_index = index\n while order_index < len(md_text)\\\n and len(md_text[order_index].split('.')) != 0\\\n and (str(order_index - index + 1) == md_text[order_index].split('.')[0]\n or '1' == md_text[order_index].split('.')[0]):\n to_replace = [str(order_index - index + 1) + '.', '1.']\n for replace_content in to_replace:\n md_text[order_index] = md_text[order_index].replace(replace_content, '')\n html_line = html_line + '<li>' + md_text[order_index] + '</li>'\n\n order_index += 1\n index = order_index - 1\n html_line = html_line + '</ol>'\n line = html_line\n\n # deal with unordered list\n is_unordered_list, html_line = check_unordered_list(line)\n if is_unordered_list:\n line = html_line\n\n # deal with strong\n line = strong(line)\n\n # Scratch\n line = scratch(line)\n\n # italics\n line = italics(line)\n\n # image\n while len(re.match(r'((?P<pre_text>.*)!\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line).group())\\\n != 0:\n match = re.match(r'((?P<pre_text>.*)!\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line)\n pre_text = match.group('pre_text')\n alt_text = match.group('alt_text')\n link = match.group('link')\n after_text = match.group('after_text')\n img_html = '<img src=\"' + link + '\" alt=\"' + alt_text + '\">'\n line = pre_text + img_html + after_text\n\n # link\n while len(re.match(r'((?P<pre_text>.*)\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line).group())\\\n != 0:\n match = re.match(r'((?P<pre_text>.*)\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line)\n pre_text = match.group('pre_text')\n alt_text = match.group('alt_text')\n link = match.group('link')\n after_text = match.group('after_text')\n img_html = '<a href=\"' + link + '\">' + alt_text + '</a>'\n line = pre_text + img_html + after_text\n\n html_text = html_text + line\n if not is_unordered_list:\n html_text = html_text + '<br>'\n\n return html_text", "def parse_html(self):\n\n try:\n parser = HtmlParser(self.url)\n\n parser.set_pattern(self.pattern)\n parser.set_urls(self.spider_config)\n parser.set_next_depth(self.depth)\n parser.feed(self.page)\n parser.close()\n except UnicodeDecodeError as e:\n logging.error('Thread:{} parse {} failed, msg:{}'.format(self.thread_id, self.url, e))\n return False\n\n return True", "def render(txt):\n\n # Removing links to other channels\n txt = re.sub(r'<#[^\\|]*\\|(.*)>', r'#\\g<1>', txt)\n\n # Removing links to other users\n txt = re.sub(r'<(@.*)>', r'\\g<1>', txt)\n\n # handle named hyperlinks\n txt = re.sub(r'<([^\\|]*)\\|([^\\|]*)>', r'<a href=\"\\g<1>\" target=\"blank\">\\g<2></a>', txt)\n\n # handle unnamed hyperlinks\n txt = re.sub(r'<([^a|/a].*)>', r'<a href=\"\\g<1>\" target=\"blank\">\\g<1></a>', txt)\n\n # handle ordered and unordered lists\n for delimeter in LIST_DELIMITERS:\n slack_tag = delimeter\n class_name = LIST_DELIMITERS[delimeter]\n\n # Wrap any lines that start with the slack_tag in <li></li>\n list_regex = u'(?:^|\\n){}\\s?(.*)'.format(slack_tag)\n list_repl = r'<li class=\"list-item-{}\">\\g<1></li>'.format(class_name)\n txt = re.sub(list_regex, list_repl, txt)\n\n # hanlde blockquotes\n txt = re.sub(u'(^|\\n)(?:&gt;){3}\\s?(.*)$', r'\\g<1><blockquote>\\g<2></blockquote>', txt, flags=re.DOTALL)\n txt = re.sub(u'(?:^|\\n)&gt;\\s?(.*)\\n?', r'<blockquote>\\g<1></blockquote>', txt)\n\n # handle code blocks\n txt = re.sub(r'```\\n?(.*)```', r'<pre>\\g<1></pre>', 
txt, flags=re.DOTALL)\n txt = re.sub(r'\\n(</pre>)', r'\\g<1>', txt)\n\n # handle bolding, italics, and strikethrough\n for wrapper in FORMATTERS:\n slack_tag = wrapper\n html_tag = FORMATTERS[wrapper]\n\n # Grab all text in formatted characters on the same line unless escaped\n regex = r'(?<!\\\\)\\{t}([^\\{t}|\\n]*)\\{t}'.format(t=slack_tag)\n repl = r'<{t}>\\g<1></{t}>'.format(t=html_tag)\n txt = re.sub(regex, repl, txt)\n\n # convert line breaks\n txt = txt.replace('\\n', '<br />')\n\n # clean up bad HTML\n parser = CustomSlackdownHTMLParser(txt)\n txt = parser.clean()\n\n # convert multiple spaces\n txt = txt.replace(r' ', ' &nbsp')\n\n return txt", "def html_to_text(html_message):\r\n process = Popen(\r\n ['lynx', '-stdin', '-display_charset=UTF-8', '-assume_charset=UTF-8', '-dump'],\r\n stdin=PIPE,\r\n stdout=PIPE\r\n )\r\n # use lynx to get plaintext\r\n (plaintext, err_from_stderr) = process.communicate(\r\n input=html_message.encode('utf-8')\r\n )\r\n\r\n if err_from_stderr:\r\n log.info(err_from_stderr)\r\n\r\n return plaintext", "def _parse_user_messages_page(self, html):\n\n if not html:\n return None\n\n dom = BeautifulSoup(html, 'html.parser')\n\n data = self._parse_logged_in_user(dom)\n\n return data", "def clean_text(text2, project_key):\n\n text = text2\n text = return_text_without_headlines(text)\n # remove text written between double curly braces\n text = re.sub(r\"{{code}}.*{{code}}\", \"code.\", text)\n text = re.sub(r\"{code.*{code}\", \"code.\", text)\n text = re.sub(r\"{code:java}.*{code:java}\", \"code.\", text)\n text = re.sub(r\"{noformat}.*{noformat}\", \"code.\", text)\n text = re.sub(r\"{{monospaced}}.*{{monospaced}}\", \"code.\", text)\n text = re.sub(r'<script type=\"text/javascript\">.*</noscript>', 'code.', text)\n text = re.sub(r\"'''.*'''\", \"code\", text)\n text = text.replace('<p>&nbsp;</p>', \"\")\n text = text.replace('<div>&nbsp;</div>', \"\")\n text = text.replace('&nbsp;', \" \")\n # remove URLs link\n text = re.sub(r\"<a href=.*</a>\", \"url. \", text)\n text = re.sub(r\"http\\S+\", \"url. \", text)\n text = re.sub(r\"hdfs://\\S+\", \"url. \", text)\n text = re.sub(r\"tcp://\\S+\", \"url. \", text)\n text = re.sub(r\"webhdfs://\\S+\", \"url. \", text)\n text = re.sub(r\":/\\S+\", \"url. \", text)\n text = re.sub(r\"\\S+.com \", \"url. \", text)\n text = re.sub(r\"N/A]\", \" \", text)\n text = \" \".join(x for x in text.split() if not x.endswith('.com'))\n text = \" \".join(x for x in text.split() if not x.endswith('.com*'))\n text = \" \".join(x for x in text.split() if not x.endswith('.org'))\n text = \" \".join(x for x in text.split() if not x.endswith('.xml'))\n text = \" \".join(x for x in text.split() if not x.startswith('*javax.xml.'))\n text = \" \".join(x for x in text.split() if not x.startswith('javax.xml.'))\n # remove Image attachments\n text = re.sub(r\"<p><img alt=.></p>\", \"image.\", text)\n text = re.sub(r\"{}-\\d+\".format(project_key), \"issue\", text)\n # remove date\n text = re.sub(r'(\\w{4})-(\\d{1,2})-(\\d{1,2}) ', 'date.', text)\n text = re.sub(r'(\\w{3,4,5})-(\\d{1,2})-(\\d{4})', 'date.', text)\n text = re.sub(r'(\\d{1,2})/(\\d{1,2})/(\\d{4})', 'date.', text)\n text = re.sub(r'(\\w{3}). (\\d{1,2}), (\\d{4})', 'date.', text)\n text = re.sub(r'(\\w{3}). 
(\\d{1,2}) (\\d{4})', 'date.', text)\n text = re.sub(r'&lt;= Today’s Date AND', 'date.', text)\n text = re.sub(r'yyyy-mm-dd', 'date', text)\n # remove text written between small braces\n text = re.sub(r'<.+?>', \"\", text)\n text = text.replace(\"e.g.,\", \" \")\n text = text.replace(\"e.g.\", \" \")\n text = text.replace(\"i.e.,\", \" \")\n text = text.replace(\"i.e.\", \" \")\n # replace non-breaking space with regular space\n text = text.replace(u'\\xa0', u' ')\n # replace all punctuations with space\n text = text.replace('-->', \" \")\n text = text.replace('--', \" \")\n text = text.replace('-', \" \")\n text = text.replace('/', \" \")\n text = text.replace('&amp;', \" \")\n text = text.replace(' * ', \". \")\n text = re.sub(r\"\\\"|\\#|\\“|\\*|\\'|\\]|\\^|\\`|\\(|\\)|\\~\", \"\", text)\n text = re.sub(r\"\\\"|\\$|\\%|\\&|\\/|\\|\\=|\\>|\\<|\\@|\\[|\\\\|\\]|\\{|\\||\\}\", \" \", text)\n text = text.replace('$', \"\")\n text = text.replace('?', \".\")\n text = text.replace('+', \" \")\n text = re.sub(r\" \\d\\.\\d\\.N \", \" \", text)\n text = re.sub(r\" \\d\\.\\d\\.b.\", \" \", text)\n text = re.sub(r\" \\d\\.\\d\\.b \", \" \", text)\n text = re.sub(r\"\\d\\.\\d\\.N\", \" \", text)\n text = re.sub(r\"\\d\\.\\d\\.X\", \" \", text)\n text = re.sub(r\"v\\d\\.\\d\\.\\d+\", \" \", text)\n text = re.sub(r\"V\\d\\.\\d\\.\\d+\", \" \", text)\n text = re.sub(r\"v\\d\\.\\d+\", \" \", text)\n text = re.sub(r\"V\\d\\.\\d+\", \" \", text)\n text = re.sub(r\"\\d\\.\\d+\", \" \", text)\n text = re.sub(r\"\\d\\.\\d\\.\\d+\", \" \", text)\n text = text.replace(\"V1\", \" \")\n text = text.replace(\"v1\", \" \")\n # remove digits from text\n text = re.sub(r\"\\d+\", \"\", text)\n text = text.replace('lt;=', \" \")\n text = text.replace('.!', \".\")\n text = text.replace('!.', \".\")\n text = text.replace('!', \".\")\n text = text.replace('... ', \". \")\n text = text.replace('.. ', \". \")\n text = text.replace('..', \".\")\n text = text.replace('. . . ', \". \")\n text = text.replace('. . ', \". \")\n text = text.replace('. . ', \". \")\n text = text.replace(' .', \".\")\n text = text.replace('. . ', \". \")\n text = text.replace('. . ', \". 
\")\n text = text.replace(':.', \".\")\n text = text.replace(' :', \" \")\n text = text.lower()\n text = text.replace('..', \".\")\n text = ' '.join(text.split())\n\n return text", "def parse(self, content):\n pass", "def parse_text(self, page):\n text = page.find(self.tag_prefix + self.revision_tag).find(self.tag_prefix + self.text_tag).text\n title = page.find(self.tag_prefix + self.title_tag).text\n categories = []\n #\n text = self.parse_archivo(text)\n text = self.parse_foto(text)\n text = self.parse_by_line(text)\n text = self.parse_link(text)\n text = self.parse_url(text)\n text = self.parse_fecha(text)\n text = self.parse_bracketed_word(text)\n #\n if text:\n categories = re.findall(self.category_finder_regex, text)\n #\n text = self.parse_category(text)\n text = self.parse_other_language(text)\n text = self.parse_table_regex(text)\n text = self.parse_ver_fuente(text)\n text = self.remove_extra_text(text)\n text = self.remove_extra_characters(text)\n\n categorias = []\n for cat in categories:\n categorias.append(cat[6])\n\n if text:\n if 'REDIRECT' in text or 'redirect' in text:\n return None\n\n return Article(title=title, content=text, categories=categorias)", "def parse_html(html):\n parser = lxml.html.HTMLParser(encoding='utf8')\n return lxml.html.fromstring(html.encode('utf8'), parser=parser)", "def remove_html( html):\n return html2txt(html)", "def parse_response_error(html_text: str) -> str:\n html = BeautifulSoup(markup=html_text, features=\"html.parser\")\n inner_html = BeautifulSoup(markup=html.p.text, features=\"html.parser\")\n message = inner_html.text if inner_html.p is None else inner_html.p.text\n if \"face_not_found\" in message:\n message = \"Could not find a face in the image.\"\n elif \"multiple_faces\" in message:\n message = \"The image has more than one person.\"\n elif \"quality_failed\" in message:\n message = \"The provided image does not have enough quality.\"\n return message", "def parseOutText(f):\n\n\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n ### split off metadata\n \n content = re.split(\"X-FileName:.*$\", all_text, flags=re.MULTILINE, maxsplit=1)\n words = \"\"\n if len(content) > 1:\n text_string = content[1]\n\n ## remove mails that are forwarded or to which are responded\n # e.g. 
---------------------- Forwarded\"\n text_string = re.split(\"-*\\sForwarded\", text_string, maxsplit=1)[0]\n\n # -----Original Message-----\n text_string = re.split(\"-*\\Original\\sMessage\", text_string, maxsplit=1)[0]\n\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # To:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n # or\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # to:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n \n text_string = re.split(\"((.*\\n){2})[Tt]o:\\s\", text_string, maxsplit=1)[0]\n\n ### remove punctuation\n # should be autopmatically by scikit learn\n #text_string = text_string.translate(string.maketrans(\"\", \"\"), string.punctuation)\n\n ### project part 2: comment out the line below\n #words = text_string\n\n ### split the text string into individual words, stem each word,\n ### and append the stemmed word to words (make sure there's a single\n ### space between each stemmed word)\n from nltk.stem.snowball import SnowballStemmer\n\n stemmer = SnowballStemmer(\"english\")\n words = [stemmer.stem(word) for word in text_string.split()]\n\n\n\n return \" \".join(words)", "def parse(self):\n i = 0\n while i < len(self.__lines):\n line = self.__lines[i]\n dt = re.match(r\"(\\d{4}-\\d{1,2}-\\d{1,2}\\s\\d{1,2}:\\d{1,2}:\\d{1,2})\", line)\n if not dt:\n i += 1\n continue\n log = {\n \"datetime\": dt.group()\n }\n line = line[dt.end()+1:].rstrip(\"\\n\")[::-1]\n qq_flag = line.find(\"(\")\n log[\"qq\"] = line[qq_flag-1:0:-1]\n log[\"name\"] = line[:qq_flag:-1].strip(\" \")\n i += 1\n log[\"content\"] = self.__lines[i].rstrip(\"\\n\")\n while self.__lines[i+1] != \"\\n\":\n i += 1\n log[\"content\"] += \" \" + self.__lines[i].rstrip(\"\\n\")\n self.__logs.append(log)\n i += 2", "def preprocess(text):\r\n\r\n #Regex to remove URL and @ symbol\r\n regex = '@\\S*|http\\S*|www\\S*'\r\n preprocessed_text = re.sub(regex, '', text)\r\n preprocessed_text = deEmojify(preprocessed_text)\r\n preprocessed_text = strip_html(preprocessed_text)\r\n\r\n return preprocessed_text", "def extract_raw_text(soup, url):\n \n title_class = \"nom-notice\"\n title = soup.find(class_=title_class)\n raw_infos = {}\n raw_infos['name'] = title.contents[0].replace(u'\\xa0', ' ')\n \n notice = soup.find(class_=\"notice\")\n \n summary = notice.find(class_=\"chapo\")\n if summary is not None:\n first_para = summary.find_all('p', recursive=False)[-1]\n first_para.tag = 'div'\n first_para['class'] = 'summary'\n raw_infos['summary'] = unicode(first_para)\n \n else:\n raw_infos['summary'] = unicode('')\n\n article = notice.find(class_='texte')\n if article is not None:\n article['class'] = 'article'\n raw_infos['article'] = unicode(article)\n \n sources = notice.find(class_='sources')\n raw_infos['sources'] = unicode(sources)\n \n works = notice.find(class_='oeuvres')\n if works is not None:\n works['class'] = 'works'\n raw_infos['works'] = unicode(works)\n \n # In function that writes, encode everything to bytes! .encode('utf-8')\n return raw_infos" ]
[ "0.6302063", "0.6082321", "0.60166854", "0.59433186", "0.59056187", "0.57682514", "0.57220733", "0.56769717", "0.5576809", "0.5557175", "0.55498254", "0.5516565", "0.5464202", "0.5434003", "0.5428622", "0.5424175", "0.53731394", "0.5366191", "0.5362259", "0.5360732", "0.5359679", "0.53495157", "0.53332895", "0.533157", "0.53281116", "0.5325022", "0.5274929", "0.52739394", "0.52719676", "0.52563316" ]
0.6280499
1
Asks user for own nicks after listing all encountered ones
def names_interaction(): already_printed = [] for protocol in protocols: for account in protocol.accounts: for contact in account.contacts: for message in contact.messages: if message.name not in already_printed: already_printed.append(message.name) print(message.name) nicks = input("Own nicks, comma separated: ") nicks = nicks.split(",") nicks = [nick.strip() for nick in nicks] return nicks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def take_sticks_ai(self, sticks):\n print(\"\\nThere are {} sticks on the board\".format(sticks))\n sticks_taken = random.choice(self.hats[sticks]['content'])\n self.hats[sticks]['choice'] = sticks_taken\n sticks -= sticks_taken\n return sticks", "def user_picks():\r\n print (\"Enter the second to last posted Fantasy 5 lotto numbers from 1 to 42:\")\r\n ui = []\r\n while len(ui) < 5:\r\n print (len(ui) + 1,)\r\n try:\r\n i = int(input(\"--> \" ))\r\n # check if i is unique and has a value from 1 to 42\r\n # and is an integer, otherwise don't append\r\n if (i not in ui) and (1 <= i <= 42): \r\n ui.append(i)\r\n except:\r\n print (\"Enter an integer number!\")\r\n return ui", "def get_user_list(self):\n self.user_list = db.get_user_list()\n for each in self.user_list:\n print each[1] # username\n while(True):\n selection = raw_input(\"Enter username to use\")\n if selection in self.user_list:\n return selection", "def k(self, irc, msg, args, nicks):\n\n if(self._checkCPO(irc, msg)):\n \n hostmasks = []\n \n for nick in nicks:\n prefix = irc.state.nickToHostmask(nick)\n user = ircutils.userFromHostmask(prefix)\n host = ircutils.hostFromHostmask(prefix)\n \n hostmask = '*!*@%s' % host\n hostmasks.append(hostmask)\n \n irc.queueMsg(ircmsgs.bans(msg.args[0], hostmasks))\n irc.queueMsg(ircmsgs.kicks(msg.args[0], nicks, 'Your behavior is not conducive to the desired environment.'))\n \n def unban():\n irc.queueMsg(ircmsgs.unbans(msg.args[0], hostmasks))\n \n schedule.addEvent(unban, time.time() + 900)\n \n irc.noReply()", "def getPicks(self):\n return self.picks", "def take_sticks(self):\n print(\"\\nThere are {} sticks on the board\".format(self.sticks))\n while True:\n try:\n sticks_taken = int(input(\"{} How many sticks do you take (1-3)? \".format(self.player_turn)))\n except ValueError:\n print(\"Please enter an integer between 1 and 3.\")\n continue\n if sticks_taken < 1 or sticks_taken > 3:\n print(\"Please enter an integer between 1 and 3.\")\n continue\n else:\n self.sticks -= sticks_taken\n break", "def run_checklist(items):\n\tuser_responses = OrderedDict()\n\n\ttotal_items = 0\n\tfor item in items:\n\t\ttotal_items = total_items + 1\n\t\n\t#Ask questions\n\tcurrent_item_number = 1\n\tfor item in items:\n\t\tprint (\"%i of %i: \" % (current_item_number, total_items) + item)\n\t\tanswer = raw_input(\"> \")\n\t\tuser_responses[item] = answer\n\t\tcurrent_item_number = current_item_number + 1\n\t#Todo: Plain text Antyhing elsE?\n\tprint (\"\\nChecklist complete.\")\n\treturn user_responses", "def apply_kicks(self):\n\n\n for cor in self.orbit.corrs:\n if cor.ui.alarm:\n self.stop_feedback()\n logger.info(\"apply_kicks: kick exceeds limits. Try 'Uncheck Red' and recalculate correction\")\n self.error_box(\"kick exceeds limits. 
Try 'Uncheck Red' and recalculate correction\")\n return 0\n kick_table = []\n for cor in self.orbit.corrs:\n kick_mrad = cor.ui.get_value()\n logger.debug(cor.id + \" set: %s --> %s\" % (cor.ui.get_init_value(), kick_mrad))\n try:\n cor.mi.set_value(kick_mrad)\n kick_table.append({\"corrector\": cor.id, \"value\": kick_mrad})\n except Exception as e:\n logger.error(cor.id + \" apply_kicks Error: \" + str(e))\n self.cor_hist.append(kick_table)", "def getInterestedUsers():", "def are_valid(nicks):\n if not len(nicks) == len(set(nicks)):\n return False\n used_nicks = []\n for nick in nicks:\n if nick not in used_nicks:\n used_nicks.append(nick)\n else:\n return False\n\n return True", "def my_ticket_attempts(lottery_win):\n\n my_ticket = []\n while len(my_ticket) < 4:\n pulled_value = choice(lottery_values)\n if pulled_value not in my_ticket:\n my_ticket.append(pulled value)\n\n return my_ticket", "def StrawPicker():\n names_list = []\n print(\"Welcome to the straw picker. Start entering your names. Type 'done' once you're finished.\")\n while True:\n names = input(f\"Enter your name #{len(names_list)}: \")\n if QuBa(names):\n return\n if len(names) < 1:\n print(\"Enter name with more than 2 alphabets.\")\n continue\n if names == 'done':\n break\n names_list.append(names)\n while True:\n print(\"The one with the shortest straw is.. \" + choice(names_list) + \"!\")\n yn = input(\"Roll again? \").lower()\n if yn.startswith('y'):\n continue\n elif yn.startswith('n'):\n yn = input(\"Enter again? \")\n if yn.startswith('n') or QuBa(yn):\n print(\"OK. Bye!\")\n return\n elif yn.startswith('y'):\n StrawPicker()", "def get_nicklist(self, caller=None):\n if not hasattr(self, \"_nicklist_callers\"):\n self._nicklist_callers = []\n if caller:\n self._nicklist_callers.append(caller)\n super(ServerBot, self).msg(request_nicklist=\"\")\n return", "def run_im_bored():\n \n greet_user()\n \n bored = True\n \n while bored:\n generate_suggestion()\n bored = ask_to_continue()", "def refresh_userlist(self):\n if self._userlist is not None:\n self._userlist.delete(0, Tix.END)\n if self._channel in self._frame.client.channels:\n ops = [ ]\n voices = [ ]\n users = [ ]\n l = self._frame.client.channels[self._channel].nicknames\n for name in l:\n ni = l[name]\n mode = l[name].mode\n if 'o' in mode:\n ops.append(\"@\" + ni.nickname)\n elif 'v' in mode:\n voices.append(\"+\" + ni.nickname)\n else:\n users.append(ni.nickname)\n l = (sorted(ops, key=unicode.lower)\n + sorted(voices, key=unicode.lower)\n + sorted(users, key=unicode.lower))\n for name in l:\n self._userlist.insert(Tix.END, name)\n self._userlistlabel.configure(\n text=\"%d Users, %d OPs\" % (len(l), len(ops)))", "def get_user_list(question):\n return [int(s) for s in input(question).split()]", "def print_recommendations_from_user_input(self):\n\n getting_name = True\n\n print(\"Please enter username and press enter:\\n\")\n\n while getting_name:\n username = input()\n\n redditor = self(username)\n\n if not redditor.username:\n print(\"Redditor does not exist. 
Please enter again.\\n\")\n continue\n\n break\n\n redditor.print_recommendations()", "def passengers(not_checked_in, checked_in):\n while not_checked_in:\n current_passenger = not_checked_in.pop() # remove last item on the list\n\n # Simulate checking a passenger inself.\n print(\"Checking in passenger: \" + current_passenger)\n checked_in.append(current_passenger) # Add to the check in list", "def show_checked_in_passengers(checked_in):\n print(\"\\nThe following passengers have been checked in: \")\n for passengers in checked_in:\n print(passengers)", "def kudos(self, work):\r\n \r\n return utils.kudos(work, self)", "def getLikedOkCupidUsers(self):\n\t\tself.logger.info(\"Get all liked OkCupid users\")\n\t\tusers = self.session.query(Models.Okcupid).filter(Models.Okcupid.liked==True).all()\n\t\treturn users", "def get_checklist(twitchid):\n\twith postgres, postgres.cursor() as cur:\n\t\tcur.execute(\"select checklist from mustard.users where twitchid=%s\", (twitchid,))\n\t\treturn cur.fetchone()[0]", "async def ign_whoami(self, ctx):\n user = ctx.message.author\n igns = self.names.get(user.mention)\n if not igns:\n await self.bot.say(\"You have not yet entered any IGN info. :cry:\".format(user.mention))\n else:\n await self.bot.say(self.format_igns(user, igns))", "def _process_user_choice(self):\n verifying_choice = True\n idx = 0\n print(\"Current occupants: %s\" % self.get_occupants())\n while verifying_choice:\n user_choice = raw_input(\"Choose a hut number to enter (1-5): \")\n # --------------------------------------------------------------\n # try...except illustration for chapter on exception handling.\n # (Attack Of The Orcs v1.1.0)\n # --------------------------------------------------------------\n try:\n idx = int(user_choice)\n except ValueError as e:\n print(\"Invalid input, args: %s \\n\" % e.args)\n continue\n\n try:\n if self.huts[idx-1].is_acquired:\n print(\"You have already acquired this hut. Try again.\"\n \"<INFO: You can NOT get healed in already acquired hut.>\")\n else:\n verifying_choice = False\n except IndexError:\n print(\"Invalid input : \", idx)\n print(\"Number should be in the range 1-5. Try again\")\n continue\n\n return idx", "def user_checkins(screen_name):\n cks = db.checkin\\\n .find({'user.screen_name': screen_name})\\\n .sort('created_at', -1)\\\n .limit(1200)\n return [strip_checkin(c) for c in cks]", "def view_users(stdscr):\n stdscr.clear()\n safe_put(stdscr, \"* marks a user online at last update. 
Hit any key to return to menu.\", (2, 1))\n row = 4\n for user in taunet.users.all():\n if user.is_on:\n safe_put(stdscr, \"*\", (row, 1))\n safe_put(stdscr, user.name, (row, 3))\n row += 1\n stdscr.refresh()\n\n # Wait for any key, then clear and return to menu.\n stdscr.getch()\n stdscr.clear()\n stdscr.refresh()", "def wahlrecht(self, irc, msg, args):\n if self._is_voting_enabled(irc, msg, reply=True):\n user_list = []\n for join_id in self.recently_joined:\n (nick, channel) = self._split_id(join_id)\n if channel == msg.args[0]:\n user_list.append(nick)\n if user_list:\n irc.reply(\"Folgende Mitbürger dürfen leider noch nicht abstimmen: %s\" % \", \".join(user_list))\n else:\n irc.reply(\"Alle anwesenden Mitbürger dürfen abstimmen.\")", "def work(self):\n for pokemon in pokemons().all(): # type: Pokemon\n if not pokemon.is_favorite or not self.ignore_favorites:\n self._nickname_pokemon(pokemon)", "def play():\n display_starting_message()\n print(\"\")\n print(\"*\"*10)\n for question_number, question in enumerate(list_of_questions):\n print(question)\n print(\"\")\n for responses in list_of_questions[question]:\n print(responses)\n pick_one = input(\"pick one: \")\n check_murder_sauce(question, pick_one)\n\n murder_sauce_result(murder_sauce)", "def donick(self, nick, *args, **kwargs):\n pass" ]
[ "0.601555", "0.5969573", "0.576979", "0.5542987", "0.5438632", "0.5384672", "0.53576845", "0.53359205", "0.5284243", "0.5240409", "0.518095", "0.5168703", "0.5161023", "0.5150909", "0.5149311", "0.5141872", "0.5106042", "0.5085934", "0.50680995", "0.5062412", "0.5058632", "0.5054417", "0.5039769", "0.50080365", "0.49648842", "0.49361622", "0.49305964", "0.49276242", "0.49160802", "0.48968688" ]
0.6445882
0
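A hedged illustration of the record above: names_interaction walks a module-level protocols iterable (not shown in the record), nested as protocol → accounts → contacts → messages, printing each unseen message.name before prompting for the user's own nicks. The stub objects below are assumptions made only to show that expected shape and the return value; they would have to live in the same module as the function for its global lookup of protocols to resolve.

from collections import namedtuple

# Invented stand-ins for the chat-log model the function iterates over.
Message = namedtuple("Message", "name")
Contact = namedtuple("Contact", "messages")
Account = namedtuple("Account", "contacts")
Protocol = namedtuple("Protocol", "accounts")

protocols = [
    Protocol(accounts=[
        Account(contacts=[
            Contact(messages=[Message(name="alice"), Message(name="bob"), Message(name="alice")]),
        ]),
    ]),
]

# names_interaction() prints "alice" and "bob" once each, then prompts:
#   Own nicks, comma separated: alice, al
# and returns ["alice", "al"].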
Implement the check_unused_args in superclass.
def check_unused_args(self, used_args, args, kwargs): for k, v in kwargs.items(): if k in used_args: self._used_kwargs.update({k: v}) else: self._unused_kwargs.update({k: v})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_args(self, args_):\n\n pass", "def __init__(self, *unused_args, **unused_kwargs):", "def __check_args(self):\n self.__check_args_type()\n self.__check_args_val()", "def ignore(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def validate_class_args(self, **kwargs):\n pass", "def warning(self, *args, **kwargs): # real signature unknown\n pass", "def warning(self, *args, **kwargs):", "def __checkArgs(self, kwargs):\n requiredArgs = self.__class__.__requiredArgs + \\\n self.__class__.__singleCompArgs if self.singleComp else\\\n self.__class__.__requiredArgs + self.__class__.__doubleCompArgs\n for arg in requiredArgs:\n if arg not in kwargs:\n raise ValueError(\"Essential keyword argument %s missing\" % arg)\n for (k, v) in kwargs.items():\n assert k in self.__class__.__allowedArgs, \"Invalid Argument %s\" % k", "def handle_noargs(self, **options):\n raise NotImplementedError()", "def get_unused_kwargs(self):\n return self._unused_kwargs", "def __getnewargs__(self):\n return ()", "def test_missing_args(self, test, x, y, z=3, _private_arg=3): # noqa: D213, D407", "def extra_args(self):\n return []", "def exclude(self, *args, **kwargs):", "def check_for_unused_names(self):\n for s in self.unused_names:\n self.warning(\"'%s' is unused.\"%s)\n\n# warns for param that specified with -c (but also if name gets defined in __main__,\n# e.g. by default_density=global_params.default_density in a script file\n## for name in self.params():\n## if name in self.context:\n## self.warning(\"'%s' still exists in global_params.context\"%name)\n\n # detect duplicate param value that wasn't used (e.g. specified with after script)\n for name,val in self.params().items():\n if name in self.context:\n if self.context[name]!=self.inspect_value(name):\n self.warning(\"'%s=%s' is unused.\"%(name,self.context[name]))", "def _arg(self, t):\n self.RaiseError(t, \"Arguments should already have been processed\")", "def _arguments(self, t):\n self.RaiseError(t, \"Arguments should already have been processed\")", "def raise_on_kwargs_not_empty(kwargs):\n if kwargs:\n raise SyntaxWarning(f\"Unknown arguments: {kwargs}\")", "def __check_args_val(self):\n if self.__num_prev_scans < 0:\n error_msg = \"num_prev_scans must be greater than or equal to zero\"\n raise ValueError(error_msg)", "def _validate_args(self, args):\r\n invalid_args = [k for k in self.required_params if args.get(k) is None]\r\n if invalid_args:\r\n raise ArgumentError('Missing required options: %s'\r\n % ','.join(invalid_args))", "def check_args(*args: Tuple[Any, ...], **kwargs: Any) -> None:\n\n # We begin by initializing the maximum number of args we will allow at 0. We will iterate\n # this if by chance we see an argument whose name is \"self\".\n max_arg_len = 0\n\n # iterate through every parameter passed in\n for idx, param_name in enumerate(literal_signature.parameters):\n\n if idx == 0 and (param_name == \"self\" or param_name == \"cls\"):\n max_arg_len += 1\n continue\n\n # if this parameter isn't in kwargs, then it's probably in args. However, we can't check\n # directly because we don't have arg names, only the list of args which were passed in.\n # Thus, the way this check works is to return an error if we find an argument which\n # isn't in kwargs and isn't \"self\".\n if param_name not in kwargs and len(args) > max_arg_len:\n traceback_and_raise(\n AttributeError(\n f\"'{param_name}' was passed into a function as an arg instead of a kwarg. 
\"\n f\"Please pass in all arguments as kwargs when coding/using PySyft.\"\n )\n )", "def validate_input(self, *args):\n return", "def test_missing_args_class_method(cls, test, x, y, _, z=3): # noqa: D213, D407", "def check_params(self):\n raise NotImplementedError", "def invalid_args(event):\n\n s.sendReply(\n event,\n f'Please provide the proper arguments. Use \"@{s.BOT_NAME} help\" for help.',\n )", "def test_missing_args(self, test, x, y, z=3, t=1, _private=0): # noqa: D213, D407", "def bad_args(args):\n PARSER.print_help()\n exit(0)", "def ignored(*args, **kwargs):\n return args, kwargs", "def validate_args(self, parser: argparse):\n pass", "def _check_args(self, args):\n if len(args) == 0:\n print(\"No parameters provided.\")\n return False\n else:\n return True" ]
[ "0.71085674", "0.6883384", "0.67958516", "0.6730387", "0.66968954", "0.6619412", "0.6390801", "0.6302647", "0.6250002", "0.62224966", "0.6195048", "0.6153181", "0.61491466", "0.6146345", "0.6113333", "0.6072033", "0.60612833", "0.60221803", "0.60128295", "0.6003403", "0.5941227", "0.5913698", "0.5889466", "0.58882385", "0.58786905", "0.5872367", "0.5872274", "0.58504695", "0.58427423", "0.58358556" ]
0.7603612
0
format a string by a map
def format_map(self, format_string, mapping): return self.vformat(format_string, args=None, kwargs=mapping)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _reprOfStringToValueMap (stringMap : Map) -> String:\n\n entrySeparator = u\"§\"\n entryTemplate = \"%s: %s\"\n keyList = sorted(list(stringMap.keys()))\n result = \"\"\n \n for key in keyList:\n value = stringMap[key] \n result += (iif(result == \"\", \"\", entrySeparator)\n + entryTemplate % (key, value))\n \n result = \"{\" + result + \"}\";\n return result", "def _fmt_map(self, string_list):\n return self._fmt_csv(string_list, list_braces=\"{}\")", "def format(fmt, st):\n ret = \"\"\n if not st: return ret\n if fmt not in valid_combos:\n return st\n cm = charmap[fmt]\n for c in st:\n ret += cm.get(c, c)\n return ret", "def _formatted_string(self, message: str, dict_values: dict) -> str:\n formatted_values = self._format_values_in_map(dict_values)\n return message.format(**formatted_values)", "def str_map(values, formatting):\n return [formatting % v for v in values]", "def format_str(string, dictionary):\n tokens = string.split('}')\n output = []\n for token in tokens[:-1]:\n if token.split('{')[-1] in dictionary.keys():\n output.append((token + '}').format(**dictionary))\n else:\n output.append(token + '}')\n\n return ''.join(output) + tokens[-1]", "def format_arguments(data: Dict) -> str:\n\n def prep(key: str, value: Any) -> str:\n if isinstance(value, str):\n value = f'''\"{value.replace('\"', \"'\")}\"'''\n if key == \"pattern\":\n value = f\"r{value}\"\n return f\"{key}={value}\"\n\n return \",\\n\".join([prep(key, value) for key, value in data.items()])", "def format(self, valDict):\n return self._formatStr % valDict", "def format_map(units, char=True):\n output = place_units(units, char)\n output = [\"\".join(line) for line in output]\n for y, line in enumerate(output):\n output[y] = (\n line\n + \" \"\n + \", \".join(\n [\n \"{}({})\".format(unit.char, unit.HP)\n for unit in sorted(units)\n if unit.position[0] == y\n ]\n )\n )\n return \"\\n\".join(output)", "def _format_dict(self, dict):\n\n result = \"\"\n for k, v in dict.items():\n result += \"\\n{0}: {1}\".format(k.capitalize(), v)\n\n return result", "def str_replace(data):\n for key, value in data.items():\n if isinstance(value, (str, unicode)):\n data[key] = value.format(**data)", "def format_dict(kv_list):\n return '\\n'.join(['{} - {}'.format(key, value) for\n key, value in kv_list])", "def format_map_file(headers, id_map, desc_key, sample_id_key,\r\n description_map=None, run_description=None):\r\n result = []\r\n if desc_key in headers:\r\n headers.remove(desc_key)\r\n if sample_id_key in headers:\r\n headers.remove(sample_id_key)\r\n header_line = '\\t'.join([sample_id_key] + headers + [desc_key])\r\n if not header_line.startswith('#'):\r\n header_line = '#' + header_line\r\n result.append(header_line)\r\n if run_description:\r\n if not isinstance(run_description, str):\r\n run_description = '\\n#'.join(run_description)\r\n if not run_description.startswith('#'):\r\n run_description = '#' + run_description\r\n result.append(run_description)\r\n for id_, fields in sorted(id_map.items()):\r\n curr_line = [id_]\r\n curr_line.extend([fields.get(h, '') for h in headers])\r\n curr_line.append(description_map.get(id_, ''))\r\n result.append('\\t'.join(map(str, curr_line)))\r\n return '\\n'.join(result)", "def nice_dict_format(d):\n return ''.join([key+\": \"+str(d[key])+\"\\n\" for key in list(d.keys())])", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": 
\"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def string_dict(d, headline='DICTIONARY:', offset=25):\n template = '{:%is} {}' % offset\n rows = [template.format('\"{}\":'.format(n), d[n]) for n in sorted(d)]\n s = headline + '\\n' + '\\n'.join(rows)\n return s", "def myformat(table):\n m = 0\n table = sorted(table, key=itemgetter(0))\n for t in table:\n t = str(t)\n if len(t[0]) > m:\n m = len(t[0])\n m += 10\n fstr = \"{0:}\" + m*\" \" + \"{1:}\"\n s = \"\"\n for x in table:\n try:\n a = float(x[0])\n b = float(x[1])\n s += \"{0:.5f}{1:{width}}\".format(a, b, width=m) + \"\\n\"\n except IndexError:\n pass\n return s\n \"\"\"\n out = \"\"\n for pair in table:\n out += str(pair[0]) + 5*\" \" + str(pair[1]) + \"\\n\"\n return out\"\"\"", "def _format_entries(self):\n\n def format_item(key, value):\n if value is None:\n return str(key)\n else:\n return '%s -> %x' % (key, value,)\n\n items = self._entries.items()\n items.sort()\n return '{%s}' % (', '.join([format_item(*item) for item in items]),)", "def nice_string_output(d, extra_spacing=5, decimals=3):\n\n names = d.keys()\n max_names = len_of_longest_string(names)\n\n values = values_to_string(d.values(), decimals=decimals)\n max_values = len_of_longest_string(values)\n\n string = \"\"\n for name, value in zip(names, values):\n spacing = extra_spacing + max_values + max_names - len(name) - 1\n string += \"{name:s} {value:>{spacing}} \\n\".format(name=name, value=value, spacing=spacing)\n return string[:-2]", "def skill_stringer(input_dict): #input a dictionary\r\n\treturn ', '.join('-'.join((k, str(v))) for k,v in sorted(input_dict.items())) #output formatted skill list string\r", "def _DictToString(self, value_dict, str_length=5):\n\n def FormatValue(v, value_format, str_length):\n if isinstance(v, (int, float)):\n return value_format % v\n else:\n return str(v).rjust(str_length)\n\n text = []\n blank = '--'.rjust(str_length)\n\n if self._show_label:\n text.append(' '.join(k.rjust(str_length) for k in self._node_labels))\n\n if not self._precision:\n value_format = '%% %dd' % str_length\n else:\n value_format = '%% %d.%df' % (str_length, self._precision)\n\n text.append(' '.join(\n [FormatValue(value_dict[k], value_format, str_length)\n if k in value_dict else blank for k in self._node_labels]))\n\n return '\\n'.join(text)", "def write_config_string(input_dict, entry_char='>', attribution_char='=',\n usekeys=None):\n # Selects the desired entries of the input_dict\n if usekeys is not None:\n input_dict = {key: input_dict[key] for key in usekeys}\n\n result_str = \"\"\n\n for key, value in input_dict.items():\n result_str += 
entry_string(key, value, entry_char, attribution_char)\n\n return result_str", "def formatted_loss_components_string(components: dict) -> str:\n total_loss = components['L_V']+components['L_beta']\n fractions = { k : v/total_loss for k, v in components.items() }\n fkey = lambda key: f'{components[key]:+.4f} ({100.*fractions[key]:.1f}%)'\n s = (\n 'L_V+L_beta = {L:.4f}'\n '\\n L_V = {L_V}'\n '\\n L_V_attractive = {L_V_attractive}'\n '\\n L_V_repulsive = {L_V_repulsive}'\n '\\n L_beta = {L_beta}'\n '\\n L_beta_noise = {L_beta_noise}'\n '\\n L_beta_sig = {L_beta_sig}'\n .format(L=total_loss,**{k : fkey(k) for k in components})\n )\n if 'L_beta_norms_term' in components:\n s += (\n '\\n L_beta_norms_term = {L_beta_norms_term}'\n '\\n L_beta_logbeta_term = {L_beta_logbeta_term}'\n .format(**{k : fkey(k) for k in components})\n )\n return s", "def _format_parameter_output(self, parameters: dict) -> str:\n \n output = ''\n for key, value in parameters.items():\n output = output + '\\t\\t' + str(key) + ': ' + str(value) + '\\n'\n \n return output", "def make_style(self, mixed):\n if isinstance(mixed, dict):\n return ' '.join('%s: %s;' % (k, v) for k, v in mixed.items())\n return str(mixed)", "def format_sections(self, sections: SectionDict) -> str:", "def entry_string(key, value, entry_char=\">\", attribution_char=\"=\",\n end_char=\"\\n\"):\n result_str = entry_char + \" \"\n result_str += str(key)\n result_str += \" \" + attribution_char + \" \"\n result_str += str(value)\n result_str += end_char\n return result_str", "def _render_dict_to_string(self, adict):\n alist = [ \"%s:%s\" % (self._render_thing(k), \n self._render_thing(adict[k])\n ) for k in adict.keys()]\n return \",\".join(self._render_row(alist))", "def _createTextProfile(self, indict):\r\n\r\n keys = map(lambda x: x[0], indict)\r\n vals = map(lambda x: x[1], indict)\r\n\r\n outstrs = [\"\\n\"]\r\n propDict = {}\r\n total = sum(vals)\r\n maxLenKey = max([len(a) for a in keys])\r\n maxLenVal = max([len(repr(a)) for a in vals]) \r\n\r\n for k, v in indict:\r\n outstr = \" \"\r\n outstr += k.ljust(maxLenKey + 1)\r\n outstr += (\"%.2f\" % v).ljust(maxLenVal + 1)\r\n outstr += \"-\" * int(self.numBars * (v / total))\r\n outstrs.append(outstr)\r\n\r\n return \"\\n\".join(outstrs)", "def _format_dimensions(dimensions):\n if not dimensions:\n return \"\"\n\n dim_pairs = [\"%s=%s\" % (k, v) for k, v in dimensions.items()]\n return \"[%s]\" % (\",\".join(dim_pairs))" ]
[ "0.7346636", "0.69570595", "0.6857461", "0.67944145", "0.67872196", "0.65942025", "0.64885473", "0.64343286", "0.6369347", "0.6199872", "0.6082483", "0.6055873", "0.6032503", "0.59989256", "0.59868157", "0.59844786", "0.597163", "0.59715307", "0.5968912", "0.5882509", "0.58693737", "0.5850151", "0.5828633", "0.58280945", "0.5828027", "0.582522", "0.58162165", "0.58127666", "0.5811416", "0.58073133" ]
0.7537712
0
Get used kwargs after formatting.
def get_used_kwargs(self): return self._used_kwargs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_kwargs(self):\n return {}", "def kwargs(self):\n return self._kwargs", "def kwargs(self):\n return self._kwargs", "def format_arguments(self, **kwargs):\n return kwargs", "def get_kwargs(self):\n return {\n 'user': self.user,\n }", "def get_kwargs(d):\n raise NotImplementedError(\"subclass must implement get_kwargs()\")", "def get_unused_kwargs(self):\n return self._unused_kwargs", "def interpolator_kwargs(self):\n return self._interpolator_kwargs", "def _get_kwargs_for_backend(self):\n return dict()", "def get_kwargs():\n\treturn get_kwargs_raw(sys.argv)", "def _kwargs(self):\n dict = {\"name\":self.name}\n return dict", "def get_kwargs(d):\n return {\"values\": d.get(\"values\", None)}", "def interpolator_kwargs(self) -> dict:\n\n return self._interpolator_kwargs", "def kwargs (self):\n return copy.deepcopy (self._kwargs)", "def get_dict(**kwargs):\n return kwargs", "def _copy_kwargs(self, **kwargs):\n ns = self.__dict__\n for attr, kw in {'_engine': 'engine', '_format': 'format'}.items():\n assert kw not in kwargs\n if attr in ns:\n kwargs[kw] = ns[attr]\n return super()._copy_kwargs(**kwargs)", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def get_value_and_frame_renderer_kwargs(self):\n return self.make_value_and_frame_renderer_kwargs", "def get_kwargs(d):\n return {\"range\": d.get(\"range\", None)}", "def as_kwargs(self) -> Dict[str, Any]:\n ret = {}\n for arg in self.args.values():\n ret[arg.name] = arg.value\n return ret", "def _kwargs(self):\n dict = DAG._kwargs(self) \n if (self.job): \n dict[\"inputpaths\"] = self.job.inputpaths\n dict[\"outputpath\"] = self.job.outputpath\n dict[\"job\"] = \"%s()\" % self.job.__class__.__name__\n return dict", "def params(self, **kwargs):\n return kwargs", "def extrapolator_kwargs(self) -> dict:\n\n return self._extrapolator_kwargs", "def get_save_kwargs(self):\n filetype = self.config[\"format\"]\n kwargs = dict()\n if filetype in (\"gif\", \"jpg\", \"png\"):\n kwargs[\"optimize\"] = self.config[\"optimize\"]\n if filetype == \"gif\":\n kwargs[\"interlace\"] = self.config[\"gif_interlace\"]\n if filetype == \"png\":\n kwargs[\"compress_level\"] = self.config[\"png_compress_level\"]\n if filetype == \"tif\":\n kwargs[\"compression\"] = self.config[\"tif_compression\"]\n logger.debug(kwargs)\n return kwargs", "def get_context_data(self, **kwargs):\n\n return {'params': kwargs}", "def _parse_kwargs(self):\n re_kwargs = r'^[\\w_][\\w\\d_]*=.+$'\n kwargs = [a.split('=') for a in self.args if re.findall(re_kwargs, a)]\n self.kwargs = {k: self._load_json(v) for k, v in kwargs}\n self.args = [a for a in self.args if not re.findall(re_kwargs, a)]", "def get_form_kwargs(self):\n self.object = self.get_object()\n kwargs = super().get_form_kwargs()\n return kwargs", "def _maybe_due_kwargs(self):\n return {'producer': self.producer}", "def _get_context(argspec, kwargs):\n if argspec.has_kwargs:\n return kwargs\n return dict((arg, kwargs[arg]) for arg in argspec.args if arg in kwargs)", "def get(self):\n return self.args, self.kwargs" ]
[ "0.7273864", "0.7083243", "0.7083243", "0.7075471", "0.67573947", "0.66603476", "0.6652786", "0.65693074", "0.6531428", "0.65201193", "0.6439507", "0.64319444", "0.6423987", "0.6408022", "0.6377557", "0.6353454", "0.6334118", "0.6269364", "0.6239159", "0.62325567", "0.62284464", "0.62246776", "0.6143273", "0.6118797", "0.6022164", "0.60183233", "0.5989846", "0.59729725", "0.592358", "0.59029216" ]
0.7414289
0
Get unused kwargs after formatting.
def get_unused_kwargs(self): return self._unused_kwargs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_used_kwargs(self):\n return self._used_kwargs", "def _scrub_kwargs(kwargs: Dict[str, Any]) -> Dict[str, Any]:\n keywords_to_scrub: List[str] = ['extra_arguments', 'kernel_id']\n scrubbed_kwargs = kwargs.copy()\n for kw in keywords_to_scrub:\n scrubbed_kwargs.pop(kw, None)\n\n return scrubbed_kwargs", "def unused_kwargs(kw):\n fn_kw = dict(base_class=None,\n base_name=None, name=None, base_arg=None, base_kw=None, parent=None,\n infer_kw=None, in_shape='BCD', base_shape=None, out_shape='BCD', tuple_out=False,\n forward_arg=None, forward_kw=None, initialization=None, activation=None, )\n return {k:v for k, v in kw.items() if k not in fn_kw}", "def get_misused_opt_arg_dec():\n return list(incompletely_used_decorators.values())", "def check_unused_args(self, used_args, args, kwargs):\n for k, v in kwargs.items():\n if k in used_args:\n self._used_kwargs.update({k: v})\n else:\n self._unused_kwargs.update({k: v})", "def ignored(*args, **kwargs):\n return args, kwargs", "def _clean_kwargs(self, kwargs, fn):\n # Do not do the cleaning if server config\n # doesnt ask to ignore\n if not self.server.IGNORE_UNEXPECTED_KWARGS:\n return kwargs\n\n expected_kwargs = set(inspect.getargspec(fn).args)\n got_kwargs = set(kwargs.keys())\n unexpected_kwargs = got_kwargs - expected_kwargs\n for k in unexpected_kwargs:\n del kwargs[k]\n\n return kwargs", "def format_arguments(self, **kwargs):\n return kwargs", "def filter_extra_accepted_kwargs(fun, kwargs, skip_positional=0):\n sig = inspect.signature(fun)\n # the params from signature with up to skip_positional filtered out\n # (less only if there is not enough of positional args)\n params = [(name, param) for i, (name, param) in enumerate(sig.parameters.items())\n if i >= skip_positional or param.kind not in\n [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY]]\n extra = [\n name for (name, param) in params\n if param.kind in [inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY]\n ]\n return {name: value for name, value in kwargs.items() if name in extra}", "def _clean_kwargs(keep_name=False, **kwargs):\n if \"name\" in kwargs and not keep_name:\n kwargs[\"name_or_id\"] = kwargs.pop(\"name\")\n\n return __utils__[\"args.clean_kwargs\"](**kwargs)", "def _clean_kwargs(keep_name=False, **kwargs):\n if \"name\" in kwargs and not keep_name:\n kwargs[\"name_or_id\"] = kwargs.pop(\"name\")\n\n return __utils__[\"args.clean_kwargs\"](**kwargs)", "def get_kwargs(self):\n return {}", "def _filter_kwargs(names, dict_):\n return {k: v for k, v in dict_.items() if k in names and v is not None}", "def _accept_or_ignore_job_kwargs(self, _exclude_errors=(), **kwargs):\n errors = {}\n if kwargs:\n for field_name in kwargs.keys():\n errors[field_name] = [_(\"Field is not allowed on launch.\")]\n return ({}, kwargs, errors)", "def kwargs(self):\n return self._kwargs", "def kwargs(self):\n return self._kwargs", "def get_filter_kwargs(self, *_, **__) -> Dict[str, Any]:", "def _pop_tq_add_args(self, kwargs):\n return {\n x: kwargs.pop('_' + x, None) for x in self.QUEUE_ARGS\n }", "def filter_args(**kwargs):\n valid_args = \"tol_num_frames\"\n\n d = dict((k, kwargs[k]) for k in valid_args if k in kwargs)\n\n return d", "def filter_kwargs(function, **kwargs):\n\n kwargs = deepcopy(kwargs)\n if sys.version_info[0] >= 3:\n args = function.__code__.co_varnames\n else:\n args = function.func_code.co_varnames\n\n args = set(kwargs.keys()) - set(args)\n for key in args:\n kwargs.pop(key)\n\n return kwargs", "def 
__repr_args__(self):\n args = dict(super().__repr_args__())\n try:\n del args['type_hint']\n except KeyError:\n pass\n return args.items()", "def noneless(**kwargs):\n return {k: v for k, v in kwargs.items() if v is not None}", "def ignorable(*args, **kwargs):\n return args, kwargs", "def kwargs (self):\n return copy.deepcopy (self._kwargs)", "def getArgs(useKwargFormat=True, includeVariableArgs=True, numFramesAgo=1, excludeList=[]):\n\tframe = inspect.getouterframes(inspect.currentframe())[numFramesAgo][0]\n\targNames, varArgs_name, varKwargs_name, locals_ = inspect.getargvalues(frame)\n\tvarArgs = locals_[varArgs_name] if varArgs_name != None else tuple()\n\tvarKwargs = locals_[varKwargs_name] if varKwargs_name != None else {}\n\tnotArgs = set(locals_.iterkeys()) - set(argNames)\n\t\n\tfor notArg in notArgs:\tdel locals_[notArg]\n\texcludeList.append(\"self\")\n\texcludeList.append(\"cls\")\n\tmixedKwargsArgs = OrderedDict((argName, locals_[argName]) for argName in argNames if argName not in excludeList)\n\t\n\tif useKwargFormat == True:\n\t\tkwargs = dict(mixedKwargsArgs)\n\t\tif includeVariableArgs:\n\t\t\tkwargs.update(varKwargs)\n\t\treturn kwargs\n\telif useKwargFormat == False:\n\t\targs = tuple(mixedKwargsArgs.values())\n\t\tif includeVariableArgs:\n\t\t\targs += varArgs\n\t\treturn args\n\telif useKwargFormat == None:\n\t\tkwargs = dict(mixedKwargsArgs)\n\t\tif includeVariableArgs:\n\t\t\tkwargs.update(varKwargs)\n\t\treturn varArgs, kwargs\n\telse:\n\t\traise Exception(\"Invalid useKwargFormat\")", "def exclude_opts(cls) -> Tuple[str, ...]:\n return \"required\", \"print_config\", \"config\", \"ngpu\"", "def _parse_kwargs(self):\n re_kwargs = r'^[\\w_][\\w\\d_]*=.+$'\n kwargs = [a.split('=') for a in self.args if re.findall(re_kwargs, a)]\n self.kwargs = {k: self._load_json(v) for k, v in kwargs}\n self.args = [a for a in self.args if not re.findall(re_kwargs, a)]", "def get_kwargs(d):\n raise NotImplementedError(\"subclass must implement get_kwargs()\")", "def _maybe_due_kwargs(self):\n return {'producer': self.producer}", "def get_kwargs(d):\n return {\"values\": d.get(\"values\", None)}" ]
[ "0.6829997", "0.6761362", "0.6697653", "0.6459872", "0.6360253", "0.63554084", "0.6294143", "0.6281469", "0.6259455", "0.62412184", "0.62412184", "0.62019503", "0.6111536", "0.6063095", "0.60411596", "0.60411596", "0.599486", "0.5875321", "0.5854771", "0.5847538", "0.58386594", "0.5831341", "0.5820963", "0.5781803", "0.57304215", "0.57112193", "0.5708548", "0.5702063", "0.5694892", "0.5681617" ]
0.7941406
0
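The four records above (check_unused_args, format_map, get_used_kwargs, get_unused_kwargs) read as methods of a single string.Formatter subclass that splits a mapping into the keys a template actually used and the keys it did not. The sketch below is a reconstruction under stated assumptions — the class name KwargsFormatter and the __init__ that creates the two dicts are invented — gathered here only to show how the pieces fit together at call time.

from string import Formatter

class KwargsFormatter(Formatter):
    def __init__(self):
        super().__init__()
        # Assumed initialisation; the real class must create these somewhere.
        self._used_kwargs = {}
        self._unused_kwargs = {}

    def check_unused_args(self, used_args, args, kwargs):
        # Called by vformat() after substitution; sort kwargs by whether they were used.
        for k, v in kwargs.items():
            if k in used_args:
                self._used_kwargs.update({k: v})
            else:
                self._unused_kwargs.update({k: v})

    def format_map(self, format_string, mapping):
        return self.vformat(format_string, args=None, kwargs=mapping)

    def get_used_kwargs(self):
        return self._used_kwargs

    def get_unused_kwargs(self):
        return self._unused_kwargs

f = KwargsFormatter()
f.format_map("{host}:{port}", {"host": "localhost", "port": 8080, "debug": True})
# f.get_used_kwargs()   -> {"host": "localhost", "port": 8080}
# f.get_unused_kwargs() -> {"debug": True}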
Add element_by alias and extension methods (if_exists/or_none).
def add_element_extension_method(Klass): def add_element_method(Klass, using): locator = using.name.lower() find_element_name = "element_by_" + locator find_element_if_exists_name = "element_by_" + locator + "_if_exists" find_element_or_none_name = "element_by_" + locator + "_or_none" wait_for_element_name = "wait_for_element_by_" + locator find_elements_name = "elements_by_" + locator wait_for_elements_name = "wait_for_elements_by_" + locator def find_element(self, value): return self.element(using.value, value) find_element.__name__ = find_element_name find_element.__doc__ = ( "Set parameter 'using' to '{0}'.\n".format(using.value) + "See more in \'element\' method." ) def find_element_if_exists(self, value): return self.element_if_exists(using.value, value) find_element_if_exists.__name__ = find_element_if_exists_name find_element_if_exists.__doc__ = ( "Set parameter 'using' to '{0}'.\n".format(using.value) + "See more in \'element_if_exists\' method." ) def find_element_or_none(self, value): return self.element_or_none(using.value, value) find_element_or_none.__name__ = find_element_or_none_name find_element_or_none.__doc__ = ( "Set parameter 'using' to '{0}'.\n".format(using.value) + "See more in \'element_or_none\' method." ) def wait_for_element_by(self, *args, **kwargs): return self.wait_for_element(using.value, *args, **kwargs) wait_for_element_by.__name__ = wait_for_element_name wait_for_element_by.__doc__ = ( "Set parameter 'using' to '{0}'.\n".format(using.value) + "See more in \'wait_for_element\' method." ) def find_elements(self, value): return self.elements(using.value, value) find_elements.__name__ = find_elements_name find_elements.__doc__ = ( "Set parameter 'using' to '{0}'.\n".format(using.value) + "See more in \'elements\' method." ) def wait_for_elements_available(self, *args, **kwargs): return self.wait_for_elements(using.value, *args, **kwargs) wait_for_elements_available.__name__ = wait_for_elements_name wait_for_elements_available.__doc__ = ( "Set parameter 'using' to '{0}'.\n".format(using.value) + "See more in \'wait_for_elements\' method." ) setattr(Klass, find_element_name, find_element) setattr(Klass, find_element_if_exists_name, find_element_if_exists) setattr(Klass, find_element_or_none_name, find_element_or_none) setattr(Klass, wait_for_element_name, wait_for_element_by) setattr(Klass, find_elements_name, find_elements) setattr(Klass, wait_for_elements_name, wait_for_elements_available) for locator in iter(Locator): add_element_method(Klass, locator)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contains(self, element):\n pass", "def add(element):", "def artAttrTool(*args, exists: Union[AnyStr, bool]=\"\", remove: AnyStr=\"\", q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass", "def has_element(parent, xpath):\n ele = parent.find('./' + xpath)\n if ele is not None:\n return ele\n ele = parent\n lpath = xpath.split('/')\n for p in lpath:\n e = parent.find('.//' + p)\n if e is None:\n e = ET.SubElement(ele, p)\n ele = e\n return ele", "def try_find_element(web_driver: WebDriver, by: FindElementBy, unique_val, retry_count, ignore_if_not_found=False) \\\n -> WebElement:\n element = None\n retried = 0\n while True:\n try:\n if by == FindElementBy.CLASS:\n element = web_driver.find_element_by_class_name(unique_val)\n elif by == FindElementBy.NAME:\n element = web_driver.find_element_by_name(unique_val)\n elif by == FindElementBy.AUTOMATION_ID:\n element = web_driver.find_element_by_accessibility_id(unique_val)\n except NoSuchElementException:\n retried = retried + 1\n if retried > retry_count:\n if ignore_if_not_found:\n return None\n raise NoSuchElementException\n else:\n sleep(1)\n continue\n break\n return element", "def exists(self, **kwargs):\n if 'timeout' in kwargs:\n self.set_implicit_wait(kwargs['timeout'])\n\n if 'driver' in kwargs:\n d = kwargs['driver']\n else:\n d = self.driver\n\n if kwargs.has_key('element'):\n try:\n return kwargs['element']\n except:\n return False\n else:\n try:\n if 'accessibility_id' in kwargs:\n e = d.find_element_by_accessibility_id(kwargs['accessibility_id'])\n elif 'class_name' in kwargs:\n e = d.find_element_by_class_name(kwargs['class_name'])\n elif 'id' in kwargs:\n e = d.find_element_by_id(kwargs['id'])\n elif 'xpath' in kwargs:\n e = d.find_element_by_xpath(kwargs['xpath'])\n else:\n raise RuntimeError(\"exists() called with incorrect param. 
kwargs = %s\" % kwargs)\n\n return e\n except NoSuchElementException:\n return False\n finally:\n self.set_implicit_wait()", "def get_by_element(self, element):\n token_ct = ContentType.objects.get_for_model(element)\n try:\n return self.get(\n elements__content_type=token_ct,\n elements__object_id=element.pk,\n )\n except ObjectDoesNotExist:\n return None", "def findPlug(node, attr):\n\n pass", "def get_element(self, by, criteria):\n # Need reuse criteria\n return self._find_by_locator().find_element(by, criteria)", "def element(self, element):\n pass", "def check_and_get_ele_by_tag_name(element, tag_name):\r\n if element is None or not tag_name:\r\n return None \r\n try:\r\n return element.find_element_by_tag_name(tag_name) \r\n except NoSuchElementException:\r\n return None", "def element_by_its(\n self,\n selector: Union[str, Tuple[str, str], Callable[[Element], Element]],\n condition: Condition[Element],\n ) -> Element:\n\n # TODO: tune implementation to ensure error messages are ok\n\n def find_in(parent: Element):\n if callable(selector):\n return selector(parent)\n else:\n return parent.element(selector)\n\n return self.element_by(lambda it: condition(find_in(it)))", "def elements_by_selector(self, selector):\n pass", "def find_element_by_selector(self, selector):\n return UiObject(selector, self.android_device_driver) if UiObject(\n selector, self.android_device_driver).verify_exist() else None", "def add(self, element):\n pass", "def find_element(self, by=By.ID, value=None):\n # Get result from the original implementation of the underlying driver.\n result = self._original_methods['find_element'](by, value)\n # Wrap the element.\n if result:\n result = EyesWebElement(result, self._driver)\n return result", "def check_and_get_ele_by_id(element, id_name):\r\n if element is None or not id_name:\r\n return None \r\n try:\r\n return element.find_element_by_id(id_name) \r\n except NoSuchElementException:\r\n return None", "def _contains_op(spec):", "def check_and_get_ele_by_xpath(element, xpath):\r\n if element is None or not xpath:\r\n return None\r\n try:\r\n return element.find_element_by_xpath(xpath)\r\n except NoSuchElementException:\r\n return None", "def treat_exceptions(method):\n def wrap(*args, **kwargs):\n try:\n return method(*args, **kwargs)\n except NoSuchElementException:\n raise Exception('No such element with xpath: {}'.format(kwargs['xpath']))\n\n return wrap", "def add(self, elem):", "def add(self, elem):", "def __contains__(self, item):", "def does_element_exist(driver, selectors):\n try:\n driver.find_element_by_css_selector(selectors)\n except NoSuchElementException:\n return False\n return True", "def wait_for_element(driver, selector, method, plural=False):\n\n try:\n wait = WebDriverWait(driver, 10)\n wait.until(\n eval(f'EC.presence_of_element_located((By.{method}, \"{selector}\"))')\n )\n finally:\n if plural:\n element = eval(f'driver.find_elements_by_{method.lower()}(\"{selector}\")')\n else:\n element = eval(f'driver.find_element_by_{method.lower()}(\"{selector}\")')\n\n return element", "def check_and_get_ele_by_css_selector(element, selector):\r\n if element is None or not selector:\r\n return None \r\n try:\r\n return element.find_element_by_css_selector(selector)\r\n except NoSuchElementException:\r\n return None", "def __contains__(self, item: Any) -> bool:\n try:\n self.__getattr__(item)\n return True\n except RuntimeError:\n return False", "def get(self, el):\n raise Exception('TODO IMPLEMENT ME !')", "def add(self, el: T) -> bool:\n if el in 
self:\n return False\n else:\n self[el] = el\n return True", "def isElementPresent(self,locator=\"\",locatorType='id', element=None):\n\n\n\n\n try:\n if locator:\n element = self.getElement(locator, locatorType)\n\n if element is not None:\n self.logger.info(\"Element found with locator \"+locator+\" LocatorType \"+locatorType)\n return True\n\n else:\n self.logger.info(\"Element not found with locator \" + locator + \" LocatorType \" + locatorType)\n return False\n\n except:\n print(\"Element not found\")\n return False" ]
[ "0.52138686", "0.5116739", "0.5036193", "0.50305545", "0.49910548", "0.497446", "0.48736545", "0.48461556", "0.48278427", "0.47965333", "0.47628498", "0.46982324", "0.46863768", "0.46765348", "0.46641046", "0.46609923", "0.46489343", "0.46479532", "0.46302179", "0.45912728", "0.45618", "0.45618", "0.4548172", "0.45111105", "0.44909045", "0.4480775", "0.44726127", "0.44635764", "0.4461124", "0.44437283" ]
0.6793267
0
Fluent interface decorator to return self if method return None.
def fluent(func): @wraps(func) def fluent_interface(instance, *args, **kwargs): ret = func(instance, *args, **kwargs) if ret is not None: return ret return instance return fluent_interface
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __noop(self, *args, **kwargs):\n return None", "def method(self):\n return None", "def return_none() -> None:\n pass", "def __call__(self, *args, **kwargs):\n return self.__wrapped__(*args, **kwargs)", "def pass_null(func):\n\n def wrapper(obj, *args, **kwargs):\n if not obj:\n return obj\n return func(obj, *args, **kwargs)\n\n # used by django template parser to introspect args\n wrapper._decorated_function = getattr(func, '_decorated_function', func)\n return wraps(func)(wrapper)", "def extract_self_if_method_call(args: List[Any], func: Callable) -> Optional[object]:\n if len(args) > 0:\n method = getattr(args[0], func.__name__, False)\n if method:\n wrapped = getattr(method, \"__wrapped__\", False)\n if wrapped and wrapped == func:\n return args[0]\n\n return None", "def _to_be_wrapped(self) -> None:", "def or_none(cls, node):\n return node if isinstance(node, cls) else None", "def omit_exception(method):\r\n\r\n @functools.wraps(method)\r\n def _decorator(self, *args, **kwargs):\r\n if self._ignore_exceptions:\r\n try:\r\n return method(self, *args, **kwargs)\r\n except ConnectionInterrupted:\r\n return None\r\n else:\r\n return method(self, *args, **kwargs)\r\n\r\n return _decorator", "def no_none(decorated):\n def _func(*args, **kwargs):\n \"\"\"wrap generator\"\"\"\n for value in decorated(*args, **kwargs):\n if value is not None:\n yield value\n return _func", "def __call__(self):\n return self", "def noop_decorator(func):\n return func", "def __nonzero__ ( self ) :\n raise AbstractMethodException( self , \"__nonzero__\" )", "def __call__(self, *args, **kwargs):\n return self.method(*args, **kwargs)", "def __call__(self):\n return self", "def set_null(self, /, *defaults: Any, **kwargs: Any) -> \"fn\":\n return self._mod.set_null(self._func, *defaults, **kwargs)", "def next(self) -> Optional[Chainable]:\n return None", "def none(self):", "def __getattr__(self, _):\n return None", "def NoModulator(*args, **kwargs):\n return None", "def nozzle(self) -> Unit:\n try:\n return self.source.nozzle\n except AttributeError:\n return self", "def wrapper(self):\n\n result = caching_services.try_get('permanent', method, self)\n if result is not None:\n return result\n\n result = method(self)\n caching_services.try_set('permanent', result, method, self)\n return result", "def none_comparison(func):\n @functools.wraps(func)\n def inner(obj1,obj2):\n if obj1 is not None and obj2 is not None:\n return func(obj1, obj2)\n if obj1 is None and obj2 is None:\n return []\n if obj1 is not None and obj2 is None:\n return Difference(f\"Second {obj1.__class__.__name__} is None\",(obj1,None))\n return Difference(f\"First {obj2.__class__.__name__} is None\",(None,obj2))\n return inner", "def _wrap(self, meth, *args, **kwargs):\n if not self.connected:\n return self._connectSchedule(self._wrap, meth, *args, **kwargs)\n\n opres = meth(self, *args, **kwargs)\n return self.defer(opres)", "def _wrapped_method(self, _meth_name, *args, **kwargs):\n return self._delegate(_meth_name, *args, **kwargs)", "def __call__(self, *args, **kwargs):\n return self", "def __call__(self, *args, **kwargs):\n return self.func(*args, **kwargs)", "def __call__(self, *args, **kwargs):\n return self.func(*args, **kwargs)", "def decorator(method):\n\n def wrapper(self):\n \"\"\"\n decorates the given method or property and makes it a lazy one.\n\n :returns: method or property result.\n \"\"\"\n\n result = caching_services.try_get('permanent', method, self)\n if result is not None:\n return result\n\n result = method(self)\n 
caching_services.try_set('permanent', result, method, self)\n return result\n\n return update_wrapper(wrapper, method)", "def _get(self):\n return None" ]
[ "0.6567935", "0.6153579", "0.5921167", "0.5789551", "0.57773596", "0.5770913", "0.5758091", "0.57273376", "0.56640935", "0.5651486", "0.5617815", "0.5551248", "0.5544665", "0.5541051", "0.55385506", "0.5533451", "0.5528883", "0.55121976", "0.54744726", "0.5464299", "0.53599745", "0.5355841", "0.5354612", "0.5354356", "0.5315409", "0.53074", "0.53066283", "0.53066283", "0.53041536", "0.5277207" ]
0.62337273
1
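For readers skimming the row above: its document is a `fluent` decorator that makes methods chainable by returning `self` whenever the wrapped method returns `None`. Below is a minimal usage sketch; the `QueryBuilder` class and its methods are hypothetical illustrations, not part of the dataset.

```python
from functools import wraps

def fluent(func):
    # Same decorator as in the row's document: fall back to the instance
    # so calls can be chained whenever the method returns None.
    @wraps(func)
    def fluent_interface(instance, *args, **kwargs):
        ret = func(instance, *args, **kwargs)
        if ret is not None:
            return ret
        return instance
    return fluent_interface

class QueryBuilder:                      # hypothetical example class
    def __init__(self):
        self.parts = []

    @fluent
    def select(self, *cols):
        self.parts.append("SELECT " + ", ".join(cols))   # returns None -> chaining

    @fluent
    def where(self, cond):
        self.parts.append("WHERE " + cond)               # returns None -> chaining

    @fluent
    def build(self):
        return " ".join(self.parts)                      # non-None passes through

print(QueryBuilder().select("id", "name").where("id = 1").build())
# SELECT id, name WHERE id = 1
```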
Convert value to a list of key strokes >>> value_to_key_strokes(123) ['1', '2', '3'] >>> value_to_key_strokes('123') ['1', '2', '3'] >>> value_to_key_strokes([1, 2, 3]) ['1', '2', '3'] >>> value_to_key_strokes(['1', '2', '3']) ['1', '2', '3']
def value_to_key_strokes(value): result = [] if isinstance(value, Integral): value = str(value) for v in value: if isinstance(v, Keys): result.append(v.value) elif isinstance(v, Integral): result.append(str(v)) else: result.append(v) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_key_val_list(value):\n if value is None:\n return None\n\n if isinstance(value, (str, bytes, bool, int)):\n raise ValueError('cannot encode objects that are not 2-tuples')\n\n if isinstance(value, collections.Mapping):\n value = value.items()\n\n return list(value)", "def _force_key_as_list(self, key):\r\n return [key] if isinstance(key, (str, unicode)) else key", "def strokes(self):\n\n # Expecting 8-byte chords (4 bytes of steno, 4 of timestamp.)\n assert self.data_length % 8 == 0\n # Steno should only be present on ACTION_READ packets\n assert self.packet_id == self.ID_READ\n\n strokes = []\n for stroke_data in grouper(8, self.data, 0):\n stroke = []\n # Get 4 bytes of steno, ignore timestamp.\n for steno_byte, key_chart_row in zip(stroke_data, STENO_KEY_CHART):\n assert steno_byte >= 0b11000000\n # Only interested in right 6 values\n key_mask = [int(i) for i in bin(steno_byte)[-6:]]\n stroke.extend(compress(key_chart_row, key_mask))\n if stroke:\n strokes.append(stroke)\n return strokes", "def keysequence(value):\r\n return value.toString()", "def ToKeys(hotkey):\n values = hotkey.split(' - ')\n modifiers = sorted(value.upper() for value in values\n if value in ['Shift', 'Ctrl', 'Alt', 'Search'])\n keycode = [value.lower() for value in values\n if value not in ['Shift', 'Ctrl', 'Alt', 'Search']]\n # The keys which are highlighted even without modifier keys.\n base_keys = ['backspace', 'power']\n if not modifiers and (keycode and keycode[0] not in base_keys):\n return None\n return '<>'.join(keycode + modifiers)", "def read_keys(self) -> list[KeyPress]:", "def get_key(self, value):\n return [item[0] for item in self.items() if item[1] == value]", "def getKeyObjs(self,\n key,\n value = None):\n keyList = []\n keys = self.__keyObjs\n tmpKey = key + \"___\"\n for keyIdx in keys:\n if ((string.find(keyIdx, tmpKey) != -1) and (value != None)):\n if (re.search(value,self.__keyObjs[keyIdx].getValue())!=None):\n keyList.append(self.__keyObjs[keyIdx])\n elif (string.find(keyIdx, tmpKey) != -1):\n keyList.append(self.__keyObjs[keyIdx])\n return keyList", "def to_strokes(self, width:float, color:list): \n strokes = []\n strokes.append({\n 'start': tuple(self.points[0]),\n 'end' : tuple(self.points[1]),\n 'width': width,\n 'color': rgb_value(color)\n })\n return strokes", "def split(value, key):\n return str(value).split(key)", "def to_strokes(self, width:float, color:list): \n strokes = []\n for segment in self._instances: \n strokes.extend(segment.to_strokes(width, color))\n return strokes", "def setKeyPath(*args, **kwargs)->List[AnyStr]:\n pass", "def get_key_list(self) -> list:\n return self.key_functs.keys()", "def to_knx(self, value: Any) -> DPTArray:\n return self._climate_mode_transcoder.to_knx(value)", "def chose_param_value(self, value: str) -> list:\n\n index_to_start_from = self.commands.index(value)+1\n commands_to_start_next_params_research = [\n self.commands[index] for index in range(index_to_start_from, len(self.commands))\n ]\n dirs_to_delete_put = []\n for dir_ in commands_to_start_next_params_research:\n if \"--\" in dir_:\n break \n dirs_to_delete_put.append(dir_)\n return dirs_to_delete_put", "def to_list(value):\n if hasattr(value, '__iter__') and not isinstance(value, str):\n return list(value)\n return [value]", "def click_echo_kvp(key, value, padding=20, color='green'):\n return click.echo(\n click.style('{key:<{padding}}'.format(\n key=key + ':',\n padding=padding\n ), fg=color) +\n str(value)\n )", "def ListToStr(val):\n return ''.join(['%c' % c for c in 
val])", "def get_list(key):\n ret = hookenv.action_get(key)\n return ret.split() if ret else []", "def StrToList(val):\n return [ord(c) for c in val]", "def glyph(x):\n assert isinstance(x, str)\n return [x]", "def to_knx(self, value: XYYColor) -> DPTArray:\n return DPTColorXYY.to_knx(value)", "def to_strokes(self, width:float, color:list):\n # corner points\n # NOTE: center line of path without stroke width considered\n x0, y0, x1, y1 = self.rect\n points = [\n (x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)\n ]\n # connect each line\n strokes = []\n for i in range(len(points)-1):\n strokes.append({\n 'start': points[i],\n 'end' : points[i+1],\n 'width': width * 2.0, # seems need adjustment by * 2.0\n 'color': rgb_value(color)\n })\n return strokes", "def _to_list(value: Union[Dict[str, Any], List, Tuple, int], name=None, list_length=None):\n if not isinstance(value, (list, tuple)):\n if list_length is not None:\n value = [value] * list_length\n else:\n value = [value]\n if list_length is not None and len(value) != list_length:\n name = '' if name is None else name\n raise ValueError(\"hparams '%s' must be a list of length %d\" % (name, list_length))\n return value", "def device_catalog_path_value_converter(value):\n paths = []\n for path in value:\n pt = tuple(path.split(\"/\"))\n if pt and pt[-2]==\"devices\":\n pt = pt[:-2] + pt[-1:]\n paths.append(pt)\n return paths", "def toWidgetValue(self, value):\n\n if value is self.field.missing_value:\n return value\n else:\n data = []\n for dict_ in value:\n new_dict = AttributeDict()\n for key, value in dict_.items():\n if isinstance(value, list):\n new_dict[safe_utf8(key)] = \\\n [safe_utf8(x) for x in value]\n else:\n new_dict[safe_utf8(key)] = safe_utf8(value)\n data.append(new_dict)\n return data", "def flush_keys(self) -> list[KeyPress]:\n return []", "def topkList(self, key):\n \n return self.execute_command(self.TOPK_LIST, key)", "def escape_value(value: OptionValueType) -> OptionValueType:\n if isinstance(value, str):\n return shlex.quote(value)\n elif isinstance(value, Sequence):\n return [shlex.quote(v) for v in value]\n else:\n return value", "def get_keys(self):\r\n k_list = []\r\n try:\r\n for k in self.buttons:\r\n if self.buttons[k] != 0:\r\n k_list.append(k)\r\n return k_list\r\n except KeyError:\r\n pass\r\n return k_list" ]
[ "0.6031192", "0.52026004", "0.5165081", "0.5160203", "0.50922084", "0.5048257", "0.50478864", "0.5046137", "0.50427055", "0.5000748", "0.4961365", "0.49351156", "0.4862569", "0.48579395", "0.48172843", "0.48017225", "0.47746295", "0.4763067", "0.47456744", "0.4712962", "0.46325272", "0.46269903", "0.45834768", "0.4562473", "0.45433885", "0.45375887", "0.45132434", "0.44997537", "0.44769204", "0.44711274" ]
0.77909297
0
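The row above ships `value_to_key_strokes` with doctest-style examples but without its imports; a self-contained sketch is shown below. `Integral` comes from the standard library, while `Keys` here is a stand-in enum defined only for illustration (the original snippet does not say which key-constant enum it expects).

```python
from enum import Enum
from numbers import Integral

class Keys(Enum):
    # Stand-in for whatever key-constant enum the original helper targets.
    ENTER = "\ue007"
    TAB = "\ue004"

def value_to_key_strokes(value):
    result = []
    if isinstance(value, Integral):
        value = str(value)
    for v in value:
        if isinstance(v, Keys):
            result.append(v.value)      # enum member -> its underlying string
        elif isinstance(v, Integral):
            result.append(str(v))
        else:
            result.append(v)
    return result

assert value_to_key_strokes(123) == ['1', '2', '3']
assert value_to_key_strokes([1, '2', Keys.ENTER]) == ['1', '2', '\ue007']
```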
Augment image and key points, bounding boxes !!
def img_and_key_point_augmentation(augmentation, img, bbox, key_points): # img_copy = img.copy() image_shape = img.shape h, w = image_shape[0:2] # Convert the stochastic sequence of augmenters to a deterministic one. # The deterministic sequence will always apply the exactly same effects to the images. det = augmentation.to_deterministic() ia_bbox = list() for bounding_box in bbox: x1, y1, x2, y2 = bounding_box ia_bbox.append(ia.BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2)) bbs = ia.BoundingBoxesOnImage(ia_bbox, shape=image_shape) bbs_aug = det.augment_bounding_boxes([bbs])[0] # img = bbs_aug.draw_on_image(img) after_bbox = list() for bounding_box in bbs_aug.bounding_boxes: bbox_list = [bounding_box.x1_int, bounding_box.y1_int, bounding_box.x2_int, bounding_box.y2_int] if bbox_list[0] >= w: bbox_list[0] = w - 1 if bbox_list[1] >= h: bbox_list[1] = h - 1 if bbox_list[2] >= w: bbox_list[2] = w - 1 if bbox_list[3] >= h: bbox_list[3] = h - 1 if bbox_list[0] == bbox_list[2] or bbox_list[1] == bbox_list[3]: return img_and_key_point_augmentation(augmentation, img, bbox, key_points) bbox_list = list(map(lambda x: max(x, 0), bbox_list)) after_bbox.append(bbox_list) after_key_points = list() for key_point_list in key_points: after_key_point_list = list() for key_point in key_point_list: xy_points = list() for i, x in enumerate(key_point[::2]): y = key_point[(i * 2) + 1] xy_points.append(ia.Keypoint(x=x, y=y)) keypoints_on_image = det.augment_keypoints([ia.KeypointsOnImage(xy_points, shape=image_shape)]) # img = keypoints_on_image[0].draw_on_image(img) xy_points = list() for key_point in keypoints_on_image[0].keypoints: kp = [key_point.x_int, key_point.y_int] if 0 > min(kp) or w <= max(kp[::2]) or h <= max(kp[1::2]): # print(kp) return img_and_key_point_augmentation(augmentation, img, bbox, key_points) xy_points.extend(kp) after_key_point_list.append(xy_points) after_key_points.append(after_key_point_list) img_aug = det.augment_image(img) assert img_aug.shape == image_shape, "Augmentation shouldn't change image size" return img_aug, after_bbox, after_key_points
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def im_detect_keypoints_aug(model, im, boxes):\n\n # Collect heatmaps predicted under different transformations\n heatmaps_ts = []\n # Tag predictions computed under downscaling and upscaling transformations\n ds_ts = []\n us_ts = []\n\n def add_heatmaps_t(heatmaps_t, ds_t=False, us_t=False):\n heatmaps_ts.append(heatmaps_t)\n ds_ts.append(ds_t)\n us_ts.append(us_t)\n\n # Compute the heatmaps for the original image (identity transform)\n im_scales = im_conv_body_only(model, im)\n heatmaps_i = im_detect_keypoints(model, im_scales, boxes)\n add_heatmaps_t(heatmaps_i)\n\n # Perform keypoints detection on the horizontally flipped image\n if cfg.TEST.KPS_AUG.H_FLIP:\n heatmaps_hf = im_detect_keypoints_hflip(model, im, boxes)\n add_heatmaps_t(heatmaps_hf)\n\n # Compute detections at different scales\n for scale in cfg.TEST.KPS_AUG.SCALES:\n ds_scl = scale < cfg.TEST.SCALES[0]\n us_scl = scale > cfg.TEST.SCALES[0]\n heatmaps_scl = im_detect_keypoints_scale(\n model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes\n )\n add_heatmaps_t(heatmaps_scl, ds_scl, us_scl)\n\n if cfg.TEST.KPS_AUG.SCALE_H_FLIP:\n heatmaps_scl_hf = im_detect_keypoints_scale(\n model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes, hflip=True\n )\n add_heatmaps_t(heatmaps_scl_hf, ds_scl, us_scl)\n\n # Compute keypoints at different aspect ratios\n for aspect_ratio in cfg.TEST.KPS_AUG.ASPECT_RATIOS:\n heatmaps_ar = im_detect_keypoints_aspect_ratio(\n model, im, aspect_ratio, boxes\n )\n add_heatmaps_t(heatmaps_ar)\n\n if cfg.TEST.KPS_AUG.ASPECT_RATIO_H_FLIP:\n heatmaps_ar_hf = im_detect_keypoints_aspect_ratio(\n model, im, aspect_ratio, boxes, hflip=True\n )\n add_heatmaps_t(heatmaps_ar_hf)\n\n # Select the heuristic function for combining the heatmaps\n if cfg.TEST.KPS_AUG.HEUR == 'HM_AVG':\n np_f = np.mean\n elif cfg.TEST.KPS_AUG.HEUR == 'HM_MAX':\n np_f = np.amax\n else:\n raise NotImplementedError(\n 'Heuristic {} not supported'.format(cfg.TEST.KPS_AUG.HEUR)\n )\n\n def heur_f(hms_ts):\n return np_f(hms_ts, axis=0)\n\n # Combine the heatmaps\n if cfg.TEST.KPS_AUG.SCALE_SIZE_DEP:\n heatmaps_c = combine_heatmaps_size_dep(\n heatmaps_ts, ds_ts, us_ts, boxes, heur_f\n )\n else:\n heatmaps_c = heur_f(heatmaps_ts)\n\n return heatmaps_c", "def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):\n nb_images = len(keypoints_on_images)\n samples = self.p.draw_samples((nb_images,), random_state=random_state)\n for i, keypoints_on_image in enumerate(keypoints_on_images):\n if samples[i] == 1:\n for keypoint in keypoints_on_image.keypoints:\n if self.axis == 1:\n width = keypoints_on_image.shape[1]\n keypoint.x = (width - 1) - keypoint.x\n elif self.axis == 0:\n height = keypoints_on_image.shape[0]\n keypoint.y = (height - 1) - keypoint.y\n swapped = keypoints_on_image.keypoints.copy()\n for r in range(len(keypoints_on_image.keypoints)):\n idx = self.swap_index[r]\n if idx >= 0:\n keypoints_on_image.keypoints[r] = swapped[idx]\n return keypoints_on_images", "def img_augmentation(augmentation, img, bbox):\n\n # img_copy = img.copy()\n image_shape = img.shape\n h, w = image_shape[0:2]\n\n # Convert the stochastic sequence of augmenters to a deterministic one.\n # The deterministic sequence will always apply the exactly same effects to the images.\n det = augmentation.to_deterministic()\n img_aug = det.augment_image(img)\n\n ia_bbox = list()\n for bounding_box in bbox:\n x1, y1, x2, y2 = bounding_box\n ia_bbox.append(ia.BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2))\n\n bbs = ia.BoundingBoxesOnImage(ia_bbox, 
shape=image_shape)\n bbs_aug = det.augment_bounding_boxes([bbs])[0]\n # img = bbs_aug.draw_on_image(img)\n\n after_bbox = list()\n for bounding_box in bbs_aug.bounding_boxes:\n bbox_list = [bounding_box.x1_int, bounding_box.y1_int, bounding_box.x2_int, bounding_box.y2_int]\n\n if bbox_list[0] >= w: bbox_list[0] = w - 1\n if bbox_list[1] >= h: bbox_list[1] = h - 1\n if bbox_list[2] >= w: bbox_list[2] = w - 1\n if bbox_list[3] >= h: bbox_list[3] = h - 1\n\n if bbox_list[0] == bbox_list[2] or bbox_list[1] == bbox_list[3]:\n return img_augmentation(augmentation, img, bbox)\n\n bbox_list = list(map(lambda x: max(x, 0), bbox_list))\n after_bbox.append(bbox_list)\n\n assert img_aug.shape == image_shape, \"Augmentation shouldn't change image size\"\n\n return img_aug, after_bbox", "def draw_boxes(image, gt_boxes_norm, pre_boxes_norm):\n # Load Image\n image = (image * 255.0).astype(np.uint8)\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n #image = cv2.add(image,image)\n #image = cv2.bitwise_not(image)\n # Draw prediction boxes\n for pre_box_points in pre_boxes_norm:\n image_shape = np.flip(image.shape[0:2], axis=0)\n\n for pre_box_point_idx in range(len(pre_box_points)):\n\n pre_start_point = pre_box_points[pre_box_point_idx] * image_shape\n pre_end_point = pre_box_points[(pre_box_point_idx + 1) % 4] * image_shape\n\n pre_start_point = pre_start_point.astype(np.int32)\n pre_end_point = pre_end_point.astype(np.int32)\n\n cv2.line(\n image, tuple(pre_start_point),\n tuple(pre_end_point),\n (107,222,35), thickness=1)\n\n # Draw boxes if they exist\n if gt_boxes_norm is not None:\n for gt_box_points in gt_boxes_norm:\n for gt_box_point_idx in range(len(gt_box_points)):\n\n gt_start_point = gt_box_points[gt_box_point_idx] * image_shape\n gt_end_point = gt_box_points[(gt_box_point_idx + 1) % 4] * image_shape\n\n gt_start_point = gt_start_point.astype(np.int32)\n gt_end_point = gt_end_point.astype(np.int32)\n\n cv2.line(\n image, tuple(gt_start_point),\n tuple(gt_end_point),\n (0,0,205), thickness=1)\n\n return image", "def augment_image(im):\n # First crop out the face to save reduce computation load\n bb = im.landmarks['bb'].lms\n bb_vec = bb.as_vector()\n bb_ul = (np.array([bb_vec[0], bb_vec[1]]) - bb.centre()) * 2\n bb_lr = (np.array([bb_vec[4], bb_vec[5]]) - bb.centre()) * 2\n ul = bb_ul + bb.centre()\n lr = bb_lr + bb.centre()\n im = im.crop(ul, lr, constrain_to_boundary=True)\n if im.pixels.shape[0] == 1:\n pix = np.zeros((3, im.pixels.shape[1], im.pixels.shape[2]))\n pix[:,] = im.pixels\n im.pixels = pix\n\n beta = 0.3\n cx = np.random.uniform(-beta, beta)\n cy = np.random.uniform(-beta, beta)\n fx = 1.0\n fy = np.random.uniform(0.6, 1.4)\n max_rotation = 30\n theta = np.random.uniform(-max_rotation, max_rotation)\n\n rotation = menpo.transform.Rotation.init_from_2d_ccw_angle(theta)\n shear = menpo.transform.Affine(np.array([[1, cx, 0],[cy, 1, 0], [0,0,1]]))\n scale = menpo.transform.Affine(np.array([[fx, 0, 0],[0, fy, 0], [0,0,1]]))\n T = scale.compose_after(shear).compose_after(rotation)\n\n t_im = im.transform_about_centre(T)\n\n t_im = add_color_jetting(t_im)\n t_im = add_occlusion(t_im)\n\n\n new_bb = t_im.landmarks['PTS'].lms.bounding_box()\n\n #new_bb contains the gt bounding box\n augmented_bb = add_bb_noise(new_bb)\n augmented_bb = augmented_bb.reshape((4,2))\n augmented_bb = menpo.shape.PointCloud(augmented_bb)\n t_im.landmarks['bb'] = menpo.landmark.LandmarkGroup.init_with_all_label(augmented_bb)\n\n return t_im", "def preprocessing(image_data, max_height, max_width):\n img = 
image_data[\"image\"]\n img = resize_image(img, max_height, max_width)\n gt_boxes = image_data[\"objects\"][\"bbox\"]\n gt_labels = image_data[\"objects\"][\"label\"]\n return img, gt_boxes, gt_labels", "def change_bbox_color(img, boxes, p1, p2):\n points = np.unique(p1 + p2)\n\n for i in points:\n x1, y1, w, h = boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]\n x2, y2 = x1+w, y1+h\n _ = cv2.rectangle(img, (x1, y1), (x2, y2), (0,0,255), 2) \n\n return img", "def resize_image(image, anns, width, height):\n h, w = image.shape[0], image.shape[1]\n c = np.array([image.shape[1] / 2., image.shape[0] / 2.], dtype=np.float32)\n s = max(image.shape[0], image.shape[1]) * 1.0\n trans_output = get_affine_transform(c, s, 0, [width, height])\n out_img = cv2.warpAffine(image, trans_output, (width, height), flags=cv2.INTER_LINEAR)\n\n num_objects = len(anns)\n resize_anno = []\n for i in range(num_objects):\n ann = anns[i]\n bbox = coco_box_to_bbox(ann['bbox'])\n pts = np.array(ann['keypoints'], np.float32).reshape(_NUM_JOINTS, 3)\n\n bbox[:2] = affine_transform(bbox[:2], trans_output)\n bbox[2:] = affine_transform(bbox[2:], trans_output)\n bbox[0::2] = np.clip(bbox[0::2], 0, width - 1)\n bbox[1::2] = np.clip(bbox[1::2], 0, height - 1)\n h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]\n if (h > 0 and w > 0):\n ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n for j in range(_NUM_JOINTS):\n pts[j, :2] = affine_transform(pts[j, :2], trans_output)\n\n bbox = [ct[0] - w / 2, ct[1] - h / 2, w, h, 1]\n keypoints = pts.reshape(_NUM_JOINTS * 3).tolist()\n ann[\"bbox\"] = bbox\n ann[\"keypoints\"] = keypoints\n gt = ann\n resize_anno.append(gt)\n return out_img, resize_anno", "def to_imgaug(self, image_shape):\n image_height, image_width, _ = image_shape\n\n # Create ia bounding boxes from json\n regions = []\n for region in self.regions:\n regions.append(region.to_imgaug(image_width, image_height))\n bbs = BoundingBoxesOnImage(regions, shape=image_shape)\n\n return bbs", "def draw_key_pts(image, keypoints):\n \n # Draw blobs on our image as green circles \n blank = np.zeros((1, 1)) \n image = cv2.drawKeypoints(image, keypoints, blank, (0, 255, 0), \n cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n for curKey in keypoints:\n x=np.int(curKey.pt[0])\n y=np.int(curKey.pt[1])\n #size = np.int(curKey.size)\n image_final = cv2.circle(image,(x,y),2,(255, 0, 0), 3)\n \n return image_final", "def applyKernelToPoints(image,pts,kernel,border_type='BLACK'):\n \n \n pts=np.asarray(pts)\n image=np.asarray(image)\n image.shape\n if len(image.shape)>2:\n grayscale=False\n shaperesult=(len(pts),image.shape[2])\n elif len(image.shape)==1:\n image=image.reshape(1,image.shape[0])\n shaperesult=len(pts)\n grayscale=True\n\n else:\n grayscale=True\n\n # Kernel dimensions - they are integers\n krows=kernel.shape[0] \n kcols=kernel.shape[1]\n\n if krows%2==0:\n # Is even\n ldrows=(krows/2)-1\n udrows=krows/2\n \n else:\n # Is odd\n ldrows=krows/2\n udrows=krows/2\n\n if kcols%2==0:\n # Is even\n ldcols=(kcols/2)-1\n udcols=kcols/2\n else:\n # Is odd\n ldcols=kcols/2\n udcols=kcols/2\n\n #------------------------------------\n # ADD FRAME TO THE ORIGINAL IMAGE\n #------------------------------------\n\n dummyM=image.shape[0]+krows-1\n dummyN=image.shape[1]+kcols-1\n \n if grayscale==True:\n dummyimage=np.asarray(np.zeros((dummyM,dummyN)))\n \n else:\n dummyimage=np.asarray(np.zeros((dummyM,dummyN,image.shape[2])))\n\n if border_type==\"WHITE\":\n dummyimage=dummyimage+255\n\n elif 
border_type==\"ANTIALIAS\":\n # Fills top border\n dummyimage[0:ldrows,ldcols:ldcols+image.shape[1]]=image[image.shape[0]-ldrows:image.shape[0],:]\n\n # Fills bottom border\n dummyimage[(ldrows+image.shape[0]):,ldcols:(ldcols+image.shape[1])]=image[0:udrows,:]\n \n # Fills left border\n dummyimage[ldrows:ldrows+image.shape[0],0:ldcols]=image[:,image.shape[1]-ldcols:]\n\n # Fills right border\n dummyimage[ldrows:ldrows+image.shape[0],(ldcols+image.shape[1]):]=image[:,0:udcols]\n \n # Fills top, left corner\n dummyimage[0:ldrows,0:ldcols]=image[image.shape[0]-ldrows,image.shape[1]-ldcols]\n\n # Fills bottom, left corner\n dummyimage[(ldrows+image.shape[0]):,0:ldcols]=image[0:udrows,(image.shape[1]-ldcols):]\n \n # Fills top, right corner\n dummyimage[0:ldrows,(ldcols+image.shape[1]):]=image[(image.shape[0]-ldrows):,0:udcols]\n \n # Fills bottom, right corner\n dummyimage[(ldrows+image.shape[0]):,(ldcols+image.shape[1]):]=image[0:udrows,0:udcols]\n \n dummyimage[ldrows:ldrows+image.shape[0],ldcols:ldcols+image.shape[1]]=image \n \n result=np.asarray(np.zeros(shaperesult))\n \n pts[:,0]=pts[:,0]+ldrows\n pts[:,1]=pts[:,1]+ldcols\n \n for k in range(len(pts)):\n total=0\n \n for i in range(-ldrows,udrows+1):\n for j in range(-ldcols,udcols+1):\n total=total+dummyimage[i+pts[k,0],j+pts[k,1]]*kernel[i+ldrows,j+ldcols]\n \n \n result[k]=total\n \n \n return result", "def add_bbox(self, image_path, image_size, obj):\n\n if image_path not in self.img_path_2_img_id:\n self.img_path_2_img_id[image_path] = len(self.img_path_2_img_id)\n\n self.annotation['images'].append({\n \"file_name\": image_path,\n \"height\": image_size[1],\n \"width\": image_size[0],\n \"id\": self.img_path_2_img_id[image_path]\n })\n\n new_ann_id = len(self.annotation['annotations'])\n self.img_id_2_ann_id[self.img_path_2_img_id[image_path]].append(new_ann_id)\n bbox = self.fit_box_in_image(obj['bbox'], image_size)\n\n if bbox:\n self.annotation['annotations'].append({\n \"bbox\": bbox, # x, y, w, h\n \"segmentation\": obj['segmentation'],\n \"attributes\": obj['attributes'],\n \"ignore\": 0,\n \"id\": new_ann_id,\n \"image_id\": self.img_path_2_img_id[image_path],\n \"area\": obj['bbox'][2] * obj['bbox'][3],\n \"iscrowd\": 1 - int(obj['attributes']['legible']),\n \"category_id\": self.label_map['text']\n })", "def encode_bboxes(ann, bboxes, img_name):\n\n ann_root = ann.getroot()\n\n folder = ET.Element(\"folder\")\n folder.text = ann_root.find('folder').text\n filename = ET.Element(\"filename\")\n filename.text = img_name\n path = ET.Element(\"path\")\n path.text = ann_root.find('folder').text + '/' + img_name\n source = ET.Element(\"source\")\n database = ET.Element(\"database\")\n database.text = ann_root.find(\"source\").find('database').text\n source.append(database)\n size = ET.Element(\"size\")\n width = ET.Element(\"width\")\n width.text = ann_root.find(\"size\").find('width').text\n height = ET.Element(\"height\")\n height.text = ann_root.find(\"size\").find('height').text\n depth = ET.Element(\"depth\")\n depth.text = ann_root.find(\"size\").find('depth').text\n size.append(width)\n size.append(height)\n size.append(depth)\n segmented = ET.Element(\"segmented\")\n segmented.text = ann_root.find('segmented').text\n\n new_root = ET.Element(\"annotation\")\n new_root.append(folder)\n new_root.append(filename)\n new_root.append(path)\n new_root.append(source)\n new_root.append(size)\n new_root.append(segmented)\n\n for b in bboxes:\n xmin = ET.Element(\"xmin\")\n xmin.text = str(int(b[0]))\n ymin = ET.Element(\"ymin\")\n 
ymin.text = str(int(b[1]))\n xmax = ET.Element(\"xmax\")\n xmax.text = str(int(b[2]))\n ymax = ET.Element(\"ymax\")\n ymax.text = str(int(b[3]))\n name = ET.Element(\"name\")\n name.text = self.classes[int(b[4])]\n bndbox = ET.Element(\"bndbox\")\n bndbox.append(xmin)\n bndbox.append(ymin)\n bndbox.append(xmax)\n bndbox.append(ymax)\n pose = ET.Element(\"pose\")\n truncated = ET.Element(\"truncated\")\n difficult = ET.Element(\"difficult\")\n pose.text = \"Unspecified\"\n truncated.text = \"0\"\n difficult.text = \"0\"\n obj = ET.Element(\"object\")\n obj.append(name)\n obj.append(pose)\n obj.append(truncated)\n obj.append(difficult)\n obj.append(bndbox)\n\n new_root.append(obj)\n\n new_tree = ET.ElementTree(new_root)\n\n return new_tree", "def draw_bounding_boxes(self, image_path):\n img = cv.imread(image_path, cv.IMREAD_ANYDEPTH)\n bboxes = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n unique, counts = np.unique(img, return_counts=True)\n for uni in unique:\n if uni == 0:\n continue\n self.get_instance_bounding_box(img, bboxes, uni)\n\n cv.namedWindow('building bounding boxes', cv.WINDOW_NORMAL)\n cv.imshow('building bounding boxes', bboxes)\n cv.waitKey(0)\n cv.destroyAllWindows()", "def show_bounding_boxes(dir_path: str) -> None:\r\n \r\n for image_file in glob.glob(dir_path + '/*.png'):\r\n image = cv2.imread(image_file)\r\n height, width, _ = image.shape\r\n\r\n with open(image_file.split(\".\")[0] +'.txt', 'r') as reader:\r\n annotations = reader.readlines()\r\n for annot in annotations:\r\n annot = annot.split()\r\n \r\n # Calculation of top left point and bottom right point of the bounding box \r\n x1, y1 = int((float(annot[1]) - float(annot[3])/2)*width), int((float(annot[2]) - float(annot[4])/2)*height)\r\n x2, y2 = int((float(annot[1]) + float(annot[3])/2)*width), int((float(annot[2]) + float(annot[4])/2)*height)\r\n \r\n # BGR color format\r\n if annot[0] == '0':\r\n color = (0,255,0) # Mask is worn correctly (Green color)\r\n label = 'Good'\r\n else:\r\n color = (0,0,255) # Mask is either not worn correctly or not worn at all (Red color)\r\n label = 'Bad'\r\n \r\n cv2.putText(image,\r\n label, \r\n (x1, y1 - 10),\r\n fontFace=cv2.FONT_HERSHEY_TRIPLEX,\r\n fontScale=0.5, \r\n color=color,\r\n thickness=1) \r\n \r\n cv2.rectangle(image, (x1, y1), (x2, y2), color, thickness=1)\r\n \r\n k = cv2.waitKey(0) & 0xFF\r\n cv2.imshow(image_file.split(\"sss\")[-1], image)\r\n if k == 27:\r\n cv2.destroyAllWindows()\r\n break", "def _train_aug(self, results):\n img = results['img']\n h, w, c = img.shape\n boxes = results['gt_bboxes']\n while True:\n scale = random.choice(self.ratios)\n new_h = int(self.crop_size[0] * scale)\n new_w = int(self.crop_size[1] * scale)\n h_border = self._get_border(self.border, h)\n w_border = self._get_border(self.border, w)\n\n for i in range(50):\n center_x = random.randint(low=w_border, high=w - w_border)\n center_y = random.randint(low=h_border, high=h - h_border)\n\n cropped_img, border, patch = self._crop_image_and_paste(\n img, [center_y, center_x], [new_h, new_w])\n\n mask = self._filter_boxes(patch, boxes)\n # if image do not have valid bbox, any crop patch is valid.\n if not mask.any() and len(boxes) > 0:\n continue\n\n results['img'] = cropped_img\n results['img_shape'] = cropped_img.shape\n results['pad_shape'] = cropped_img.shape\n\n x0, y0, x1, y1 = patch\n\n left_w, top_h = center_x - x0, center_y - y0\n cropped_center_x, cropped_center_y = new_w // 2, new_h // 2\n\n # crop bboxes accordingly and clip to the image boundary\n for 
key in results.get('bbox_fields', []):\n mask = self._filter_boxes(patch, results[key])\n bboxes = results[key][mask]\n bboxes[:, 0:4:2] += cropped_center_x - left_w - x0\n bboxes[:, 1:4:2] += cropped_center_y - top_h - y0\n if self.bbox_clip_border:\n bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)\n bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)\n keep = (bboxes[:, 2] > bboxes[:, 0]) & (\n bboxes[:, 3] > bboxes[:, 1])\n bboxes = bboxes[keep]\n results[key] = bboxes\n if key in ['gt_bboxes']:\n if 'gt_labels' in results:\n labels = results['gt_labels'][mask]\n labels = labels[keep]\n results['gt_labels'] = labels\n if 'gt_masks' in results:\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n\n # crop semantic seg\n for key in results.get('seg_fields', []):\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n return results", "def show_bboxes(img, bounding_boxes=None, facial_landmarks=[]):\n\n img_copy = img.copy()\n draw = ImageDraw.Draw(img_copy)\n# for b in bounding_boxes:\n# draw.rectangle([\n# (b[0], b[1]), (b[2], b[3])\n# ], outline='white')\n\n for p in facial_landmarks:\n for i in range(106):\n draw.ellipse([\n (p[i*2] - 1.0, p[2*i + 1] - 1.0),\n (p[i*2] + 1.0, p[2*i+1] + 1.0)\n ], outline='blue')\n font = ImageFont.truetype(\"arial.ttf\", 10)\n draw.text([p[2*i], p[2*i+1]], str(i), font=font)\n\n return img_copy", "def boundingbox_location_change(boundingbox, file_name, dir):\n root = dir\n jpgfile = file_name[0:-3]+'jpg'\n img = cv2.imread(root+jpgfile)\n shape = img.shape\n print (img.shape)\n print boundingbox\n new_boundingbox = []\n\n for b in boundingbox:\n axis = b.split(' ')\n x = float(axis[1])\n x = x * shape[0]\n y = float(axis[2])\n y = y*shape[1]\n width = float(axis[3])\n width = width * shape[0]\n heights = float(axis[4])\n heights = heights * shape[1]\n\n xmin = x - 0.5*width\n ymin = y - 0.5*heights\n xmax = x + 0.5*width\n ymax = y + 0.5*heights\n new_b = [int(xmin), int(ymin), int(xmax), int(ymax)]\n new_boundingbox.append(new_b)\n return new_boundingbox", "def process_image_bbox(image, bbox, labels, file_name):\n bounds, classes, scores = postprocessing(bbox, image)\n image_processed = annotate(image, bounds, classes, scores, labels)\n image_processed.save(file_name, 'png')\n return image_processed", "def __getitem__(self, idx):\r\n # Get a specific image id from the list of image ids.\r\n img_index = self.img_indices[idx]\r\n \r\n # Load the image\r\n img_name = os.path.join(self.img_dir, get_image_name(img_index))\r\n img = cv2.imread(img_name).transpose(1, 0, 2)/255\r\n original_shape = img.shape[:2]\r\n # Resize image to 400x400 dimensions.\r\n img = cv2.resize(normalize(img), (img_size, img_size))\r\n # Get the annotation id of the annotaions about the image.\r\n annotations_indices = self.coco.getAnnIds(img_index)\r\n # Load the annotations from the annotaion ids.\r\n annotations = self.coco.loadAnns(annotations_indices) \r\n keypoints = []\r\n mask = np.zeros((img_size // transform_scale, img_size // transform_scale),\r\n np.uint8)\r\n for annotation in annotations:\r\n if annotation['num_keypoints'] != 0:\r\n keypoints.append(annotation['keypoints'])\r\n mask = mask | cv2.resize(self.coco.annToMask(annotation),\r\n (img_size // transform_scale,\r\n img_size // transform_scale))\r\n \r\n # Add neck joints to the list of keypoints\r\n keypoints = add_neck_joint(keypoints)\r\n # Adjust keypoints according to resized images.\r\n keypoints = adjust_keypoints(keypoints, original_shape)\r\n \r\n 
conf_maps = generate_confidence_maps(keypoints)\r\n paf = generate_paf(keypoints)\r\n paf = paf.reshape(paf.shape[0], paf.shape[1], paf.shape[2] * paf.shape[3])\r\n \r\n return img, conf_maps, paf, mask.transpose()", "def _get_bounding_boxes(self, imgs, summed_viz, threshold_value=.7):\n self.viz = summed_viz # for debug\n viz = summed_viz\n n_batchs = viz.shape[ 0]\n n_classes = viz.shape[-1]\n \n # viz.shape (100,14,14,20) => (14,14,100,20)\n viz = viz.swapaxes(0,2); viz = viz.swapaxes(0,1)\n \n # Normalize <viz>, image per image (to be in range [-1,1])\n viz = viz / np.max(np.abs(viz), axis=(0,1))\n viz = (viz+1)/2 # range[0,1]\n \n # Resize each summed_viz to its original size (size of input image)\n if viz.shape[:2] != imgs.shape[1:3]:\n viz = np.array(\n [ skimage.transform.resize(viz[:,:,idx], imgs[idx].shape[:2])\n for idx in range(len(imgs))\n if viz.shape[0] != imgs.shape[1]\n ] )\n viz = viz.swapaxes(0,2); viz = viz.swapaxes(0,1)\n \n # Threshold <viz>s to keep values over 70% of its max values\n m_max = threshold_value * viz.max(axis=(0,1))\n viz = viz * (m_max < viz)\n \n # We want a 2d boundind box, so project threshold in xs and ys\n xxs = viz.sum(axis=0)\n yys = viz.sum(axis=1)\n \n # Get some non-thresholded values (left, top... of bounding boxes)\n get_lefts = lambda b_id, c_idx: xxs[:,b_id,c_idx].nonzero()[0][ 0]\n get_tops = lambda b_id, c_idx: yys[:,b_id,c_idx].nonzero()[0][-1]\n get_rights = lambda b_id, c_idx: xxs[:,b_id,c_idx].nonzero()[0][-1]\n get_bottoms = lambda b_id, c_idx: yys[:,b_id,c_idx].nonzero()[0][ 0]\n\n # Debug\n # def get_lefts (b_id, c_idx): \n # print xxs[:,b_id,c_idx].nonzero()\n # xxs[:,b_id,c_idx].nonzero()[0][ 0]\n \n # Build the 2d array with first or lasts positions of zeros\n # INNER FUNCTION\n def _get_border_array(f_border=get_lefts):\n return np.array(\n [ map(f_border, [b_idx]*n_classes, range(n_classes))\n for b_idx in range(n_batchs) ]\n )\n \n lefts = _get_border_array(get_lefts)\n tops = _get_border_array(get_tops)\n rights = _get_border_array(get_rights)\n bottoms = _get_border_array(get_bottoms)\n \n return lefts, tops, rights, bottoms", "def compute(self, image, keypoints, imgDescriptor=None): # real signature unknown; restored from __doc__\n pass", "def draw_bbox(image, bboxes, masks, class_ids, class_names, scores, colors, show_label=True, show_mask=True):\n image_h, image_w, _ = image.shape\n\n for i, bbox in enumerate(bboxes):\n y1, x1, y2, x2 = bbox[i]\n coor = np.array([x1, y1, x2, y2], dtype=np.int32)\n fontScale = 0.5\n score = scores[i]\n class_ind = int(class_ids[i])\n bbox_color = colors[class_ind]\n bbox_thick = int(0.6 * (image_h + image_w) / 600)\n c1, c2 = (coor[0], coor[1]), (coor[2], coor[3])\n cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)\n\n if show_label:\n bbox_mess = '%s: %.2f' % (class_names[class_ind], score)\n t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0]\n cv2.rectangle(image, c1, (c1[0] + t_size[0], c1[1] - t_size[1] - 3), bbox_color, -1) # filled\n\n cv2.putText(image, bbox_mess, (c1[0], c1[1] - 2), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale, (0, 0, 0), bbox_thick // 2, lineType=cv2.LINE_AA)\n\n # Mask\n mask = masks[:, :, i]\n if show_mask:\n image = apply_mask(image, mask, bbox_color)\n\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n pts = np.array(contours[0], 
np.int32)\n pts = pts.reshape((-1, 1, 2))\n # image = cv2.polylines(image, [pts], True, bbox_color)\n\n return image", "def overlay_boxes(image, predictions):\n labels = predictions.get_field(\"labels\")\n boxes = predictions.bbox\n\n for c_label in DRAW_ORDER:\n for box, label in zip(boxes, labels):\n if label == c_label:\n color = CATEGORIES_COLOR[label] # [int(color[0]), int(color[1]), int(color[2])]\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(\n image, tuple(top_left), tuple(bottom_right), tuple(color), 3\n )\n\n return image", "def transform_val(self, sample):\n img = sample[\"image\"]\n bboxes = sample[\"bboxes\"]\n\n imgH = img.shape[0]\n imgW = img.shape[1]\n\n if imgW / imgH < 2.5:\n scale_factor = min(self.args.img_size[0] / imgH, self.args.img_size[1] / imgW)\n else:\n scale_factor = 1.0\n\n random_scale = np.random.randint(8, 11) / 10\n\n if bboxes.size == 0:\n bboxes = np.array([[0.1, 0.1, 0.1, 0.1, 0.0, 0.0]]) # this is just a dummy - all values must be inside (0,1)\n\n annotations = {'image': img, 'bboxes': bboxes}\n\n transforms = ([#Resize(height=int(scale_factor * imgH), width=int(scale_factor * imgW),\n # p=1.0),\n # PadIfNeeded(min_height=self.args.img_size[0], min_width=self.args.img_size[1],\n # border_mode=cv2.BORDER_REPLICATE,\n # p=1.0),\n # changing image size - mainting aspect ratio for later resize\n # OneOf([RandomCrop(height=self.args.img_size[0], width=self.args.img_size[1], p=0.5),\n # RandomCrop(height=int(random_scale * self.args.img_size[0]),\n # width=int(random_scale * self.args.img_size[1]), p=0.5)], p=1.0),\n # making sure resize fits with yolo input size\n Resize(height=self.args.img_size[0], width=self.args.img_size[1], p=1.0),\n Normalize(p=1.0)])\n\n preform_augmentation = Compose(transforms, bbox_params=BboxParams(format='yolo',\n min_visibility=0.3))\n augmented_sample = preform_augmentation(**annotations)\n\n augmented_sample[\"bboxes\"] = np.array(augmented_sample[\"bboxes\"])\n\n return augmented_sample", "def overlay_boxes(self, image, predictions):\n labels = predictions.get_field(\"labels\")\n boxes = predictions.bbox\n\n colors = self.compute_colors_for_labels(labels).tolist()\n\n for box, color in zip(boxes, colors):\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(\n image, tuple(top_left), tuple(bottom_right), tuple(color), 1\n )\n\n return image", "def overlay_boxes(self, image, predictions):\n labels = predictions.get_field(\"labels\")\n boxes = predictions.bbox\n\n colors = self.compute_colors_for_labels(labels).tolist()\n\n for box, color in zip(boxes, colors):\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(\n image, tuple(top_left), tuple(bottom_right), tuple(color), 1\n )\n\n return image", "def _images_and_boxes_preprocessing_cv2(self, imgs, boxes):\r\n\r\n height, width, _ = imgs[0].shape\r\n\r\n boxes[:, [0, 2]] *= width\r\n boxes[:, [1, 3]] *= height\r\n boxes = cv2_transform.clip_boxes_to_image(boxes, height, width)\r\n\r\n # `transform.py` is list of np.array. 
However, for AVA, we only have\r\n # one np.array.\r\n boxes = [boxes]\r\n # The image now is in HWC, BGR format.\r\n if self._split == \"train\": # \"train\"\r\n imgs, boxes = cv2_transform.random_short_side_scale_jitter_list(\r\n imgs,\r\n min_size=self._jitter_min_scale,\r\n max_size=self._jitter_max_scale,\r\n boxes=boxes,\r\n )\r\n imgs, boxes = cv2_transform.random_crop_list(\r\n imgs, self._crop_size, order=\"HWC\", boxes=boxes\r\n )\r\n if self.random_horizontal_flip:\r\n # random flip\r\n imgs, boxes = cv2_transform.horizontal_flip_list(\r\n 0.5, imgs, order=\"HWC\", boxes=boxes\r\n )\r\n elif self._split == \"val\":\r\n # Short side to test_scale. Non-local and STRG uses 256.\r\n imgs = [cv2_transform.scale(self._crop_size, img) for img in imgs]\r\n boxes = [\r\n cv2_transform.scale_boxes(self._crop_size, boxes[0], height, width)\r\n ]\r\n imgs, boxes = cv2_transform.spatial_shift_crop_list(\r\n self._crop_size, imgs, 1, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = cv2_transform.horizontal_flip_list(\r\n 1, imgs, order=\"HWC\", boxes=boxes\r\n )\r\n elif self._split == \"test\":\r\n # Short side to test_scale. Non-local and STRG uses 256.\r\n imgs = [cv2_transform.scale(self._crop_size, img) for img in imgs]\r\n boxes = [\r\n cv2_transform.scale_boxes(self._crop_size, boxes[0], height, width)\r\n ]\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = cv2_transform.horizontal_flip_list(\r\n 1, imgs, order=\"HWC\", boxes=boxes\r\n )\r\n else:\r\n raise NotImplementedError(\"Unsupported split mode {}\".format(self._split))\r\n\r\n # Convert image to CHW keeping BGR order.\r\n imgs = [cv2_transform.HWC2CHW(img) for img in imgs]\r\n\r\n # Image [0, 255] -> [0, 1].\r\n imgs = [img / 255.0 for img in imgs]\r\n\r\n imgs = [\r\n np.ascontiguousarray(\r\n # img.reshape((3, self._crop_size, self._crop_size))\r\n img.reshape((3, imgs[0].shape[1], imgs[0].shape[2]))\r\n ).astype(np.float32)\r\n for img in imgs\r\n ]\r\n\r\n # Do color augmentation (after divided by 255.0).\r\n if self._split == \"train\" and self._use_color_augmentation:\r\n if not self._pca_jitter_only:\r\n imgs = cv2_transform.color_jitter_list(\r\n imgs, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4\r\n )\r\n\r\n imgs = cv2_transform.lighting_list(\r\n imgs,\r\n alphastd=0.1,\r\n eigval=np.array(self._pca_eigval).astype(np.float32),\r\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\r\n )\r\n\r\n # Normalize images by mean and std.\r\n imgs = [\r\n cv2_transform.color_normalization(\r\n img,\r\n np.array(self._data_mean, dtype=np.float32),\r\n np.array(self._data_std, dtype=np.float32),\r\n )\r\n for img in imgs\r\n ]\r\n\r\n # Concat list of images to single ndarray.\r\n imgs = np.concatenate([np.expand_dims(img, axis=1) for img in imgs], axis=1)\r\n\r\n if not self._use_bgr:\r\n # Convert image format from BGR to RGB.\r\n imgs = imgs[::-1, ...]\r\n\r\n imgs = np.ascontiguousarray(imgs)\r\n imgs = torch.from_numpy(imgs)\r\n boxes = cv2_transform.clip_boxes_to_image(\r\n boxes[0], imgs[0].shape[1], imgs[0].shape[2]\r\n )\r\n return imgs, boxes", "def _add_roidb_from_annotations(self, entry):\n ann_ids = self._COCO.getAnnIds(imgIds=entry['id'], iscrowd=None)\n objs = self._COCO.loadAnns(ann_ids)\n width = entry['width']\n height = entry['height']\n # valid objs\n # change the annotation boxes from 'xywh' to 'xyxy'\n valid_objs = []\n for obj in objs:\n x1 = np.max((0, obj['bbox'][0]))\n y1 = np.max((0, obj['bbox'][1]))\n x2 = np.min((width, x1 + np.max((0, obj['bbox'][2]))))\n y2 = 
np.min((height, y1 + np.max((0, obj['bbox'][3]))))\n if obj['area'] > 0 and x2 >= x1 and y2 >= y1:\n obj['clean_box'] = [x1, y1, x2, y2]\n valid_objs.append(obj)\n objs = valid_objs\n num_objs = len(objs)\n\n bboxes = np.zeros((num_objs, 4), dtype=entry['bboxes'].dtype)\n gt_classes = np.zeros((num_objs), dtype=entry['gt_classes'].dtype)\n\n coco_cat_id_to_class_ind = dict(\n [(self._class_to_coco_cat_id[cls], self._class_to_ind[cls]) for cls in self._classes[1:]])\n for ix, obj in enumerate(objs):\n bboxes[ix, :] = obj['clean_box']\n gt_classes[ix] = coco_cat_id_to_class_ind[obj['category_id']]\n entry['bboxes'] = np.append(entry['bboxes'], bboxes, axis=0)\n entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes)", "def postprocess_boxes(pred_bbox, original_image, train_input_size, score_threshold):\n \n # valid scle for box\n valid_scale=[0, np.inf]\n \n # turn bbox to array\n pred_bbox = np.array(pred_bbox)\n \n # obtain predicted x, y, w, h, objectiveness score, class probabilities\n pred_xywh = pred_bbox[:, 0:4]\n pred_objectiveness = pred_bbox[:, 4]\n pred_prob = pred_bbox[:, 5:]\n \n # 1. (x, y, w, h) --> (x_org, y_org, w_org, h_org)\n # obtain original image width and height\n org_h, org_w = original_image.shape[:2]\n \n # obtain resize ratio for height and width \n resize_ratio_h = train_input_size / org_h\n resize_ratio_w = train_input_size / org_w\n \n # scale x, y, w, h to original x, y, w, h\n pred_coor = np.concatenate([np.expand_dims(pred_xywh[:, 0] / resize_ratio_w, axis = -1), \n np.expand_dims(pred_xywh[:, 1] / resize_ratio_h, axis = -1),\n np.expand_dims(pred_xywh[:, 2] / resize_ratio_w, axis = -1),\n np.expand_dims(pred_xywh[:, 3] / resize_ratio_h, axis = -1),], axis = -1)\n \n # 2. (x_org, y_org, w_org, h_org) --> (xmin_org, ymin_org, xmax_org, ymax_org)\n # obtain diagonal image coordinates\n pred_coor = np.concatenate([pred_coor[:, :2] - pred_coor[:, 2:] * 0.5,\n pred_coor[:, :2] + pred_coor[:, 2:] * 0.5], axis = -1)\n\n # 3. clip some boxes those are out of range\n # clip bboxes where xmin_org, ymin_org < 0 and xmax_org, ymax_org out of bounds\n pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),\n np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis = -1)\n \n # mask that ensure that if xmin < xmax, ymin /> ymax and vice versa\n invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))\n pred_coor[invalid_mask] = 0\n\n # 4. discard some invalid boxes\n bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis = -1))\n scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))\n\n # 5. discard boxes with low scores\n # obtain index of class with max prob for each bbox\n classes = np.argmax(pred_prob, axis = -1)\n \n # multiply max prob with objectivness score for each bbox\n scores = pred_objectiveness * pred_prob[np.arange(len(pred_coor)), classes]\n \n # obtain score mask based on score threshold\n score_mask = scores > score_threshold\n \n # obtain combined mask\n mask = np.logical_and(scale_mask, score_mask)\n \n # obtain coordinates, scores and classes after mask\n coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]\n \n # return concatenated results \n return np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis = -1)" ]
[ "0.6619781", "0.65760165", "0.6573601", "0.64467424", "0.63438433", "0.6319195", "0.6261868", "0.6209508", "0.6207679", "0.62067044", "0.61714906", "0.6164397", "0.6108835", "0.60427105", "0.6042169", "0.60405695", "0.6033405", "0.60134506", "0.6002291", "0.5998151", "0.59661263", "0.5947906", "0.59446734", "0.59387404", "0.59248906", "0.5881058", "0.5881058", "0.58758736", "0.587456", "0.58668584" ]
0.7612929
0
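The augmentation row above and the bounding-box-only row that follows both take an `imgaug` augmenter as their first argument, as implied by the `ia.BoundingBox` / `to_deterministic()` calls in their documents. A minimal sketch of building such a pipeline and calling the keypoint variant is shown below; the pipeline, dummy image, boxes, and keypoints are made-up example data, and `img_and_key_point_augmentation` is the function from the row's document, assumed to be defined in scope.

```python
import numpy as np
import imgaug as ia                      # used inside the row's function (ia.BoundingBox, ...)
from imgaug import augmenters as iaa

# Small stochastic pipeline; to_deterministic() inside the function keeps the
# image, boxes, and keypoints transformed consistently per call.
augmentation = iaa.Sequential([
    iaa.Fliplr(0.5),
    iaa.Affine(rotate=(-10, 10), scale=(0.9, 1.1)),
])

img = np.zeros((240, 320, 3), dtype=np.uint8)    # dummy RGB image
bbox = [[30, 40, 120, 160]]                      # one box as [x1, y1, x2, y2]
key_points = [[[50, 60, 70, 80]]]                # flat x, y pairs per instance

img_aug, bbox_aug, kps_aug = img_and_key_point_augmentation(
    augmentation, img, bbox, key_points)
```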
Augment image and bounding boxes !!
def img_augmentation(augmentation, img, bbox): # img_copy = img.copy() image_shape = img.shape h, w = image_shape[0:2] # Convert the stochastic sequence of augmenters to a deterministic one. # The deterministic sequence will always apply the exactly same effects to the images. det = augmentation.to_deterministic() img_aug = det.augment_image(img) ia_bbox = list() for bounding_box in bbox: x1, y1, x2, y2 = bounding_box ia_bbox.append(ia.BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2)) bbs = ia.BoundingBoxesOnImage(ia_bbox, shape=image_shape) bbs_aug = det.augment_bounding_boxes([bbs])[0] # img = bbs_aug.draw_on_image(img) after_bbox = list() for bounding_box in bbs_aug.bounding_boxes: bbox_list = [bounding_box.x1_int, bounding_box.y1_int, bounding_box.x2_int, bounding_box.y2_int] if bbox_list[0] >= w: bbox_list[0] = w - 1 if bbox_list[1] >= h: bbox_list[1] = h - 1 if bbox_list[2] >= w: bbox_list[2] = w - 1 if bbox_list[3] >= h: bbox_list[3] = h - 1 if bbox_list[0] == bbox_list[2] or bbox_list[1] == bbox_list[3]: return img_augmentation(augmentation, img, bbox) bbox_list = list(map(lambda x: max(x, 0), bbox_list)) after_bbox.append(bbox_list) assert img_aug.shape == image_shape, "Augmentation shouldn't change image size" return img_aug, after_bbox
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocessing(image_data, max_height, max_width):\n img = image_data[\"image\"]\n img = resize_image(img, max_height, max_width)\n gt_boxes = image_data[\"objects\"][\"bbox\"]\n gt_labels = image_data[\"objects\"][\"label\"]\n return img, gt_boxes, gt_labels", "def to_imgaug(self, image_shape):\n image_height, image_width, _ = image_shape\n\n # Create ia bounding boxes from json\n regions = []\n for region in self.regions:\n regions.append(region.to_imgaug(image_width, image_height))\n bbs = BoundingBoxesOnImage(regions, shape=image_shape)\n\n return bbs", "def add_bbox(self, image_path, image_size, obj):\n\n if image_path not in self.img_path_2_img_id:\n self.img_path_2_img_id[image_path] = len(self.img_path_2_img_id)\n\n self.annotation['images'].append({\n \"file_name\": image_path,\n \"height\": image_size[1],\n \"width\": image_size[0],\n \"id\": self.img_path_2_img_id[image_path]\n })\n\n new_ann_id = len(self.annotation['annotations'])\n self.img_id_2_ann_id[self.img_path_2_img_id[image_path]].append(new_ann_id)\n bbox = self.fit_box_in_image(obj['bbox'], image_size)\n\n if bbox:\n self.annotation['annotations'].append({\n \"bbox\": bbox, # x, y, w, h\n \"segmentation\": obj['segmentation'],\n \"attributes\": obj['attributes'],\n \"ignore\": 0,\n \"id\": new_ann_id,\n \"image_id\": self.img_path_2_img_id[image_path],\n \"area\": obj['bbox'][2] * obj['bbox'][3],\n \"iscrowd\": 1 - int(obj['attributes']['legible']),\n \"category_id\": self.label_map['text']\n })", "def overlay_boxes(self, image, predictions):\n labels = predictions.get_field(\"labels\")\n boxes = predictions.bbox\n\n colors = self.compute_colors_for_labels(labels).tolist()\n\n for box, color in zip(boxes, colors):\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(\n image, tuple(top_left), tuple(bottom_right), tuple(color), 1\n )\n\n return image", "def overlay_boxes(self, image, predictions):\n labels = predictions.get_field(\"labels\")\n boxes = predictions.bbox\n\n colors = self.compute_colors_for_labels(labels).tolist()\n\n for box, color in zip(boxes, colors):\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(\n image, tuple(top_left), tuple(bottom_right), tuple(color), 1\n )\n\n return image", "def change_bbox_color(img, boxes, p1, p2):\n points = np.unique(p1 + p2)\n\n for i in points:\n x1, y1, w, h = boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]\n x2, y2 = x1+w, y1+h\n _ = cv2.rectangle(img, (x1, y1), (x2, y2), (0,0,255), 2) \n\n return img", "def boundingbox_location_change(boundingbox, file_name, dir):\n root = dir\n jpgfile = file_name[0:-3]+'jpg'\n img = cv2.imread(root+jpgfile)\n shape = img.shape\n print (img.shape)\n print boundingbox\n new_boundingbox = []\n\n for b in boundingbox:\n axis = b.split(' ')\n x = float(axis[1])\n x = x * shape[0]\n y = float(axis[2])\n y = y*shape[1]\n width = float(axis[3])\n width = width * shape[0]\n heights = float(axis[4])\n heights = heights * shape[1]\n\n xmin = x - 0.5*width\n ymin = y - 0.5*heights\n xmax = x + 0.5*width\n ymax = y + 0.5*heights\n new_b = [int(xmin), int(ymin), int(xmax), int(ymax)]\n new_boundingbox.append(new_b)\n return new_boundingbox", "def process_image_bbox(image, bbox, labels, file_name):\n bounds, classes, scores = postprocessing(bbox, image)\n image_processed = annotate(image, bounds, classes, scores, labels)\n image_processed.save(file_name, 'png')\n return image_processed", "def 
draw_bounding_boxes(self, image_path):\n img = cv.imread(image_path, cv.IMREAD_ANYDEPTH)\n bboxes = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n unique, counts = np.unique(img, return_counts=True)\n for uni in unique:\n if uni == 0:\n continue\n self.get_instance_bounding_box(img, bboxes, uni)\n\n cv.namedWindow('building bounding boxes', cv.WINDOW_NORMAL)\n cv.imshow('building bounding boxes', bboxes)\n cv.waitKey(0)\n cv.destroyAllWindows()", "def _train_aug(self, results):\n img = results['img']\n h, w, c = img.shape\n boxes = results['gt_bboxes']\n while True:\n scale = random.choice(self.ratios)\n new_h = int(self.crop_size[0] * scale)\n new_w = int(self.crop_size[1] * scale)\n h_border = self._get_border(self.border, h)\n w_border = self._get_border(self.border, w)\n\n for i in range(50):\n center_x = random.randint(low=w_border, high=w - w_border)\n center_y = random.randint(low=h_border, high=h - h_border)\n\n cropped_img, border, patch = self._crop_image_and_paste(\n img, [center_y, center_x], [new_h, new_w])\n\n mask = self._filter_boxes(patch, boxes)\n # if image do not have valid bbox, any crop patch is valid.\n if not mask.any() and len(boxes) > 0:\n continue\n\n results['img'] = cropped_img\n results['img_shape'] = cropped_img.shape\n results['pad_shape'] = cropped_img.shape\n\n x0, y0, x1, y1 = patch\n\n left_w, top_h = center_x - x0, center_y - y0\n cropped_center_x, cropped_center_y = new_w // 2, new_h // 2\n\n # crop bboxes accordingly and clip to the image boundary\n for key in results.get('bbox_fields', []):\n mask = self._filter_boxes(patch, results[key])\n bboxes = results[key][mask]\n bboxes[:, 0:4:2] += cropped_center_x - left_w - x0\n bboxes[:, 1:4:2] += cropped_center_y - top_h - y0\n if self.bbox_clip_border:\n bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)\n bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)\n keep = (bboxes[:, 2] > bboxes[:, 0]) & (\n bboxes[:, 3] > bboxes[:, 1])\n bboxes = bboxes[keep]\n results[key] = bboxes\n if key in ['gt_bboxes']:\n if 'gt_labels' in results:\n labels = results['gt_labels'][mask]\n labels = labels[keep]\n results['gt_labels'] = labels\n if 'gt_masks' in results:\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n\n # crop semantic seg\n for key in results.get('seg_fields', []):\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n return results", "def draw_boxes(image, gt_boxes_norm, pre_boxes_norm):\n # Load Image\n image = (image * 255.0).astype(np.uint8)\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n #image = cv2.add(image,image)\n #image = cv2.bitwise_not(image)\n # Draw prediction boxes\n for pre_box_points in pre_boxes_norm:\n image_shape = np.flip(image.shape[0:2], axis=0)\n\n for pre_box_point_idx in range(len(pre_box_points)):\n\n pre_start_point = pre_box_points[pre_box_point_idx] * image_shape\n pre_end_point = pre_box_points[(pre_box_point_idx + 1) % 4] * image_shape\n\n pre_start_point = pre_start_point.astype(np.int32)\n pre_end_point = pre_end_point.astype(np.int32)\n\n cv2.line(\n image, tuple(pre_start_point),\n tuple(pre_end_point),\n (107,222,35), thickness=1)\n\n # Draw boxes if they exist\n if gt_boxes_norm is not None:\n for gt_box_points in gt_boxes_norm:\n for gt_box_point_idx in range(len(gt_box_points)):\n\n gt_start_point = gt_box_points[gt_box_point_idx] * image_shape\n gt_end_point = gt_box_points[(gt_box_point_idx + 1) % 4] * image_shape\n\n gt_start_point = gt_start_point.astype(np.int32)\n gt_end_point = 
gt_end_point.astype(np.int32)\n\n cv2.line(\n image, tuple(gt_start_point),\n tuple(gt_end_point),\n (0,0,205), thickness=1)\n\n return image", "def _get_bounding_boxes(self, imgs, summed_viz, threshold_value=.7):\n self.viz = summed_viz # for debug\n viz = summed_viz\n n_batchs = viz.shape[ 0]\n n_classes = viz.shape[-1]\n \n # viz.shape (100,14,14,20) => (14,14,100,20)\n viz = viz.swapaxes(0,2); viz = viz.swapaxes(0,1)\n \n # Normalize <viz>, image per image (to be in range [-1,1])\n viz = viz / np.max(np.abs(viz), axis=(0,1))\n viz = (viz+1)/2 # range[0,1]\n \n # Resize each summed_viz to its original size (size of input image)\n if viz.shape[:2] != imgs.shape[1:3]:\n viz = np.array(\n [ skimage.transform.resize(viz[:,:,idx], imgs[idx].shape[:2])\n for idx in range(len(imgs))\n if viz.shape[0] != imgs.shape[1]\n ] )\n viz = viz.swapaxes(0,2); viz = viz.swapaxes(0,1)\n \n # Threshold <viz>s to keep values over 70% of its max values\n m_max = threshold_value * viz.max(axis=(0,1))\n viz = viz * (m_max < viz)\n \n # We want a 2d boundind box, so project threshold in xs and ys\n xxs = viz.sum(axis=0)\n yys = viz.sum(axis=1)\n \n # Get some non-thresholded values (left, top... of bounding boxes)\n get_lefts = lambda b_id, c_idx: xxs[:,b_id,c_idx].nonzero()[0][ 0]\n get_tops = lambda b_id, c_idx: yys[:,b_id,c_idx].nonzero()[0][-1]\n get_rights = lambda b_id, c_idx: xxs[:,b_id,c_idx].nonzero()[0][-1]\n get_bottoms = lambda b_id, c_idx: yys[:,b_id,c_idx].nonzero()[0][ 0]\n\n # Debug\n # def get_lefts (b_id, c_idx): \n # print xxs[:,b_id,c_idx].nonzero()\n # xxs[:,b_id,c_idx].nonzero()[0][ 0]\n \n # Build the 2d array with first or lasts positions of zeros\n # INNER FUNCTION\n def _get_border_array(f_border=get_lefts):\n return np.array(\n [ map(f_border, [b_idx]*n_classes, range(n_classes))\n for b_idx in range(n_batchs) ]\n )\n \n lefts = _get_border_array(get_lefts)\n tops = _get_border_array(get_tops)\n rights = _get_border_array(get_rights)\n bottoms = _get_border_array(get_bottoms)\n \n return lefts, tops, rights, bottoms", "def overlay_boxes(image, predictions):\n labels = predictions.get_field(\"labels\")\n boxes = predictions.bbox\n\n for c_label in DRAW_ORDER:\n for box, label in zip(boxes, labels):\n if label == c_label:\n color = CATEGORIES_COLOR[label] # [int(color[0]), int(color[1]), int(color[2])]\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(\n image, tuple(top_left), tuple(bottom_right), tuple(color), 3\n )\n\n return image", "def __draw_boxes(self, img, bboxes, color=(128, 0, 0), thick=4):\n\n # Make a copy of the image\n imcopy = np.copy(img)\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # Return the image copy with boxes drawn\n return imcopy", "def postprocess_boxes(pred_bbox, original_image, train_input_size, score_threshold):\n \n # valid scle for box\n valid_scale=[0, np.inf]\n \n # turn bbox to array\n pred_bbox = np.array(pred_bbox)\n \n # obtain predicted x, y, w, h, objectiveness score, class probabilities\n pred_xywh = pred_bbox[:, 0:4]\n pred_objectiveness = pred_bbox[:, 4]\n pred_prob = pred_bbox[:, 5:]\n \n # 1. 
(x, y, w, h) --> (x_org, y_org, w_org, h_org)\n # obtain original image width and height\n org_h, org_w = original_image.shape[:2]\n \n # obtain resize ratio for height and width \n resize_ratio_h = train_input_size / org_h\n resize_ratio_w = train_input_size / org_w\n \n # scale x, y, w, h to original x, y, w, h\n pred_coor = np.concatenate([np.expand_dims(pred_xywh[:, 0] / resize_ratio_w, axis = -1), \n np.expand_dims(pred_xywh[:, 1] / resize_ratio_h, axis = -1),\n np.expand_dims(pred_xywh[:, 2] / resize_ratio_w, axis = -1),\n np.expand_dims(pred_xywh[:, 3] / resize_ratio_h, axis = -1),], axis = -1)\n \n # 2. (x_org, y_org, w_org, h_org) --> (xmin_org, ymin_org, xmax_org, ymax_org)\n # obtain diagonal image coordinates\n pred_coor = np.concatenate([pred_coor[:, :2] - pred_coor[:, 2:] * 0.5,\n pred_coor[:, :2] + pred_coor[:, 2:] * 0.5], axis = -1)\n\n # 3. clip some boxes those are out of range\n # clip bboxes where xmin_org, ymin_org < 0 and xmax_org, ymax_org out of bounds\n pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),\n np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis = -1)\n \n # mask that ensure that if xmin < xmax, ymin /> ymax and vice versa\n invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))\n pred_coor[invalid_mask] = 0\n\n # 4. discard some invalid boxes\n bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis = -1))\n scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))\n\n # 5. discard boxes with low scores\n # obtain index of class with max prob for each bbox\n classes = np.argmax(pred_prob, axis = -1)\n \n # multiply max prob with objectivness score for each bbox\n scores = pred_objectiveness * pred_prob[np.arange(len(pred_coor)), classes]\n \n # obtain score mask based on score threshold\n score_mask = scores > score_threshold\n \n # obtain combined mask\n mask = np.logical_and(scale_mask, score_mask)\n \n # obtain coordinates, scores and classes after mask\n coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]\n \n # return concatenated results \n return np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis = -1)", "def encode_bboxes(ann, bboxes, img_name):\n\n ann_root = ann.getroot()\n\n folder = ET.Element(\"folder\")\n folder.text = ann_root.find('folder').text\n filename = ET.Element(\"filename\")\n filename.text = img_name\n path = ET.Element(\"path\")\n path.text = ann_root.find('folder').text + '/' + img_name\n source = ET.Element(\"source\")\n database = ET.Element(\"database\")\n database.text = ann_root.find(\"source\").find('database').text\n source.append(database)\n size = ET.Element(\"size\")\n width = ET.Element(\"width\")\n width.text = ann_root.find(\"size\").find('width').text\n height = ET.Element(\"height\")\n height.text = ann_root.find(\"size\").find('height').text\n depth = ET.Element(\"depth\")\n depth.text = ann_root.find(\"size\").find('depth').text\n size.append(width)\n size.append(height)\n size.append(depth)\n segmented = ET.Element(\"segmented\")\n segmented.text = ann_root.find('segmented').text\n\n new_root = ET.Element(\"annotation\")\n new_root.append(folder)\n new_root.append(filename)\n new_root.append(path)\n new_root.append(source)\n new_root.append(size)\n new_root.append(segmented)\n\n for b in bboxes:\n xmin = ET.Element(\"xmin\")\n xmin.text = str(int(b[0]))\n ymin = ET.Element(\"ymin\")\n ymin.text = str(int(b[1]))\n xmax = 
ET.Element(\"xmax\")\n xmax.text = str(int(b[2]))\n ymax = ET.Element(\"ymax\")\n ymax.text = str(int(b[3]))\n name = ET.Element(\"name\")\n name.text = self.classes[int(b[4])]\n bndbox = ET.Element(\"bndbox\")\n bndbox.append(xmin)\n bndbox.append(ymin)\n bndbox.append(xmax)\n bndbox.append(ymax)\n pose = ET.Element(\"pose\")\n truncated = ET.Element(\"truncated\")\n difficult = ET.Element(\"difficult\")\n pose.text = \"Unspecified\"\n truncated.text = \"0\"\n difficult.text = \"0\"\n obj = ET.Element(\"object\")\n obj.append(name)\n obj.append(pose)\n obj.append(truncated)\n obj.append(difficult)\n obj.append(bndbox)\n\n new_root.append(obj)\n\n new_tree = ET.ElementTree(new_root)\n\n return new_tree", "def from_imagaug(image_width, image_height, imgaug_bounding_boxes):\n\n data = ImageData()\n for bb in imgaug_bounding_boxes:\n # Convert from imgaug bounding boxes to ImageDataRegion\n data.regions.append(ImageDataRegion.from_imgaug(bb, image_width, image_height))\n\n return data", "def show_bounding_boxes(dir_path: str) -> None:\r\n \r\n for image_file in glob.glob(dir_path + '/*.png'):\r\n image = cv2.imread(image_file)\r\n height, width, _ = image.shape\r\n\r\n with open(image_file.split(\".\")[0] +'.txt', 'r') as reader:\r\n annotations = reader.readlines()\r\n for annot in annotations:\r\n annot = annot.split()\r\n \r\n # Calculation of top left point and bottom right point of the bounding box \r\n x1, y1 = int((float(annot[1]) - float(annot[3])/2)*width), int((float(annot[2]) - float(annot[4])/2)*height)\r\n x2, y2 = int((float(annot[1]) + float(annot[3])/2)*width), int((float(annot[2]) + float(annot[4])/2)*height)\r\n \r\n # BGR color format\r\n if annot[0] == '0':\r\n color = (0,255,0) # Mask is worn correctly (Green color)\r\n label = 'Good'\r\n else:\r\n color = (0,0,255) # Mask is either not worn correctly or not worn at all (Red color)\r\n label = 'Bad'\r\n \r\n cv2.putText(image,\r\n label, \r\n (x1, y1 - 10),\r\n fontFace=cv2.FONT_HERSHEY_TRIPLEX,\r\n fontScale=0.5, \r\n color=color,\r\n thickness=1) \r\n \r\n cv2.rectangle(image, (x1, y1), (x2, y2), color, thickness=1)\r\n \r\n k = cv2.waitKey(0) & 0xFF\r\n cv2.imshow(image_file.split(\"sss\")[-1], image)\r\n if k == 27:\r\n cv2.destroyAllWindows()\r\n break", "def _update_bbox(self, image_size, region):\n x1,y1,w,h = region\n #identify normalized bbox\n normalized = False\n if x1 < 1 and y1 < 1 and w < 1 and h < 1:\n normalized = True\n bbox = (y1,x1,y1+h,x1+w)\n width, height = image_size\n #logging.debug(image_size)\n #logging.debug(bbox)\n #normalized bbox\n if not normalized:\n self.normalized_bbox = preprocess.normalize_bbox((width, height), bbox)\n else:\n self.normalized_bbox = bbox\n #logging.debug(self.normalized_bbox)\n #cropbox\n self.cropbox = preprocess.calculate_cropbox(self.normalized_bbox,\n FLAGS.cropbox_grid, FLAGS.bbox_grid)\n #logging.debug(self.cropbox)\n #transformation to map cropbox to [0,0,1,1]\n self.transformation = preprocess.calculate_transformation(self.cropbox)\n #logging.debug(self.transformation)\n #logging.debug(preprocess.apply_transformation(self.cropbox, self.transformation))\n #import pdb; pdb.set_trace()", "def draw_boxes_on_image(img, bboxes, color=(0, 0, 1), thick=6):\n imcopy = np.copy(img)\n\n for bbox in bboxes:\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n\n return imcopy", "def _images_and_boxes_preprocessing_cv2(self, imgs, boxes):\r\n\r\n height, width, _ = imgs[0].shape\r\n\r\n boxes[:, [0, 2]] *= width\r\n boxes[:, [1, 3]] *= height\r\n boxes = 
cv2_transform.clip_boxes_to_image(boxes, height, width)\r\n\r\n # `transform.py` is list of np.array. However, for AVA, we only have\r\n # one np.array.\r\n boxes = [boxes]\r\n # The image now is in HWC, BGR format.\r\n if self._split == \"train\": # \"train\"\r\n imgs, boxes = cv2_transform.random_short_side_scale_jitter_list(\r\n imgs,\r\n min_size=self._jitter_min_scale,\r\n max_size=self._jitter_max_scale,\r\n boxes=boxes,\r\n )\r\n imgs, boxes = cv2_transform.random_crop_list(\r\n imgs, self._crop_size, order=\"HWC\", boxes=boxes\r\n )\r\n if self.random_horizontal_flip:\r\n # random flip\r\n imgs, boxes = cv2_transform.horizontal_flip_list(\r\n 0.5, imgs, order=\"HWC\", boxes=boxes\r\n )\r\n elif self._split == \"val\":\r\n # Short side to test_scale. Non-local and STRG uses 256.\r\n imgs = [cv2_transform.scale(self._crop_size, img) for img in imgs]\r\n boxes = [\r\n cv2_transform.scale_boxes(self._crop_size, boxes[0], height, width)\r\n ]\r\n imgs, boxes = cv2_transform.spatial_shift_crop_list(\r\n self._crop_size, imgs, 1, boxes=boxes\r\n )\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = cv2_transform.horizontal_flip_list(\r\n 1, imgs, order=\"HWC\", boxes=boxes\r\n )\r\n elif self._split == \"test\":\r\n # Short side to test_scale. Non-local and STRG uses 256.\r\n imgs = [cv2_transform.scale(self._crop_size, img) for img in imgs]\r\n boxes = [\r\n cv2_transform.scale_boxes(self._crop_size, boxes[0], height, width)\r\n ]\r\n\r\n if self._test_force_flip:\r\n imgs, boxes = cv2_transform.horizontal_flip_list(\r\n 1, imgs, order=\"HWC\", boxes=boxes\r\n )\r\n else:\r\n raise NotImplementedError(\"Unsupported split mode {}\".format(self._split))\r\n\r\n # Convert image to CHW keeping BGR order.\r\n imgs = [cv2_transform.HWC2CHW(img) for img in imgs]\r\n\r\n # Image [0, 255] -> [0, 1].\r\n imgs = [img / 255.0 for img in imgs]\r\n\r\n imgs = [\r\n np.ascontiguousarray(\r\n # img.reshape((3, self._crop_size, self._crop_size))\r\n img.reshape((3, imgs[0].shape[1], imgs[0].shape[2]))\r\n ).astype(np.float32)\r\n for img in imgs\r\n ]\r\n\r\n # Do color augmentation (after divided by 255.0).\r\n if self._split == \"train\" and self._use_color_augmentation:\r\n if not self._pca_jitter_only:\r\n imgs = cv2_transform.color_jitter_list(\r\n imgs, img_brightness=0.4, img_contrast=0.4, img_saturation=0.4\r\n )\r\n\r\n imgs = cv2_transform.lighting_list(\r\n imgs,\r\n alphastd=0.1,\r\n eigval=np.array(self._pca_eigval).astype(np.float32),\r\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\r\n )\r\n\r\n # Normalize images by mean and std.\r\n imgs = [\r\n cv2_transform.color_normalization(\r\n img,\r\n np.array(self._data_mean, dtype=np.float32),\r\n np.array(self._data_std, dtype=np.float32),\r\n )\r\n for img in imgs\r\n ]\r\n\r\n # Concat list of images to single ndarray.\r\n imgs = np.concatenate([np.expand_dims(img, axis=1) for img in imgs], axis=1)\r\n\r\n if not self._use_bgr:\r\n # Convert image format from BGR to RGB.\r\n imgs = imgs[::-1, ...]\r\n\r\n imgs = np.ascontiguousarray(imgs)\r\n imgs = torch.from_numpy(imgs)\r\n boxes = cv2_transform.clip_boxes_to_image(\r\n boxes[0], imgs[0].shape[1], imgs[0].shape[2]\r\n )\r\n return imgs, boxes", "def draw_bbox(image, bboxes, masks, class_ids, class_names, scores, colors, show_label=True, show_mask=True):\n image_h, image_w, _ = image.shape\n\n for i, bbox in enumerate(bboxes):\n y1, x1, y2, x2 = bbox[i]\n coor = np.array([x1, y1, x2, y2], dtype=np.int32)\n fontScale = 0.5\n score = scores[i]\n class_ind = int(class_ids[i])\n 
bbox_color = colors[class_ind]\n bbox_thick = int(0.6 * (image_h + image_w) / 600)\n c1, c2 = (coor[0], coor[1]), (coor[2], coor[3])\n cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)\n\n if show_label:\n bbox_mess = '%s: %.2f' % (class_names[class_ind], score)\n t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0]\n cv2.rectangle(image, c1, (c1[0] + t_size[0], c1[1] - t_size[1] - 3), bbox_color, -1) # filled\n\n cv2.putText(image, bbox_mess, (c1[0], c1[1] - 2), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale, (0, 0, 0), bbox_thick // 2, lineType=cv2.LINE_AA)\n\n # Mask\n mask = masks[:, :, i]\n if show_mask:\n image = apply_mask(image, mask, bbox_color)\n\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n pts = np.array(contours[0], np.int32)\n pts = pts.reshape((-1, 1, 2))\n # image = cv2.polylines(image, [pts], True, bbox_color)\n\n return image", "def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):\n # make a copy of the image\n imcopy = np.copy(img)\n # draw each bounding box on your image copy using cv2.rectangle()\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # return the image copy with boxes drawn\n return imcopy", "def run_onet(self, image, bounding_boxes):\n n, c, h, w = image.size()\n\n crops = crop_boxes(image, bounding_boxes, size=48)\n\n if len(crops) == 0:\n return []\n\n landmarks, offsets, scores = self.onet(crops)\n\n keep = (scores[:, 1] > self.score_thresholds[2]).nonzero(as_tuple=True)[0]\n bounding_boxes = bounding_boxes[keep, :]\n bounding_boxes[:, 5] = scores[keep, 1].view(-1)\n landmarks = landmarks[keep]\n\n # Rescale landmarks\n width = bounding_boxes[:, 3] - bounding_boxes[:, 1] + 1.0\n height = bounding_boxes[:, 4] - bounding_boxes[:, 2] + 1.0\n\n x_min, y_min = bounding_boxes[:, 1], bounding_boxes[:, 2]\n\n landmarks[:, 0:5] = x_min.unsqueeze(1) + width.unsqueeze(1) * landmarks[:, 0:5]\n landmarks[:, 5:10] = y_min.unsqueeze(1) + height.unsqueeze(1) * landmarks[:, 5:10]\n\n bounding_boxes = adjust_boxes(bounding_boxes)\n bounding_boxes = torch.cat((bounding_boxes, landmarks), dim=1)\n\n bounding_boxes = batched_nms(bounding_boxes, n,\n self.iou_thresholds[2], mode='min')\n\n return bounding_boxes", "def draw_boxes(self, im, boxes):\n for bbox in boxes:\n l = [int(x) for x in bbox[\"coords\"]]\n l = self.scalebox(l)\n icon = self.classes_to_icons[bbox[\"label\"]]\n overlay_im_to_background(im, icon, l[0], l[1] - icon.shape[0] - 5)\n cv2.rectangle(im,(l[0],l[1]),(l[2],l[3]),self.color,2)", "def draw_boundingbox(image, infer_output, image_width, image_height, conf_thresh):\n\n out_image = image.copy()\n logger.debug(' - input image: [width] %d, [height] %d' % (image.shape[1], image.shape[0]))\n\n def check_valid_range(val, max_val):\n \"\"\" check the coordinate of bbox is inside of an image\"\"\"\n if val < 0:\n val = 0\n elif val > max_val:\n val = max_val\n else:\n pass\n return val\n\n valid_obj_num = 0\n valid_obj_bbox = []\n\n for obj_info in infer_output:\n conf = obj_info['conf']\n # filter by the confidence\n if conf >= conf_thresh:\n # calculate bbox coordinate\n xmin = int(obj_info['x_min'] * image_width)\n ymin = int(obj_info['y_min'] * image_height)\n xmax = int(obj_info['x_max'] * image_width)\n ymax = 
int(obj_info['y_max'] * image_height)\n\n # round up into valid range\n xmin = check_valid_range(xmin, image_width)\n ymin = check_valid_range(ymin, image_height)\n xmax = check_valid_range(xmax, image_width)\n ymax = check_valid_range(ymax, image_height)\n\n # draw bbox\n cv2.rectangle(out_image, (xmin, ymin), (xmax, ymax), (0, 0, 255), 2)\n\n valid_obj_num += 1\n valid_obj_bbox.append((xmin, ymin, xmax, ymax))\n logger.debug(' - draw bbox [%d, %d, %d, %d] confidence: %f' % (xmin,ymin,xmax,ymax,conf))\n\n return out_image, valid_obj_num", "def update_bounding_box(old_shape, new_shape, row):\n # First index specifies rows of image, second index columns.\n # This means first index is y and second x\n scale_factor = new_shape[0] / old_shape[0]\n offset = (new_shape[1] - old_shape[1] * scale_factor)/2\n\n # May end up a little off-center\n row['y_1'] = row['y_1'] * scale_factor\n row['y_2'] = row['y_2'] * scale_factor\n row['x_1'] = row['x_1'] * scale_factor + offset\n row['x_2'] = row['x_2'] * scale_factor + offset\n return row[['x_1', 'x_2', 'y_1', 'y_2']]", "def return_bbox_image(self, image, bboxes, label, color):\n if bboxes:\n for obj in bboxes:\n image = self.draw_single_bbox(image, obj.position_xywh, label=label, color=color)\n\n return image", "def _images_and_boxes_preprocessing_cv2(self, imgs, boxes, gt_boxes=None, min_scale=None, crop_size=None, n_imgs=0):\n\n height, width, _ = imgs[0].shape\n\n # boxes[:, [0, 2]] *= width\n # boxes[:, [1, 3]] *= height\n boxes = cv2_transform.clip_boxes_to_image(boxes, height, width)\n\n # `transform.py` is list of np.array. However, for AVA, we only have\n # one np.array.\n boxes = [boxes]\n\n crop_size = crop_size if self.multigrid_enabled and crop_size is not None else self._crop_size\n \n if self._split != 'train':\n assert gt_boxes is not None\n gt_boxes = cv2_transform.clip_boxes_to_image(gt_boxes, height, width)\n gt_boxes = [gt_boxes]\n\n # The image now is in HWC, BGR format.\n if self._split == \"train\": # \"train\"\n imgs, boxes = cv2_transform.random_short_side_scale_jitter_list(\n imgs,\n min_size=self._jitter_min_scale if not self.multigrid_enabled and min_scale is None else min_scale,\n max_size=self._jitter_max_scale,\n boxes=boxes,\n )\n imgs, boxes = cv2_transform.random_crop_list(\n imgs, crop_size, order=\"HWC\", boxes=boxes, n_imgs=n_imgs, USE_SPA_CONF=self.cfg.MODEL.USE_SPA_CONF\n )\n if self.random_horizontal_flip:\n # random flip\n # if self.cfg.MODEL.USE_SPA_CONF and len(imgs[n_imgs].shape) != 3:\n # for i in range(n_imgs, len(imgs) + 1):\n # imgs[i] = np.expand_dims(imgs[i], axis=-1)\n imgs, boxes = cv2_transform.horizontal_flip_list(\n 0.5, imgs, order=\"HWC\", boxes=boxes, \n n_imgs=n_imgs, USE_SPA_CONF=self.cfg.MODEL.USE_SPA_CONF\n )\n # elif self._split == \"val\":\n # # Short side to test_scale. Non-local and STRG uses 256.\n # imgs = [cv2_transform.scale(self._crop_size, img) for img in imgs]\n # boxes, gt_boxes = cv2_transform.scale_boxes(\n # self._crop_size, boxes[0], height, width, gt_boxes=gt_boxes[0]\n # )\n # boxes, gt_boxes = [boxes], [gt_boxes]\n # imgs, boxes, gt_boxes = cv2_transform.spatial_shift_crop_list(\n # self._crop_size, imgs, 1, boxes=boxes, gt_boxes=gt_boxes\n # )\n\n # if self._test_force_flip:\n # imgs, boxes = cv2_transform.horizontal_flip_list(\n # 1, imgs, order=\"HWC\", boxes=boxes, gt_boxes=gt_boxes\n # )\n elif self._split == \"val\" or self._split == \"test\":\n # Short side to test_scale. 
Non-local and STRG uses 256.\n imgs = [cv2_transform.scale(crop_size, img) for img in imgs]\n boxes, gt_boxes = cv2_transform.scale_boxes(\n crop_size, boxes[0], height, width, gt_boxes=gt_boxes[0]\n )\n boxes, gt_boxes = [boxes], [gt_boxes]\n\n if self._test_force_flip:\n # if self.cfg.MODEL.USE_SPA_CONF and len(imgs[n_imgs].shape) != 3:\n # imgs[i] = np.expand_dims(imgs[i], axis=-1)\n # imgs[n_imgs:] = [np.expand_dims(img, axis=-1) for img in imgs[n_imgs:]]\n imgs, boxes = cv2_transform.horizontal_flip_list(\n 1, imgs, order=\"HWC\", boxes=boxes, gt_boxes=gt_boxes,\n n_imgs=n_imgs, USE_SPA_CONF=self.cfg.MODEL.USE_SPA_CONF\n )\n else:\n raise NotImplementedError(\n \"Unsupported split mode {}\".format(self._split)\n )\n\n # Convert image to CHW keeping BGR order.\n if self.cfg.MODEL.USE_SPA_CONF:\n try:\n if len(imgs[n_imgs].shape) == 2:\n imgs[n_imgs:] = [np.expand_dims(img, axis=-1) for img in imgs[n_imgs:]]\n elif len(imgs[n_imgs].shape) > 3:\n imgs[n_imgs:] = [np.expand_dims(img.squeeze(), axis=-1) for img in imgs[n_imgs:]]\n except:\n import pdb; pdb.set_trace()\n \n # for i in range(n_imgs, len(imgs) + 1):\n # imgs[i] = np.expand_dims(imgs[i], axis=-1)\n # try:\n imgs = [cv2_transform.HWC2CHW(img) for img in imgs]\n # except:\n # print('imgs[n_imgs].shape:', imgs[n_imgs].shape)\n # print('len(imgs):', len(imgs))\n # print('n_imgs:', n_imgs)\n # import pdb; pdb.set_trace()\n\n # Image [0, 255] -> [0, 1].\n if self.cfg.MODEL.USE_SPA_CONF:\n imgs[:n_imgs] = [img / 255.0 for img in imgs[:n_imgs]]\n else: \n imgs = [img / 255.0 for img in imgs]\n\n if self.cfg.MODEL.USE_SPA_CONF:\n imgs[:n_imgs] = [\n np.ascontiguousarray(\n img.reshape((3, imgs[0].shape[1], imgs[0].shape[2]))\n ).astype(np.float32)\n for img in imgs[:n_imgs]\n ]\n imgs[n_imgs:] = [\n np.ascontiguousarray(\n img.reshape((1, imgs[0].shape[1], imgs[0].shape[2]))\n ).astype(np.float32)\n for img in imgs[n_imgs:]\n ]\n else:\n imgs = [\n np.ascontiguousarray(\n # img.reshape((3, self._crop_size, self._crop_size))\n img.reshape((3, imgs[0].shape[1], imgs[0].shape[2]))\n ).astype(np.float32)\n for img in imgs\n ]\n\n # Do color augmentation (after divided by 255.0).\n if self.cfg.MODEL.USE_SPA_CONF:\n skeleton_imgs = imgs[n_imgs:]\n imgs = imgs[:n_imgs]\n if self._split == \"train\" and self._use_color_augmentation: # False\n if not self._pca_jitter_only:\n imgs = cv2_transform.color_jitter_list(\n imgs,\n img_brightness=0.4,\n img_contrast=0.4,\n img_saturation=0.4,\n )\n\n imgs = cv2_transform.lighting_list(\n imgs,\n alphastd=0.1,\n eigval=np.array(self._pca_eigval).astype(np.float32),\n eigvec=np.array(self._pca_eigvec).astype(np.float32),\n )\n\n # Normalize images by mean and std.\n imgs = [\n cv2_transform.color_normalization(\n img,\n np.array(self._data_mean, dtype=np.float32),\n np.array(self._data_std, dtype=np.float32),\n )\n for img in imgs\n ]\n\n # Concat list of images to single ndarray.\n imgs = np.concatenate(\n [np.expand_dims(img, axis=1) for img in imgs], axis=1\n )\n\n if not self._use_bgr:\n # Convert image format from BGR to RGB.\n imgs = imgs[::-1, ...]\n\n imgs = np.ascontiguousarray(imgs)\n imgs = torch.from_numpy(imgs)\n \n if self.cfg.MODEL.USE_SPA_CONF:\n skeleton_imgs = np.concatenate(\n [np.expand_dims(img, axis=1) for img in skeleton_imgs], axis=1\n )\n skeleton_imgs = np.ascontiguousarray(skeleton_imgs)\n skeleton_imgs = torch.from_numpy(skeleton_imgs)\n\n boxes = cv2_transform.clip_boxes_to_image(\n boxes[0], imgs[0].shape[1], imgs[0].shape[2]\n )\n if gt_boxes is not None:\n gt_boxes = 
cv2_transform.clip_boxes_to_image(\n gt_boxes[0], imgs[0].shape[1], imgs[0].shape[2]\n )\n if self.cfg.MODEL.USE_SPA_CONF:\n return (imgs, skeleton_imgs, boxes) if gt_boxes is None else (imgs, skeleton_imgs, boxes, gt_boxes)\n else:\n return (imgs, boxes) if gt_boxes is None else (imgs, boxes, gt_boxes)", "def draw_boxes(image, bboxes, color=(0., 0., 1.0), thick=6):\n # make a copy of the image\n draw_img = np.copy(image)\n # draw each bounding box on your image copy using cv2.rectangle()\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(draw_img, bbox[0], bbox[1], color, thick)\n # return the image copy with boxes drawn\n return draw_img" ]
[ "0.68984985", "0.6853272", "0.6828715", "0.6781937", "0.6781937", "0.66911954", "0.66844195", "0.6655804", "0.6620257", "0.6595388", "0.65874094", "0.6579807", "0.65564126", "0.65507674", "0.6549536", "0.65301317", "0.65256107", "0.6521426", "0.6520758", "0.6511823", "0.6501388", "0.64934033", "0.6455024", "0.6453745", "0.64485514", "0.6441861", "0.64332247", "0.6430985", "0.64289457", "0.64271486" ]
0.6885573
1
Add optimization to the store by inspecting the model field type.
def _optimize_field_by_name(self, store: QueryOptimizerStore, model, selection, field_def) -> bool: name = self._get_name_from_field_dev(field_def) if not (model_field := self._get_model_field_from_name(model, name)): return False _logger.info('_optimize_field_by_name %r %r', name, model_field) if self._is_foreign_key_id(model_field, name): # ToDo: check if this works - i write resolvers for this store.only(name) return True if model_field.many_to_one or model_field.one_to_one: # ForeignKey or OneToOneField field_store = self._optimize_gql_selections( selection.selections, self._get_type(field_def), ) store.select_related(name, field_store) return True if model_field.one_to_many or model_field.many_to_many: field_store = self._optimize_gql_selections( selection.selections, self._get_type(field_def), ) if isinstance(model_field, ManyToOneRel): field_store.only(model_field.field.name) related_queryset = model_field.related_model.objects.all() _logger.info('_optimize_field_by_name many relation %r %r', model, name) store.prefetch_related(name, field_store, related_queryset) return True if not model_field.is_relation: store.only(name) return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _optimize_field_by_hints(self, store: QueryOptimizerStore, selected_field, field_def) -> bool:\n if not (optimization_hints := getattr(field_def, 'optimization_hints', None)):\n return False\n args = selected_field.arguments\n self._add_optimization_hints(optimization_hints.select_related(*args), store.select_list)\n self._add_optimization_hints(optimization_hints.prefetch_related(*args), store.prefetch_list)\n if store.only_list is not None:\n self._add_optimization_hints(optimization_hints.only(*args), store.only_list)\n return True", "def _optimize_gql_selections(self, selected_fields: List[SelectedField], graphql_type,\n store: QueryOptimizerStore = None) -> QueryOptimizerStore:\n _logger.info('_optimize_gql_selections %r %r', graphql_type, selected_fields)\n if not store:\n store = QueryOptimizerStore(disable_abort_only=self.disable_abort_only)\n if not selected_fields:\n return store\n optimized_fields_by_model = {}\n # schema = self.root_info.schema\n possible_types = self._get_possible_types(graphql_type)\n for selected_field in selected_fields:\n if isinstance(selected_field, InlineFragment):\n # Inline Fragment e.g. `... on Droid {}`\n # ToDo\n # self.handle_inline_fragment(selected_field, schema, possible_types, store)\n continue\n name = selected_field.name\n if name == '__typename':\n continue\n if type(selected_field) is FragmentSpread:\n self._optimize_gql_selections(selected_field.selections, graphql_type, store=store)\n continue\n for type_ in possible_types:\n if isinstance(type_, LazyType):\n type_ = type_.resolve_type()\n if selected_field.name == 'rows' and selected_field.selections:\n # Cursor pagination - optimize the selected fields in `rows`\n self._optimize_gql_selections(selected_field.selections, graphql_type, store=store)\n continue\n selection_field_def = next(\n (field for field in type_._type_definition.fields if to_camel_case(field.name) == name),\n None)\n if not selection_field_def:\n continue\n model = type_._django_type.model\n if model and name not in optimized_fields_by_model:\n optimized_fields_by_model[name] = model\n self._optimize_field(store, model, selected_field, selection_field_def, type_)\n return store", "def on_sense_field(self, field_type, field_parameter):\n raise NotImplementedError()", "def get_type(self):\r\n return self.mm_type + self.meta_model.get_type()", "def add_field(self, field_name, field_type):\n field_name = field_name.replace('\"','')\n if field_type == 'keyword':\n query = \"\"\"insert or ignore into keywords\n (_keyword) values (\"%s\")\"\"\" % field_name\n else:\n query = 'alter table files add column \"%s\" %s' % (\n field_name, field_type)\n self.connection.execute(query)\n self.connection.commit()\n self.init_fields()", "def OptimizerDataclassField(default={'type': 'adam'}, description='TODO'):\n\n\n class OptimizerMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict to a valid optimizer from\n `ludwig.modules.optimization_modules.optimizer_registry` and creates a corresponding `oneOf` JSON schema\n for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if 'type' in value and value['type'] in optimizer_registry:\n opt = optimizer_registry[value['type'].lower()][1]\n try:\n return opt.Schema().load(value)\n except (TypeError, ValidationError) as e:\n raise ValidationError(f'Invalid params for optimizer: {value}, see `{opt}` definition. 
Error: {e}')\n raise ValidationError(f'Invalid params for optimizer: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': list(optimizer_registry.keys()), 'default': default['type'], 'description': 'The type of optimizer to use during the learning process'}}, 'title': 'optimizer_options', 'allOf': get_optimizer_conds(), 'required': ['type'], 'description': description}\n if not isinstance(default, dict) or 'type' not in default or default['type'] not in optimizer_registry:\n raise ValidationError(f'Invalid default: `{default}`')\n try:\n opt = optimizer_registry[default['type'].lower()][1]\n load_default = opt.Schema()\n load_default = load_default.load(default)\n dump_default = opt.Schema().dump(default)\n return field(metadata={'marshmallow_field': OptimizerMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default, metadata={'description': description})}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f\"Unsupported optimizer type: {default['type']}. See optimizer_registry. Details: {e}\")", "def get_model_type(self):\n pass", "def update_model(self, **kwargs):\n self.__dict__.update(kwargs)\n opt_params = ['optimizer_params', 'optimizer']\n if any(item in kwargs.keys() for item in opt_params):\n self.get_unet_model()", "def query_fw_capability(self, model):\n # optype_wise and op_wise capability\n self._pre_optimize(model)\n quantizable_ops = self._query_quantizable_ops(self.pre_optimized_model.model)\n optype_wise = OrderedDict()\n special_config_types = list(self.query_handler.get_quantization_capability()\\\n ['int8'].keys()) # pylint: disable=no-member\n default_config = self.query_handler.get_quantization_capability()[\\\n 'int8']['default'] # pylint: disable=no-member\n op_wise = OrderedDict()\n for _, op in enumerate(quantizable_ops):\n if op.op_type not in special_config_types:\n op_capability = default_config\n else:\n op_capability = \\\n self.query_handler.get_quantization_capability()[\\\n 'int8'][op.op_type] # pylint: disable=no-member\n if op.op_type not in optype_wise.keys():\n optype_wise[op.op_type] = copy.deepcopy(op_capability)\n\n op_wise.update(\n {(op.name, op.op_type): copy.deepcopy(op_capability)})\n\n return {'optypewise': optype_wise, 'opwise': op_wise}", "def get_field_type(connection, table_name, row):\n field_params = OrderedDict()\n field_notes = []\n is_geometry = False\n try:\n field_type = connection.introspection.get_field_type(row[1], row)\n except KeyError:\n field_type = 'TextField'\n field_notes.append('This field type is a guess.')\n\n # This is a hook for data_types_reverse to return a tuple of\n # (field_type, field_params_dict).\n if type(field_type) is tuple:\n field_type, new_params = field_type\n field_params.update(new_params)\n\n # Add max_length for all CharFields.\n if field_type == 'CharField' and row[3]:\n field_params['max_length'] = int(row[3])\n\n if field_type == 'DecimalField':\n if row[4] is None or row[5] is None:\n field_notes.append(\n 'max_digits and decimal_places have been guessed, as this '\n 'database handles decimal fields as float')\n field_params['max_digits'] = row[4] if row[4] is not None else 10\n field_params['decimal_places'] = row[\n 5] if row[5] is not None else 5\n else:\n field_params['max_digits'] = row[4]\n field_params['decimal_places'] = 
row[5]\n\n if field_type == 'GeometryField':\n geo_col = row[0]\n # Getting a more specific field type and any additional parameters\n # from the `get_geometry_type` routine for the spatial backend.\n field_type, geo_params = connection.introspection.get_geometry_type(\n table_name, geo_col)\n field_params.update(geo_params)\n is_geometry = True\n\n return field_type, field_params, is_geometry\n # return getattr(models.fields, field_type), field_params", "def type(self):\n return self._field.type", "def _update_input_type(self):\n pass", "def query_fw_capability(self, model):\n import tensorflow as tf\n from .tf_utils.graph_rewriter.generic.pre_optimize import PreOptimization\n from .tf_utils.graph_rewriter.graph_info import TFLowbitPrecisionPatterns\n\n self.pre_optimizer_handle = PreOptimization(model, self.inputs, self.outputs)\n self.pre_optimized_graph = self.pre_optimizer_handle.get_optimized_graphdef()\n self.exclude_node_names = self.pre_optimizer_handle.get_excluded_node_names()\n tf_version = tf.version.VERSION\n patterns = TFLowbitPrecisionPatterns(tf_version).get_supported_patterns()\n matched_nodes = self.pre_optimizer_handle.get_matched_nodes(patterns)\n\n activation_dtype = ['uint8', 'fp32']\n weight_dtype = ['int8', 'fp32']\n if self._support_bf16():\n activation_dtype.append('bf16')\n weight_dtype.append('bf16')\n capability = {\n 'modelwise': {\n 'activation': {\n 'dtype': activation_dtype,\n 'scheme': ['asym', 'sym'],\n 'granularity': ['per_tensor'],\n 'algorithm': ['minmax']\n },\n 'weight': {\n 'dtype': weight_dtype,\n 'scheme': [\n 'sym',\n ],\n 'granularity': ['per_channel', 'per_tensor'],\n 'algorithm': ['minmax']\n },\n }\n }\n self._query_quantizable_ops(matched_nodes, activation_dtype, weight_dtype)\n capability['opwise'] = self.quantizable_op_details\n logger.debug('Dump framework quantization capability:')\n logger.debug(capability)\n\n return capability", "def _classify_object_field(field: s_obj.Field[Any]) -> FieldStorage:\n\n ftype = field.type\n shadow_ptr_kind = None\n shadow_ptr_type = None\n fieldtype = FieldType.OTHER\n\n is_array = is_multiprop = False\n if issubclass(ftype, s_obj.MultiPropSet):\n is_multiprop = True\n ftype = ftype.type\n elif (\n issubclass(\n ftype,\n (checked.CheckedList, checked.FrozenCheckedList,\n checked.CheckedSet, checked.FrozenCheckedSet))\n and not issubclass(ftype, s_expr.ExpressionList)\n ):\n is_array = True\n ftype = ftype.type # type: ignore\n\n if issubclass(ftype, s_obj.ObjectCollection):\n ptr_kind = 'multi link'\n ptr_type = 'schema::Object'\n if issubclass(ftype, s_obj.ObjectDict):\n fieldtype = FieldType.OBJ_DICT\n\n elif issubclass(ftype, s_obj.Object):\n ptr_kind = 'link'\n ptr_type = f'schema::{ftype.__name__}'\n\n elif issubclass(ftype, s_expr.Expression):\n shadow_ptr_kind = 'property'\n shadow_ptr_type = 'tuple<text: str, refs: array<uuid>>'\n ptr_kind = 'property'\n ptr_type = 'str'\n fieldtype = FieldType.EXPR\n\n elif issubclass(ftype, s_expr.ExpressionList):\n shadow_ptr_kind = 'property'\n shadow_ptr_type = (\n 'array<tuple<text: str, refs: array<uuid>>>'\n )\n ptr_kind = 'property'\n ptr_type = 'array<str>'\n fieldtype = FieldType.EXPR_LIST\n\n elif issubclass(ftype, s_expr.ExpressionDict):\n shadow_ptr_kind = 'property'\n shadow_ptr_type = '''array<tuple<\n name: str,\n expr: tuple<text: str, refs: array<uuid>>\n >>'''\n ptr_kind = 'property'\n ptr_type = 'array<tuple<name: str, expr: str>>'\n fieldtype = FieldType.EXPR_DICT\n\n elif issubclass(ftype, collections.abc.Mapping):\n ptr_kind = 
'property'\n ptr_type = 'json'\n\n elif issubclass(ftype, (str, sn.Name)):\n ptr_kind = 'property'\n ptr_type = 'str'\n\n if field.name == 'name':\n # TODO: consider shadow-reflecting names as tuples\n shadow_ptr_kind = 'property'\n shadow_ptr_type = 'str'\n\n elif issubclass(ftype, bool):\n ptr_kind = 'property'\n ptr_type = 'bool'\n\n elif issubclass(ftype, int):\n ptr_kind = 'property'\n ptr_type = 'int64'\n\n elif issubclass(ftype, uuid.UUID):\n ptr_kind = 'property'\n ptr_type = 'uuid'\n\n elif issubclass(ftype, verutils.Version):\n ptr_kind = 'property'\n ptr_type = '''\n tuple<\n major: std::int64,\n minor: std::int64,\n stage: sys::VersionStage,\n stage_no: std::int64,\n local: array<std::str>,\n >\n '''\n else:\n raise RuntimeError(\n f'no metaschema reflection for field {field.name} of type {ftype}'\n )\n\n if is_multiprop:\n ptr_kind = 'multi property'\n if is_array:\n ptr_type = f'array<{ptr_type}>'\n\n return FieldStorage(\n fieldtype=fieldtype,\n ptrkind=ptr_kind,\n ptrtype=ptr_type,\n shadow_ptrkind=shadow_ptr_kind,\n shadow_ptrtype=shadow_ptr_type,\n )", "def _AddType(self, entity_type):\n if not entity_type.IsValid():\n self.AddFindings(entity_type.GetFindings())\n return False\n return self.local_namespace.InsertType(entity_type)", "def store_type(self, ptype):\n attr = self.node.get_attr(Type)\n attr.store(ptype)", "def __createField(self, field):\n name = field['name']\n fType = field['type']\n fieldLength = None\n if 'shape' in name.lower():\n return\n elif \"String\" in fType:\n fieldType = \"TEXT\"\n fieldLength = field['length']\n elif \"Date\" in fType:\n fieldType = \"DATE\"\n elif \"SmallInteger\" in fType:\n fieldType = \"SHORT\"\n elif \"Integer\" in fType:\n fieldType = \"LONG\"\n elif \"Double\" in fType:\n fieldType = \"DOUBLE\"\n elif \"Single\" in fType:\n fieldType = \"FLOAT\"\n else:\n fieldType = \"Unknown\"\n featureClass = self.featureClassLocation + \"\\\\\" + self.name\n validatedName = arcpy.ValidateFieldName(name, self.featureClassLocation)\n arcpy.AddField_management(in_table=featureClass, field_name=name, field_type=fieldType, field_length=fieldLength)", "def model(self) -> Type[Model]:", "def add(self, obj: model.IdentifiableArtefact):\n for field, field_info in direct_fields(self.__class__).items():\n # NB for some reason mypy complains here, but not in __contains__(), below\n if isinstance(\n obj, get_args(field_info.outer_type_)[1], # type: ignore [attr-defined]\n ):\n getattr(self, field)[obj.id] = obj\n return\n raise TypeError(type(obj))", "def _get_optimised(self, cls: Type[RV]) -> Type[RV]:\n try:\n return self._optimised[cls]\n except KeyError:\n pass\n\n # Check if the class comes from psycopg.types and there is a class\n # with the same name in psycopg_c._psycopg.\n from psycopg import types\n\n if cls.__module__.startswith(types.__name__):\n new = cast(Type[RV], getattr(_psycopg, cls.__name__, None))\n if new:\n self._optimised[cls] = new\n return new\n\n self._optimised[cls] = cls\n return cls", "def add_field(self, field):\n if field.name in self.fields:\n print(\"WARNING: Field {0} already in model {1}\"\n .format(field.name, self.table_name))\n return\n\n self.fields[field.name] = field\n self.sorted_fields.append(field)\n self.sorted_fields_names.append(field.name)", "def __update_custom_fieldtype_settings(self,\n eachfield, #field etree\n ):\n\n # xml attributes\n TYPE = \"type\"\n READABLE = \"readable\"\n WRITABLE = \"writable\"\n LABEL = \"label\"\n HINT = \"comment\"\n DEFAULT = \"default\"\n LINES = \"lines\"\n BOXES = 
\"boxes\"\n HASOPTIONS = \"has_options\"\n\n fieldtype = eachfield.attrib.get(TYPE)\n field_property = self.custom_fieldtype_properties.get(fieldtype, {})\n\n cust_fieldtype = fieldtype_property.get(\"fieldtype\", None)\n cust_readable = fieldtype_property.get(\"readable\", None)\n cust_writable = fieldtype_property.get(\"writable\", None)\n cust_label = fieldtype_property.get(\"label\", None)\n cust_hint = fieldtype_property.get(\"hint\", None)\n cust_default = fieldtype_property.get(\"default\", None)\n cust_lines = fieldtype_property.get(\"lines\", None)\n cust_boxes = fieldtype_property.get(\"boxes\", None)\n cust_has_options = fieldtype_property.get(\"has_options\", None)\n cust_options = fieldtype_property.get(\"options\", None)\n \n if cust_fieldtype:\n if cust_fieldtype != None:\n eachfield.set(TYPE, cust_fieldtype)\n if cust_readable != None:\n eachfield.set(READABLE, cust_readable)\n if cust_writable != None:\n eachfield.set(WRITABLE, cust_writable)\n if cust_label != None:\n eachfield.set(LABEL, cust_label)\n if cust_hint != None:\n eachfield.set(HINT, cust_hint)\n if cust_default != None:\n eachfield.set(DEFAULT, cust_default)\n if cust_lines != None:\n eachfield.set(LINES, cust_lines)\n if cust_boxes != None:\n eachfield.set(BOXES, cust_boxes)\n if cust_has_options != None:\n eachfield.set(HASOPTIONS, cust_has_options)\n if cust_options != None:\n opt_available = eachfield.getchildren()\n if len(opt_available) == 0:\n eachfield.append(cust_options)\n elif len(opt_available) == 1:\n eachfield.remove(opt_available[0])\n eachfield.append(cust_options)", "def dynamic_model(self, input_val: float) -> float:\n pass", "def add(self, obj):\n self._pkcache[obj.pk] = obj\n for ctype in obj._content_types:\n self._typecache[ctype][obj.pk] = True", "def updateType(self):\n # building type\n _type = \"\"\n for ctrl in self.controls:\n _type = _type + ctrl.selection + \"/\"\n _type = _type[:-1]\n \n if scg_alphabet.elementsDescMap.has_key(_type):\n scg_alphabet.changeObjectType(self.object, _type)\n return True\n \n return False", "def test_model_field_types(self):\n self.assertTrue(isinstance(self.UserInfo.have_siblings, str))\n self.assertTrue(isinstance(self.UserInfo.known_env_exposures, str))\n self.assertTrue(isinstance(self.UserInfo.known_genetic_mutations, str))\n self.assertTrue(isinstance(self.UserInfo.age, int))", "def save(self, *args, **kwargs):\n if not self.content_type:\n self.content_type = ContentType.objects.get_for_model(self.__class__)\n super(ProfileUnits, self).save(*args, **kwargs)", "def _store_type(parent, itype):\n\n name = interrogate_type_name(itype)\n if not name:\n # Ignore anonymous types\n return\n\n mangled_name1 = _translate_type_name(name, False)\n mangled_name2 = _translate_type_name(name, True)\n\n _type_cache[(parent, mangled_name1)] = itype\n _type_cache[(parent, mangled_name2)] = itype\n\n for i in range(interrogate_type_number_of_nested_types(itype)):\n itype2 = interrogate_type_get_nested_type(itype, i)\n _store_type(itype, itype2)", "def increase(obj: Any, field: AnyStr):\n if hasattr(obj, field):\n value = getattr(obj, field)\n if isinstance(value, int):\n setattr(obj, field, value + 1)", "def add_field(self, field):\n self.covs_ds[\"num_times\"] += 1\n self.covs_ds[\"sum\"] += field\n self.covs_ds[\"sumsq\"] += np.ma.multiply(field, field)\n\n if 'dstn' in self.covs_ds.dims:\n errorCovs = ForecastErrorCovs(wrap=self.wrap)\n distances = range(self.covs_ds.dims['dstn'])\n self.covs_ds[self.nam_sumsq_var].load()\n if not self.vert:\n 
self.covs_ds[self.nam_sumsq_var][:] += errorCovs.calc_xy_sq(self.covs_shape,\n distances, field.values)\n else:\n self.covs_ds[self.nam_sumsq_var][:] += errorCovs.calc_xyz_sq_vert(self.covs_shape,\n distances, field.values)" ]
[ "0.53922397", "0.537939", "0.49929884", "0.47674727", "0.47306097", "0.46442184", "0.45238703", "0.45149407", "0.45139423", "0.45052016", "0.45049003", "0.44889838", "0.4485591", "0.4484575", "0.4460081", "0.4451049", "0.44423994", "0.44279674", "0.4426244", "0.442454", "0.44238156", "0.44093436", "0.43918264", "0.4390031", "0.43894958", "0.43886766", "0.43870088", "0.43765613", "0.43699738", "0.43651792" ]
0.5479826
0
Obtain an upload ticket from the API
def get_upload_ticket(self): r = HTTPClient().fetch(self.config['apiroot'] + self.ticket_path, method="POST", body=urlencode({'type': 'streaming'}), headers = self.standard_headers, validate_cert=not self.config['dev']) response = json.loads(r.body) return response['ticket_id'], response['upload_link_secure'], response['complete_uri']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_service_ticket():\n\n payload = {'username': APIC_EM_USER, 'password': APIC_EM_PASSW}\n url = 'https://' + APIC_EM + '/ticket'\n header = {'content-type': 'application/json'}\n ticket_response = requests.post(url, data=json.dumps(payload), headers=header, verify=False)\n if not ticket_response:\n print('Something went wrong, try again!')\n else:\n ticket_json = ticket_response.json()\n ticket = ticket_json['response']['serviceTicket']\n print('APIC-EM ticket: ', ticket) # print the ticket for reference only, not required\n return ticket", "def get_upload(arn=None):\n pass", "def ticket(self, ticket_id):\r\n return resources.Ticket(self, ticket_id)", "def get_ticket_info(name: str) -> JiraTicket:\n ticket_url = build_rest_url(name)\n res = requests.get(ticket_url, auth=get_jira_auth()).json()\n issue_type = JiraIssueTypeFactory(**res[\"fields\"][\"issuetype\"])\n summary = res[\"fields\"][\"summary\"]\n browse_url = build_browse_url(name)\n\n return JiraTicket(name=name, issue_type=issue_type, summary=summary, url=browse_url)", "def get_ticket(self, ticket_id):\n response = self.http_call(\"{0}/tickets/{1}.json\".format(self.uri, ticket_id))\n return json.loads(response.content.decode(sys.stdout.encoding, \"replace\"))", "def get_ticket(self, ticket_id):\r\n mask = ('mask[id, title, assignedUser[firstName, lastName],'\r\n 'createDate,lastEditDate,updates[entry],updateCount]')\r\n return self.ticket.getObject(id=ticket_id, mask=mask)", "def delete_upload_ticket(self, complete_uri):\n url = self.config['apiroot'] + complete_uri\n log.info(\"Requesting %s\" % url)\n r = HTTPClient().fetch(url, method=\"DELETE\", headers=self.standard_headers,\n validate_cert=not self.config['dev'])\n log.info(\"Upload completed: status code: %d\" % r.code)\n if r.code == 201:\n _id = r.headers['location'].split('/')[-1]\n return _id\n raise ValueError(\"Upload completion unsuccessful\")", "def ticket(self, ticket_id):\r\n return tickets.Ticket(self, ticket_id)", "def ticket_id(self):\n return self._ticket_id", "def prepare_ticket(self, req, ticket, fields, actions):", "def create_ticket(self, ticket):\r\n ticket_url = self._zendesk_instance.create_ticket(data=ticket)\r\n return zendesk.get_id_from_url(ticket_url)", "def Ticket(ticket):\n try:\n data = ticket_module.verify(ticket)\n name = data['slivers'][0]['name']\n if data != None:\n deliver_ticket(data)\n logger.log('api_calls: Ticket delivered for %s' % name)\n Create(database.db.get(name))\n except Exception, err:\n raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))", "def get_ws_ticket(jwt: str) -> str:\n LOGGER.debug(\"Retrieving ticket...\")\n\n args = {\n \"url\": \"{0}/kafka-ws/v1/ticket\".format(CONFIG['dojot']['url']),\n \"headers\": {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {0}\".format(jwt),\n },\n }\n\n res = DojotAPI.call_api(requests.get, args)\n\n LOGGER.debug(\".. 
retrieved ticket\")\n return res[\"ticket\"]", "def ticket(self, ticket_id):\n return tickets.Ticket(self, ticket_id)", "def get(self, **kwargs):\n params = {\"token\": self.token}\n params.update(kwargs)\n return self.api._get(\"uploads/get\", params=params)", "def transfer_ticket(self, ticket_id):\n ticket = self.zendesk.get_ticket(ticket_id)\n subject = ticket[\"ticket\"][\"subject\"]\n status = ticket[\"ticket\"][\"status\"]\n description = ticket[\"ticket\"][\"description\"]\n if ticket[\"ticket\"][\"assignee_id\"] is not None:\n assignee_email = self.zendesk.get_assignee_email(ticket[\"ticket\"][\"assignee_id\"])\n try:\n requester = ticket[\"ticket\"][\"via\"][\"source\"][\"from\"][\"address\"]\n except KeyError:\n requester = self.default_requester\n # Terms for the status of a ticket on Samanage differ from those on Zendesk\n # When creating a ticket on Samanage through API only statuses allowed are Closed and New.\n # After the ticket is created status can be changed.\n if status in (\"open\", \"pending\"):\n status = \"New\"\n update_status = \"Assigned\"\n if status in (\"closed\", \"solved\"):\n status = \"Closed\"\n update_status = \"Closed\"\n # We can now make incident on Samanage\n if self.samanage:\n incident = self.samanage.incident(\n subject, requester, self.priority, status, assignee_email, description\n )\n incident_id = json.loads(incident)[\"id\"]\n # Get all comments for a ticket on zendesk\n comments = self.zendesk.get_comments(ticket_id)\n comment_list = []\n for comment in comments[\"comments\"]:\n author = self.zendesk.get_comment_author(comment[\"author_id\"])\n if self.dump:\n comment_list.append({\"author\": author, \"body\": comment[\"body\"]})\n # Transfer comment(s) to Samanage\n if self.samanage:\n self.samanage.comment(\"From:{0}\\n{1}\".format(author, comment[\"body\"]), incident_id)\n # Adding comments to samanage ticket reopens it\n # (re)update the status of the ticket on samanage to specified status\n if self.samanage:\n self.samanage.update_status(update_status, incident_id)\n\n # JSON dump if initalized\n if self.dump:\n with open(\"ticket_dump.json\", \"a\", errors='replace') as dump_file:\n card_details = {ticket_id:{\n \"id\": ticket_id,\n \"subject\": subject,\n \"requester\": requester,\n \"status\": status,\n \"assignee\": assignee_email,\n \"description\": description,\n \"comments\": comment_list}}\n dump_file.write(json.dumps(card_details, ensure_ascii=False,\n sort_keys=True, indent=4))", "def get_ticket_by_id(id):\n url = \"https://zccreynosa.zendesk.com/api/v2/tickets/\" + id + \".json\"\n response = requests.get(url, auth=(user, token))\n\n if response.status_code != 200:\n return None\n else:\n json_data = json.loads(response.text)\n return json_data", "def get_upload_status(self, upload_id: str, token: str) -> Upload:\n data, _, _ = self.json('get', f'/{upload_id}', token)\n return self._parse_upload_status(data)", "def upload_file(self):\n request = copy.deepcopy(self.request_template)\n data = json.dumps(request)\n curr_file = {\n 'request': data,\n 'file': open(self.file_path, 'rb')\n }\n print(\"Sending Upload request of av for file {}\".format(self.file_name))\n try:\n response = requests.post(url=self.url + \"upload\", files=curr_file, verify=False)\n except Exception as E:\n print(\"Upload file failed. 
file: {} , failure: {}\".format(self.file_name, E))\n raise\n response_j = response.json()\n print(\"av Upload response status for file {} : {}\".format(self.file_name,\n response_j[\"response\"][0][\"status\"][\"label\"]))\n return response_j", "def upload(self, filepath):\n if self.ver is not None and LooseVersion(str(self.ver)) < LooseVersion('1.4.0'):\n raise VersionMismatchError('File upload')\n\n try:\n with open(filepath, 'rb') as stream:\n url = '{0}{1}'.format(self.url, '/uploads.json')\n response = self.request('post', url, data=stream, headers={'Content-Type': 'application/octet-stream'})\n except IOError:\n raise NoFileError()\n\n return response['upload']['token']", "def issue( self ):\n\n self.state.issueDate = \"%d\" % time.time()\n\n # Encrypt the protocol state.\n aes = AES.new(self.symmTicketKey, mode=AES.MODE_CBC, IV=self.IV)\n state = repr(self.state)\n assert (len(state) % AES.block_size) == 0\n cryptedState = aes.encrypt(state)\n\n # Authenticate ticket name, IV and the encrypted state.\n hmac = HMAC.new(self.hmacTicketKey, self.IV + \\\n cryptedState, digestmod=SHA256).digest()\n\n ticket = self.IV + cryptedState + hmac\n\n log.debug(\"Returning %d-byte ticket.\" % (len(self.IV) +\n len(cryptedState) + len(hmac)))\n\n return ticket", "def _get(self, ticket_id):\n ticket_id = int(ticket_id)\n ticket = DB_TICKET_TABLE.get(doc_id=ticket_id)\n if not ticket:\n flask_restful.abort(404, message=f\"Ticket '{ticket_id}' not found!\")\n comments_q = Query()\n comments = DB_COMMENT_TABLE.search((comments_q.ticket_id == ticket_id) &\n (comments_q.type == Comment.TYPE_COMMENT))\n worknotes = DB_COMMENT_TABLE.search((comments_q.ticket_id == ticket_id) &\n (comments_q.type == Comment.TYPE_WORKNOTE))\n res = {\n \"id\" : ticket.doc_id,\n }\n res.update(ticket)\n res['_embedded'] = {\n \"comments\" : self.embed_comment_data_in_result(comments),\n \"worknotes\" : self.embed_comment_data_in_result(worknotes)\n }\n res['_links'] = self.make_links({\n \"self\" : Ticket.get_self_url(ticket.doc_id),\n \"contained_in\" : TicketList.get_self_url(),\n \"customer\" : Customer.get_self_url(res['customer_id']),\n \"user\" : User.get_self_url(res['user_id'])\n })\n return res", "def validate_oculus_ticket():\n\n ob = request.get_json()\n try:\n OculusProviderAuthSchema().load(ob)\n except ma.ValidationError as e:\n abort_unauthorized(\"Oculus token property %s is invalid\" % e.field_name)\n\n provider_details = ob['provider_details']\n # Get Oculus authentication config\n oculus_config = get_provider_config('oculus')\n\n if not oculus_config:\n abort(http_client.SERVICE_UNAVAILABLE, description=\"Oculus authentication not configured for current tenant\")\n\n # Call validation and authenticate if ticket is good\n identity_id = run_ticket_validation(\n user_id=provider_details['user_id'],\n access_token=oculus_config['access_token'],\n nonce=provider_details['nonce']\n )\n\n return identity_id", "def upload(self) :\n\t\ttry :\n\t\t\treturn self._upload\n\t\texcept Exception as e:\n\t\t\traise e", "def support_submit_ticket(self, **kw):\n person_name = \"\"\n if http.request.env.user.name != \"Public user\":\n person_name = http.request.env.user.name\n pic = http.request.env['schedule.person.in.charge'].sudo().search([],order = 'end_date desc',limit=1).pic \n return http.request.render('website_support_indonesia.support_submit_ticket', {'categories': http.request.env['website.support.ticket.categories'].sudo().search([('type_view','=',False)]), 'person_name': person_name, 'email': 
http.request.env.user.email, 'pic':pic})", "def ticket_created(self, ticket):", "def verify_ticket(self, ticket):\n raise NotImplementedError()", "def get_approved_tickets(ticket):\n with open(\"{}/.ssh/jira.pem\".format(os.environ[\"HOME\"]), \"r\") as key_cert_file:\n key_cert_data = key_cert_file.read()\n oauth_dict = {\n \"access_token\": \"PrxvLiICry8gkXXPHqnorpRHjBCn3V0O\",\n \"access_token_secret\": \"kNX06aseOyJx3GnWmRH1w14UQ4o7puiV\",\n \"consumer_key\": \"ceej-ticket-key\",\n \"key_cert\": key_cert_data\n }\n jira = JIRA({\"server\": \"https://issues.couchbase.com\"}, oauth=oauth_dict)\n links = [link.outwardIssue.key\n for link in jira.issue(ticket).fields.issuelinks\n if hasattr(link, \"outwardIssue\")]\n return links", "def test_finalized_no_upload_ticket(self):\n namespace = 'default-gzip'\n request = self.store_request(namespace, pad_string('silence'))\n request.upload_ticket = None\n with self.call_should_fail('400'):\n self.call_api('finalize_gs_upload', self.message_to_dict(request), 200)", "def get_auth_token():\n \n form_fields = {\n \"client_id\": client_id,\n \"client_secret\":client_secret,\n \"code\": code,\n \"redirect_uri\": \"http://www.stackprinter.com\"\n }\n form_data = urllib.urlencode(form_fields)\n results = __gae_fetch(url = 'https://stackexchange.com/oauth/access_token',\n method = urlfetch.POST, \n payload = form_data,\n headers={'Content-Type': 'application/x-www-form-urlencoded'})\n response = results.content\n return response" ]
[ "0.67064804", "0.5887511", "0.58059216", "0.5711891", "0.558435", "0.55571055", "0.55388236", "0.5534896", "0.55088025", "0.54868495", "0.54192376", "0.531124", "0.5295683", "0.5222806", "0.5221019", "0.51976347", "0.51389897", "0.51316166", "0.51182204", "0.50823814", "0.50782853", "0.50688165", "0.5063119", "0.504373", "0.50406444", "0.5028732", "0.49751207", "0.49703068", "0.4959613", "0.49467874" ]
0.8335117
0
Upload a piece of a video file to Vimeo. Makes a PUT request to the given URL with the given binary data. The _range parameter indicates the first byte to send. The first time you attempt an upload, this will be 0. The next time, it will be the number returned from get_last_uploaded_byte, if that number is less than the total size of the video file in bytes.
def upload_segment(self, upload_uri, _range, data, filetype):
    content_range = '%d-%d/%d' % (_range, len(data), len(data))
    upload_headers = {'Content-Type': 'video/%s' % filetype,
                      'Content-Length': len(data),
                      'Content-Range': 'bytes: %s' % content_range}

    log.info("Sending file of size %d" % len(data))
    log.info("Requesting %s" % upload_uri)
    request_headers = dict(upload_headers.items() + self.standard_headers.items())
    r = HTTPClient().fetch(upload_uri, method="PUT",
                           body=data, headers=request_headers)
    log.info("Uploaded segment: status code %d" % r.code)
    if r.code != 200:
        raise ValueError("Upload unsuccessful")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_range( # type: ignore\n self, data, # type: bytes\n start_range, # type: int\n end_range, # type: int\n validate_content=False, # type: Optional[bool]\n timeout=None, # type: Optional[int]\n encoding='UTF-8',\n **kwargs\n ):\n # type: (...) -> Dict[str, Any]\n if self.require_encryption or (self.key_encryption_key is not None):\n raise ValueError(\"Encryption not supported.\")\n if isinstance(data, six.text_type):\n data = data.encode(encoding)\n\n content_range = 'bytes={0}-{1}'.format(start_range, end_range)\n content_length = end_range - start_range + 1\n try:\n return self._client.file.upload_range( # type: ignore\n range=content_range,\n content_length=content_length,\n optionalbody=data,\n timeout=timeout,\n validate_content=validate_content,\n cls=return_response_headers,\n **kwargs)\n except StorageErrorException as error:\n process_storage_error(error)", "def upload_file(self, source, dest):\n print(f\"Uploading {source} to {dest}\")\n with open(source, \"rb\") as data:\n self.client.upload_blob(name=dest, data=data)", "def _upload_file_bytes(self, conn, http_conn, fp, file_length,\r\n total_bytes_uploaded, cb, num_cb):\r\n buf = fp.read(self.BUFFER_SIZE)\r\n if cb:\r\n if num_cb > 2:\r\n cb_count = file_length / self.BUFFER_SIZE / (num_cb-2)\r\n elif num_cb < 0:\r\n cb_count = -1\r\n else:\r\n cb_count = 0\r\n i = 0\r\n cb(total_bytes_uploaded, file_length)\r\n\r\n # Build resumable upload headers for the transfer. Don't send a\r\n # Content-Range header if the file is 0 bytes long, because the\r\n # resumable upload protocol uses an *inclusive* end-range (so, sending\r\n # 'bytes 0-0/1' would actually mean you're sending a 1-byte file).\r\n put_headers = {}\r\n if file_length:\r\n range_header = self._build_content_range_header(\r\n '%d-%d' % (total_bytes_uploaded, file_length - 1),\r\n file_length)\r\n put_headers['Content-Range'] = range_header\r\n # Set Content-Length to the total bytes we'll send with this PUT.\r\n put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)\r\n http_request = AWSAuthConnection.build_base_http_request(\r\n conn, 'PUT', path=self.tracker_uri_path, auth_path=None,\r\n headers=put_headers, host=self.tracker_uri_host)\r\n http_conn.putrequest('PUT', http_request.path)\r\n for k in put_headers:\r\n http_conn.putheader(k, put_headers[k])\r\n http_conn.endheaders()\r\n\r\n # Turn off debug on http connection so upload content isn't included\r\n # in debug stream.\r\n http_conn.set_debuglevel(0)\r\n while buf:\r\n http_conn.send(buf)\r\n total_bytes_uploaded += len(buf)\r\n if cb:\r\n i += 1\r\n if i == cb_count or cb_count == -1:\r\n cb(total_bytes_uploaded, file_length)\r\n i = 0\r\n buf = fp.read(self.BUFFER_SIZE)\r\n if cb:\r\n cb(total_bytes_uploaded, file_length)\r\n if total_bytes_uploaded != file_length:\r\n # Abort (and delete the tracker file) so if the user retries\r\n # they'll start a new resumable upload rather than potentially\r\n # attempting to pick back up later where we left off.\r\n raise ResumableUploadException(\r\n 'File changed during upload: EOF at %d bytes of %d byte file.' 
%\r\n (total_bytes_uploaded, file_length),\r\n ResumableTransferDisposition.ABORT)\r\n resp = http_conn.getresponse()\r\n body = resp.read()\r\n # Restore http connection debug level.\r\n http_conn.set_debuglevel(conn.debug)\r\n\r\n if resp.status == 200:\r\n return resp.getheader('etag') # Success\r\n # Retry timeout (408) and status 500 and 503 errors after a delay.\r\n elif resp.status in [408, 500, 503]:\r\n disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY\r\n else:\r\n # Catch all for any other error codes.\r\n disposition = ResumableTransferDisposition.ABORT\r\n raise ResumableUploadException('Got response code %d while attempting '\r\n 'upload (%s)' %\r\n (resp.status, resp.reason), disposition)", "def put_file(self, file_name: str, value: BytesIO):\n value.seek(0)\n self.client.upload_fileobj(value, self.bucket, file_name)", "def upload_part(self, seq, offset, chunk_size, labels, buffer = ''):\n # TODO implement Content-MD5\n debug(\"Uploading part %i of %r (%s bytes)\" % (seq, self.upload_id, chunk_size))\n headers = { \"content-length\": chunk_size }\n query_string = \"?partNumber=%i&uploadId=%s\" % (seq, self.upload_id)\n request = self.s3.create_request(\"OBJECT_PUT\", uri = self.uri, headers = headers, extra = query_string)\n response = self.s3.send_file(request, self.file, labels, buffer, offset = offset, chunk_size = chunk_size)\n self.parts[seq] = response[\"headers\"][\"etag\"]\n return response", "def upload(self, dest, overwrite=False):\n dest = normpath(dest)\n try:\n remote = get_remote(dest)\n except ValueError: # Nothing exists at dest, nothing to worry about.\n pass\n else: # Something exists here.\n if isinstance(remote, RemoteFile) and self.hash() == remote.hash:\n # Nothing to update.\n pdbox.info(\"%s and %s are identical\" % (self.path, remote.uri))\n return\n if not overwrite:\n raise ValueError(\"%s exists\" % remote.uri)\n\n # Uploading can either happen all at once (with a 150 MB limit),\n # or in chunks. 
If the file is smaller than the selected chunk size,\n # then try to upload in one go.\n chunksize = min(pdbox._args.get(\"chunksize\", 149.0), 149.0)\n pdbox.debug(\"Chunk size: %.2f MB\" % chunksize)\n if pdbox._args.get(\"dryrun\"):\n pdbox.info(\"Uploaded %s to %s\" % (self.path, dbx_uri(dest)))\n return None\n\n # Set the write mode.\n if overwrite:\n mode = dropbox.files.WriteMode.overwrite\n else:\n mode = dropbox.files.WriteMode.add\n\n chunk = int(chunksize * 1024 * 1024) # Convert B to MB.\n\n with open(self.path, \"rb\") as f:\n data = f.read()\n sz = len(data)\n\n # TODO: Progress bars.\n if sz < chunk: # One-shot upload.\n meta = execute(pdbox.dbx.files_upload, data, dest, mode)\n else: # Multipart upload.\n nchunks = math.ceil(sz / chunk)\n # Initiate the upload with just the first byte.\n start = execute(pdbox.dbx.files_upload_session_start, f[0])\n cursor = dropbox.files.UploadSessionCursor(start.session_id, 1)\n\n # Now just add each chunk.\n while sz - cursor.offset > chunk:\n pdbox.debug(\n \"Uploading chunk %d/%d\" % (cursor.offset % chunk, nchunks),\n )\n execute(\n pdbox.dbx.files_upload_session_append_v2,\n data[cursor.offset:cursor.offset + chunk],\n cursor,\n )\n cursor.offset += chunk\n\n # Upload the remaining to finish the transaction.\n meta = execute(\n pdbox.dbx.files_upload_session_finish,\n data[cursor.offset:],\n dropbox.files.CommitInfo(dest, mode),\n )\n\n pdbox.info(\"Uploaded %s to %s\" % (self.path, dbx_uri(dest)))\n return RemoteFile(None, meta=meta)", "def upload_file(self, upload_link: str, file_data: bytes) -> None:\n r = requests.put(\n upload_link,\n data=file_data,\n headers=self._auth_headers\n )\n if r.status_code in self._errors:\n raise ApiResponseException(\n r.status_code, r.json()[\"error\"][\"message\"])", "def upload(upload_url: str, file_path: str) -> None:\n with open(file_path, 'r') as data:\n try:\n r = requests.put(\n upload_url,\n data=data,\n headers={\"Content-Type\": \"application/octet-stream\"},\n )\n r.raise_for_status()\n except requests.exceptions.HTTPError as err:\n print('Error uploading file')\n raise err", "def put_raw(key: str, data: bytes, project: str, bucket: str) -> None:\n bucket = bucket_construct(project, bucket)\n blob = google.cloud.storage.blob.Blob(name=key, bucket=bucket)\n\n blob.upload_from_string(data, 'application/octet-stream')\n LOG.info(f'Successfully uploaded file to {key} in {bucket}')", "def upload(path, stream, buffer_size=None):\n fs.upload(path, stream, buffer_size)", "def upload_file(\n self, data, # type: Any\n length=None, # type: Optional[int]\n metadata=None, # type: Optional[Dict[str, str]]\n content_settings=None, # type: Optional[ContentSettings]\n validate_content=False, # type: bool\n max_connections=1, # type: Optional[int]\n timeout=None, # type: Optional[int]\n encoding='UTF-8', # type: str\n **kwargs # type: Any\n ):\n # type: (...) 
-> Dict[str, Any]\n if self.require_encryption or (self.key_encryption_key is not None):\n raise ValueError(\"Encryption not supported.\")\n\n if isinstance(data, six.text_type):\n data = data.encode(encoding)\n if length is None:\n length = get_length(data)\n if isinstance(data, bytes):\n data = data[:length]\n\n if isinstance(data, bytes):\n stream = BytesIO(data)\n elif hasattr(data, 'read'):\n stream = data\n elif hasattr(data, '__iter__'):\n stream = IterStreamer(data, encoding=encoding) # type: ignore\n else:\n raise TypeError(\"Unsupported data type: {}\".format(type(data)))\n return upload_file_helper( # type: ignore\n self,\n stream,\n length,\n metadata,\n content_settings,\n validate_content,\n timeout,\n max_connections,\n self._config.data_settings,\n **kwargs)", "def put_binary(self, url: str, data: Any, content_type: str = None, token: str = None) -> dict:\n res = requests.put(url,\n data=data,\n headers=self._get_headers(\n token,\n {\"Content-Type\": (content_type or \"application/octet-stream\")}\n ),\n verify=False,\n proxies=self._get_proxies())\n if not res.ok:\n logger.error(http_debug(res))\n elif logger.isEnabledFor(logging.DEBUG):\n logger.debug(http_debug(res))\n return self._parse_json_response(res, token)", "def run(self):\r\n m = hashlib.md5\r\n self.sock.connect((IP, self.port))\r\n #the uploaded file\r\n f = open(self.path, 'rb')\r\n dat = f.read(1024)\r\n print 'uploading...'\r\n\r\n while dat != '':\r\n hashed = m(dat)\r\n self.sock.send(hashed.hexdigest())\r\n self.data = self.sock.recv(2)\r\n\r\n if not self.data == 'ok':\r\n #somthing went wrong and the thread instantly stops\r\n return\r\n\r\n self.sock.send(dat)\r\n self.data = self.sock.recv(2)\r\n #in case the data didn't and the hash sent didn't match\r\n while self.data == 'sa':\r\n #sends again the hash and data\r\n hashed = m(dat)\r\n self.sock.send(hashed.hexdigest())\r\n print hashed.hexdigest()\r\n self.data = self.sock.recv(2)\r\n print self.data\r\n self.sock.send(dat)\r\n #print dat\r\n self.data = self.sock.recv(2)\r\n #can keep sending\r\n if self.data == 'kg':\r\n self.progress += 1024\r\n dat = f.read(1024)\r\n else:\r\n return\r\n\r\n if not self.sending:\r\n\r\n self.sock.send('canceled')\r\n self.sock.close()\r\n f.close()\r\n return\r\n\r\n\r\n f.close()\r\n self.sock.send('end-of-upload-d-o-n-e-now-what-?')\r\n print 'processing video... 
this may take a few minutes'\r\n self.data = self.sock.recv(1024)\r\n if self.data == 'complete':\r\n self.sock.close()\r\n tasks.complete_upload()\r\n else:\r\n pass", "def _upload(url, data_file, username, password):\n url_match = '(http(s)?)\\:\\/\\/localhost'\n if re.search(url_match, url):\n print(\"Please configure url settings.\")\n exit(1)\n\n polarion_request = post(url,\n data=data_file,\n auth=auth.HTTPBasicAuth(username,\n password))\n status_code = polarion_request.status_code\n if status_code == codes.ok:\n return status_code\n else:\n print(\"Results upload failed with the follow: {}\".format(\n polarion_request.status_code))\n raise exceptions.RequestException", "def add_bytes(self, data, **kwargs):\n\t\tbody, headers = multipart.stream_bytes(data, self.chunk_size)\n\t\treturn self._client.request('/add', decoder='json',\n\t\t data=body, headers=headers, **kwargs)", "def run(args):\n\n drive_uid = str(args[\"drive_uid\"])\n file_uid = str(args[\"file_uid\"])\n chunk_idx = int(args[\"chunk_index\"])\n secret = str(args[\"secret\"])\n data = string_to_bytes(args[\"data\"])\n checksum = str(args[\"checksum\"])\n\n drive = DriveInfo(drive_uid=drive_uid)\n\n drive.upload_chunk(file_uid=file_uid, chunk_index=chunk_idx,\n secret=secret, chunk=data, checksum=checksum)\n\n return True", "def upload_chunk(self, file_obj, length, offset=0, upload_id=None):\n\n params = dict()\n\n if upload_id:\n params['upload_id'] = upload_id\n params['offset'] = offset\n\n url, ignored_params, headers = self.request(\"/chunked_upload\", params,\n method='PUT', content_server=True)\n\n try:\n reply = self.rest_client.PUT(url, file_obj, headers)\n return reply['offset'], reply['upload_id']\n except ErrorResponse as e:\n raise e", "def videoclipupload(request, hash_key):\n # need access to temporary_file_path of uploaded clip files so \n # this view always reads files to disk during uploads\n request.upload_handlers = [TemporaryFileUploadHandler()]\n return _videoclipupload(request, hash_key)", "def put(self):\n parser.parse(self.arg_schema_put, request, location='json_or_form')\n if not request.files.get('upload_data'):\n raise FileError(\"Missing upload file.\")\n # Figure out how to validate inputs\n mime_type = request.files.get('upload_data').mimetype\n if mime_type == 'text/csv':\n self.upload_csv_data(request.files.get('upload_data'))\n else:\n raise FileError(\"Bad upload file type received.\")\n return {'status': 200}", "def users_video_upload(self):\n email_token = auth.current_user()[0]\n content = request.form\n if not UPLOAD_VIDEO_MANDATORY_FIELDS.issubset(content.keys()) or not \"video\" in request.files:\n self.logger.debug((messages.MISSING_FIELDS_ERROR % (UPLOAD_VIDEO_MANDATORY_FIELDS - set(content.keys()))))\n return messages.ERROR_JSON % (\n messages.MISSING_FIELDS_ERROR % (UPLOAD_VIDEO_MANDATORY_FIELDS - set(content.keys()))), 400\n title = content[\"title\"]\n location = content[\"location\"]\n visible = True if content[\"visible\"] == \"true\" else False\n video = request.files['video'].stream\n description = content[\"description\"] if \"description\" in content else None\n try:\n file_location = self.media_server.upload_video(user_email=email_token,\n title=title, video=video)\n except InvalidVideoFormatError:\n self.logger.debug(messages.INVALID_VIDEO_FORMAT)\n return messages.ERROR_JSON % messages.INVALID_VIDEO_FORMAT, 400\n video_data = VideoData(title=title, location=location, creation_time=datetime.now(),\n file_location=file_location, visible=visible, 
description=description)\n self.video_database.add_video(user_email=email_token, video_data=video_data)\n response_dict = video_data._asdict()\n response_dict[\"creation_time\"] = response_dict[\"creation_time\"].isoformat()\n return json.dumps(response_dict), 200", "def upload(self, path, data, headers={}):\n\n client = AsyncHTTPClient()\n method = 'PUT'\n url = self.generate_url(path)\n url_object = urlparse(url)\n params = {\n 'SignatureMethod': 'AWS4-HMAC-SHA256'\n }\n\n headers.update({\n 'Content-Length': str(len(data)),\n 'Content-Type': self._guess_mimetype(path),\n 'Date': self._rfc822_datetime(),\n 'Host': url_object.hostname,\n 'X-Amz-Content-sha256': hashlib.sha256(data).hexdigest(),\n })\n\n try:\n response = yield client.fetch(\n self.sign_request(\n url_object.hostname,\n url_object.path,\n params,\n headers,\n method,\n data\n ),\n method=method,\n body=data,\n connect_timeout=AWS_S3_CONNECT_TIMEOUT,\n request_timeout=AWS_S3_REQUEST_TIMEOUT,\n headers=headers\n )\n except HTTPError as error:\n log.error(error)\n if error.response:\n log.error(error.response.body)\n raise Return(None)\n\n raise Return(response)", "def uploadFile(self, filename, name=\"Dummy name\", type=\"DummyType\"):\n\n with open(filename, 'rb') as f:\n data = f.read()\n\n if (name == \"Dummy name\"):\n name = filename\n\n data = {'name': name,\n 'type': type,\n 'bits': xmlrpclib.Binary(data),\n 'overwrite': True}\n\n try:\n r = self.server.wp.uploadFile(\n self.blogid, self.username, self.password, data)\n except xmlrpclib.Fault as fault:\n display_XMLRPC_errors(\"upload file \" + filename, fault)\n\n #FIXME: do we really need to split the url ?\n try:\n r['url'] = r['url'].split('?')[1]\n except IndexError:\n from urlparse import urlparse\n r['url'] = urlparse(r['url']).path\n\n print \"uploaded file file =\", r['file']\n print \"uploaded file url =\", r['url']\n print \"uploaded file type =\", r['type']", "def upload():\n global video_file_name\n\n if request.method == 'POST' and 'file' in request.files:\n\n # clear uploads folder\n cleanup_uploads()\n\n # Video has been uploaded\n filename = video.save(request.files['file'])\n video_file_name[0] = filename\n\n # Process video on a new thread\n # threading.Thread(target=process_video, args=[os.path.join('uploads', filename)]).start()\n # threading.Thread(target=process_audio, args=[os.path.join('uploads', filename)]).start()\n threading.Thread(target=process_video, args=[os.path.join(basedir, 'static', 'data', 'uploads', filename)]).start()\n threading.Thread(target=process_audio, args=[os.path.join(basedir, 'static', 'data', 'uploads', filename)]).start()\n\n return jsonify({'success': True}), 200, {'ContentType': 'application/json'}\n\n elif request.method == 'GET':\n return render_template('home.html')", "def _videoclipupload(request, hash_key):\n # get video\n video_queryset = Video.objects.all().select_related('owner')\n video = get_object_or_404(video_queryset, hash_key=hash_key)\n \n if request.method == 'POST':\n \n # create a form instance and populate it with data from the request\n form = UploadClipForm(request.POST, request.FILES)\n \n # check whether it's valid\n if form.is_valid():\n # process the data in form.cleaned_data as required\n number = form.cleaned_data['formatted_number'].as_e164\n name = form.cleaned_data['name']\n clip = form.cleaned_data['clip']\n \n # get location of script\n script_dir = os.path.dirname(os.path.abspath(__file__))\n script_path = os.path.join(script_dir, \"process.sh\")\n \n # get locations of input and 
output files\n input_video = clip.temporary_file_path()\n output_video = NamedTemporaryFile(suffix='.mp4')\n output_image = NamedTemporaryFile(suffix='.jpg')\n output_duration = NamedTemporaryFile()\n \n # get the ffmpeg path\n # on production we had to install the 64 bit static ffmpeg build\n ffmpeg_path = ''\n if not settings.DEBUG:\n ffmpeg_path = '/home/nceruchalu/bin/'\n \n # Generate the output files\n subprocess.check_call(\n [script_path, input_video, output_video.name, \n output_image.name, output_duration.name, ffmpeg_path])\n \n # Get clip duration from the results\n clip_duration = map(float, output_duration)[0]\n \n # if we've come this far then all is well, and we can go ahead\n # and create this clip\n user = User.objects.get_user_by_number(number)\n if not user.is_active and name:\n user.full_name = name\n user.save()\n \n # first add user as a video user, because nobody should\n # add to a clip without being a video user\n VideoUsers.objects.add_users_to_video(video, user)\n \n # Try creating the clip with a thumbnail, if that fails try\n # without the thumbnail\n Clip.objects.create(owner=user,\n video=video,\n mp4=File(output_video),\n photo=ImageFile(output_image),\n duration=clip_duration)\n \n # close and delete the tempoary files\n output_video.close()\n output_image.close()\n output_duration.close()\n \n # redirect to the video details URL but add a querystring param\n # indicating that the video should start from the last clip\n video_url = reverse('web-video-detail',kwargs={'hash_key':hash_key})\n redirect_url = \"{video_url}?latest=1\".format(video_url=video_url)\n \n return HttpResponseRedirect(redirect_url)\n \n else:\n # if a GET (or any other method) we'll create a blank form\n form = UploadClipForm()\n \n return render_to_response('video/upload.html',\n {'video':video,\n 'form':form},\n context_instance=RequestContext(request))", "async def put_file(object_name: str, file: File, **kwargs) -> str:\n # TODO: Do not read file but rather stream content as it comes\n await file.read()\n # Get the synchronous file interface from the asynchronous file\n file_obj = file.file\n # Store position of cursor (number of bytes read)\n file_size = file_obj.tell()\n # Reset cursor at start of file\n file_obj.seek(0)\n # Trace file upload with its size\n logger.debug(f\"Uploading file: {object_name} with {file_size} bytes\")\n # Time file upload for debug\n start = time.time()\n # Store object on s3 storage\n client.put_object(\n bucket_name=DATASETS_BUCKET,\n object_name=object_name,\n length=file_size,\n data=file_obj,\n )\n end = time.time()\n # Log time spent\n logger.debug(f\"Took {end - start} seconds to upload {file_size} bytes\")", "def upload_blob(bucket_name, data, destination_blob_name):\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_string(data)", "def put(self,data):\n\n\t\tself.fd.write(str(data))\n\t\treturn 1", "def upload(self, fullfilename, remotefolder=None):\n print(\"[Remote Server] Uploading %s to %s:%s\" %(fullfilename, self.server, self.remotefolder))\n\n if not self.status:\n return 0\n\n if remotefolder == None:\n remotefolder = self.remotefolder\n\n if not self.cd(remotefolder):\n return 0\n\n if not self.sendFile(fullfilename):\n print(\"[Remote Server] Error uploading file %s\" %fullfilename)\n return 0\n\n print(\"[Remote Server] upload finished successfully\")\n\n return 1", "def upload_blob(bucket, data_string, destination_blob_name):\n\n blob 
= bucket.blob(destination_blob_name)\n\n blob.upload_from_string(data_string)\n\n print(\n \"File {} uploaded to {}.\".format(\n destination_blob_name, bucket\n )\n )", "def upload_blob(bucket, data_string, destination_blob_name):\n\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_string(data_string)\n\n print(\n \"File {} uploaded to {}.\".format(\n destination_blob_name, bucket\n )\n )" ]
[ "0.6141114", "0.5634965", "0.56206375", "0.550832", "0.5482587", "0.54187346", "0.5404088", "0.538972", "0.5272799", "0.52320516", "0.5201052", "0.511785", "0.51038563", "0.5073653", "0.5050996", "0.49917027", "0.49861932", "0.49765033", "0.49168584", "0.49088553", "0.48805287", "0.48795724", "0.4865323", "0.48652864", "0.48434505", "0.4835503", "0.48295677", "0.48145643", "0.48082858", "0.48082858" ]
0.7709178
0
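The row above pairs a docstring describing a chunked PUT upload with its Tornado-based implementation. As a rough illustration of the same Content-Range pattern outside Tornado, here is a minimal sketch using the requests library; the upload URL, file path, chunk size and MIME type are placeholders chosen for the example, not values taken from the row, and the header follows the usual "bytes first-last/total" convention rather than the exact string the snippet builds.

import os
import requests

def put_chunk(upload_uri, path, offset, chunk_size=1024 * 1024):
    # Send one chunk of the file starting at `offset`, with an RFC-style
    # Content-Range header (first-last/total). Returns the next offset.
    total = os.path.getsize(path)
    with open(path, "rb") as f:
        f.seek(offset)
        chunk = f.read(chunk_size)
    if not chunk:
        return offset  # nothing left to send
    last_byte = offset + len(chunk) - 1
    headers = {
        "Content-Type": "video/mp4",          # placeholder MIME type
        "Content-Length": str(len(chunk)),
        "Content-Range": "bytes %d-%d/%d" % (offset, last_byte, total),
    }
    resp = requests.put(upload_uri, data=chunk, headers=headers)
    resp.raise_for_status()
    return last_byte + 1

Calling put_chunk in a loop until the returned offset reaches the file size gives the same incremental behaviour the docstring describes.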
Get the last byte index of the file successfully uploaded. Performs a PUT to the given url, which returns a Range header indicating how much of the video file was successfully uploaded. If less than the total file size, this number is used in subsequent calls to upload_segment.
def get_last_uploaded_byte(self, check_uri):
    upload_check_headers = {'Content-Range': 'bytes */*'}
    request_headers = dict(upload_check_headers.items() + self.standard_headers.items())
    try:
        HTTPClient().fetch(check_uri, method="PUT",
                           body='', headers=request_headers)
    except HTTPError as e:
        log.info("Upload check: status code %s" % e.code)
        if e.code == 308:
            _range = int(e.response.headers['Range'].split('-')[1])
            log.info("Last uploaded byte: %d" % _range)
            return _range
        else:
            raise
    raise ValueError("Upload check unsuccessful")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_file_size(url: str):\n header = requests.head(url).headers\n if \"Content-Length\" in header and header[\"Content-Length\"] != 0:\n return int(header[\"Content-Length\"])\n elif \"Location\" in header:\n h = requests.head(header[\"Location\"]).headers\n return int(h.get(\"Content-Length\", 0))\n else:\n return 0", "def upload_progress(self, cloud_file, size, uploaded):", "def upload_complete(self, path, url, quiet):\r\n file_size = os.path.getsize(path)\r\n try:\r\n with tqdm(total=file_size,\r\n unit='B',\r\n unit_scale=True,\r\n unit_divisor=1024,\r\n disable=quiet) as progress_bar:\r\n with io.open(path, 'rb', buffering=0) as fp:\r\n reader = TqdmBufferedReader(fp, progress_bar)\r\n session = requests.Session()\r\n retries = Retry(total=10, backoff_factor=0.5)\r\n adapter = HTTPAdapter(max_retries=retries)\r\n session.mount('http://', adapter)\r\n session.mount('https://', adapter)\r\n response = session.put(url, data=reader)\r\n except Exception as error:\r\n print(error)\r\n return False\r\n return response.status_code == 200 or response.status_code == 201", "def getsize(url):\n \n o = urlparse(url)\n conn = httplib.HTTPConnection(o.netloc)\n conn.request(\"HEAD\", o.path)\n res = conn.getresponse()\n\n if res.status == 301 or res.status == 302:\t# poprawic na kod opisowy\n # print res.reason, \": \", res.getheader('location')\n return getsize(res.getheader('location'))\n\n elif res.status == 200:\n # inne interesujace tagi: etag\n # print res.getheader('content-length')\n return res.getheader('content-length')\n else:\n print \"getsize() UNKNOWN PROBLEM\"\n print res.reason, \": \", res.getheader('location')\n print res.getheaders()\n raise IOError", "def upload_segment(self, upload_uri, _range, data, filetype):\n content_range = '%d-%d/%d' % (_range, len(data), len(data))\n upload_headers = {'Content-Type': 'video/%s' % filetype,\n 'Content-Length': len(data),\n 'Content-Range': 'bytes: %s' % content_range}\n\n log.info(\"Sending file of size %d\" % len(data))\n log.info(\"Requesting %s\" % upload_uri)\n request_headers = dict(upload_headers.items() + self.standard_headers.items())\n r = HTTPClient().fetch(upload_uri, method=\"PUT\",\n body=data, headers=request_headers)\n log.info(\"Uploaded segment: status code %d\" % r.code)\n if r.code != 200:\n raise ValueError(\"Upload unsuccessful\")", "def get_remote_file_size(url: str = '', httpresponse: object = False) -> int:\n need_to_close = False\n if not httpresponse:\n httpresponse = url_is_alive(url)\n if not httpresponse:\n error_open_mess(url)\n return 0\n need_to_close = True\n\n content_length = httpresponse.getheader('Content-Length')\n if need_to_close:\n httpresponse.close()\n\n return int(content_length) if content_length else 0", "def tell(self):\n return self._upload_position", "def resumable_upload(insert_request):\r\n response = None\r\n error = None\r\n retry = 0\r\n while response is None:\r\n try:\r\n print(\"Uploading file...\")\r\n status, response = insert_request.next_chunk()\r\n if 'id' in response:\r\n print(\"Video id '%s' was successfully uploaded.\" % response['id'])\r\n f=open(\"uploadedIDs.txt\",\"a+\")\r\n f.write(\"\\n\"+response['id'])\r\n f.close()\r\n else:\r\n exit(\"The upload failed with an unexpected response: %s\" % response)\r\n except HttpError:\r\n import sys\r\n e = sys.exc_info()[1]\r\n if e.resp.status in RETRIABLE_STATUS_CODES:\r\n error = \"A retriable HTTP error %d occurred:\\n%s\" % (e.resp.status,\r\n e.content)\r\n else:\r\n raise\r\n except RETRIABLE_EXCEPTIONS:\r\n import 
sys\r\n e = sys.exc_info()[1]\r\n error = \"A retriable error occurred: %s\" % e\r\n\r\n if error is not None:\r\n print(error)\r\n retry += 1\r\n if retry > MAX_RETRIES:\r\n exit(\"No longer attempting to retry.\")\r\n\r\n max_sleep = 2 ** retry\r\n sleep_seconds = random.random() * max_sleep\r\n print(\"Sleeping %f seconds and then retrying...\" % sleep_seconds)\r\n time.sleep(sleep_seconds)", "def main(url, chunk_size_in_MB, key, bucket):\n\n upload_id = create_multipart_upload(bucket, key)\n parts = download_and_upload(url, upload_id, key, bucket, chunk_size_in_MB)\n location = complete_multipart_upload(key, bucket, upload_id, parts)\n\n return location", "def _upload_file_bytes(self, conn, http_conn, fp, file_length,\r\n total_bytes_uploaded, cb, num_cb):\r\n buf = fp.read(self.BUFFER_SIZE)\r\n if cb:\r\n if num_cb > 2:\r\n cb_count = file_length / self.BUFFER_SIZE / (num_cb-2)\r\n elif num_cb < 0:\r\n cb_count = -1\r\n else:\r\n cb_count = 0\r\n i = 0\r\n cb(total_bytes_uploaded, file_length)\r\n\r\n # Build resumable upload headers for the transfer. Don't send a\r\n # Content-Range header if the file is 0 bytes long, because the\r\n # resumable upload protocol uses an *inclusive* end-range (so, sending\r\n # 'bytes 0-0/1' would actually mean you're sending a 1-byte file).\r\n put_headers = {}\r\n if file_length:\r\n range_header = self._build_content_range_header(\r\n '%d-%d' % (total_bytes_uploaded, file_length - 1),\r\n file_length)\r\n put_headers['Content-Range'] = range_header\r\n # Set Content-Length to the total bytes we'll send with this PUT.\r\n put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)\r\n http_request = AWSAuthConnection.build_base_http_request(\r\n conn, 'PUT', path=self.tracker_uri_path, auth_path=None,\r\n headers=put_headers, host=self.tracker_uri_host)\r\n http_conn.putrequest('PUT', http_request.path)\r\n for k in put_headers:\r\n http_conn.putheader(k, put_headers[k])\r\n http_conn.endheaders()\r\n\r\n # Turn off debug on http connection so upload content isn't included\r\n # in debug stream.\r\n http_conn.set_debuglevel(0)\r\n while buf:\r\n http_conn.send(buf)\r\n total_bytes_uploaded += len(buf)\r\n if cb:\r\n i += 1\r\n if i == cb_count or cb_count == -1:\r\n cb(total_bytes_uploaded, file_length)\r\n i = 0\r\n buf = fp.read(self.BUFFER_SIZE)\r\n if cb:\r\n cb(total_bytes_uploaded, file_length)\r\n if total_bytes_uploaded != file_length:\r\n # Abort (and delete the tracker file) so if the user retries\r\n # they'll start a new resumable upload rather than potentially\r\n # attempting to pick back up later where we left off.\r\n raise ResumableUploadException(\r\n 'File changed during upload: EOF at %d bytes of %d byte file.' 
%\r\n (total_bytes_uploaded, file_length),\r\n ResumableTransferDisposition.ABORT)\r\n resp = http_conn.getresponse()\r\n body = resp.read()\r\n # Restore http connection debug level.\r\n http_conn.set_debuglevel(conn.debug)\r\n\r\n if resp.status == 200:\r\n return resp.getheader('etag') # Success\r\n # Retry timeout (408) and status 500 and 503 errors after a delay.\r\n elif resp.status in [408, 500, 503]:\r\n disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY\r\n else:\r\n # Catch all for any other error codes.\r\n disposition = ResumableTransferDisposition.ABORT\r\n raise ResumableUploadException('Got response code %d while attempting '\r\n 'upload (%s)' %\r\n (resp.status, resp.reason), disposition)", "def finish(cls, upload, location=None, bytes_downloaded=0):\n path = \"uploader/finish/%s\" % upload[\"id\"]\n LOGGER.debug(path)\n payload = {\"bytes_transferred\": bytes_downloaded, \"location\": location}\n try:\n return Backend.put(path, payload, headers=Backend.headers())\n except requests.HTTPError as err:\n if err.response.status_code == 410:\n LOGGER.warning(\"Cannot finish file %s. File not active (410)\",\n upload[\"id\"])\n raise err\n except:\n raise", "def download_progress(self, cloud_file, size, downloaded):", "def invalid_file_bytes_uploaded(self) -> float:\n return pulumi.get(self, \"invalid_file_bytes_uploaded\")", "def get_content_length(self, hdr):\n try:\n return int(hdr.data[\"content-length\"])\n except (ValueError, TypeError):\n return -1", "def progress(request):\n file_id = request.GET['X-Progress-ID']\n session = DBSession()\n u = session.query(Upload).filter_by(id=file_id).one()\n data = {'state': u.state}\n if u.state == 'uploading':\n if not os.path.exists(u.tmp_path):\n # The temporary file has not been created yet or it has\n # already been renamed. We return 0 in both case, the\n # front-end code will know what to do.\n received = 0\n else:\n received = os.stat(u.tmp_path).st_size\n data.update({'size': u.size, 'received': received})\n return data", "def _upload_chunk(self, final=False):\n out = self.fs.session.post(\n self.location,\n data=self.buffer.getvalue(),\n headers={\"content-type\": \"application/octet-stream\"},\n )\n out.raise_for_status()\n return True", "def BytesTransferred(self) -> int:", "def content_length(self):\n try:\n value = int(self.environ.get('CONTENT_LENGTH', 0))\n except ValueError:\n return 0\n\n if value >= 0:\n return value\n return 0", "def _attempt_resumable_upload(self, key, fp, file_length, headers, cb,\r\n num_cb):\r\n (server_start, server_end) = self.SERVER_HAS_NOTHING\r\n conn = key.bucket.connection\r\n if self.tracker_uri:\r\n # Try to resume existing resumable upload.\r\n try:\r\n (server_start, server_end) = (\r\n self._query_server_pos(conn, file_length))\r\n self.server_has_bytes = server_start\r\n key=key\r\n if conn.debug >= 1:\r\n print 'Resuming transfer.'\r\n except ResumableUploadException, e:\r\n if conn.debug >= 1:\r\n print 'Unable to resume transfer (%s).' 
% e.message\r\n self._start_new_resumable_upload(key, headers)\r\n else:\r\n self._start_new_resumable_upload(key, headers)\r\n\r\n # upload_start_point allows the code that instantiated the\r\n # ResumableUploadHandler to find out the point from which it started\r\n # uploading (e.g., so it can correctly compute throughput).\r\n if self.upload_start_point is None:\r\n self.upload_start_point = server_end\r\n\r\n if server_end == file_length:\r\n # Boundary condition: complete file was already uploaded (e.g.,\r\n # user interrupted a previous upload attempt after the upload\r\n # completed but before the gsutil tracker file was deleted). Set\r\n # total_bytes_uploaded to server_end so we'll attempt to upload\r\n # no more bytes but will still make final HTTP request and get\r\n # back the response (which contains the etag we need to compare\r\n # at the end).\r\n total_bytes_uploaded = server_end\r\n else:\r\n total_bytes_uploaded = server_end + 1\r\n fp.seek(total_bytes_uploaded)\r\n conn = key.bucket.connection\r\n\r\n # Get a new HTTP connection (vs conn.get_http_connection(), which reuses\r\n # pool connections) because httplib requires a new HTTP connection per\r\n # transaction. (Without this, calling http_conn.getresponse() would get\r\n # \"ResponseNotReady\".)\r\n http_conn = conn.new_http_connection(self.tracker_uri_host,\r\n conn.is_secure)\r\n http_conn.set_debuglevel(conn.debug)\r\n\r\n # Make sure to close http_conn at end so if a local file read\r\n # failure occurs partway through server will terminate current upload\r\n # and can report that progress on next attempt.\r\n try:\r\n return self._upload_file_bytes(conn, http_conn, fp, file_length,\r\n total_bytes_uploaded, cb, num_cb)\r\n except (ResumableUploadException, socket.error):\r\n resp = self._query_server_state(conn, file_length)\r\n if resp.status == 400:\r\n raise ResumableUploadException('Got 400 response from server '\r\n 'state query after failed resumable upload attempt. 
This '\r\n 'can happen if the file size changed between upload '\r\n 'attempts', ResumableTransferDisposition.ABORT)\r\n else:\r\n raise\r\n finally:\r\n http_conn.close()", "def upload_finish(self, cloud_file):", "def upload_video(self, video_file):\r\n part = \"snippet,status\"\r\n metadata = self.get_metadata(video_file)\r\n body = {\r\n \"snippet\": {\r\n \"title\": metadata['title'],\r\n \"description\": metadata['description'],\r\n \"tags\": metadata['categoryId'],\r\n \"categoryId\": metadata['categoryId']\r\n },\r\n \"status\": {\r\n \"privacyStatus\": \"public\",\r\n \"license\": \"youtube\", # temporary, see gh#414\r\n \"embeddable\": True,\r\n \"publicStatsViewable\": True\r\n }\r\n }\r\n # This is to fix a bug, the API thinks our .ogg files are audio/ogg\r\n mimetype = \"video/{}\".format(video_file.split(\".\")[-1])\r\n media_body = MediaFileUpload(video_file, chunksize=-1, resumable=True, mimetype=mimetype)\r\n insert_request = self.service.videos().insert(part=part, body=body, media_body=media_body)\r\n response = None\r\n error = None\r\n retry = 0\r\n sleep_seconds = 5.0\r\n while response is None:\r\n try:\r\n log.info(\"Uploading %s\" % video_file)\r\n (status, response) = insert_request.next_chunk()\r\n if 'id' in response:\r\n return (Response.SUCCESS, response)\r\n else:\r\n return (Response.UNEXPECTED_FAILURE, response)\r\n except HttpError as e:\r\n if e.resp.status in self.RETRIABLE_STATUS_CODES:\r\n error = \"A retriable HTTP error {} occurred:\\n{}\".format(e.resp.status, e.content)\r\n else:\r\n return (Response.UNRETRIABLE_ERROR, {\"status\": e.resp.status, \"content\": e.content})\r\n except self.RETRIABLE_EXCEPTIONS as e:\r\n error = \"A retriable error occurred: {}\".format(e)\r\n except client.AccessTokenRefreshError:\r\n return (Response.ACCESS_TOKEN_ERROR, None)\r\n if error is not None:\r\n log.error(error)\r\n retry += 1\r\n if retry > self.MAX_RETRIES:\r\n return (Response.MAX_RETRIES_REACHED, None)\r\n log.info(\"Sleeping %s seconds and then retrying...\" % sleep_seconds)\r\n time.sleep(sleep_seconds)", "def handle_put_progress(self, filegen):\n # print \"bytes so-far: \", filegen.bytes_read\n\n if self.maybe_touch():\n self.log(\"UPLOAD_PROGRESS\", level=INFO)\n self.touch()\n Backend.touch(\n self.current_upload,\n bytes_downloaded=filegen.bytes_read,\n location=self.location)", "def filesize(self):\n return safeInt(self.tag(\"filesize\"))", "def content_length(self):\n # If none, there is nothing we can do, the server didn't have\n # a response.\n return self._expected_size", "def media_seek_position(self):\n return self._state.get(\"seek\", None)", "def _http_get_position(self) -> float:\n return float(self._http_status()['position'])", "def _UploadWithProgressInternal(self, media, gcs_file_name):\n bucket, bucket_path = self._ParseBucketAndPath(gcs_file_name)\n request = self._service.objects().insert(bucket=bucket,\n media_body=media,\n name=bucket_path)\n if media._size == 0: # pylint: disable=protected-access\n return self._RunWithRetries(request.execute, self._CommonErrorMatcher)\n\n response = None\n logged_percent_done = 0\n while response is None:\n status, response = self._RunWithRetries(request.next_chunk,\n self._CommonErrorMatcher)\n if status:\n percent_done = int(status.progress() * 100)\n if percent_done - logged_percent_done >= 5:\n logging.info('Uploading to gs://%s/%s: %d%% complete.',\n bucket,\n bucket_path,\n int(status.progress() * 100))\n logged_percent_done = percent_done\n return response", "def 
get_server_status_code(url):\n # http://stackoverflow.com/questions/1140661\n host, path = urlparse.urlparse(url)[1:3] # elems [1] and [2]\n try:\n conn = httplib.HTTPConnection(host)\n conn.request('HEAD', path)\n return conn.getresponse().status\n except StandardError:\n return None", "def get_status_code_for_put_or_post(request_header: str, file: str) -> int:\n ctype = \"Content-Type: text/html\"\n if file == \"/\":\n file = \"/index.html\"\n\n if request_header.find(ctype) == -1:\n return 501\n\n if not os.path.isfile(file[1:]):\n return 201\n\n return 204", "def cb(self, complete, total):\n percent = int(complete * 100.0 / total)\n log.info(\"Upload completion: {0}%\".format(percent))" ]
[ "0.61535513", "0.5919975", "0.5807138", "0.5694676", "0.56919193", "0.5665816", "0.562974", "0.55861944", "0.54725754", "0.54708993", "0.54157346", "0.53107536", "0.53089446", "0.5262854", "0.52135384", "0.51811814", "0.5161932", "0.5133586", "0.5120558", "0.5094605", "0.50907135", "0.5025104", "0.5024852", "0.5018597", "0.50179166", "0.50114286", "0.49969366", "0.49924368", "0.499172", "0.49834093" ]
0.6557539
0
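Taken together, the last two rows describe the usual resume handshake: an empty PUT with "Content-Range: bytes */*" asks the server how much of the file it already has, and a 308 response carries a Range header with the last stored byte. The sketch below mirrors that check with the requests library; the 308 status and the "bytes=0-N" shape of the Range header are assumptions carried over from the snippet above, and check_uri is a placeholder.

import requests

def last_uploaded_byte(check_uri):
    # Empty PUT with a wildcard Content-Range to query upload progress.
    resp = requests.put(check_uri, data=b"",
                        headers={"Content-Range": "bytes */*"},
                        allow_redirects=False)
    if resp.status_code == 308:
        # Assumed header shape, e.g. "bytes=0-12345": take the last byte index.
        return int(resp.headers["Range"].split("-")[1])
    resp.raise_for_status()
    raise ValueError("Upload check unsuccessful")

The value returned here would then be fed back as the _range argument of the next upload_segment call (or as the offset of the put_chunk sketch above).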