| query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (sequence of length 30) | negative_scores (sequence of length 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Successively applies each of the rotations stored in givens to H_col. | def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray,
k: int) -> jax.ShapedArray:
rotation_carry = (H_col, 0, k, givens)
def loop_condition(carry):
i = carry[1]
k = carry[2]
return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0)
def apply_ith_rotation(carry):
H_col, i, k, givens = carry
cs = givens[0, i]
sn = givens[1, i]
H_i = cs * H_col[i] - sn * H_col[i + 1]
H_ip1 = sn * H_col[i] + cs * H_col[i + 1]
H_col = jax.ops.index_update(H_col, jax.ops.index[i], H_i)
H_col = jax.ops.index_update(H_col, jax.ops.index[i + 1], H_ip1)
return (H_col, i + 1, k, givens)
rotation_carry = jax.lax.while_loop(loop_condition,
apply_ith_rotation,
rotation_carry)
H_col = rotation_carry[0]
return H_col | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]:\n # This call successively applies each of the\n # Givens rotations stored in givens[:, :k] to H_col.\n H_col = apply_rotations(H_col, givens, k)\n\n cs_k, sn_k = givens_rotation(H_col[k], H_col[k + 1])\n givens = jax.ops.index_update(givens, jax.ops.index[0, k], cs_k)\n givens = jax.ops.index_update(givens, jax.ops.index[1, k], sn_k)\n\n r_k = cs_k * H_col[k] - sn_k * H_col[k + 1]\n R_col = jax.ops.index_update(H_col, jax.ops.index[k], r_k)\n R_col = jax.ops.index_update(R_col, jax.ops.index[k + 1], 0.)\n return R_col, givens",
"def cw_rotate(self):\n self.grid = [list(x) for x in zip(*self.grid[::-1])]\n self.find_edges()",
"def update_H(self):\n self.grid.H[self.loc] -= (\n self.grid.courant_number\n * self.grid.inverse_permeability[self.loc]\n * self.phi_H\n )",
"def correct_rotation(k_rotations):\n\n for key, value in Chunk.global_piece_rotations.items():\n Chunk.global_piece_rotations[key] = (k_rotations + value) % 4\n # Should I correct it for the side rotations also?",
"def test_givens_rotate(shape, indices, row, left):\n matrix = np.random.rand(*shape) * 1j + np.random.rand(*shape)\n unitary, (i, j) = matrix.copy(), indices\n if row:\n a, b = matrix[indices, j - 1]\n grot_mat = _givens_matrix(a, b, left)\n unitary[indices] = grot_mat @ unitary[indices]\n res = b / np.abs(b) * np.hypot(np.abs(a), np.abs(b)) if b else 1.0\n if left:\n assert np.isclose(unitary[i, j - 1], 0.0) and np.isclose(unitary[j, j - 1], res)\n else:\n assert np.isclose(unitary[i, j - 1], res) and np.isclose(unitary[j, j - 1], 0.0)\n else:\n a, b = matrix[j - 1, indices].T\n grot_mat = _givens_matrix(a, b, left)\n unitary[:, indices] = unitary[:, indices] @ grot_mat.T\n res = b / np.abs(b) * np.hypot(np.abs(a), np.abs(b)) if b else 1.0\n if left:\n assert np.isclose(unitary[j - 1, i], 0.0) and np.isclose(unitary[j - 1, j], res)\n else:\n assert np.isclose(unitary[j - 1, indices[0]], res) and np.isclose(\n unitary[j - 1, indices[1]], 0.0\n )",
"def transform_to_rotating_frame(H, U, D):\n \n #Determine the effective hamiltonian in the rotating frame\n Heff = lambda t: np.conj(U(t).T) @ H(t) @ U(t) + D\n \n return Heff",
"def update_H(self):\n gamma = self.get_gamma()\n delta = self.get_delta()\n summand2 = ((1 + (gamma.transpose().dot(self.H).dot(gamma) /\n delta.transpose().dot(gamma))) *\n delta.dot(delta.transpose()) / delta.transpose().dot(gamma)\n )\n summand3 = - ((delta.dot(gamma.transpose()).dot(self.H) +\n self.H.dot(gamma).dot(delta.transpose())) /\n delta.transpose().dot(gamma))\n self.H = self.H + summand2 + summand3",
"def update_rotations(pieces_to_update, k_rotations):\n # A lot of spaghetti on the next line\n for key in pieces_to_update:\n old_value = Chunk.global_piece_rotations[key]\n new_value = (k_rotations + old_value) % 4\n Chunk.global_piece_rotations[key] = new_value\n\n for piece in pieces_to_update:\n new_side_locations = list(\n map(lambda side_num: (side_num - k_rotations) % 4, Chunk.global_side_location[piece])\n )\n Chunk.global_side_location[piece] = new_side_locations",
"def rotate_components(phi, gamma = 1.0, q = 50, tol = 1e-6):\n p,k = phi.shape\n r = np.eye(k)\n d = 0\n cnt = 0\n for i in np.arange(q):\n cnt = cnt + 1\n d_old = d\n Lambda = np.dot(phi, r)\n u,s,vh = np.linalg.svd(np.dot(\n phi.T,np.asarray(Lambda)**3 - (gamma/p) * np.dot(\n Lambda, np.diag(np.diag(np.dot(Lambda.T,Lambda))))))\n print(\"Matrix u: \")\n print(u)\n print(\"Matrix s: \")\n print(s)\n print(\"Matrix vh: \")\n print(vh)\n r = np.dot(u, vh)\n d = np.sum(s)\n if d_old != 0 and d / d_old < 1 + tol:\n break\n print(\"Trace rotate_components_START\")\n print(\"Rotation matrix: \")\n print(r)\n print(\"Loop number: \" + str(cnt))\n print(\"Trace rotate_components_END\")\n return np.dot(phi, r)",
"def _inverse_ops(self, Yl, Yh):\n a = len(Yh) # No of levels.\n device = self.device\n\n # If biort has 6 elements instead of 4, then it's a modified\n # rotationally symmetric wavelet\n # FIXME: there's probably a nicer way to do this\n if len(self.biort) == 4:\n h0o, g0o, h1o, g1o = self.biort\n elif len(self.biort) == 6:\n h0o, g0o, h1o, g1o, h2o, g2o = self.biort\n else:\n raise ValueError('Biort wavelet must have 6 or 4 components.')\n\n # If qshift has 12 elements instead of 8, then it's a modified\n # rotationally symmetric wavelet\n # FIXME: there's probably a nicer way to do this\n if len(self.qshift) == 8:\n h0a, h0b, g0a, g0b, h1a, h1b, g1a, g1b = self.qshift\n elif len(self.qshift) == 12:\n h0a, h0b, g0a, g0b, h1a, h1b, \\\n g1a, g1b, h2a, h2b, g2a, g2b = self.qshift\n else:\n raise ValueError('Qshift wavelet must have 12 or 8 components.')\n\n level = a - 1\n Z = Yl\n\n # This ensures that for level 1 we never do the following\n while level >= 1:\n if self.complex:\n lh = c2q(tf.real(Yh[level][:,:,0:6:5]),\n tf.imag(Yh[level][:,:,0:6:5]))\n hl = c2q(tf.real(Yh[level][:,:,2:4:1]),\n tf.imag(Yh[level][:,:,2:4:1]))\n hh = c2q(tf.real(Yh[level][:,:,1:5:3]),\n tf.imag(Yh[level][:,:,1:5:3]))\n else:\n lh = c2q(Yh[level].real[:,:,0:6:5],\n Yh[level].imag[:,:,0:6:5])\n hl = c2q(Yh[level].real[:,:,2:4:1],\n Yh[level].imag[:,:,2:4:1])\n hh = c2q(Yh[level].real[:,:,1:5:3],\n Yh[level].imag[:,:,1:5:3])\n\n # Do even Qshift filters on columns.\n y1 = colifilt(Z, g0b, g0a, device=device, name='l%d_ll_col_low' % level) + \\\n colifilt(lh, g1b, g1a, device=device, name='l%d_lh_col_high' % level)\n\n if len(self.qshift) >= 12:\n y2 = colifilt(hl, g0b, g0a, device=device, name='l%d_hl_col_low' % level)\n y2bp = colifilt(hh, g2b, g2a, device=device, name='l%d_hh_col_bp' % level)\n\n # Do even Qshift filters on rows.\n Z = rowifilt(y1, g0b, g0a, device=device, name='l%d_ll_row_low' % level) + \\\n rowifilt(y2, g1b, g1a, device=device, name='l%d_hl_row_high' % level) + \\\n rowifilt(y2bp, g2b, g2a, device=device, name='l%d_hh_row_bp' % level)\n else:\n y2 = colifilt(hl, g0b, g0a, device=device, name='l%d_hl_col_low' % level) + \\\n colifilt(hh, g1b, g1a, device=device, name='l%d_hh_col_high' % level)\n\n # Do even Qshift filters on rows.\n Z = rowifilt(y1, g0b, g0a, device=device, name='l%d_ll_row_low' % level) + \\\n rowifilt(y2, g1b, g1a, device=device, name='l%d_hl_row_high' % level)\n\n # Check size of Z and crop as required\n Z_r, Z_c = Z.get_shape().as_list()[-2:]\n S_r, S_c = Yh[level-1].get_shape().as_list()[-2:]\n # check to see if this result needs to be cropped for the rows\n if Z_r != S_r * 2:\n Z = Z[:,:, 1:-1, :]\n # check to see if this result needs to be cropped for the cols\n if Z_c != S_c * 2:\n Z = Z[:,:, :, 1:-1]\n\n # Assert that the size matches at this stage\n Z_r, Z_c = Z.get_shape().as_list()[-2:]\n if Z_r != S_r * 2 or Z_c != S_c * 2:\n raise ValueError(\n 'Sizes of highpasses {}x{} are not '.format(Z_r, Z_c) +\n 'compatible with {}x{} from next level'.format(S_r, S_c))\n\n level = level - 1\n\n if level == 0:\n if self.complex:\n lh = c2q(tf.real(Yh[0][:,:,0:6:5]),\n tf.imag(Yh[0][:,:,0:6:5]))\n hl = c2q(tf.real(Yh[0][:,:,2:4:1]),\n tf.imag(Yh[0][:,:,2:4:1]))\n hh = c2q(tf.real(Yh[0][:,:,1:5:3]),\n tf.imag(Yh[0][:,:,1:5:3]))\n else:\n lh = c2q(Yh[0].real[:,:,0:6:5],\n Yh[0].imag[:,:,0:6:5])\n hl = c2q(Yh[0].real[:,:,2:4:1],\n Yh[0].imag[:,:,2:4:1])\n hh = c2q(Yh[0].real[:,:,1:5:3],\n Yh[0].imag[:,:,1:5:3])\n\n # Do odd top-level filters on columns.\n y1 = colfilter(Z, 
g0o, device=device, name='l0_ll_col_low') + \\\n colfilter(lh, g1o, device=device, name='l0_lh_col_high')\n\n if len(self.biort) >= 6:\n y2 = colfilter(hl, g0o, device=device, name='l0_hl_col_low')\n y2bp = colfilter(hh, g2o, device=device, name='l0_hh_col_bp')\n\n # Do odd top-level filters on rows.\n Z = rowfilter(y1, g0o, device=device, name='l0_ll_row_low') + \\\n rowfilter(y2, g1o, device=device, name='l0_hl_row_high') + \\\n rowfilter(y2bp, g2o, device=device, name='l0_hh_row_bp')\n else:\n y2 = colfilter(hl, g0o, device=device, name='l0_hl_col_low') + \\\n colfilter(hh, g1o, device=device, name='l0_hh_col_high')\n\n # Do odd top-level filters on rows.\n Z = rowfilter(y1, g0o, device=device, name='l0_ll_row_low') + \\\n rowfilter(y2, g1o, device=device, name='l0_hl_row_high')\n\n return Z",
"def M_g(self):\n\n print(\"\", file=self.logfile)\n print(\"Updating g\", file=self.logfile)\n M_mu1 = np.lib.stride_tricks.as_strided(self.mu_pad,\n shape=[self.P+1, self.L_h],\n strides=[self.mu_pad.strides[-1], self.mu_pad.strides[-1]])\n\n M_mu1 = M_mu1[::-1,:]\n M_mu2 = np.transpose(M_mu1[1:,:])\n M_mu1 = M_mu1*self.e2\n\n M_mu = np.dot(M_mu1, M_mu2)\n v_mu = M_mu[0,:]\n M_mu = M_mu[1:,:]\n\n M_R = np.zeros((self.P,self.P+1))\n for p in range(1,self.P+1):\n for q in range(0,self.P+1):\n M_R[p-1,q] = np.sum(np.diag(self.R, q-p)[:self.L_h-max(p,q)]*self.e2[max(p,q):self.L_h])\n\n v_R = M_R[:,0]\n M_R = M_R[:,1:]\n\n self.alpha_g = np.dot(np.linalg.inv(M_mu + M_R), v_mu+v_R)\n self.A = np.concatenate([[1], -self.alpha_g])\n\n self._propagate_A()",
"def iterate_over_hkl_compute(self, max_hkl=6):\n \n # r will contain the return value, an array with rows that contain:\n # h, k, l, qhkl, qhkl_vector\n r = []\n \n for h in range(-max_hkl,max_hkl+1):\n for k in range(-max_hkl,max_hkl+1):\n for l in range(-max_hkl,max_hkl+1):\n \n # Don't put a reflection at origin\n if not (h==0 and k==0 and l==0):\n qhkl, qhkl_vector = self.q_hkl_exp(h,k,l)\n r.append( [ h, k, l, qhkl, qhkl_vector ] )\n \n return r",
"def _rotate(self):\n \r\n if self.clr == 1: # (default rotation) \r\n # o o o o \r\n # o x x o x o o x\r\n # o o o o\r\n _colOffsets = [[-1,-1, 0, 0], [-1, 0, 0, 1], [ 1, 1, 0, 0], [ 1, 0, 0,-1]] #\r\n _rowOffsets = [[ 1, 0, 0,-1], [-1,-1, 0, 0], [-1, 0, 0, 1], [ 1, 1, 0, 0]] #\r\n elif self.clr == 2:\r\n # o o o o \r\n # o x o x x o x o\r\n # o o o o\r\n _colOffsets = [[-1,-1, 0, 0], [ 1, 0, 0,-1], [ 1, 1, 0, 0], [-1, 0, 0, 1]] #\r\n _rowOffsets = [[-1, 0, 0, 1], [-1,-1, 0, 0], [ 1, 0, 0,-1], [ 1, 1, 0, 0]] #\n \r\n elif self.clr == 3: # \r\n # o o o o \r\n # x o x o x o x o\r\n # o o o o\n \r\n _colOffsets = [[-1, 0, 0, 0], [-1,-1, 0, 1], [ 1, 0, 0, 0], [ 1, 1, 0,-1]] #\r\n _rowOffsets = [[ 1, 1, 0,-1], [-1, 0, 0, 0], [-1,-1, 0, 1], [ 1, 0, 0, 0]] #\n \r\n elif self.clr == 4:\r\n # o o o o \r\n # x o x o x o x o\r\n # o o o o\r\n _colOffsets = [[-1, 0, 0, 0], [1, 1, 0, -1], [1, 0, 0,0], [-1, -1, 0,1]]\n _rowOffsets = [[-1,-1, 0, 1], [-1,0, 0, 0], [1,1, 0,-1], [1,0, 0, 0]]\n \r\n elif self.clr == 5: # o o\r\n # o x \r\n # x o x o o o o o x o\r\n # o o \r\n _colOffsets = [[ 0, 0, 0, 0], [ 2, 1, 0,-1], [ 0, 0, 0, 0], [-2,-1, 0, 1]] #\r\n _rowOffsets = [[-2,-1, 0, 1], [ 0, 0, 0, 0], [ 2, 1, 0,-1], [ 0, 0, 0, 0]] #\r\n elif self.clr == 6: #\r\n # o o o \r\n # o x o x o x o o x o\r\n # o o o \r\n _colOffsets = [[ 0,-1, 0, 0], [-1, 0, 0, 1], [ 0, 1, 0, 0], [ 1, 0, 0,-1]] #\r\n _rowOffsets = [[ 1, 0, 0,-1], [ 0,-1, 0, 0], [-1, 0, 0, 1], [ 0, 1, 0, 0]] #\r\n elif self.clr == 7: # \r\n # o o o o o o o o\r\n # o x o x o x o x\r\n # \r\n _colOffsets = [[-1,-1, 0, 0], [-1,-1, 0, 0], [-1,-1, 0, 0], [-1,-1, 0, 0]] #@@\r\n _rowOffsets = [[ 0,-1, 0,-1], [ 0,-1, 0,-1], [ 0,-1, 0,-1], [ 0,-1, 0,-1]] #@@\n \r\n self._colOffsets = _colOffsets[self._rot] #@@\r\n self._rowOffsets = _rowOffsets[self._rot] #@@\r\n self._update() #@@\r",
"def systematize_algorithm(H: np.array) -> Tuple[np.array, np.array, np.array]:\n n, c = H.shape\n m = np.abs(n-c)\n\n G_s = np.zeros((m, c), dtype=int)\n G_s[:, :m] = np.identity(m)\n\n H_s, permutation = systematize_matrix(H, post_system=True)\n\n rev_permutation = reverse_permutation(permutation)\n\n P = H_s[:, :m]\n\n G_s[:, m:] = P.T\n\n G = G_s[:, rev_permutation]\n\n return G, G_s, H_s",
"def commutator(self, G, H):\n ggens = G.generators\n hgens = H.generators\n commutators = []\n for ggen in ggens:\n for hgen in hgens:\n commutator = rmul(hgen, ggen, ~hgen, ~ggen)\n if commutator not in commutators:\n commutators.append(commutator)\n res = self.normal_closure(commutators)\n return res",
"def G_permutation(self, W):\n Wsh = W.get_shape().as_list()\n cayley = self.cayleytable\n U = []\n for i in range(24):\n perm_mat = self.get_permutation_matrix(cayley, i)\n w = W[:,:,:,:,:,:,i]\n w = tf.transpose(w, [0,1,2,3,5,4])\n w = tf.reshape(w, [-1, 24])\n w = w @ perm_mat\n w = tf.reshape(w, Wsh[:4]+[-1,24])\n U.append(tf.transpose(w, [0,1,2,3,5,4]))\n return U",
"def rotate(self, matrix: List[List[int]]) -> None:\n height=len(matrix)\n for h in range(math.ceil(height/2)):\n for i in range(h,height-h-1):\n # print((h,i), (height-i-1,h))\n temp=matrix[h][i]\n matrix[h][i] = matrix[height-i-1][h]\n matrix[height-i-1][h] = matrix[height-h-1][height-i-1]\n matrix[height-h-1][height-i-1] = matrix[i][height-h-1]\n matrix[i][height-h-1] = temp",
"def htm0_3(joint_rotations):\n # H0_1\n r0_1 = np.dot(rot_x(90), rot_y(joint_rotations[0]))\n d0_1 = transl(0, 0, a1)\n h0_1 = htm(r0_1, d0_1)\n\n # H1_2\n r1_2 = rot_z(joint_rotations[1])\n x1_2 = a2*np.cos(np.radians(joint_rotations[1]))\n y1_2 = a2*np.sin(np.radians(joint_rotations[1]))\n z1_2 = 0\n d1_2 = transl(x1_2, y1_2, z1_2)\n h1_2 = htm(r1_2, d1_2)\n\n # H2_3\n r2_3 = rot_z(joint_rotations[2])\n x2_3 = a3*np.cos(np.radians(joint_rotations[2]))\n y2_3 = a3*np.sin(np.radians(joint_rotations[2]))\n z2_3 = 0\n d2_3 = transl(x2_3, y2_3, z2_3)\n h2_3 = htm(r2_3, d2_3)\n\n # H0_3\n h0_2 = np.dot(h0_1, h1_2)\n h0_3 = np.dot(h0_2, h2_3)\n return h0_3",
"def givens_rotation_matrix(i, j, theta, N):\n R = np.identity(N)\n c = np.cos(theta)\n s = np.sin(theta)\n R[i, i] = c\n R[j, j] = c\n R[i, j] = -s\n R[j, i] = s\n return R",
"def update_H(self):\n self.grid.H[:, -1, :, :] = self.grid.H[:, 0, :, :]",
"def update_E(self, curl_H):\n loc = (self.x, self.y)\n self.grid.E[loc] += (\n self.grid.courant_number / self.permittivity * curl_H[loc]\n )",
"def update_H(self):\n self.grid.H[:, :, -1, :] = self.grid.H[:, :, 0, :]",
"def apply_fhd(self, gfhd):\n for bl in self.data.keys():\n i,j = bl\n p1,p2 = self.pol\n G = gfhd[p1][i]*gfhd[p2][j].conj()\n ind = np.where(G != 0)[0]\n self.data[bl][self.pol][:,ind] /= G[ind]",
"def harzburgite():\n\n rho = 3200.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 226.5; C[0,1] = 75.34; C[0,2] = 74.73; C[0,3] = -0.27; C[0,4] = -2.00; C[0,5] = 1.85\n C[1,0] = C[0,1]; C[1,1] = 242.8; C[1,2] = 73.68; C[1,3] = -3.6; C[1,4] = -1.91; C[1,5] = 4.14\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 230.; C[2,3] = -4.36; C[2,4] = -4.27; C[2,5] = -0.27\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 80.75; C[3,4] = 1.81; C[3,5] = -2.19\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 76.94; C[4,5] = -1.88\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 79.15\n\n return C, rho",
"def Green_func(self):\n if self.bc == True:\n size = self.grid_size\n else:\n size = 2*self.grid_size\n self.Green = np.zeros([size, size])\n for x in range(len(self.Green[0])):\n for y in range(len(self.Green[1])):\n radius = np.sqrt(x**2 + y**2) \n if radius < self.soften: \n radius = self.soften\n self.Green[x, y]=1/(4 * np.pi * radius)\n if self.grid_size%2 == 0: \n self.Green[: size//2, size//2 : ] = np.flip(self.Green[: size//2, : size//2], axis = 1) # an intermittent step - the original grid has only been flipped once (2 x the original size)\n self.Green[ size//2 : , :] = np.flip(self.Green[: size//2, :], axis = 0)\n else: \n print(\"Exiting - Grid size is currently odd. Pleaset set to an even value.\")",
"def set_rotation_matrices(self):\r\n for i in range(len(self.vertices)):\r\n self.vertices[i].meta['C'] = rotation_matrix(self.vertices[i].meta['axis'][0], \r\n self.vertices[i].meta['axis'][1], \r\n self.vertices[i].meta['axis'][2], \r\n self.vertices[i].meta['axis_order'],\r\n degrees=True)\r\n # Todo: invert this by applying angle operations in reverse order\r\n self.vertices[i].meta['Cinv'] = np.linalg.inv(self.vertices[i].meta['C'])",
"def accumulate_homographies(H_succesive, m):\n if len(H_succesive) == 0:\n return H_succesive\n\n H2m = [np.eye(HOMOGRAPHY_RAD)]\n for i in range(m, 0, -1):\n temp_H = H2m[0].dot(H_succesive[i - 1])\n H2m.insert(0, temp_H/temp_H[2, 2])\n for i in range(m, len(H_succesive)):\n temp_H = H2m[i].dot(np.linalg.inv(H_succesive[i]))\n H2m.append(temp_H/temp_H[2, 2])\n\n return H2m",
"def rotate_along(axis: Tensor) -> Tensor:\n W = torch.einsum('ijk,j->ik', levi_civita.to(axis), axis)\n return expm(W)",
"def lherzolite():\n\n rho = 3270.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 187.4; C[0,1] = 63.71; C[0,2] = 63.87; C[0,3] = 0.78; C[0,4] = 2.02; C[0,5] = -3.2\n C[1,0] = C[0,1]; C[1,1] = 211.25; C[1,2] = 64.5; C[1,3] = -3.07; C[1,4] = 0.87; C[1,5] = -5.78\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 190.; C[2,3] = 0.38; C[2,4] = 2.38; C[2,5] = -0.12\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 67.9; C[3,4] = -2.12; C[3,5] = 1.6\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 63.12; C[4,5] = -0.55\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 66.83\n\n return C, rho",
"def forward(self, t, h):\n if self.i == 0:\n self.A = self.beta * (self.B - self.B.transpose(1, 0)) + (\n 1 - self.beta) * (self.B +\n self.B.transpose(1, 0)) - self.gamma * self.I\n self.W = self.beta * (self.C - self.C.transpose(1, 0)) + (\n 1 - self.beta) * (self.C +\n self.C.transpose(1, 0)) - self.gamma * self.I\n\n return torch.matmul(\n h, self.A) + self.tanh(torch.matmul(h, self.W) + self.z)"
] | [
"0.73926467",
"0.5681619",
"0.549413",
"0.5357701",
"0.53283906",
"0.5293677",
"0.520022",
"0.519057",
"0.5088467",
"0.50847656",
"0.50769705",
"0.50462455",
"0.50274307",
"0.49993315",
"0.49852484",
"0.4983559",
"0.4973589",
"0.49634326",
"0.49592367",
"0.49521154",
"0.49075732",
"0.49026072",
"0.48892424",
"0.48640722",
"0.48505297",
"0.48495096",
"0.48490933",
"0.4829783",
"0.48280364",
"0.48143855"
] | 0.7330481 | 1 |
Applies the Givens rotations stored in the vectors cs and sn to the vector H_col. Then constructs a new Givens rotation that eliminates H_col's k'th element, yielding the corresponding column of the R in H's QR decomposition. Returns the new column of R along with the new Givens factors. | def apply_givens_rotation(H_col: jax.ShapedArray, givens: jax.ShapedArray,
k: int) -> Tuple[jax.ShapedArray, jax.ShapedArray]:
# This call successively applies each of the
# Givens rotations stored in givens[:, :k] to H_col.
H_col = apply_rotations(H_col, givens, k)
cs_k, sn_k = givens_rotation(H_col[k], H_col[k + 1])
givens = jax.ops.index_update(givens, jax.ops.index[0, k], cs_k)
givens = jax.ops.index_update(givens, jax.ops.index[1, k], sn_k)
r_k = cs_k * H_col[k] - sn_k * H_col[k + 1]
R_col = jax.ops.index_update(H_col, jax.ops.index[k], r_k)
R_col = jax.ops.index_update(R_col, jax.ops.index[k + 1], 0.)
return R_col, givens | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def apply_rotations(H_col: jax.ShapedArray, givens: jax.ShapedArray,\n k: int) -> jax.ShapedArray:\n rotation_carry = (H_col, 0, k, givens)\n\n def loop_condition(carry):\n i = carry[1]\n k = carry[2]\n return jax.lax.cond(i < k, lambda x: True, lambda x: False, 0)\n\n def apply_ith_rotation(carry):\n H_col, i, k, givens = carry\n cs = givens[0, i]\n sn = givens[1, i]\n H_i = cs * H_col[i] - sn * H_col[i + 1]\n H_ip1 = sn * H_col[i] + cs * H_col[i + 1]\n H_col = jax.ops.index_update(H_col, jax.ops.index[i], H_i)\n H_col = jax.ops.index_update(H_col, jax.ops.index[i + 1], H_ip1)\n return (H_col, i + 1, k, givens)\n\n rotation_carry = jax.lax.while_loop(loop_condition,\n apply_ith_rotation,\n rotation_carry)\n H_col = rotation_carry[0]\n return H_col",
"def chi_rs_gmat(self, nu_s, temperature, vs=3, js=70, branches=(0,),\n del_Tv=0.):\n # Construct the v-branch-independent relaxation rate matrix\n gamma_mat = self.relax_mat(temperature, js)\n\n # For different v-branch combinations\n _js = np.arange(js)\n chi_rs = np.zeros_like(nu_s, dtype='complex128')\n for _branch in branches:\n for _v in np.arange(vs):\n # Calculate line positions\n nu_raman = self.ls_factors.line_pos(_v, _js, branch=_branch)\n\n # Construct the K_mat\n K_mat = np.diag(nu_raman) + gamma_mat*1j\n\n # Solve eigenvalue problem of K_mat\n eigvals, eigvec = np.linalg.eig(K_mat)\n eigvec_inv = np.linalg.inv(eigvec)\n\n # Compute the resonant intensity\n del_pop = self.ls_factors.pop_factor(temperature, _v, _js,\n branch=_branch,\n del_Tv=del_Tv)\n d = (self.trans_amp(_v, _js, branch=_branch))**0.5\n _term_l = d @ eigvec\n _term_r = eigvec_inv @ np.diag(del_pop) @ d\n _term = _term_l*_term_r\n\n for _j in _js:\n _term_b = ((-nu_s + np.real(eigvals[_j]))**2\n + np.imag(eigvals[_j])**2)\n # A 1/2 factor is necessary to match the magnitude from\n # isolated line assumption\n chi_rs += 1/2*_term[_j]*np.conj(\n -nu_s + eigvals[_j])/_term_b\n\n # A factor of c [cm/s] needs to be considered to convert cm^-1 to s^-1\n # by 2*pi*c\n return chi_rs/2/np.pi/self.C",
"def compute_S_r_kick(self, r, z, qOc, **kwargs):\n # Calculate the convolution quantities we need\n kr_cross_r = einsum('r, p -> rp', self.kr, r)\n # z does not change between S_r and S_r-inverse, so only need to compute once\n if kwargs['inverse'] == False:\n self.kz_cross_z = einsum('z, p -> zp', self.kz, z)\n self.convolved_sin = einsum('zp, z -> zp', sin(self.kz_cross_z), self.shape_function_z)\n self.d_convolved_sin_dz = einsum('zp, z -> zp', cos(self.kz_cross_z), self.kz*self.shape_function_z)\n # same here\n self.delta_r = np.ones(np.size(r)) * self.ptcl_width_r\n self.delta_u = einsum('r, p -> rp', self.kr, self.delta_r)\n\n self.tanhz = -np.tanh(((z-self.z_mean)**2 - self.z_mean**2)*self.tanh_width**2)\n\n j1 = self.convolved_j1(kr_cross_r, self.delta_u)\n int_j1 = einsum('rp, r -> rp', self.int_convolved_j1(kr_cross_r, self.delta_u), self.oneOkr)\n\n # Calculate Q_r for each mode\n modeQr = self.omegaOtwokz * (self.dc_coords[:,:,1] - self.omega_coords[:,:,1])\n\n # We dress the charge instead of the fields proper\n dressed_charge = self.tanhz*qOc\n\n kick_z = einsum('zr, rp, zp, p -> p', modeQr, int_j1, self.d_convolved_sin_dz, dressed_charge)\n kick_r = einsum('zr, rp, zp, p -> p', modeQr, j1, self.convolved_sin, dressed_charge)\n dFrdQ = einsum('rp, zp, p -> zr', int_j1, self.convolved_sin, dressed_charge)\n\n kick_Q0 = dFrdQ*self.omegaOtwokz\n kick_Qomega = -dFrdQ*self.omegaOtwokz\n\n return kick_z, kick_r, kick_Q0, kick_Qomega",
"def gsis(snp_mat, qr_smy_mat, proj_mat):\n\n # Set up\n n, g = snp_mat.shape\n\n # calculate the hat matrix\n zx_mat = np.dot(proj_mat, snp_mat).T\n # inv_q_zx = np.sum(zx_mat*zx_mat, axis=1)**(-1)\n q_zx = np.sum(zx_mat*zx_mat, axis=1)\n if np.min(q_zx) == 0:\n q_zx = q_zx + 0.000001\n inv_q_zx = q_zx**(-1)\n\n w, v = eig(qr_smy_mat)\n w = np.real(w)\n w[w < 0] = 0\n w_diag = np.diag(w**(1/2))\n sq_qr_smy_mat = np.dot(np.dot(v, w_diag), v.T)\n sq_qr_smy_mat = np.real(sq_qr_smy_mat)\n g_stat = np.sum(np.dot(zx_mat, sq_qr_smy_mat)**2, axis=1)*inv_q_zx\n\n # approximate of chi2 distribution\n k1 = np.mean(g_stat)\n k2 = np.var(g_stat)\n k3 = np.mean((g_stat-k1)**3)\n a = k3/(4*k2)\n b = k1-2*k2**2/k3\n d = 8*k2**3/k3**2\n g_pv = 1-chi2.cdf((g_stat-b)/a, d)\n g_pv_log10 = -np.log10(g_pv)\n\n return g_pv_log10, g_stat",
"def cer(r: list, h: list):\n # initialisation\n import numpy\n d = numpy.zeros((len(r) + 1) * (len(h) + 1), dtype=numpy.uint16)\n d = d.reshape((len(r) + 1, len(h) + 1))\n for i in tqdm(range(len(r) + 1)):\n for j in range(len(h) + 1):\n if i == 0:\n d[0][j] = j\n elif j == 0:\n d[i][0] = i\n # computation\n for i in tqdm(range(1, len(r) + 1)):\n for j in range(1, len(h) + 1):\n if r[i - 1] == h[j - 1]:\n d[i][j] = d[i - 1][j - 1]\n else:\n substitution = d[i - 1][j - 1] + 1\n insertion = d[i][j - 1] + 1\n deletion = d[i - 1][j] + 1\n d[i][j] = min(substitution, insertion, deletion)\n return d[len(r)][len(h)] / float(len(r))",
"def kuzmin_rotation(R,c,M,G=astronomicalG):\n return np.sqrt(2*G*np.power(10.,M)*R*R*np.power(c*c+R*R,-1.5))",
"def gram_schmidt(S, start_col=0):\n Q = S.copy()\n k = S.shape[1]\n assert k > 1 and start_col >= 0\n start_col = min(S.shape[1], start_col)\n if Q.dtype != np.float32 and Q.dtype != np.float64:\n Q = Q.astype(np.float64)\n\n if start_col == 0:\n Q[:, 0] = normalize_vector(Q[:, 0])\n\n uu = []\n for i in range(start_col + 1, k):\n Q[:, i] = S[:, i]\n for j in range(0, i):\n u = Q[:, j]\n v = Q[:, i]\n if len(uu) <= j:\n uu.append(u.T.dot(u))\n Q[:, i] -= u * (u.T.dot(v) / uu[j])\n\n Q[:, i] = normalize_vector(Q[:, i])\n # Re-project Q[:, i] to the orthogonal complement of Q[:, :i] to make sure they stay orthogonal.\n Q[:, i] = Q[:, i] - Q[:, :i].dot(Q[:, :i].T.dot(Q[:, i]))\n\n return Q",
"def matrix_K2(l, omega, S, cn, csn, rhos, rho):\n zt = omega * S / cn['t']\n xt = omega * S / csn['t']\n row1 = np.array((- w21(l, xt), d23(l, xt)))\n row2 = np.array((- w41(l, xt, zt, rhos, rho), d43(l, xt, zt, rhos, rho)))\n return np.array((row1, row2))",
"def rotate_components(phi, gamma = 1.0, q = 50, tol = 1e-6):\n p,k = phi.shape\n r = np.eye(k)\n d = 0\n cnt = 0\n for i in np.arange(q):\n cnt = cnt + 1\n d_old = d\n Lambda = np.dot(phi, r)\n u,s,vh = np.linalg.svd(np.dot(\n phi.T,np.asarray(Lambda)**3 - (gamma/p) * np.dot(\n Lambda, np.diag(np.diag(np.dot(Lambda.T,Lambda))))))\n print(\"Matrix u: \")\n print(u)\n print(\"Matrix s: \")\n print(s)\n print(\"Matrix vh: \")\n print(vh)\n r = np.dot(u, vh)\n d = np.sum(s)\n if d_old != 0 and d / d_old < 1 + tol:\n break\n print(\"Trace rotate_components_START\")\n print(\"Rotation matrix: \")\n print(r)\n print(\"Loop number: \" + str(cnt))\n print(\"Trace rotate_components_END\")\n return np.dot(phi, r)",
"def lherzolite():\n\n rho = 3270.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 187.4; C[0,1] = 63.71; C[0,2] = 63.87; C[0,3] = 0.78; C[0,4] = 2.02; C[0,5] = -3.2\n C[1,0] = C[0,1]; C[1,1] = 211.25; C[1,2] = 64.5; C[1,3] = -3.07; C[1,4] = 0.87; C[1,5] = -5.78\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 190.; C[2,3] = 0.38; C[2,4] = 2.38; C[2,5] = -0.12\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 67.9; C[3,4] = -2.12; C[3,5] = 1.6\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 63.12; C[4,5] = -0.55\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 66.83\n\n return C, rho",
"def compute_S_z_kick(self, r, z, qOc, **kwargs):\n # Calculate the convolution quantities we need\n\n kz_cross_z = einsum('z, p -> zp', self.kz, z)\n convolved_cos = einsum('zp, z -> zp', cos(kz_cross_z), self.shape_function_z)\n int_convolved_cos_dz = einsum('zp, z -> zp', sin(kz_cross_z), self.shape_function_z*self.oneOkz)\n\n # r does not change between S_z and S_z-inverse, so only need to compute once\n if kwargs['inverse'] == False:\n self.kr_cross_r = einsum('r, p -> rp', self.kr, r)\n self.delta_r = np.ones(np.size(r)) * self.ptcl_width_r\n self.delta_u = einsum('r, p -> rp', self.kr, self.delta_r)\n self.j0 = self.convolved_j0(self.kr_cross_r, self.delta_u)\n self.d_convolved_j0_dr = einsum('rp, r -> rp',\n -self.convolved_j1(self.kr_cross_r, self.delta_u), self.kr)\n\n # Calculate Q_z for each mode\n modeQz = self.omegaOtwokr * (self.dc_coords[:,:,1] + self.omega_coords[:,:,1])\n\n self.tanhz = -np.tanh(((z - self.z_mean) ** 2 - self.z_mean ** 2) * self.tanh_width ** 2)\n # We dress the charge instead of the fields proper\n dressed_charge = self.tanhz*qOc\n\n kick_z = einsum('zr, rp, zp -> p', modeQz, self.j0, convolved_cos)*dressed_charge\n kick_r = einsum('zr, rp, zp -> p', modeQz, self.d_convolved_j0_dr, int_convolved_cos_dz)*dressed_charge\n\n dFzdQ = einsum('rp, zp, p -> zr', self.j0, int_convolved_cos_dz, dressed_charge)\n\n kick_Q0 = dFzdQ*self.omegaOtwokr\n kick_Qomega = dFzdQ*self.omegaOtwokr\n\n return kick_z, kick_r, kick_Q0, kick_Qomega",
"def systematize_algorithm(H: np.array) -> Tuple[np.array, np.array, np.array]:\n n, c = H.shape\n m = np.abs(n-c)\n\n G_s = np.zeros((m, c), dtype=int)\n G_s[:, :m] = np.identity(m)\n\n H_s, permutation = systematize_matrix(H, post_system=True)\n\n rev_permutation = reverse_permutation(permutation)\n\n P = H_s[:, :m]\n\n G_s[:, m:] = P.T\n\n G = G_s[:, rev_permutation]\n\n return G, G_s, H_s",
"def transform_to_rotating_frame(H, U, D):\n \n #Determine the effective hamiltonian in the rotating frame\n Heff = lambda t: np.conj(U(t).T) @ H(t) @ U(t) + D\n \n return Heff",
"def build_Rdiagnol_block(self, R):\n N = self.N # number of MPC steps\n num_output = self.num_output\n \n row_list = [] # reocrd the every row in B_hat\n zero = Variable(torch.zeros(num_output, num_output*(N-1)))\n zero = self.vari_gpu(zero)\n row_long = torch.cat([zero, R, zero],1) # [0 0 ... Q 0 0 ...]\n \n for i in range(N, 0, -1):\n row_list.append(row_long[:, (i-1)*num_output : (i+N-1)*num_output])\n return torch.cat(row_list,0)",
"def computeV(H):\n # Pseudo-inverse of H\n #V = np.linalg.inv(H) # Inverse\n V = np.linalg.pinv(H) # Pseudo-inverse\n \n # Normalise columns\n [m,n] = V.shape\n for i in range(n):\n V[:,i] = V[:,i]/np.linalg.norm(V[:,i])\n \n return V",
"def test_squeezing(self, tol):\n r = 0.543\n phi = 0.123\n S = symplectic.squeezing(r, phi)\n out = S @ S.T\n\n # apply to an identity covariance matrix\n rotation = np.array(\n [[np.cos(phi / 2), -np.sin(phi / 2)], [np.sin(phi / 2), np.cos(phi / 2)]]\n )\n expected = rotation @ np.diag(np.exp([-2 * r, 2 * r])) @ rotation.T\n assert np.allclose(out, expected, atol=tol, rtol=0)",
"def harzburgite():\n\n rho = 3200.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 226.5; C[0,1] = 75.34; C[0,2] = 74.73; C[0,3] = -0.27; C[0,4] = -2.00; C[0,5] = 1.85\n C[1,0] = C[0,1]; C[1,1] = 242.8; C[1,2] = 73.68; C[1,3] = -3.6; C[1,4] = -1.91; C[1,5] = 4.14\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 230.; C[2,3] = -4.36; C[2,4] = -4.27; C[2,5] = -0.27\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 80.75; C[3,4] = 1.81; C[3,5] = -2.19\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 76.94; C[4,5] = -1.88\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 79.15\n\n return C, rho",
"def _r270(self,m):\n return np.rot90(m,3)",
"def givens_rotation_matrix(i, j, theta, N):\n R = np.identity(N)\n c = np.cos(theta)\n s = np.sin(theta)\n R[i, i] = c\n R[j, j] = c\n R[i, j] = -s\n R[j, i] = s\n return R",
"def quartz():\n\n rho = 2649.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 86.9; C[0,1] = 7.6; C[0,2] = 12.; C[0,3] = 17.8; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 86.9; C[1,2] = 12.; C[1,3] = -17.8; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 106.4; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 59.5; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 59.5; C[4,5] = -17.8\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 39.6\n\n return C, rho",
"def _rmat_s_helper(chi=None, omes=None, out=None):\n if chi is not None:\n cx = np.cos(chi)\n sx = np.sin(chi)\n else:\n cx = 1.0\n sx = 0.0\n\n if omes is not None:\n # omes is an array (vector): output is as many rotation matrices as omes entries.\n n = len(omes)\n out = out if out is not None else np.empty((n,3,3), dtype=omes.dtype)\n\n if chi is not None:\n # ome is array and chi is a value... compute output\n cx = np.cos(chi)\n sx = np.sin(chi)\n for i in range(n):\n cw = np.cos(omes[i])\n sw = np.sin(omes[i])\n out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw\n out[i, 1, 0] = sx*sw; out[i, 1, 1] = cx; out[i, 1, 2] = -sx*cw\n out[i, 2, 0] = -cx*sw; out[i, 2, 1] = sx; out[i, 2, 2] = cx*cw\n else:\n # omes is array and chi is None -> equivalent to chi=0.0, but shortcut computations.\n # cx IS 1.0, sx IS 0.0\n for i in range(n):\n cw = np.cos(omes[i])\n sw = np.sin(omes[i])\n out[i, 0, 0] = cw; out[i, 0, 1] = 0.; out[i, 0, 2] = sw\n out[i, 1, 0] = 0.; out[i, 1, 1] = 1.; out[i, 1, 2] = 0.\n out[i, 2, 0] = -sw; out[i, 2, 1] = 0.; out[i, 2, 2] = cw\n else:\n # omes is None, results should be equivalent to an array with a single element 0.0\n out = out if out is not None else np.empty((1, 3, 3))\n if chi is not None:\n # ome is 0.0. cw is 1.0 and sw is 0.0\n cx = np.cos(chi)\n sx = np.sin(chi)\n out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.\n out[0, 1, 0] = 0.; out[0, 1, 1] = cx; out[0, 1, 2] = -sx\n out[0, 2, 0] = 0.; out[0, 2, 1] = sx; out[0, 2, 2] = cx\n else:\n # both omes and chi are None... return a single identity matrix.\n out[0, 0, 0] = 1.; out[0, 0, 1] = 0.; out[0, 0, 2] = 0.\n out[0, 1, 0] = 0.; out[0, 1, 1] = 1.; out[0, 1, 2] = 0.\n out[0, 2, 0] = 0.; out[0, 2, 1] = 0.; out[0, 2, 2] = 1.\n\n\n return out",
"def GramSchmidt(A):\r\n n = len(A)\r\n # Finds the number of lists in the list, which is also the number of rows\r\n m = len(A[0])\r\n # Finds the number of elements in list one, which is also the number of columns\r\n V = A\r\n R = [[0]*n for i in range(n)]\r\n # creates an empty list R with dimensions of n rows and n columns\r\n Q = [[0]*m for i in range(n)]\r\n # creates an empty list Q with dimensions of n rows and m columns\r\n inputStatus = True\r\n # inputStatus is true at this point until proven otherwise\r\n for i in range(n):\r\n for j in range(m):\r\n if ((type(A[i][j]) != int) and (type(A[i][j]) != float) and (type(A[i][j]) != complex)):\r\n inputStatus = False\r\n print(\"Invalid Input\")\r\n # this checks each value in the matrix A to make sure it is some time of number, if it isnt a number then the input status will be false \r\n # if the input status is false then an error message will be displayed stating that this is an invalid input\r\n if inputStatus == True:\r\n # if the given list does not fall under the previous if statement then the input status will continue to be true and we can continue to find the QR factorization \r\n for i in range(n):\r\n # for loop which continues as long as there are still lists in A \r\n R[i][i] = norm(V[i])\r\n # Creates the border for the upper triangle matrix R, where each value in the diagonal is the 2 norm of the corresponding vector in the original matrix A \r\n Q[i] = unit(V[i])\r\n # Each vector in Q is the unit vector of the corresponding vector in A \r\n for j in range(i+1,n):\r\n # the position j will be 1 more than the position i \r\n R[j][i] = dot(Q[i],V[j])\r\n # The element in R[i+1][i] is the dot product of Q[i] and V[i+1] \r\n temp = scalarmul(R[j][i],Q[i])\r\n # This is the scalar multiplication of R[i+1][i] and Q[i] which will be labeled as temp \r\n V[j] = subtract(V[j],temp)\r\n # V[j] is the difference between the original V[j] and temp \r\n return[Q,R]",
"def calc_vcirc(r,menc,G=1.):\n if G is None: G = 1.\n return np.sqrt(G*menc/r)",
"def toRot(q):\n R = SX.zeros(3, 3)\n qi = q[0]; qj = q[1]; qk = q[2]; qr = q[3]\n R[0, 0] = 1. - 2. * (qj * qj + qk * qk);\n R[0, 1] = 2. * (qi * qj - qk * qr);\n R[0, 2] = 2. * (qi * qk + qj * qr)\n R[1, 0] = 2. * (qi * qj + qk * qr);\n R[1, 1] = 1. - 2. * (qi * qi + qk * qk);\n R[1, 2] = 2. * (qj * qk - qi * qr)\n R[2, 0] = 2. * (qi * qk - qj * qr);\n R[2, 1] = 2. * (qj * qk + qi * qr);\n R[2, 2] = 1. - 2. * (qi * qi + qj * qj)\n\n return R",
"def reduce_kcol_to_3col(G, k):\n\n G, H = prepare_grid(G)\n print(\"grid prepared\")\n N = len(G)\n H = create_kgrid(H, N, k)\n print(\"grid created\")\n H = add_pheripherals_per_edge(G.edges, H, k)\n print(\"peripherals added\")\n\n return H",
"def hornblende():\n\n rho = 3200.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 116.; C[0,1] = 49.9; C[0,2] = 61.4; C[0,3] = 0.; C[0,4] = 4.3; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 159.7; C[1,2] = 65.5; C[1,3] = 0.; C[1,4] = -2.5; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 191.6; C[2,3] = 0.; C[2,4] = 10.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 57.4; C[3,4] = 0.; C[3,5] = -6.2\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 31.8; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 36.8\n\n return C, rho",
"def gramschmidt(A):\r\n _, k = A.shape\r\n\r\n # first basis vector\r\n Q = A[:, [0]] / np.linalg.norm(A[:, 0])\r\n for j in range(1, k):\r\n # orthogonal projection, loop-free implementation\r\n q = A[:, j] - np.dot(Q, np.dot(Q.T, A[:, j]))\r\n\r\n # check premature termination\r\n nq = np.linalg.norm(q)\r\n if nq < 1e-9 * np.linalg.norm(A[:, j]):\r\n break\r\n # add new basis vector as another column of Q\r\n Q = np.column_stack([Q, q / nq])\r\n return Q",
"def rotation(X, Y, C, S) :\n Xrot = X*C + Y*S \n Yrot = Y*C - X*S \n return Xrot, Yrot",
"def svd_compress_gs(mat, k):\n U, singular_vals, V = np.linalg.svd(mat)\n rank = len(singular_vals)\n print(\"Image rank %r\" % rank)\n if k > rank:\n print(\"k is larger than rank of image %r\" % rank)\n return mat\n # take columns less than k from U\n U_p = U[:, :k]\n # take rows less than k from V\n V_p = V[:k, :]\n # build the new S matrix with top k diagnal elements\n S_p = np.zeros((k, k), mat.dtype)\n for i in range(k):\n S_p[i][i] = singular_vals[i]\n print(\"U_p shape {0}, S_p shape {1}, V_p shape {2}\".format(\n U_p.shape, S_p.shape, V_p.shape))\n compressed = np.dot(np.dot(U_p, S_p), V_p)\n ss = ssim(mat, compressed,\n dynamic_range=compressed.max() - compressed.min())\n print(\"Strucural similarity: %r\" % ss)\n return U_p, S_p, V_p, ss",
"def get_su_eig(self, delcc):\n pc = SimpleNamespace()\n h = self.h\n if self.rbsize:\n self._inv_mrb()\n if h:\n pc.G = h\n pc.A = h * h / 3\n pc.Ap = h / 2\n if self.unc:\n pv = self._el\n else:\n pv = np.ix_(self._el, self._el)\n if self.m is not None:\n self.m = self.m[pv]\n self.k = self.k[pv]\n self.b = self.b[pv]\n self.kdof = self.nonrf[self._el]\n self.ksize = self.kdof.size\n\n self._el = np.arange(self.ksize) # testing ...\n self._rb = np.arange(0)\n\n if self.elsize:\n self._inv_m()\n A = self._build_A()\n eig_info = eigss(A, delcc)\n pc.wn = eig_info.wn\n pc.zeta = eig_info.zeta\n pc.eig_success = eig_info.eig_success\n if h:\n self._get_complex_su_coefs(pc, eig_info.lam, h)\n self._add_partition_copies(pc, eig_info.lam, eig_info.ur, eig_info.ur_inv)\n return pc"
] | [
"0.6138148",
"0.5132266",
"0.5044118",
"0.50204897",
"0.49985307",
"0.49635363",
"0.4903446",
"0.48277012",
"0.48053998",
"0.47816756",
"0.4768826",
"0.4737764",
"0.4733043",
"0.4726679",
"0.47147375",
"0.4713533",
"0.47095165",
"0.4691508",
"0.46743643",
"0.46470118",
"0.46460894",
"0.4645053",
"0.46360397",
"0.46185982",
"0.46155247",
"0.45997173",
"0.4592768",
"0.45862383",
"0.45861313",
"0.45806757"
] | 0.6756806 | 0 |
Check if quote already exists in Nostalgiabot's memory for this Person. | def has_said(self, quote: str) -> bool:
return any(q for q in self.quotes if q.content.lower() == quote.lower()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_term_exist(self, term):\n return term in self.postingDict",
"def check_existed_did(self):\n for wallet in self.wallet_state_manager.wallets.values():\n if (\n wallet.type() == WalletType.DECENTRALIZED_ID\n and self.did_info.origin_coin.name() == wallet.did_info.origin_coin.name()\n ):\n self.log.warning(f\"DID {self.did_info.origin_coin} already existed, ignore the wallet creation.\")\n raise ValueError(\"Wallet already exists\")",
"def isUnique(self, word):\n abbr = self.getAbbr(word)\n return abbr not in self.d or len(self.d[abbr]) == 1 and self.d[abbr][0] == word",
"def test_repeated_calls_different_quotes(self):\n quoteSet = set()\n for i in range(5):\n quoteSet.add(getRandomJoke()[\"joke\"])\n self.assertEqual(len(quoteSet) > 1, True)",
"def isUnique(self, word):\n abbr = self.gen_abbr(word)\n\n if abbr not in self.dict:\n return True\n elif len(self.dict[abbr]) == 1 and word in self.dict[abbr]:\n return True\n else:\n return False",
"def is_person_identifier_used(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT personid FROM person WHERE personid =?\", (person_id,))\n person_identifier = \"\"\n is_used = True\n for row in c:\n person_identifier = row[\"personid\"]\n conn.close()\n if len(person_identifier) == 0:\n is_used = False\n if len(person_identifier) > 0:\n is_used = True\n return is_used\n except:\n return False",
"def exists(self, proxy):\n return not self.database.zscore(self.key, proxy) == None",
"async def _exists(self, key):\n return key in SimpleMemoryBackend._cache",
"def isUnique(self, word):\n abbr = self.get_abbr(word)\n if abbr not in self.abbr:\n return True\n elif len(self.abbr[abbr]) == 1 and word == self.abbr[abbr][0]:\n return True\n else:\n return False",
"def isUnique(self, word):\n if len(word) <= 1:\n n = word\n else:\n n = word[0] + str(len(word) - 2) + word[-1] #Get the abbrviation.\n if n not in self.abbrdict or (self.abbrdict[n] == 1 and word in self.origdict): #If it is not in abbrdict or the abbrevation count is 1 and the word has appeared in dictionary, return true.\n return True\n else: #Otherwise, return false.\n return False",
"def exist(self):",
"def exists(self):\n self.cursor.execute(f\"\"\"\n SELECT 1\n FROM {self.table_name}\n WHERE {self.lookup_type}='{self.word}'\n \"\"\")\n return True if self.cursor.fetchone() else False",
"def check_person_existence(self, searched_person_id):\n self.__load_persons_from_file_into_memory()\n return super().check_person_existence(searched_person_id)",
"def test_phonebook_with_duplicate_entries_is_inconsostent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"12345\")\n self.assertFalse(self.phonebook.is_consistent())",
"def exists(self):\n return True",
"def exists(self):\n return True",
"def checkIfExists(dbconnection, title):\n cursor = dbconnection.cursor()\n output = \"\"\n title = title.replace(\"'\", \"''\")\n try:\n cursor.execute(\"SELECT * FROM transcriptions WHERE title = '\" + title + \"';\")\n dbconnection.commit()\n output = cursor.fetchone()\n cursor.close()\n if(output is None):\n return False\n else:\n return True\n except:\n dbconnection.rollback()\n cursor.execute(\"SELECT * FROM transcriptions WHERE title = '\" + title + \"';\")\n dbconnection.commit()\n output = cursor.fetchone()\n cursor.close()\n if(output is None):\n return False\n else:\n return True",
"def checkWord(word):\r\n check = word in cachedWordList\r\n if check:\r\n print(word + \" spelt correctly\")\r\n else:\r\n print(word + \" not found in dictionary\")\r\n return check",
"def check_if_already_prepared(self, instance, product_attribute):\n attribute_exist = self.search([('ks_shopify_instance', '=', instance.id),\n ('ks_product_attribute', '=', product_attribute.id)], limit=1)\n if attribute_exist:\n return attribute_exist\n else:\n return False",
"def exists_in_db(self) -> bool:\n query = '''SELECT * \n FROM ESLReceipts \n WHERE Transaction_Number=? AND Date=? AND Description=? \n AND Memo=? AND Amount_Debit=? \n AND Amount_Credit=? AND Balance=? \n AND Check_Number=? AND Fees=? \n AND Card_Type=? AND Is_Payment=? \n AND Is_Transaction=? AND User_id=?;'''\n return len(self.db.fetchall(query, values=self.to_tuple())) > 0",
"def exists( identifier ):\n return note.exists(identifier)",
"def exists(self, answer):\n return self.find(answer) is not None",
"def party_exist(party_name: str) -> bool:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from party where name = '{}'\".format(party_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n if len(data) == 0:\n return False\n return True",
"def _object_exists(name):\n conn = sqlite3.connect('/dev/input')\n try:\n cur = conn.cursor()\n sql = 'SELECT ROWID FROM object WHERE name=? AND deleted=0'\n cur.execute(sql, (name, ))\n result = cur.fetchall()\n return len(result) > 0\n finally:\n conn.close()",
"def exists(self, obj):\n return False",
"def exists(self):\n\n if self:\n pass",
"def __contains__(self, seqno):\n self._gc()\n for item in self._queue:\n if item[0] == seqno:\n return True\n return False",
"def test_check_for_duplicates_with_duplicates(self):\n quotes = [api.Quote(\" This is an added quote.\", \"Another author\", \"Publication\", [\"tag1, tag2\"]),\n api.Quote(\" This is an added quote.\", \"Another author2\", \"Publication\", [\"tag1, tag2\"]),\n api.Quote(\" This is an added quote.\", \"Another author3\", \"Publication\", [\"tag1, tag2\"])]\n\n with self.assertRaisesRegexp(Exception, \"a duplicate quote was found on line 2 of 'stdin'. \"\n \"Quote: \\\"This is an added quote.\\\".\"):\n\n api._check_for_duplicates(quotes, \"stdin\")",
"def check_duplicate(triple: str, result: List[str]) -> bool:\n fields = triple.strip().split(', ')\n assert len(fields) == 13\n assert fields[9] == 'BERT'\n psuedo_triple = fields[:11]\n psuedo_triple[9] = 'RELEVANCE'\n return ', '.join(psuedo_triple) in result",
"def testSynonymDuplicate(self):\n\t\t\t\tone = spinner.Word.objects.get_single('mac', True)\n\t\t\t\ttwo = spinner.Word.objects.get_single('macintosh', True)\n\t\n\t\t\t\tsyn = spinner.Synonym.objects.get_single(one, two, True)\n\t\t\t\t\n\t\t\t\tsyn2 = spinner.Synonym.objects.get_single(two, one, True)\n\n\t\t\t\tassert syn == syn2\n\n\t\t\t\tsyn.delete()\n\t\t\t\tone.delete()\n\t\t\t\ttwo.delete()"
] | [
"0.5549986",
"0.5529568",
"0.55025846",
"0.54980576",
"0.54824877",
"0.54619014",
"0.5449591",
"0.52898127",
"0.52789867",
"0.5272745",
"0.5230741",
"0.52132195",
"0.5202484",
"0.5193138",
"0.5162743",
"0.5162743",
"0.5120887",
"0.51052827",
"0.5090298",
"0.50702953",
"0.50422996",
"0.50374776",
"0.5006044",
"0.50045365",
"0.4992264",
"0.49846002",
"0.4983254",
"0.49743173",
"0.49729666",
"0.4970381"
] | 0.6199974 | 0 |
Updates the stay time of visit | def update_stay_time(self):
# It would not be better to simply self.stay_time = self.get_length() ??
self.stay_time = self.get_length() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_time(self):\n pass # Do nothing",
"def update(self):\n super().update()\n self.checkTimeToLive()",
"def update(self):\n if not self.exists:\n return\n if AT.TIME_TO_EXPIRE in self.attributes:\n if not self.calculate_time_left():\n self.fire_trigger(TR.TIME_EXPIRED)",
"def post_time(self, amt):\n amtOfTime = amt + 1\n Publisher().sendMessage(\"update\", amtOfTime)",
"def _RecordVisitTime(self, mr, now=None):\n now = now or int(time.time())\n if not settings.read_only and mr.auth.user_id:\n user_pb = mr.auth.user_pb\n if (user_pb.last_visit_timestamp <\n now - framework_constants.VISIT_RESOLUTION):\n user_pb.last_visit_timestamp = now\n self.services.user.UpdateUser(mr.cnxn, user_pb.user_id, user_pb)",
"def __pass_time(self):\n self.hunger += 1\n self.boredom += 1",
"def update(self, dt):\n\t\tpass",
"def increase_time(self,s):\n self.days += 1\n if self.disease_status > 0:\n self.time_since_infection += 1\n if self.days == 365:\n self.increase_age(s)",
"def update(self, dt):",
"def update(self, dt):",
"def update(self, dt):\n pass",
"def GAME_TIME_ADVANCE(dt):",
"def setSubmitTime(t):",
"def update_trip_time(trip_path, paths, stay_time, mpoi_gains, start_end, model_params, method_use, stay_offset):\n\n trip_time = 0.0\n tot_gain = 0.\n time_list = []\n stay_list = []\n gain_list = []\n\n for idx, node in enumerate(trip_path):\n next_node = trip_path[(idx+1)%trip_path.size]\n rtime = paths[node, next_node]\n trip_time += rtime\n time_list.append(rtime)\n\n # if this is start node or end node check if it is in the tour\n if next_node in start_end and not start_end[next_node]:\n # don't add stay time\n gain_list.append(0)\n stay_list.append(0)\n else:\n # compute stay time\n if method_use == method.proposed or method_use == method.personal or method_use == method.profit:\n stime, gain = find_stay_time(model_params[next_node], rtime, stay_time[next_node], mpoi_gains[next_node], stay_offset)\n else:\n stime = stay_time[next_node]\n gain = mpoi_gains[next_node]\n trip_time += stime\n tot_gain += gain\n\n stay_list.append(stime)\n gain_list.append(gain)\n \n return trip_time, tot_gain, time_list, stay_list, gain_list",
"def update_timeval(self):\n self.timeval = self.get_timeval()",
"def update_based_on_time(self):\n for counter, agent in enumerate(self.agents):\n if self.t >= agent.getFinishTime() and self.agent_current_task[counter] != -1: # task is finished\n task_num = self.agent_current_task[counter]\n self.finish_time_per_task_dict[task_num] = self.t\n self.is_task_finished[0][task_num] = 1\n agent.changebusy(False)\n self.update_agent_is_idle_based_on_class()",
"def stay(self):\n\n pass",
"def update(self) -> None:\n\n \n #If time to live is 0\n if self.ttl == 0:\n\n #Kill itself\n self.kill()\n return\n\n #Otherwise\n else:\n\n #Reduce time to live\n self.ttl -= 1\n\n #Call superclass update\n return super().update()",
"def elapseTime(self, gameState):\n\n \"*** YOUR CODE HERE ***\"\n\n allPossible = util.Counter()\n\n for oldPos in self.legalPositions:\n actions = gameState.getLegalActions(agentIndex)\n successorStates = [gameState.generateSuccessor(action) for action in actions]\n newPosDist = {}\n for state in successorStates:\n position = state.getAgentPosition(agentIndex)\n prob = 1.0/len(actions)\n newPosDist[position] = prob\n\n for newPos, prob in newPosDist.items():\n allPossible[newPos] += prob * self.beliefs[oldPos]\n\n allPossible.normalize()\n self.beliefs = allPossible",
"def update(self, delta_time):\n pass",
"def update(self, deltatime):\n pass",
"def update(self):\n self.age += 1\n self.starve -= 1\n if self.starve < 1:\n self.alive = False\n self.move()",
"def update_isolation(self, time: int):",
"async def paydaytime(self, ctx: commands.Context, seconds: int):\r\n guild = ctx.guild\r\n if await bank.is_global():\r\n await self.config.PAYDAY_TIME.set(seconds)\r\n else:\r\n await self.config.guild(guild).PAYDAY_TIME.set(seconds)\r\n await ctx.send(\r\n _(\"Value modified. At least {num} seconds must pass between each payday.\").format(\r\n num=seconds\r\n )\r\n )",
"def update(self, time):\n raise NotImplementedError",
"def update(self, time):\n raise NotImplementedError",
"def update_activity(self, time_delta):\n if self.state == DoctorState.IN_PATIENT_EXAM_ROOM:\n return\n # We have the amount of time a doctor stays in patient room as\n # DoctorConstant.PORTION_TIME_SPENT_WITH_PATIENT\n # We have the length of time a doctor stays in patient room as\n # DoctorConstant.AVG_TIME_SPENT_WITH_PATIENT\n # Thus, for average time spent doing OTHER we use PORTION * all time\n # = TIME_SPENT_WITH_PATIENT\n total_time_unit = DoctorConstant.AVG_TIME_SPENT_WITH_PATIENT / \\\n DoctorConstant.PORTION_TIME_SPENT_WITH_PATIENT\n time_spent_doing_other = total_time_unit - \\\n DoctorConstant.AVG_TIME_SPENT_WITH_PATIENT\n chance_change_task = time_delta_to_minutes(time_delta) / \\\n time_spent_doing_other\n if self.state == DoctorState.OTHER and random.random() < \\\n chance_change_task:\n self.state = DoctorState.READY",
"def change_time(self, new_time):\r\n self.when = new_time",
"def _update_active_rides_fast(self, time: datetime) -> None:\n pass",
"def update(self, deltaTime):\n pass"
] | [
"0.6485091",
"0.63547605",
"0.6233127",
"0.5951948",
"0.59343076",
"0.58443356",
"0.58252096",
"0.57942843",
"0.5782025",
"0.5782025",
"0.5764802",
"0.5756116",
"0.57427424",
"0.5739974",
"0.57184714",
"0.5699622",
"0.56783235",
"0.56636465",
"0.56427634",
"0.56412554",
"0.56383103",
"0.5631024",
"0.55772746",
"0.5557848",
"0.5537442",
"0.5537442",
"0.54994017",
"0.5463884",
"0.54580337",
"0.544741"
] | 0.75728226 | 0 |
Repeats a message multiple times. | async def repeat(self,ctx, times: int, content='repeating...'):
for i in range(times):
await ctx.send(content) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def repeat(ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)",
"async def repeat(times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)",
"async def repeat(ctx, times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)",
"def async_repetitive_message(message, interval_seconds):\n repeat = ['-', '\\\\', '|', '/']\n\n for switch in itertools.cycle(repeat):\n print('\\r[{}] {}'.format(switch, message), end='')\n yield from async_sleep(interval_seconds)",
"async def repeat(self, ctx, *, text):\n await ctx.send(text)",
"async def ripgupta(self, ctx, count, *, message):\n int(count)\n gupta = 468209010978455552\n channel = 617525238392946699\n mloop = 0\n int(mloop) \n while mloop > count:\n await channel.send(\"{} {}\".format(gupta.mention, message))\n int(mloop)\n mloop = mloop + 1",
"async def repeat(ctx, *, arg):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('repeat: ' + arg, extra={'invoker': ctx.message.author.name})\r\n await ctx.send(arg)",
"async def repeat(self, ctx, times : int, content : str):\n if times < 6:\n for i in range(times):\n await ctx.send(content)\n else:\n await ctx.send(\"Please don't get me banned by Discord! (Max 5)\")",
"async def do(ctx, times : int, *, command):\n msg = copy.copy(ctx.message)\n msg.content = command\n for i in range(times):\n await bot.process_commands(msg)",
"async def repeat(\n text: ('str', 'The content to repeat')\n):\n if not text:\n text = 'nothing to repeat'\n \n return InteractionResponse(text, allowed_mentions = None)",
"def cycle(self, message):\n msg_list = self.ts.get_human_readable_message(message).split(' ')\n players = self.player_queue.pop_all()\n players_str = ' '.join(players)\n channel = SOCKET_ARGS['channel']\n if len(msg_list) > 1:\n credential_str = ' '.join(msg_list[1:])\n whisper_str = 'You may now join {} to play. The credentials you need are: {}'.format(\n channel, credential_str)\n self.player_queue_credentials = credential_str\n else:\n whisper_str = 'You may now join {} to play.'.format(channel)\n self.player_queue_credentials = None\n for player in players:\n self._add_to_whisper_queue(player, whisper_str)\n # self.command_queue.appendleft(('_delete_last_row', {}))\n self._add_to_chat_queue(\"Invites sent to: {} and there are {} people left in the queue\".format(\n players_str, len(self.player_queue.queue)))",
"def cycle_one(self, message):\n msg_list = self.ts.get_human_readable_message(message).split(' ')\n channel = SOCKET_ARGS['channel']\n try:\n player = self.player_queue.pop()\n if len(msg_list) > 1:\n credential_str = ' '.join(msg_list[1:])\n whisper_str = 'You may now join {} to play. The credentials you need are: {}'.format(\n channel, credential_str)\n elif self.player_queue_credentials is not None:\n credential_str = self.player_queue_credentials\n whisper_str = 'You may now join {} to play. The credentials you need are: {}'.format(\n channel, credential_str)\n else:\n whisper_str = 'You may now join {} to play.'.format(channel)\n self._add_to_whisper_queue(player, whisper_str)\n self._add_to_chat_queue(\"Invite sent to: {} and there are {} people left in the queue\".format(player, len(self.player_queue.queue)))\n # self.command_queue.appendleft(('_delete_last_row', {}))\n except IndexError:\n self._add_to_chat_queue('Sorry, there are no more players in the queue')",
"def send_spam_msg(driver, name, message, n):\r\n\r\n for i in range(n):\r\n send_message(driver, name, message)",
"def repeat(word, repetitions):\n return word * repetitions",
"def repeat(self, count):\n return self.Sequence((self,) * count)",
"def repeat(s):\r\n\r\n return s",
"def shake(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)\n r.go(-25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)",
"def repeat_string_n_times(string, count):\r\n return string * int(count)",
"async def echo(ctx, *, message=None):\n message = message or \"Please provide the message to be repeated.\"\n await ctx.message.delete()\n await ctx.send(message)",
"def repeat_timers(bot, chat_id, message_id):\n\n bot_collection[chat_id].timers.repeat()\n start_timer(bot, chat_id, message_id)",
"def repeat(self, repeat: bool=None):\n self._select_interface(self._rc_repeat, self._http_repeat, repeat)",
"async def repeat(self, msg):\n if msg.guild.id in self.player:\n if msg.voice_client.is_playing() is True:\n if self.player[msg.guild.id]['repeat'] is True:\n self.player[msg.guild.id]['repeat'] = False\n return await msg.message.add_reaction(emoji='✅')\n\n self.player[msg.guild.id]['repeat'] = True\n return await msg.message.add_reaction(emoji='✅')\n\n return await msg.send(\"No audio currently playing\")\n return await msg.send(\"Bot not in voice channel or playing music\")",
"def repeat(self):\n return self._repeat",
"def repeat(self, fn, *args, **kwargs):\n return repeat_n_times(self.n, fn, *args, **kwargs)",
"def repeat(self, number_of_repeats):\n return \"G\" + str(number_of_repeats)",
"def repeated_iteration(self) -> global___Statement.Iteration.RepeatedIteration:",
"def sendMessage_0(self, messages):\n for message in messages:\n self.sendMessage(message)",
"def message_all(self, message):\n # We copy the _clients into a list to avoid dictionary changing\n # size during iteration.\n for character in self.players.values():\n character.message(message)",
"async def say(self, string, *, update=True):\r\n said = False\r\n while not said:\r\n if not self.ended:\r\n for x in range(4):\r\n try:\r\n msg = await bot.send_message(self.channel, string)\r\n said = True\r\n if update and self.player:\r\n self.player.update_message(string)\r\n return\r\n except (discord.HTTPException, OSError, aiohttp.ClientResponseError) as e:\r\n print(\"Suffered\", type(e), \"error in botcommand.say().\")\r\n print(\"info: \", string, self.channel.name, self.player.id)\r\n await asyncio.sleep(x ** x)\r\n self.end()\r\n raise CommandEndedError\r\n else:\r\n raise CommandEndedError",
"def repeat(num_times):\n\n def decorator_repeat(func):\n \"\"\"\n defines wrapper_repeat(*args, **kwargs)\n\n :returns: wrapper_repeat\n \"\"\"\n\n @functools.wraps(func)\n def wrapper_repeat(*args, **kwargs):\n \"\"\"\n func(*args, **kwargs) num_times\n\n :return: last return value\n \"\"\"\n for _ in range(num_times):\n value = func(*args, **kwargs)\n return value\n\n return wrapper_repeat\n\n return decorator_repeat"
] | [
"0.75642896",
"0.75241643",
"0.74671906",
"0.6782859",
"0.6725025",
"0.66473454",
"0.6640616",
"0.6501877",
"0.6463185",
"0.6406237",
"0.62558305",
"0.6149078",
"0.61431366",
"0.61045724",
"0.60727173",
"0.60414445",
"0.6028678",
"0.59844077",
"0.59493715",
"0.59287435",
"0.57907003",
"0.5782056",
"0.5741135",
"0.5721196",
"0.56817126",
"0.5671292",
"0.56708336",
"0.5663998",
"0.5582047",
"0.5578785"
] | 0.7628505 | 0 |
create data that uses the Choice model | def create_choices(question_model, text="text", total_votes = 0):
return Choice.objects.create(question=question_model,
text=text,
total_votes=total_votes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, choice):\r\n self.choice = choice",
"def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')",
"def get_choicesdata(self):\n # selected_value = self.get_cleaned_value()\n # choicesdata = []\n # found_selected_value = False\n # for value, label in self.get_choices():\n # is_selected = value == selected_value\n # if is_selected:\n # found_selected_value = True\n # url = self.build_set_values_url(values=[value])\n # choicesdata.append({\n # 'url': url,\n # 'label': label,\n # 'is_selected': is_selected\n # })\n choicesdata, found_selected_value = self.__make_choicesdata_list(\n choices=self.get_choices(),\n selected_value=self.get_cleaned_value())\n if not found_selected_value and len(choicesdata) > 0:\n selected_index = self.get_default_is_selected_index(choicesdata=choicesdata)\n choicesdata[selected_index]['is_selected'] = True\n return choicesdata",
"def create_choice(question, choice_text, votes=0):\n return question.choice_set.create(choice_text=choice_text, votes=votes)",
"def create_question(question_text, days, choices=('choice 1',)):\n time = timezone.now() + datetime.timedelta(days=days)\n question = Question.objects.create(question_text=question_text, pub_date=time)\n for choice in choices:\n question.choice_set.create(choice_text=choice)\n return question",
"def _create_response_model(self, data):\n pass",
"def __init__(self, choices, *args, **kwargs):\n super(RangePollChoiceForm, self).__init__(*args, **kwargs)\n nominees = [(i, '%d' % i) for i in range(0, choices.count()+1)]\n for choice in choices:\n self.fields['range_poll__%s' % str(choice.id)] = (\n forms.ChoiceField(widget=forms.Select(),\n choices=nominees,\n label=choice.nominee.get_full_name()))",
"def MakeChoice(self,content):\n return self.register(Choice(content,reg=self))",
"def get_choicesdata(self):\n selected_value = self.get_cleaned_value()\n choicesdata = []\n for value, label in self.get_choices_cached():\n is_selected = value == selected_value\n url = self.build_set_values_url(values=[value])\n choicesdata.append({\n 'url': url,\n 'label': label,\n 'is_selected': is_selected,\n 'dom_id': '{}_{}'.format(self.get_inputfield_dom_id(), value)\n })\n return choicesdata",
"def get_context_data(self, **kwargs):\n question_id = int(self.kwargs['question_id'])\n question = self._get_question(question_id)\n kwargs.setdefault('question', question)\n choices = question.choice_set.all()\n kwargs.setdefault('choices', choices)\n return super().get_context_data(**kwargs)",
"def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]",
"def add_choice(self, name, value):\r\n self.choices += [{\"name\": name, \"value\": value}]",
"def configure_list_of_choices_type_question(self, question_data):\n self.driver.find_radio_button(LIST_OF_CHOICE_RB).click()\n index = 1\n for choice in fetch_(CHOICE, from_(question_data)):\n if index > 1:\n self.driver.find(ADD_CHOICE_LINK).click()\n self.driver.find_text_box(by_xpath(CHOICE_XPATH_LOCATOR + \"[\" + str(index) + \"]\" + CHOICE_TB_XPATH_LOCATOR)).enter_text(choice)\n index += 1\n choice_type = fetch_(ALLOWED_CHOICE, from_(question_data))\n if ONLY_ONE_ANSWER == choice_type:\n self.driver.find_radio_button(ONLY_ONE_ANSWER_RB).click()\n elif MULTIPLE_ANSWERS == choice_type:\n self.driver.find_radio_button(MULTIPLE_ANSWER_RB).click()\n return self",
"def create_dummy_form(title,text,fill_choice=[],choice_length=[]):\n # fill it with blank for dummy choices\n count=0\n choices=[]\n while count < 8:\n choices.append(None)\n count+=1\n \n # fill choices based on value on fill_choice\n for i in fill_choice:\n try :\n length = choice_length[i]\n except IndexError :\n length = 10\n choices[i] = create_random_string(length)\n\n dummy_form=CreatePollQuestion(\n {\"question_title\":title,\n \"question_text\" :text,\n \"choice_1\":choices[0],\n \"choice_2\":choices[1],\n \"choice_3\":choices[2],\n \"choice_4\":choices[3],\n \"choice_5\":choices[4],\n \"choice_6\":choices[5],\n \"choice_7\":choices[6],\n \"choice_8\":choices[7],\n })\n\n return dummy_form",
"def test_distribution_choices_added_successfully(self):\n valid_choices = [\"cpu\", \"memory\"]\n for good_input in valid_choices:\n self.ocp_data[\"distribution\"] = good_input\n self.assertEqual(self.ocp_data[\"distribution\"], good_input)\n with tenant_context(self.tenant):\n instance = None\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n if serializer.is_valid(raise_exception=True):\n instance = serializer.save()\n self.assertIsNotNone(instance)\n self.assertIsNotNone(instance.uuid)",
"def data(self) -> dict:\n _data = super().data\n _choices = 'choices'\n if _choices in _data.keys():\n raise TypeError('Implementation Error: Key \\'{}\\' already defined in parent class'.format(_choices))\n _data[_choices] = sorted(list(self._choices))\n return _data",
"def genre_choices(request):\n choices = GENRES\n diction = {}\n li = []\n for data in choices:\n li.append(data[0])\n diction['GENRE_CHOICES'] = li\n return JsonResponse(data=diction, status=status.HTTP_200_OK)#, safe=False)",
"def build_choice_element(node_type, contents, tail_text, value):\r\n # When xml is being parsed numtolerance_input and decoy_input tags map to textinput type\r\n # in order to provide the template with correct rendering information.\r\n if node_type in ('numtolerance_input', 'decoy_input'):\r\n node_type = 'textinput'\r\n choice = {'type': node_type, 'contents': contents, 'tail_text': tail_text, 'value': value}\r\n return choice",
"def instrument_choices(request):\n #choices = INSTRUMENT_CLASSES\n choices = [x.name for x in Instrument.objects.all()]\n diction = {}\n li = []\n for data in choices:\n li.append(data)\n diction['INSTRUMENT_CHOICES'] = li\n return JsonResponse(data=diction, status=status.HTTP_200_OK)",
"def setUp(self):\n current_date = date.today()\n name = 'name'\n possible_meals = [Meal(date=current_date, name=name)]\n self.possible_meals_choices = [(possible_meal.id, possible_meal.name)\n for possible_meal in possible_meals]",
"def _create_question_from_dict(self, d):\n question_type_str = d[Question.TYPE]\n d_copy = d.copy()\n\n # Todo: figure out a global setting for whether select all\n # that apply questions have an automatic none option.\n if question_type_str.startswith(u\"select all that apply\"):\n self._add_none_option_to_select_all_that_apply(d_copy)\n\n # hack job right here to get this to work\n if question_type_str.endswith(u\" or specify other\"):\n question_type_str = question_type_str[:len(question_type_str)-len(u\" or specify other\")]\n d_copy[Question.TYPE] = question_type_str\n self._add_other_option_to_multiple_choice_question(d_copy)\n return [self._create_question_from_dict(d_copy),\n self._create_specify_other_question_from_dict(d_copy)]\n question_class = self._get_question_class(question_type_str)\n # todo: clean up this spaghetti code\n d_copy[u\"question_type_dictionary\"] = self._question_type_dictionary\n if question_class:\n return question_class(**d_copy)\n return []",
"def _create_choice_element(self, **kwargs):\r\n text = kwargs.get('text', '')\r\n correct = kwargs.get('correctness', \"true\")\r\n inputs = kwargs.get('inputs', [])\r\n choice_element = etree.Element(\"choice\")\r\n choice_element.set(\"correct\", correct)\r\n choice_element.text = text\r\n for inp in inputs:\r\n # Add all of the inputs as children of this choice\r\n choice_element.append(inp)\r\n\r\n return choice_element",
"def __init__(self, name, attrs={}):\n ChoiceFormat.__init__(self, name, attrs)",
"def __init__(self, name, attrs={}):\n ChoiceFormat.__init__(self, name, attrs)",
"def create_models( self ):",
"def add_model_case(wiz, choices):\n page = wiz.add_page(u\"Model definition\")\n lay = page.use(qt.QVBoxLayout())\n lay.addWidget(qt.QLabel(u\"What kind of model do you want to work on?\"))\n wfield = page.register(\"model\", choices[0][0])\n lay.addWidget(ModelSelection(wfield, choices))",
"def build_model():",
"def __init__(self, name, list_countries,list_sectors,EORA=False,list_fd_cats=[]):\n self.name = name\n self.m = ConcreteModel()\n self.countries = list_countries\n self.total_countries = len(list_countries)\n self.sectors = list_sectors\n self.fd_cat = list_fd_cats\n if EORA is True:\n self.EORA = True\n else:\n self.EORA = False",
"def test_question_with_choices(self):\n create_question(question_text='Question with choices', days=0)\n response = self.client.get(reverse('polls:index'))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['latest_questions_list'], ['<Question: Question with choices>'])",
"def _initChoiceTable(self):\n\n t = self.tableWidget_choice_list # shorthand notation\n\n ### Header popluation & properties\n '''\n for (i, col_name) in enumerate(self.data.col_name_list):\n # Order the column labels as in the order of the definition\n # of the dictionary for the element property names and the\n # column names\n t.horizontalHeaderItem(i).setText(col_name)\n '''\n # or\n t.setHorizontalHeaderLabels(self.data.col_name_list)\n\n t.horizontalHeader().setMovable(True)"
] | [
"0.6384629",
"0.637918",
"0.6361359",
"0.6212173",
"0.6101701",
"0.60345143",
"0.5951211",
"0.5886236",
"0.5857764",
"0.5848246",
"0.58469194",
"0.58059937",
"0.5702655",
"0.56995493",
"0.56930524",
"0.5691047",
"0.568869",
"0.5666549",
"0.56657976",
"0.5663346",
"0.5648866",
"0.56329304",
"0.5604295",
"0.5604295",
"0.5574496",
"0.55660594",
"0.55320215",
"0.55271775",
"0.5475609",
"0.54406965"
] | 0.6831114 | 0 |
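A usage note on the positive example in the record above: `create_choices` is a test fixture that presupposes Django models named `Question` and `Choice` with the fields it touches. The sketch below shows how such a fixture is typically exercised from a test case; the import paths, field values, and model layout are assumptions made for illustration, not taken from the source project.

# Illustrative only: driving the create_choices() fixture from a Django TestCase.
from django.contrib.auth import get_user_model
from django.test import TestCase

from polls.models import Choice, Question            # assumed app layout
from polls.tests.factories import create_choices     # the fixture shown above


class ChoiceFixtureTests(TestCase):
    def test_choice_is_attached_to_its_question(self):
        user = get_user_model().objects.create_user(username="alice", password="pw")
        question = Question.objects.create(
            created_by=user, title="title", text="text", slug="slug"
        )
        choice = create_choices(question, text="yes", total_votes=3)
        self.assertEqual(choice.question, question)
        self.assertEqual(Choice.objects.count(), 1)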
create data that uses the Answer model | def create_answer(question, user):
return Answer.objects.create(question=question,answered_by=user) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_features(self, answer):\n # Get the teacher's stuff\n a_stopwords = sf.remove_stopwords(self.teacher_answer)\n a_stemmed = sf.stem_sentence(a_stopwords)\n a_stemmed_ordered = sf.order_sentence(a_stemmed)\n teacher_answers = [\n a_stemmed,\n a_stemmed_ordered,\n ]\n \n # Change sentence into multiple versions\n log = dict()\n log['student_answer'] = answer\n log['teacher_answer'] = self.teacher_answer\n log['q_answer'] = answer\n log['q_stopwords'] = sf.remove_stopwords(answer)\n log['q_stemmed'] = sf.stem_sentence(answer)\n log['q_stem_ordered'] = sf.order_sentence(log['q_stemmed'])\n \n # Might need to save scaling until jsut before modeling\n log['wordcount'] = sf.word_count(answer)\n log['wordcount'] = sf.scale_column(self.word_scaler, log['wordcount'])\n\n\n# Stem sim\n log['stem_g_similarity'] = sf.generic_similarity(log['q_stemmed'], a_stemmed)\n log['stem_j_similarity'] = sf.jaccard_similarity(log['q_stemmed'], a_stemmed)\n log['stem_c_similarity'] = sf.cosine_similarity(log['q_stemmed'], a_stemmed)\n # Ordered\n log['stem_ordered_g_similarity'] = sf.generic_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n log['stem_ordered_j_similarity'] = sf.jaccard_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n log['stem_ordered_c_similarity'] = sf.cosine_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n\n\n \n # Appending New Answer\n self.new_answers = self.new_answers.append(log, ignore_index = True)\n \n # Entity Extraction\n types_of_sentences = [\n 'q_stemmed',\n 'q_stem_ordered',\n ]\n \n for sent_type, teach_ans in zip(types_of_sentences, teacher_answers):\n \n self.new_answers = sf.unigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)\n self.new_answers = sf.bigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)\n self.new_answers = sf.trigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)",
"def __init__(self, question, answer):\n\n self.question = question\n self.answer = answer\n\n self.q_and_a = {\n 'Question:': self.question,\n 'Correct Answer:': self.answer,\n }",
"def _create_response_model(self, data):\n pass",
"def _setData(self):\n #offset = datetime.timedelta(prefs.getNoOfDaysBeforeQuestionSchedule())\n date_formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n def _q_data_item(q):\n item = {}\n item[\"qid\"]= \"q_%s\" % q.question_id\n if q.question_number:\n item[\"subject\"] = u\"Q %s %s\" % (q.question_number, q.short_name)\n else:\n item[\"subject\"] = q.short_name\n item[\"title\"] = q.short_name\n item[\"result_item_class\"] = \"workflow-state-%s\" % q.status\n item[\"url\"] = url.set_url_context(\"questions/obj-%s\" % q.question_id)\n item[\"status\"] = misc.get_wf_state(q)\n item[\"status_date\"] = date_formatter.format(q.status_date)\n item[\"owner\"] = \"%s %s\" %(q.owner.first_name, q.owner.last_name)\n item[\"type\"] = _(q.type)\n item[\"to\"] = q.ministry.short_name\n return item\n self._data = [ _q_data_item(question) for question in self.query.all() ]",
"def __init__(self, data):\n self.user_id = data['user_id']\n self.condition_id = data['condition_id']\n self.condition = data['condition']\n self.condition_details = data['condition_details']\n self.user_answer = data['user_answer']",
"def __init__(self):\n self.answers = []",
"def data_for_question(self, question_type):\n\t\treturn {}",
"def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n for result in results:\n data ={}\n data[\"qid\"]= (\"m_\" + str(result.motion_id))\n data[\"subject\"] = u\"M \" + str(result.motion_number) + u\" \" + result.short_name\n data[\"title\"] = result.short_name\n if result.approval_date:\n data[\"result_item_class\"] = (\"workflow-state-\" + \n result.status + \"sc-after-\" + \n datetime.date.strftime(result.approval_date, \"%Y-%m-%d\"))\n else:\n data[\"result_item_class\"] = \"workflow-state-\" + result.status\n data[\"url\"] = url.set_url_context(\"motions/obj-\" + str(result.motion_id))\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"%s %s\" %(result.owner.first_name, result.owner.last_name)\n data[\"type\"] = _(result.type)\n data[\"to\"] = \"\"\n data_list.append(data)\n self._data = data_list",
"def __init__(self, createdby, meetup, title, body, votes,createdOn):\n self.question_id = len(Question.question_list) + 1\n self.createdon = datetime.now()\n self.createdby = createdby\n self.meetup = meetup\n self.title = title\n self.body = body\n self.votes = votes",
"def get_answers(self):\r\n pass",
"def _create_examples(self, lines, kb_data, set_type):\n examples = []\n for idx, line in enumerate(lines):\n item = json.loads(line.strip())\n question_id = \"%s-%s\" % (set_type, idx)\n \n context_a_list = kb_data[idx]['answerA']\n context_b_list = kb_data[idx]['answerB']\n context_c_list = kb_data[idx]['answerC']\n\n context_a = \"\"\n for l in context_a_list[:1]:\n context_a += l.replace(\"\\n\",\". \")\n context_a = context_a[:-1]\n\n context_b = \"\"\n for l in context_b_list[:1]:\n context_b += l.replace(\"\\n\",\". \")\n context_b = context_b[:-1]\n\n context_c = \"\"\n for l in context_c_list[:1]:\n context_c += l.replace(\"\\n\",\". \")\n context_c = context_c[:-1]\n \n \n question = item[\"context\"] + item[\"question\"]\n endings = [item[\"answerA\"],item[\"answerB\"],item[\"answerC\"] ]\n label = item[\"correct\"]\n #race_id = \"%s-%s\" % (set_type, data_raw[\"race_id\"])\n #article = data_raw[\"article\"]\n #for i in range(len(data_raw[\"answers\"])):\n #truth = str(ord(data_raw[\"answers\"][i]) - ord(\"A\"))\n #question = data_raw[\"questions\"][i]\n #options = data_raw[\"options\"][i]\n\n examples.append(\n InputExample(\n example_id=question_id,\n question=question,\n contexts=[context_a,context_b,context_c],\n endings=[endings[0], endings[1], endings[2]],#, options[3]\n label=label,\n )\n )\n return examples",
"def test_initial_answer(self):\n survey = SurveyFactory.create()\n\n data = {\n 'experiment_version': '1',\n 'response_version': 1,\n 'person_id': 'joemamma',\n 'survey_id': survey.name,\n 'flow_id': '20141113',\n 'question_id': '1',\n 'updated_ts': self.timestamp(),\n\n 'question_text': 'ou812?',\n 'variation_id': '1',\n 'score': None,\n 'max_score': None,\n 'flow_began_ts': 0,\n 'flow_offered_ts': 0,\n 'flow_voted_ts': 0,\n 'flow_engaged_ts': 0,\n 'platform': '',\n 'channel': '',\n 'version': '',\n 'locale': '',\n 'country': '',\n 'build_id': '',\n 'partner_id': '',\n 'profile_age': None,\n 'profile_usage': {},\n 'addons': {},\n 'extra': {},\n 'is_test': False\n }\n\n resp = self.client.post(\n reverse('heartbeat-api'),\n content_type='application/json',\n data=json.dumps(data))\n\n assert resp.status_code == 201\n\n ans = Answer.objects.latest('id')\n\n for field in data.keys():\n # survey_id is a special case since it's a foreign key.\n if field == 'survey_id':\n # This looks goofy because it's not the normal way to\n # do things, but the \"survey_id\" attribute is a\n # Survey rather than the pk for a Survey.\n assert ans.survey_id.name == data[field]\n continue\n\n assert getattr(ans, field) == data[field]",
"def create_questionnaire_with(self, questionnaire_data):\n questionnaire_code = fetch_(QUESTIONNAIRE_CODE, from_(questionnaire_data))\n gen_ramdom = fetch_(GEN_RANDOM, from_(questionnaire_data))\n if gen_ramdom:\n questionnaire_code = questionnaire_code + generateId()\n self.driver.find_text_box(QUESTIONNAIRE_CODE_TB).enter_text(questionnaire_code)\n self.create_default_question(questionnaire_data[DEFAULT_QUESTION], DEFAULT_QUESTION_LINK)\n for question in fetch_(QUESTIONS, from_(questionnaire_data)):\n self.driver.find(ADD_A_QUESTION_LINK).click()\n self.fill_question_and_code_tb(question)\n self.SELECT_FUNC[fetch_(TYPE, from_(question))](question)\n return self",
"def __init__(self, answers, ranking: Ranking):\n self.answers = answers\n self.ranking = ranking",
"def __init__(self):\r\n\t\twith open(\"eqs.json\") as qData:\r\n\t\t\tself.questions = json.load(qData)\r\n\t\twith open(\"eqsave.json\") as uData:\r\n\t\t\tself.records = json.load(uData)\r\n\t\tself.types = {\"1\": \"Reformer\", \"2\": \"Helper\", \"3\": \"Achiever\", \"4\": \"Individualist\", \"5\": \"Investigator\", \"6\": \"Loyalist\", \"7\": \"Enthusiast\", \"8\": \"Challenger\", \"9\": \"Peacemaker\"}",
"def generate_answer(self, question):\n\n # Recognize intent of the question using `intent_recognizer`.\n # Don't forget to prepare question and calculate features for the question.\n \n prepared_question = text_prepare(question)\n features = self.tfidf_vectorizer.transform([prepared_question])\n intent = self.intent_recognizer.Main(question)\n #intent='gcs'\n # Chit-chat part: \n if intent == 'dialogue':\n \"\"\"\n # Pass question to chitchat_bot to generate a response.\n reply=self.college.Main(question)\n if reply !=\"Please refer GCS facebook page or ask you mentor for more info :)\":\n return reply\n else: \n \"\"\"\n reply=self.college.Main(question)\n if reply!=\"Please refer GCS facebook page or ask you mentor for more info :)\":\n return reply\n else:\n reply=self.programming.Main(question)\n if reply!=\"Please refer kammand prompt discord or ask you mentor for more info :)\":\n return reply\n else:\n response = str(self.chatbot.get_response(prepared_question))\n temp=np.random.choice(2,p=[0.5,0.5])\n times=np.random.choice([1,2,3,4],p=[0.5,0.3,0.1,0.1])\n if temp==0:\n print(\"EMOJI!!!!!\")\n response= response + times*(label_to_emoji(emojifyer.predict_emoji(model,response,word_to_index)).strip())\n return response\n elif intent==\"mandi\":\n reply=self.college.Main(question)\n return reply\n # Goal-oriented part:\n elif intent==\"stackoverflow\":\n tag = self.tag_classifier.predict(features)[0]\n reply = self.thread_ranker.get_best_thread(prepared_question, tag)\n return reply",
"def __init__(self, question):\n self.question = question\n self.responses = []",
"def store(self) -> None:\n con, c = db.connect()\n if not db.exists('SELECT * FROM answers WHERE id = ?', self.id, con=con):\n c.execute('INSERT INTO answers VALUES (?, ?, ?, ?, ?, ?, ?)', (self.id, self.answer, \n self.likes, self.created, self.tell, self.user.id, self.parent_id,))\n c.execute('UPDATE answers SET answer=?, likes=?, created=?, tell=?, user=? '+\\\n 'WHERE id = ?', (self.answer, self.likes, self.created, self.tell, \n self.user.id, self.id,))\n db.close(con)",
"def create(text, is_correct, question_id):\n answer = Answer(question_id=question_id, text=text, is_correct=is_correct)\n try:\n answer.save()\n return answer\n except IntegrityError:\n return None",
"def fill_question(self, response, question_answer):\n question_answer['source_url'] = response.url\n\n question_answer['question_title'] = response.xpath('//*[@id=\"question-header\"]/h1/a/text()').extract_first()\n question_answer['question_body'] = BeautifulSoup(\n response.xpath(self.gt.css_to_xpath('.postcell .post-text')).extract_first()).text\n question_answer['question_tags'] = list(set(\n response.xpath('//*[contains(concat(\" \", normalize-space(@class), \" \"), \" post-tag \")]/text()').extract()))\n # would like to specify the hierarchy of the css tags\n question_answer['question_upvotes'] = int(response.xpath(\n '//*[contains(concat(\" \", normalize-space(@class), \" \"), \" vote-count-post \")]/text()').extract_first())\n question_answer['question_view_count'] = int(\n response.xpath(self.gt.css_to_xpath('#qinfo .label-key') + '/b/text()').extract()[1].split(' ')[0])\n\n author_name = response.xpath(\n self.gt.css_to_xpath('.owner .user-details') + '/a/text()').extract_first()\n question_answer['question_author'] = {'author_id': '{}_{}'.format(self.allowed_domains[0], author_name),\n 'author_name': author_name}\n\n se_date_format = '%b %d \\'%y at %H:%M' # if date not current year\n se_date_format_curr_year = '%b %d at %H:%M' # if date current year\n try:\n try:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format))\n except ValueError:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format_curr_year))\n except (ValueError, TypeError):\n pass\n # Look for duplicates\n duplicate_url = response.xpath(self.gt.css_to_xpath('.question-originals-of-duplicate')+'/ul/li/a/@href').extract_first()\n if duplicate_url:\n print('duplicate question')\n self.duplicate_count += 1\n print('duplicate question count: {}'.format(self.duplicate_count))\n duplicate_url = \"https://superuser.com\" + duplicate_url\n print(duplicate_url)\n self.logger.info('duplicate url: {}'.format(duplicate_url))\n question_answer['question_original_url'] = duplicate_url\n self.duplicate_url = duplicate_url\n\n return question_answer",
"def create(self, validated_data):\n \n if(Saved_answers.objects.filter(username=validated_data.get('username'),level=validated_data.get('level')).exists()):\n objects=Saved_answers.objects.filter(username=validated_data.get('username'),level=validated_data.get('level')).update(seconds=validated_data.get('seconds'),answer1=validated_data.get('answer1'),answer2=validated_data.get('answer2'),answer3=validated_data.get('answer3'),answer4=validated_data.get('answer4'),answer5=validated_data.get('answer5'),answer6=validated_data.get('answer6'),answer7=validated_data.get('answer7'),answer8=validated_data.get('answer8'),answer9=validated_data.get('answer9'),answer10=validated_data.get('answer10'),answer11=validated_data.get('answer11'),answer12=validated_data.get('answer12'),answer13=validated_data.get('answer13'),answer14=validated_data.get('answer14'),answer15=validated_data.get('answer15'),answer16=validated_data.get('answer16'),answer17=validated_data.get('answer17'),answer18=validated_data.get('answer18'),answer19=validated_data.get('answer19'),answer20=validated_data.get('answer20'))\n else:\n objects=Saved_answers.objects.create(seconds=validated_data.get('seconds'),username=validated_data.get('username'),level=validated_data.get('level'),answer1=validated_data.get('answer1'),answer2=validated_data.get('answer2'),answer3=validated_data.get('answer3'),answer4=validated_data.get('answer4'),answer5=validated_data.get('answer5'),answer6=validated_data.get('answer6'),answer7=validated_data.get('answer7'),answer8=validated_data.get('answer8'),answer9=validated_data.get('answer9'),answer10=validated_data.get('answer10'),answer11=validated_data.get('answer11'),answer12=validated_data.get('answer12'),answer13=validated_data.get('answer13'),answer14=validated_data.get('answer14'),answer15=validated_data.get('answer15'),answer16=validated_data.get('answer16'),answer17=validated_data.get('answer17'),answer18=validated_data.get('answer18'),answer19=validated_data.get('answer19'),answer20=validated_data.get('answer20'))\n # print >> sys.stderr, objects\n return objects",
"def initialize_new_questionnaire(questionnaire, option_type, uuid):\r\n q = {}\r\n if (type(questionnaire) == dict):\r\n for key, val in questionnaire.items():\r\n if key != 'index':\r\n\r\n q[key] = [val] if type(val) != list else val\r\n questionnaire = pd.DataFrame(q)\r\n\r\n\r\n if \"_questionnaire\" not in option_type:\r\n option_type = option_type + \"_questionnaire\"\r\n\r\n option_type = option_type.lower()\r\n if 'option_type' not in questionnaire:\r\n questionnaire['option_type'] = [option_type]\r\n questionnaire['uuid'] = [uuid]\r\n questionnaire['timestamp'] = [datetime.datetime.utcnow()]\r\n print(\"this is questionaire: \", questionnaire)\r\n\r\n questionnaire=questionnaire.set_index('uuid')\r\n print(\"this is questionaire: \", questionnaire)\r\n questionnaire.to_sql(option_type, con=Database.DATABASE.engine, if_exists=\"append\", index=True)",
"def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n for result in results:\n data = {}\n data[\"qid\"] = (\"i-\" + str(result.parliamentary_item_id))\n if type(result)==domain.AgendaItem:\n g = u\" \" + result.group.type + u\" \" + result.group.short_name\n else:\n g = u\"\" # !+ g?\n data[\"subject\"] = result.short_name\n data[\"title\"] = result.short_name\n data[\"result_item_class\"] = \"workflow-state-\" + result.status\n data[\"url\"] = url.set_url_context(\"%ss/obj-%i\" % (\n result.type, result.parliamentary_item_id))\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"%s %s\" %(result.owner.first_name, result.owner.last_name)\n data[\"type\"] = _(result.type)\n if type(result)==domain.Question:\n data[\"to\"] = result.ministry.short_name\n else:\n data[\"to\"]= u\"\"\n # remember original domain object\n data[\"id\"] = result.parliamentary_item_id\n data[\"_obj\"] = result\n # append processed result item\n data_list.append(data)\n self._data = data_list",
"def get_or_create(cls, question, student, result, answer, correct=None):\n qa = QuestionAnswer.objects.filter(question=question, student=student,\n result=result).first()\n if qa:\n qa.answer = answer,\n qa.correct = correct\n else:\n ans_data = {\n 'question': question,\n 'student': student,\n 'result': result,\n 'answer': answer,\n 'correct': correct,\n }\n qa = QuestionAnswer(**ans_data)\n qa.save()\n return qa",
"def create(self, request):\n if not hasattr(request, \"data\"):\n request.data = request.POST\n attrs = self.flatten_dict(request.data)\n if not attrs.get('include_answer_page', None):\n if 'answer_page_title' in attrs:\n del attrs['answer_page_title']\n if 'answer_page_link' in attrs:\n del attrs['answer_page_link']\n kn = Knowledge(question = attrs['question'], \n search_keywords = attrs.get('search_keywords', ''),\n answer_summary = attrs.get('answer_summary', ''),\n answer_page_title = attrs.get('answer_page_title', ''),\n answer_page_link = attrs.get('answer_page_link', ''),\n tags = attrs.get('tags', ''),\n user=request.user)\n kn.save()\n return kn",
"def gen_questions(self, number_of_questions):",
"def __init__(self, data={}):\n\n self.config = db_config(BaseConfig.DATABASE_URI)\n self.table = 'questions'\n self.title = data.get('title')\n self.body = data.get('body')\n self.q = data.get('q')\n self.question_id = data.get('id')\n self.user_id = data.get('user_id')\n self.now = str(datetime.now())\n self.logged_in_user_id = Auth.get_logged_in_user(request)[0]['data']['user_id']",
"def add_answer(self):\n # Create users\n user = self.create_user(self.user)\n other_user = self.create_user(self.other_user)\n\n # Create question\n question = self.create_question(self.question, other_user.id)\n\n # Add answers\n answer = self.create_answer(self.answer, question.id, user.id)\n\n user_token = self.get_user_token(user)\n other_user_token = self.get_user_token(other_user)\n\n return user_token, other_user_token, question.id, answer.id",
"def post(self,request,format=None):\n id_ = request.data.get('questionID')\n selected_answer = request.data.get('answer')\n ques_obj = get_object_or_404(SingleWordQuiz,pk=id_)\n answer_obj,created = SingleWordQuizAnswer.objects.get_or_create(user=request.user.info,quiz_ques=ques_obj)\n answer_obj.selected_answer = selected_answer\n answer_obj.save()\n serializer = SingleWordQuizAnswerSerializer(answer_obj)\n return Response(data=serializer.data,status=status.HTTP_201_CREATED)",
"def post(self,request,format=None):\n id_ = request.data.get('questionID')\n selected_answer = request.data.get('answer')\n ques_obj = get_object_or_404(MultipleQuiz,pk=id_)\n answer_obj,created = MultipleQuizAnswer.objects.get_or_create(user=request.user.info,quiz_ques=ques_obj)\n answer_obj.selected_answer = selected_answer\n answer_obj.save()\n serializer = MultipleQuizAnswerSerializer(answer_obj)\n return Response(data=serializer.data,status=status.HTTP_201_CREATED)"
] | [
"0.64814955",
"0.63967526",
"0.63827956",
"0.6260587",
"0.6246229",
"0.62410545",
"0.6156967",
"0.61112285",
"0.61091954",
"0.61057824",
"0.6091933",
"0.6012156",
"0.6009843",
"0.59592485",
"0.59487975",
"0.5926686",
"0.59090936",
"0.59057355",
"0.5905192",
"0.58983946",
"0.58766586",
"0.58738375",
"0.58670944",
"0.58562773",
"0.5813829",
"0.58111894",
"0.5784844",
"0.5784303",
"0.57695705",
"0.5737954"
] | 0.6539529 | 0 |
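The `create_answer(question, user)` fixture in the record above only reveals that `Answer` has `question` and `answered_by` fields. A minimal model sketch consistent with that call, assuming a standard Django setup (the string model reference and related name are guesses), would be:

# Hypothetical Answer model inferred from create_answer(); not the project's actual model.
from django.conf import settings
from django.db import models


class Answer(models.Model):
    question = models.ForeignKey(
        "polls.Question", on_delete=models.CASCADE, related_name="answers"
    )
    answered_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

    def __str__(self):
        return f"Answer to question {self.question_id} by user {self.answered_by_id}"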
same as create_user but using user manager | def create_user_using_manager(username,password):
manager = UserManager()
return manager.create_user(username=username, password=password) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')",
"def create_user(self):\n return User.objects.create_user(**self.user_data)",
"def create_user(email, password, f_name, l_name):\n pass",
"def _create(cls, model_class, *args, **kwargs):\n manager = cls._get_manager(model_class)\n # The default would use ``manager.create(*args, **kwargs)``\n return manager.create_user(*args, **kwargs)",
"def sample_user(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)",
"def create_user(self):\n u = USER.objects.create(username='test_user1',\n email='[email protected]', )\n u.set_password('test_password')\n u.save()\n self.user = u\n return u",
"def create_new_user():\n return get_user_model().objects.create_user(\n email='[email protected]',\n password='test@londodnjisdjfois',\n username='tempusername'\n )",
"def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='[email protected]')[0]\n user.set_password('testabc123')\n user.save()\n return user",
"def create(self, data):\n # ensure 'create()' calls the specific 'create_user()' method\n # note that the 'data' gets validated\n user = get_user_model().objects.create_user(**data)\n return user",
"def create_user_object():\n user = User.objects.get_or_create(username='testuser',\n first_name='Test',\n last_name='User',\n email='[email protected]')[0]\n user.set_password('testabc123')\n user.save()\n\n return user",
"def sample_user_third(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name3\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)",
"def sample_user(email='[email protected]', password='open@123'):\n return get_user_model().objects.create_user(email, password)",
"def new_user(cls, user):\r\n pass",
"def sample_user(email, password, is_doctor, is_hospital_admin):\n return MyUser.objects.create_user(email, is_hospital_admin, is_doctor, password)",
"def create_user(username,password):\n return User.objects.create_user(username=username,password=password)",
"def users_create():",
"def sample_user_second(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name2\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)",
"def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)",
"def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)",
"def sample_user(email='[email protected]', password='testpass'):\n return get_user_model().objects.create_user(email, password)",
"def create_new_user(self):\n username = 'pseudo'\n email = '[email protected]'\n password = '00000000'\n user_created = self.user.objects.create_user(id=1, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n\n return user_created",
"def new_user(cls, user):\n pass",
"def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)",
"def sample_user(email: str = \"[email protected]\", password: str = \"testpass\"):\n return get_user_model().objects.create_user(email, password)",
"def create_user(email='[email protected]', password='testpass123'):\n return get_user_model().objects.create_user(email=email, password=password)",
"def create_user(self):\n if not self.is_valid():\n return None\n # generate a username \n ids = User.objects.values_list('id', flat=True).order_by('-id')[:1]\n if len(ids) > 0:\n # ids[0] will be the maximum value (due to order_by: '-id')\n idnum = ids[0] + 1\n else:\n idnum = 1\n # create User object \n username = \"user%s\" % idnum\n # NOTE: store email in lower case\n email = self.clean_email().lower()\n password = self.clean_password2()\n user = User(username=username, email=email, password='tmp')\n user.save()\n # set the real password\n user.set_password(password)\n # make user inactive (until user has confirmed account)\n user.is_active = False\n # update\n user.save()\n return user",
"def sample_user_fourth(email=\"[email protected]\",\n password=\"password123\",\n name=\"some name4\"):\n return get_user_model().objects.create_user(email=email,\n password=password,\n name=name)",
"def sample_user(email='[email protected]', password='password'):\n return get_user_model().objects.create_user(email, password)",
"def new_user():\n pass",
"def create_user(UserName=None, MessageAction=None, FirstName=None, LastName=None, AuthenticationType=None):\n pass"
] | [
"0.8181188",
"0.7908192",
"0.78201514",
"0.77046186",
"0.7671698",
"0.7660212",
"0.7622274",
"0.7616442",
"0.759572",
"0.7572086",
"0.7542684",
"0.75390494",
"0.7530234",
"0.7527077",
"0.75035036",
"0.7481546",
"0.74714476",
"0.74663395",
"0.74663395",
"0.74663395",
"0.7426541",
"0.7417308",
"0.74145496",
"0.73876965",
"0.7386723",
"0.73780423",
"0.7376609",
"0.7373984",
"0.73720276",
"0.7370141"
] | 0.8066032 | 1 |
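A caveat on the positive example above: if `UserManager` here is Django's `django.contrib.auth.models.UserManager`, instantiating it directly leaves it unbound to any model (`self.model` is `None`), so calling `manager.create_user(...)` on it will fail outside a model class. The manager is normally reached through the user model itself; a hedged sketch of the two usual spellings:

# Sketch of the conventional ways to call create_user(); assumes Django's auth app.
from django.contrib.auth import get_user_model


def create_user_via_model(username, password):
    # Manager already bound to the active user model.
    return get_user_model().objects.create_user(username=username, password=password)


def create_user_via_manager(username, password):
    # Same call, but going through the model's default manager explicitly.
    manager = get_user_model()._default_manager
    return manager.create_user(username=username, password=password)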
populate question object with random string and user | def populate_poll(user="",total=10):
user_list = None
#create random user only when user argument empty
if user == "":
create_random_user(20)
user_list = User.objects.all()
for i in range(total):
Question.objects.create(
created_by=random.choice(user_list) if user_list is not None else user,
title=create_random_string(seed_random(10)),
text=create_random_string(seed_random(300)),
slug=create_random_string(seed_random(100)) ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, question):\n self.question = question\n self.responses = []",
"def __init__(self, question, answer):\n\n self.question = question\n self.answer = answer\n\n self.q_and_a = {\n 'Question:': self.question,\n 'Correct Answer:': self.answer,\n }",
"def notAFan_questions(user):\n questions = {\n 1: \"GBB: How old are you? \",\n 2: \"GBB: What do you like to do in your free time? \",\n 3: \"GBB: What is your ethnicity? \",\n 4: \"GBB: What did you eat for breakfast? \",\n 5: \"GBB: Are you an early bird or a night owl? \",\n 6: \"GBB: Do you like football? \"\n }\n\n while True:\n num = random.randint(1, 6)\n answered = user['personal questions asked']\n if num not in answered:\n user['personal questions asked'].append(num)\n return questions[num]\n if len(answered) == len(questions.keys()):\n return \"GBB: Looks like I know you so well that I don't even need to ask you a question! Type anything to proceed.\"",
"async def generate_question(self) -> None:\n topic = choice(list(self._topics.keys()))\n arrayList = await self.session.get(\"https://restcountries.eu/rest/v2\")\n arrayList = await arrayList.json() # get request to the country API\n countries = []\n \n for _ in range(4):\n country = choice(arrayList)\n del arrayList[arrayList.index(country)]\n countries.append(country)\n del arrayList\n \n country = choice(countries)\n del countries[countries.index(country)]\n self.question = f\"What is the {self._topics[topic]} of {country['name']}?\"\n self.correct_order = randint(0, 3)\n self.choices = [i[topic] for i in countries]\n self.choices.insert(self.correct_order, country[topic])\n del countries, topic",
"def personal_questions(user):\n questions = {\n 1: 'GBB: How long have you been a fan of the Packers?',\n 2: 'GBB: Why are you a fan of the Packers?',\n 3: \"GBB: What team do you hate the most?\",\n 4: \"GBB: Who's your favorite player on the Packers?\",\n 5: \"GBB: Who's your least favorite player on the Packers?\",\n 6: \"GBB: Do you come from a family of Packer fans, or are you a lone ranger?\"\n }\n\n while True:\n num = random.randint(1, 6)\n answered = user['personal questions asked']\n if num not in answered:\n user['personal questions asked'].append(num)\n return questions[num]\n if len(answered) == len(questions.keys()):\n return \"GBB: Look's like I know you so well that I don't even need to ask you a question!\"",
"def __init__(self, question, correct_answer):\n\n self.question = question\n self.correct_answer = correct_answer",
"def populate_game_questions():\n indices = random.sample(range(0, len(quizquestion.questions_all)), 5) # If user doesn't specify, choose 5 random questions\n return quizquestion.QuizQuestion.get_game_questions(indices)",
"def get_question(self, user_state):\n\n if not user_state.current_session:\n\n # Create the new session\n new_session = user_state.create_session()\n # Create the new block for the session\n new_block = Block.objects.create(session=new_session)\n\n # Fill up the new block with random qandas\n while not new_block.is_full:\n QandA.objects.create(question=self.get_random(user_state), block=new_block)\n\n # Add the new block\n user_state.add_block(new_block)\n\n return user_state.current_session.current_block.get_question()",
"def create_answer(question, user):\n return Answer.objects.create(question=question,answered_by=user)",
"def create_question(self, user_id=0):\n resp = self.create_user()\n if user_id == 0:\n user_id = resp[0]\n\n params = {\n \"user_id\":user_id,\n \"text\":\"What is the fastest programming language and why do you think so?\",\n \"description\":\"I am looking for the fastest programming language in terms\\\n of memory management for a very high performance project.\"\n }\n headers = {\n \"Authorization\":\"Bearer {}\".format(resp[1]),\n \"Content-Type\":\"application/json\"\n }\n path = \"/api/v2/questions\"\n question = self.client.post(path=path,\n data=json.dumps(params),\n headers=headers)\n question_id = question.json['question_id']\n return int(question_id), question",
"def create_question(user,title='title',text='text'):\n return Question.objects.create(created_by=user, title=title, text=text)",
"def fill_question(self, response, question_answer):\n question_answer['source_url'] = response.url\n\n question_answer['question_title'] = response.xpath('//*[@id=\"question-header\"]/h1/a/text()').extract_first()\n question_answer['question_body'] = BeautifulSoup(\n response.xpath(self.gt.css_to_xpath('.postcell .post-text')).extract_first()).text\n question_answer['question_tags'] = list(set(\n response.xpath('//*[contains(concat(\" \", normalize-space(@class), \" \"), \" post-tag \")]/text()').extract()))\n # would like to specify the hierarchy of the css tags\n question_answer['question_upvotes'] = int(response.xpath(\n '//*[contains(concat(\" \", normalize-space(@class), \" \"), \" vote-count-post \")]/text()').extract_first())\n question_answer['question_view_count'] = int(\n response.xpath(self.gt.css_to_xpath('#qinfo .label-key') + '/b/text()').extract()[1].split(' ')[0])\n\n author_name = response.xpath(\n self.gt.css_to_xpath('.owner .user-details') + '/a/text()').extract_first()\n question_answer['question_author'] = {'author_id': '{}_{}'.format(self.allowed_domains[0], author_name),\n 'author_name': author_name}\n\n se_date_format = '%b %d \\'%y at %H:%M' # if date not current year\n se_date_format_curr_year = '%b %d at %H:%M' # if date current year\n try:\n try:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format))\n except ValueError:\n question_answer['question_date'] = date_to_solr_format(datetime.strptime(response.xpath(\n self.gt.css_to_xpath('.owner .user-action-time .relativetime') + '/text()').extract_first(),\n se_date_format_curr_year))\n except (ValueError, TypeError):\n pass\n # Look for duplicates\n duplicate_url = response.xpath(self.gt.css_to_xpath('.question-originals-of-duplicate')+'/ul/li/a/@href').extract_first()\n if duplicate_url:\n print('duplicate question')\n self.duplicate_count += 1\n print('duplicate question count: {}'.format(self.duplicate_count))\n duplicate_url = \"https://superuser.com\" + duplicate_url\n print(duplicate_url)\n self.logger.info('duplicate url: {}'.format(duplicate_url))\n question_answer['question_original_url'] = duplicate_url\n self.duplicate_url = duplicate_url\n\n return question_answer",
"async def question(self, channel_id, user_infos, user_id, team_id):\n\n q = random.choice(self.questions) # Random question selection from list\n answers = q[\"badAnswers\"] + [q[\"goodAnswer\"]] # Save all possible answers\n goodAnswer = q[\"goodAnswer\"] # Save the good answer\n random.shuffle(answers) # Shuffle everything\n\n choices = {} # Dict of choices\n\n for i in range(len(answers)): # For every possible answer\n choices[str(i + 1)] = answers[i]; # Fill the choices dict with normal people understandable indexes\n\n message = \"{} \\n\".format(q[\"question\"]) # Start the string question message\n\n for key in sorted(choices):\n message += (\"Reponse {} : {} \\n\").format(key, choices[key]) # Add choices to question message\n\n id = 0\n for i in range(len(choices)):\n if choices[str(i + 1)] == goodAnswer: # Retrieve the good answer id (lol). Should probably do differently...\n id = i + 1\n\n self.currentAskedQuestions[user_id] = str(id) # Put the entry in the dict with good answer id\n return await self.sendText(message, channel_id,user_infos, team_id)",
"def genQuestion(line):\r\n if type(line) is str: # If the passed variable is of type string.\r\n line = TextBlob(line) # Create object of type textblob.blob.TextBlob\r\n \r\n bucket = {} # Create an empty dictionary\r\n \r\n subject_list = []\r\n question_subject=\"\"\r\n answer_subject=\"\"\r\n for i,j in enumerate(line.tags): # line.tags are the parts-of-speach in English \r\n question_subject += j[0] + \" \"\r\n if (j[1] == \"NNP\" or j[1] == \"NNS\"): \r\n subject_list.append(j[0])\r\n if j[1] not in bucket:\r\n bucket[j[1]] = i # Add all tags to the dictionary or bucket variable\r\n \r\n if len(subject_list):\r\n random_subject_val = random.randint(0, len(subject_list)-1)\r\n question_subject = question_subject.replace(str(subject_list[random_subject_val]), \"______\")\r\n answer_subject = str(subject_list[random_subject_val])\r\n \r\n return question_subject, answer_subject",
"def choose_question():\r\n random_index_question = randint(1, question.num_question + 1)\r\n random_question = question.question[random_index_question]\r\n correct_answer = question.answer[random_index_question]\r\n return random_question, correct_answer",
"def get_random_question(self):\n available_qs = self.available_qs\n if available_qs.exists():\n return random.choice(available_qs)",
"def __init__(self, name):\n self.name = name\n self.questions = []",
"def gen_questions(self, number_of_questions):",
"def generate_answer(self, question):\n\n # Recognize intent of the question using `intent_recognizer`.\n # Don't forget to prepare question and calculate features for the question.\n \n prepared_question = text_prepare(question)\n features = self.tfidf_vectorizer.transform([prepared_question])\n intent = self.intent_recognizer.Main(question)\n #intent='gcs'\n # Chit-chat part: \n if intent == 'dialogue':\n \"\"\"\n # Pass question to chitchat_bot to generate a response.\n reply=self.college.Main(question)\n if reply !=\"Please refer GCS facebook page or ask you mentor for more info :)\":\n return reply\n else: \n \"\"\"\n reply=self.college.Main(question)\n if reply!=\"Please refer GCS facebook page or ask you mentor for more info :)\":\n return reply\n else:\n reply=self.programming.Main(question)\n if reply!=\"Please refer kammand prompt discord or ask you mentor for more info :)\":\n return reply\n else:\n response = str(self.chatbot.get_response(prepared_question))\n temp=np.random.choice(2,p=[0.5,0.5])\n times=np.random.choice([1,2,3,4],p=[0.5,0.3,0.1,0.1])\n if temp==0:\n print(\"EMOJI!!!!!\")\n response= response + times*(label_to_emoji(emojifyer.predict_emoji(model,response,word_to_index)).strip())\n return response\n elif intent==\"mandi\":\n reply=self.college.Main(question)\n return reply\n # Goal-oriented part:\n elif intent==\"stackoverflow\":\n tag = self.tag_classifier.predict(features)[0]\n reply = self.thread_ranker.get_best_thread(prepared_question, tag)\n return reply",
"def add_user_answer(self, question, guess, correct):\n user_answer = UserAnswer()\n user_answer.user = self.user\n user_answer.quiz = self.quiz\n user_answer.question = question\n user_answer.answer = guess\n user_answer.correct = correct\n user_answer.save()",
"def __init__(self, createdby, meetup, title, body, votes,createdOn):\n self.question_id = len(Question.question_list) + 1\n self.createdon = datetime.now()\n self.createdby = createdby\n self.meetup = meetup\n self.title = title\n self.body = body\n self.votes = votes",
"def not_given_bot(question_intent, question, answer):\n col_q_not_given.insert_one(\n {\n\n 'Question': question,\n 'Question Intent': question_intent,\n 'Answer': answer\n }\n )",
"def convert_question(self, q):\n\n item = {}\n item['id'] = q['id']\n item['title'] = q['title']\n item['body'] = q['text']\n item['author_id'] = q['author']['id']\n item['author'] = q['author']['username']\n item['url'] = q['url']\n item['score'] = q['score']\n item['score_label'] = self.convert_count(q['score'])\n item['answer_count'] = q['answer_count']\n item['answer_count_label'] = self.convert_count(q['answer_count'])\n item['view_count'] = q['view_count']\n item['view_count_label'] = self.convert_count(q['view_count'])\n item['added_at'] = q['added_at']\n item['added_at_label'] = timeago.format(datetime.fromtimestamp(int(q['added_at']), TIMEZONE), datetime.now(TIMEZONE))\n item['last_activity'] = q['last_activity_at']\n item['last_activity_label'] = timeago.format(datetime.fromtimestamp(int(q['last_activity_at']), TIMEZONE), datetime.now(TIMEZONE))\n item['has_more_comments'] = False\n item['has_more_answers'] = False\n item['has_accepted_answer'] = q['has_accepted_answer']\n item['closed'] = q['closed']\n\n item['tags'] = []\n for tag in q['tags']:\n item['tags'].append({'name': tag})\n\n return item",
"def __init__(self, exam_name):\n\n self.name = exam_name\n self.questions = []",
"def __init__(self, data={}):\n\n self.config = db_config(BaseConfig.DATABASE_URI)\n self.table = 'questions'\n self.title = data.get('title')\n self.body = data.get('body')\n self.q = data.get('q')\n self.question_id = data.get('id')\n self.user_id = data.get('user_id')\n self.now = str(datetime.now())\n self.logged_in_user_id = Auth.get_logged_in_user(request)[0]['data']['user_id']",
"def ask_question():\n title_question = request.form.get(\"title\")\n question = request.form.get(\"question\")\n\n date_string = datetime.today().strftime('%Y-%m-%d')\n \n ask = Question(user_id = session[\"user_id\"],question_created=date_string, title_question = title_question, question = question)\n\n db.session.add(ask)\n db.session.commit()\n\n return \"question added\"",
"def __init__(self, question, correct_answer):\n self.question = question\n self.correct_answer = correct_answer.lower()",
"def create_question(self):\n\n locations = [\"meetup_id\", \"user_id\", \"title\", \"body\"]\n\n try:\n\n user = self.sql.get_username_by_id(\n int(self.question_details[\"user\"]))\n\n meetup = self.sql.fetch_details_by_criteria(\n \"meetup_id\", self.question_details[\"meetup\"], \"meetups\")\n\n existing = self.sql.fetch_details_if_text_exists(\n \"title\", self.question_details[\"title\"], \"questions\")\n\n title = self.question_details[\"title\"]\n\n body = self.question_details[\"body\"]\n\n except KeyError as keyerror:\n return self.makeresp(\"{} is a required field\".format(keyerror), 400)\n\n isempty = DataValidators(\n self.question_details).check_values_not_empty()\n\n if isinstance(isempty, str):\n return self.makeresp(isempty, 400)\n\n if not user:\n return self.makeresp(\"User not found\", 404)\n\n if not meetup:\n return self.makeresp(\"Meetup not found\", 404)\n\n if not self.check_is_error(existing):\n\n if [meet_id[1] for meet_id in existing if self.question_details[\"meetup\"] in meet_id]:\n\n return self.makeresp(\"This Question already exists\", 409)\n\n question = {\n \"meetup\": self.question_details[\"meetup\"],\n \"createdBy\": self.question_details[\"user\"],\n \"title\": title,\n \"body\": body\n }\n\n question_id = SqlHelper(question).save_to_database(\n locations, \"questions\")\n\n return self.makeresp(\n {\n \"id\": question_id,\n \"user\": question[\"createdBy\"],\n \"meetup\": question[\"meetup\"],\n \"title\": question[\"title\"],\n \"body\": question[\"body\"]\n }, 201)",
"def question(update, context):\n bot = context.bot\n user = update.message.from_user\n inc_msg = str.lower(update.message.text)\n\n # answer why questions with a reasons from database\n if 'waarom' in inc_msg:\n\n # return a random reason from file\n with open(REASONS) as file:\n lines = file.readlines()\n msg = random.choice(lines)\n\n # answer other questions with\n else:\n # TODO: introduce random silence\n rng = random.random()\n\n if rng < 0.9 and not 'rob' not in inc_msg:\n return\n options = [\n f\"Vraag het maar niet aan mij, ik ben niet alwetend.\",\n (\"https://lmgtfy.com/?q=\" + inc_msg.replace(\" \", \"+\") + \"&pp=1&s=g&t=w\"),\n f\"Ja he dat weet ik toch ook niet, google dat maar ff {user.first_name}...\"\n ]\n\n msg = random.choice(options)\n time.sleep(HUMAN_DELAY * len(msg))\n\n bot.send_message(chat_id=update.message.chat_id, text=msg,\n reply_to_message_id=update.message.message_id,\n parse_mode=ParseMode.MARKDOWN)",
"def create_freeform(cls, name, question, default_response, contacts, user): \n poll = Poll.objects.create(\n name=name,\n question=question,\n default_response=default_response, \n user=user,\n type=Poll.TYPE_TEXT)\n poll.contacts = contacts \n return poll"
] | [
"0.6517821",
"0.6408259",
"0.6362986",
"0.6347021",
"0.6313082",
"0.61639714",
"0.6121847",
"0.6114391",
"0.6093224",
"0.6044101",
"0.6040264",
"0.5993357",
"0.59512204",
"0.5943956",
"0.5889812",
"0.58555055",
"0.5852014",
"0.58435476",
"0.58026284",
"0.57996535",
"0.5797478",
"0.57950056",
"0.57722247",
"0.5760061",
"0.57479465",
"0.5746581",
"0.5733781",
"0.5723847",
"0.57101524",
"0.5651535"
] | 0.69050956 | 0 |
create CreatePollQuestion dummy form | def create_dummy_form(title,text,fill_choice=[],choice_length=[]):
    # pre-fill all eight choice slots with None (blank dummy choices)
count=0
choices=[]
while count < 8:
choices.append(None)
count+=1
    # fill the slots whose indices appear in fill_choice with random strings
for i in fill_choice:
try :
length = choice_length[i]
except IndexError :
length = 10
choices[i] = create_random_string(length)
dummy_form=CreatePollQuestion(
{"question_title":title,
"question_text" :text,
"choice_1":choices[0],
"choice_2":choices[1],
"choice_3":choices[2],
"choice_4":choices[3],
"choice_5":choices[4],
"choice_6":choices[5],
"choice_7":choices[6],
"choice_8":choices[7],
})
return dummy_form | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_freeform(cls, name, question, default_response, contacts, user): \n poll = Poll.objects.create(\n name=name,\n question=question,\n default_response=default_response, \n user=user,\n type=Poll.TYPE_TEXT)\n poll.contacts = contacts \n return poll",
"def __init__(self, radio_poll, *args, **kwargs):\n super(RadioPollChoiceForm, self).__init__(*args, **kwargs)\n choices = (((None, '----'),) +\n tuple(radio_poll.answers.values_list('id', 'answer')))\n self.fields['radio_poll__%s' % str(radio_poll.id)] = (\n forms.ChoiceField(widget=forms.Select(),\n choices=choices,\n label=radio_poll.question))",
"def createForm(request):\n if request.method == 'POST':\n form = QuestionFormForm(request.POST)\n if form.is_valid():\n #return the uuid so the organization can use that link in the post to connect to the questionform\n formID = form.save().UUID\n #send them the url for the form\n messages.success(request, 'You have made your question form accessible at: ' + request.build_absolute_uri('/post/') + f'apply/{formID}')\n context = {'form': form}\n return render(request, 'scholarship.html', context=context)\n form = QuestionFormForm()\n context = {'form': form}\n return render(request, 'scholarship.html', context=context)",
"def test_create_new_form(self):\n\n survey = self._create_test_survey()\n assert survey is not None\n\n new_survey = SurveyForm.get(self.test_survey_name)\n assert new_survey is not None\n assert new_survey.form == self.test_form",
"def create_poll(question, days):\n\treturn Poll.objects.create(\n\t\tquestion=question, \n\t\tpub_date=timezone.now() + datetime.timedelta(days=days)\n\t\t)",
"def test_question_without_choices(self):\n set_up_user(self)\n self.assertFalse(self.user.is_superuser)\n\n question_no_choices = create_question_without_choices(question_text=\"Question wihout Choices.\", days=-1)\n url = reverse('polls:detail', args=(question_no_choices.id,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)",
"def test_question_with_choices(self):\n create_question(question_text='Question with choices', days=0)\n response = self.client.get(reverse('polls:index'))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['latest_questions_list'], ['<Question: Question with choices>'])",
"def test_question_with_choices(self):\n question = create_question(question_text='Question with choices', days=0)\n response = self.client.get(reverse('polls:details', args=(question.id, )))\n self.assertContains(response, question.question_text)",
"def test_create_single_poll_submission(self):\r\n # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.\r\n pass",
"def get_form(self):\n return QuestionForm()",
"def _create_test_survey(self):\n return SurveyForm.create(self.test_survey_name, self.test_form)",
"def make_form(self):",
"def create_question(question_text, days, choices=('choice 1',)):\n time = timezone.now() + datetime.timedelta(days=days)\n question = Question.objects.create(question_text=question_text, pub_date=time)\n for choice in choices:\n question.choice_set.create(choice_text=choice)\n return question",
"def create_choices(question_model, text=\"text\", total_votes = 0):\n return Choice.objects.create(question=question_model, \n text=text, \n total_votes=total_votes)",
"def create_question(question_text, days, create_choice=True):\n\n time = timezone.now() + datetime.timedelta(days=days)\n question = Question.objects.create(question_text=question_text, pub_date=time)\n if create_choice:\n question.choice_set.create(choice_text=\"Choice 1\", votes=0)\n return question",
"def test_create_new_question(self):\n response = self.client().post('/questions', json=self.new_question)\n body = json.loads(response.data)\n\n question = Question.query.filter_by(id=body['created']).one_or_none()\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['success'], True)\n self.assertIsNotNone(question)",
"def new_from_post():\n # If you make a post request with a question_id we will assume you want a new question editor\n # we will prepopulate the question new page with data from that question (if it is a valid question id)\n question_id = request.form['question_id'] if request.form['question_id'] else ''\n\n return render_template('questionNew.html', question_id=question_id)",
"def test_create_questions(self):\n res = self.client().post('/questions',\n json={\n \"question\": \"What is chemical \\\n composition of water\",\n \"answer\": \"H2O\",\n \"category\": 1,\n \"difficulty\": 2\n })\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created'])",
"def testQuestionField(self):\n sdq1 = getattr(self.s1, 'sdq1')\n self.app.REQUEST.form['showYMD'] = False\n self.app.REQUEST.form['showHM'] = False\n app = self.app\n dummy_controller_state = ControllerState(\n id='base_edit',\n context=sdq1,\n button='submit',\n status='success',\n errors={},\n next_action=None,)\n controller = self.portal.portal_form_controller\n controller_state = controller.validate(dummy_controller_state, app.REQUEST, ['validate_base',])\n errors = controller_state.getErrors()\n errors = sdq1.post_validate(self.app.REQUEST, errors)\n assert errors != {}, \"Validation error not raised\"\n assert errors.has_key('showYMD')\n assert errors.has_key('showHM')",
"def test_question_with_choices(self):\n question = create_question(question_text='Question with choices', days=0)\n response = self.client.get(reverse('polls:results', args=(question.id, )))\n self.assertContains(response, question.question_text)",
"def question_new_validate():",
"def test_question_without_choices_for_admin(self):\n set_up_super_user(self)\n self.assertTrue(self.user.is_superuser)\n\n question_no_choices = create_question_without_choices(question_text=\"Question wihout Choices.\", days=-1)\n url = reverse('polls:detail', args=(question_no_choices.id,))\n response = self.client.get(url)\n self.assertContains(response, question_no_choices.question_text)",
"def test_create_question(self):\n res = self.client().post('/api/questions', json=self.new_question)\n res_body = json.loads(res.data)\n\n self.assertEqual(res.status_code, 201)\n self.assertTrue(res_body['success'])\n self.assertTrue(res_body['created'])\n \n new_question = Question.query.filter(Question.id == res_body['created']).one_or_none()\n self.assertTrue(new_question)",
"def __init__(self,\n quiz_size_slug=Quiz.DEFAULT_QUIZ_SIZE_SLUG,\n *args, **kwargs):\n super(QuizForm, self).__init__(*args, **kwargs)\n quiz_json = QuizJson()\n question_count = Quiz.get_question_count_for_slug(quiz_size_slug)\n self.question_count = question_count\n\n for question_no in range(0, question_count):\n question_no_str = str(question_no)\n question_no_2_chars = question_no_str.zfill(2)\n question_key = 'question_' + question_no_2_chars\n form_question_no_str = str(question_no + 1)\n question_text = quiz_json.get_question_text(question_no)\n label = form_question_no_str + '. ' + question_text\n radio_widget = forms.RadioSelect(attrs={'class': 'quiz_answer'})\n choices = quiz_json.get_choices(question_no)\n self.fields[question_key] = forms.ChoiceField(\n widget=radio_widget, label=label, choices=choices\n )",
"def create_registration(cls, name, question, default_response, contacts, user):\n poll = Poll.objects.create(\n name=name,\n question=question,\n default_response=default_response, \n user=user,\n type=Poll.TYPE_REGISTRATION)\n poll.contacts = contacts \n return poll",
"def test_make_form():",
"def create(self):\n\n if self.data.get('hydrogeology', None):\n self.form = self._make_form(\n self.well.hydrogeology_parameter if self.well.hydrogeology_parameter else HydrogeologyParameter()\n , HydrogeologyParameterForm, self.data['hydrogeology'])\n\n if self.data['hydrogeology'].get('pumping_test'):\n self.pumping_test_form = self._make_form(\n self.form.instance.pumping_test if self.form.instance.pumping_test else PumpingTest(),\n PumpingTestForm, self.data['hydrogeology']['pumping_test']\n )",
"def test_create_new_question(self):\n\n # get number of questions before post\n questions_before = Question.query.all()\n\n # create new question and load response data\n response = self.client().post('/questions', json=self.new_question)\n data = json.loads(response.data)\n\n # get number of questions after post\n questions_after = Question.query.all()\n\n # see if the question has been created\n question = Question.query.filter_by(id=data['created']).one_or_none()\n\n # check status code and success message\n self.assertEqual(response.status_code, 200)\n self.assertEqual(data['success'], True)\n\n # check if one more question after post\n self.assertTrue(len(questions_after) - len(questions_before) == 1)\n\n # check that question is not None\n self.assertIsNotNone(question)",
"def test_meeting_poll_create(self):\n pass",
"def create_question(user,title='title',text='text'):\n return Question.objects.create(created_by=user, title=title, text=text)"
] | [
"0.68965745",
"0.664663",
"0.66033286",
"0.6529158",
"0.64919966",
"0.64177126",
"0.6361779",
"0.6354872",
"0.6351356",
"0.6324996",
"0.62474936",
"0.62169385",
"0.6213089",
"0.62068975",
"0.6191489",
"0.6160336",
"0.61465067",
"0.6138712",
"0.6129136",
"0.61282086",
"0.6116563",
"0.61049587",
"0.6067563",
"0.6061492",
"0.60264516",
"0.59967256",
"0.59941244",
"0.5980024",
"0.5972787",
"0.59696853"
] | 0.7856849 | 0 |
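For orientation, a minimal self-contained sketch of the choice-padding idiom used by create_dummy_form above; create_random_string here is a hypothetical stand-in for the project's helper, and the Django form class itself is omitted.

import random
import string

def create_random_string(length=10):
    # Hypothetical stand-in for the helper assumed by create_dummy_form above.
    return ''.join(random.choices(string.ascii_letters, k=length))

def build_choices(fill_choice, choice_length, n_slots=8):
    # Pad every slot with None, then fill only the requested indices,
    # falling back to a length of 10 when no explicit length is given.
    choices = [None] * n_slots
    for i in fill_choice:
        try:
            length = choice_length[i]
        except IndexError:
            length = 10
        choices[i] = create_random_string(length)
    return choices

print(build_choices(fill_choice=[0, 2], choice_length=[5]))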
Converts an array with WCS to altitude and azimuth coordinates | def getAltAz(arr,header,time,location):
soln = wcs.WCS(header)
coords = cartesian([arange(arr.shape[1]),arange(arr.shape[0])])
world = soln.wcs_pix2world(coords,0)
radec = SkyCoord(ra=world[:,0],dec=world[:,1],frame='icrs',unit='deg')
    altaz = radec.transform_to(AltAz(obstime=time,location=location))
return altaz.alt.deg,altaz.az.deg,coords[:,0],coords[:,1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def offset_to_altaz(xoff, yoff, azimuth, altitude):\n #Deal with situations where offset = 0?\n\n d = sqrt(xoff*xoff+yoff*yoff)\n pos = np.where(d==0)\n d=1e-12 * u.deg # add a very small offset to prevent math errors\n\n q = arctan(d.to(u.rad).value)\n\n sq = sin(q)\n xp1 = xoff * (sq/d)\n yp1 = yoff * (sq/d)\n zp1 = cos(q)\n\n cx = sin(altitude)\n sx = cos(altitude)\n\n xp0 = cx*xp1 - sx*zp1\n yp0 = yp1\n zp0 = sx*xp1 + cx*zp1\n\n obj_altitude = arcsin(zp0)\n obj_altitude[pos]=altitude\n obj_azimuth = arctan2(yp0,-xp0) + azimuth\n obj_azimuth[pos] = azimuth\n\n #if obj_azimuth.value < 0.:\n # obj_azimuth += 2.*pi\n #elif obj_azimuth.value >= (2.*pi ):\n # obj_azimuth -= 2.*pi\n\n return obj_altitude,obj_azimuth",
"def geo_m_v2(data_array):\n r = 6378.137 #promien ziemi w km\n delta = np.zeros(data_array.size//7-1)\n alo = data_array[0][1]\n ala = data_array[0][2]\n count = 0\n for row in data_array[1:]:\n dLat = (row[2] - ala) * math.pi/180.0\n dLon = (row[1] - alo) * math.pi/180.0\n a = math.sin(dLat/2.0)**2 + math.cos(ala * math.pi/180.0) * math.cos(row[2] * math.pi/180.0)\\\n * math.sin(dLon/2.0)**2\n delta[count] = r * 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))#w km\n count += 1\n alo = row[1]\n ala = row[2]\n return delta",
"def convert_coords(date, time_steps, azs, els, obs):\n coord_start_day = datetime(date.year, date.month, date.day)\n \n strategy = []\n for time_step, az, el in zip(time_steps, azs, els):\n if az % np.pi == 0.0: \n az += EPS\n \n ra, dec = sphere.altaz_to_ra_dec(coord_start_day + timedelta(hours=time_step), az, el, obs)\n strategy.append([ra, dec])\n \n return np.array(strategy)",
"def wind_adjust_func(uz_array, zw):\n return uz_array * 4.87 / np.log(67.8 * zw - 5.42)",
"def altaz_to_offset(obj_azimuth,obj_altitude,azimuth,altitude):\n\n daz = obj_azimuth - azimuth\n coa = cos(obj_altitude)\n\n xp0 = -cos(daz) * coa\n yp0 = sin(daz) * coa\n zp0 = sin(obj_altitude)\n\n cx = sin(altitude)\n sx = cos(altitude)\n\n xp1 = cx*xp0 + sx*zp0\n yp1 = yp0\n zp1 = -sx*xp0 + cx*zp0\n\n q = arccos(zp1)\n d = tan(q)\n alpha = arctan2(yp1,xp1)\n\n xoff = d * cos(alpha)\n yoff = d * sin(alpha)\n\n return xoff,yoff",
"def ecef2LatLonAlt(x, y, z):\n\n # Calculate the polar eccentricity\n ep = np.sqrt((EARTH.EQUATORIAL_RADIUS**2 - EARTH.POLAR_RADIUS**2)/(EARTH.POLAR_RADIUS**2))\n\n # Calculate the longitude\n lon = np.arctan2(y, x)\n\n p = np.sqrt(x**2 + y**2)\n\n theta = np.arctan2( z*EARTH.EQUATORIAL_RADIUS, p*EARTH.POLAR_RADIUS)\n\n # Calculate the latitude\n lat = np.arctan2(z + (ep**2)*EARTH.POLAR_RADIUS*np.sin(theta)**3, \\\n p - (EARTH.E**2)*EARTH.EQUATORIAL_RADIUS*np.cos(theta)**3)\n\n # Get distance from Earth centre to the position given by geographical coordinates, in WGS84\n N = EARTH.EQUATORIAL_RADIUS/math.sqrt(1.0 - (EARTH.E**2)*math.sin(lat)**2)\n\n \n # Calculate the height in meters\n\n # Correct for numerical instability in altitude near exact poles (and make sure cos(lat) is not 0!)\n if((np.abs(x) < 1000) and (np.abs(y) < 1000)):\n alt = np.abs(z) - EARTH.POLAR_RADIUS\n\n else:\n # Calculate altitude anywhere else\n alt = p/np.cos(lat) - N\n\n\n return lat, lon, alt",
"def read_elevation(filepath):\n h = 83 #distance between elevation measures\n N = 1201\n theta = np.pi / 6\n elev_array = np.zeros((N, N))\n grad_array = np.zeros((N, N, 2))\n I_array = np.zeros((N, N))\n # Read the elevation data as described in Question 3, and store in the elvation array\n f = open(filepath, \"rb\")\n for i in range(N):\n for j in range(N):\n buf = f.read(2)\n val = struct.unpack(\">h\", buf)[0]\n elev_array[i][j] = val\n f.close()\n # Populate the gradient array\n for i in range(N):\n for j in range(N):\n #This if statements handle the border cases\n if j == 0:\n grad_array[i][j][0] = (elev_array[i][j+1] - elev_array[i][j]) / h\n elif j == N - 1:\n grad_array[i][j][0] = (elev_array[i][j] - elev_array[i][j-1]) / h\n else:\n grad_array[i][j][0] = (elev_array[i][j+1] - elev_array[i][j-1]) / (2 * h)\n \n if i == 0:\n grad_array[i][j][1] = (elev_array[i][j] - elev_array[i-1][j]) / h\n elif i == N - 1:\n grad_array[i][j][1] = (elev_array[i-1][j] - elev_array[i][j]) / h\n else:\n grad_array[i][j][1] = (elev_array[i-1][j] - elev_array[i+1][j]) / (2 * h)\n \n # Populate intensities\n for i in range(N):\n for j in range(N):\n denom = np.sqrt(grad_array[i][j][0] ** 2 + grad_array[i][j][1] ** 2 + 1)\n numer = np.cos(theta) * grad_array[i][j][0] + np.sin(theta) * grad_array[i][j][1]\n I_array[i][j] = -1 * numer / denom\n \n return elev_array, I_array",
"def lambert_azimuthal(coordinate_triples, longitude_offset=pi/8,\n latitude_offset=pi/8):\n latitudes, longitudes = cartesian_to_geographical(coordinate_triples)\n k = np.sqrt(2/(1 + np.cos(latitudes - latitude_offset)\n *np.cos(longitudes - longitude_offset)))\n x_projected = (k*np.cos(latitudes - latitude_offset)\n *np.sin(longitudes - longitude_offset))\n y_projected = k*np.sin(latitudes - latitude_offset)\n return np.array([x_projected, y_projected])",
"def geo_m(data_array):\n earth_r = 12756.490 #srednica Ziemi na rowniku [km]\n delta = np.zeros(data_array.size//7-1)\n alo = data_array[0][1]\n ala = data_array[0][2]\n count = 0\n for row in data_array[1:]:\n a = (row[1] - alo) * math.cos(ala*math.pi/180.0)\n b = (row[2] - ala)\n delta[count] = math.sqrt(a*a + b*b)*math.pi*earth_r/36.0*100# wynik w m\n count += 1\n alo = row[1]\n ala = row[2]\n return delta",
"def get_altitude(points):\n altitudes = np.zeros((len(points),), dtype=\"float64\")\n for i, point in tqdm(enumerate(points), desc=\"GETTING ALTITUDE\"):\n p = Point(point[0], point[1])\n altitudes[i] = alt.NM_COTA.iloc[\n np.argmin([p.distance(alt.geometry.iloc[j]) for j in range(alt.shape[0])])\n ]\n return altitudes",
"def to_xyah(self):\n ret = self.tlwh.copy()\n ret[:2] += ret[2:] / 2\n ret[2] /= ret[3]\n return ret",
"def to_xyah(self):\n ret = self.tlwh.copy()\n ret[:2] += ret[2:] / 2\n ret[2] /= ret[3]\n return ret",
"def to_xyah(self):\n ret = self.tlwh.copy()\n ret[:2] += ret[2:] / 2\n ret[2] /= ret[3]\n return ret",
"def benthos_psa916_dict(calib, signal):\n\n #array mode\n try:\n altitude = []\n for signal_x in signal:\n temp = (300 * signal_x / calib['ScaleFactor']) + calib['Offset']\n altitude.append(temp)\n #single mode\n except:\n altitude = (300 * signal / calib['ScaleFactor']) + calib['Offset']\n return altitude",
"def horizontal_to_cartesian(altitude, azimuth):\n theta = math.pi / 2 - math.radians(altitude)\n phi = math.radians(-azimuth)\n x = math.sin(phi) * math.sin(-theta)\n y = math.sin(theta) * math.cos(phi)\n z = math.cos(theta)\n return x, y, z",
"def change_altitude_cm_m(data_array):\n data_array[:, 3] = data_array[:, 3]*0.3048\n return data_array",
"def azimuth(source):\n srcAzEl = subarrayControl.s.azel(source, 0.0);\n return srcAzEl[0];",
"def toHEC(rts):\n pathname = rts.name\n values = rts.getYArray()\n import jarray\n times = jarray.zeros(len(values),'i')",
"def test_az_za():\n Nside = 128\n obs = observatory.Observatory(latitude, longitude, fov=20, nside=Nside)\n center = [0, 0]\n lon, lat = [5, 0]\n ind0 = hp.ang2pix(Nside, lon, lat, lonlat=True)\n lon, lat = hp.pix2ang(Nside, ind0, lonlat=True)\n za, az, pix = obs.calc_azza(center, return_inds=True)\n ind = np.where(pix == ind0)\n # lon = longitude of the source, which is set to 5deg off zenith (hence, zenith angle)\n assert np.isclose(np.degrees(za[ind]), lon)\n assert np.isclose(np.degrees(az[ind]), 90.0)",
"def AEH2LatLonAlt(azim, elev, h, lat, lon, alt):\n\n # Compute the range to the point\n r = AEH2Range(azim, elev, h, lat, lon, alt)\n\n\n # Compute lat/lon/alt of the point on the line of sight\n x, y, z = AER2ECEF(azim, elev, r, lat, lon, alt)\n lat2, lon2, alt2 = ecef2LatLonAlt(x, y, z)\n lat2, lon2 = np.degrees(lat2), np.degrees(lon2)\n\n\n return lat2, lon2, alt2",
"def test_wcs_extras():\n data = np.ones([6, 6], dtype=np.float64)\n header = {'CRVAL1': 0,\n 'CRVAL2': 0,\n 'CRPIX1': 5,\n 'CRPIX2': 5,\n 'CDELT1': 10,\n 'CDELT2': 10,\n 'CUNIT1': 'arcsec',\n 'CUNIT2': 'arcsec',\n 'PC1_1': 0,\n 'PC1_2': -1,\n 'PC2_1': 1,\n 'PC2_2': 0,\n 'NAXIS1': 6,\n 'NAXIS2': 6,\n 'CTYPE1': 'HPLN-TAN',\n 'CTYPE2': 'HPLT-TAN',\n 'date-obs': '1970-01-01T00:00:00',\n 'obsrvtry': 'Foo',\n 'detector': 'bar',\n 'wavelnth': 10,\n 'waveunit': 'm',\n 'hglt_obs': 0,\n 'hgln_obs': 0,\n 'dsun_obs': 10,\n 'rsun_ref': 690000000}\n generic_map = sunpy.map.Map((data, header))\n\n wcs = generic_map.wcs\n\n assert wcs.heliographic_observer.lat.value == 0\n assert wcs.heliographic_observer.lon.value == 0\n assert wcs.heliographic_observer.radius.value == 10\n assert wcs.rsun.value == header['rsun_ref']\n\n result = solar_wcs_frame_mapping(wcs)\n\n assert isinstance(result, Helioprojective)\n assert result.observer.lat.value == 0\n assert result.observer.lon.value == 0\n assert result.observer.radius.value == 10\n assert result.rsun.value == header['rsun_ref']",
"def raDec2AltAz(ra, dec, jd, lat, lon):\n\n # Compute azim and elev using a fast cython function\n azim, elev = cyraDec2AltAz(np.radians(ra), np.radians(dec), jd, np.radians(lat), np.radians(lon))\n \n\n # Convert alt/az to degrees\n azim = np.degrees(azim)\n elev = np.degrees(elev)\n\n return azim, elev",
"def img2heliovec(bxImg,byImg,bzImg,lon,lat,lonc,latc,pAng):\n a11 = -np.sin(latc)*np.sin(pAng)*np.sin(lon - lonc) + np.cos(pAng)*np.cos(lon - lonc)\n a12 = np.sin(latc)*np.cos(pAng)*np.sin(lon - lonc) + np.sin(pAng)*np.cos(lon - lonc)\n a13 = -np.cos(latc)*np.sin(lon - lonc)\n a21 = -np.sin(lat)*(np.sin(latc)*np.sin(pAng)*np.cos(lon - lonc) + np.cos(pAng)*np.sin(lon - lonc)) - np.cos(lat)*np.cos(latc)*np.sin(pAng)\n a22 = np.sin(lat)*(np.sin(latc)*np.cos(pAng)*np.cos(lon - lonc) - np.sin(pAng)*np.sin(lon - lonc)) + np.cos(lat)*np.cos(latc)*np.cos(pAng)\n a23 = -np.cos(latc)*np.sin(lat)*np.cos(lon - lonc) + np.sin(latc)*np.cos(lat)\n a31 = np.cos(lat)*(np.sin(latc)*np.sin(pAng)*np.cos(lon - lonc) + np.cos(pAng)*np.sin(lon - lonc)) - np.sin(lat)*np.cos(latc)*np.sin(pAng)\n a32 = -np.cos(lat)*(np.sin(latc)*np.cos(pAng)*np.cos(lon - lonc) - np.sin(pAng)*np.sin(lon - lonc)) + np.sin(lat)*np.cos(latc)*np.cos(pAng)\n a33 = np.cos(lat)*np.cos(latc)*np.cos(lon - lonc) + np.sin(lat)*np.sin(latc)\n\n bxHelio = a11 * bxImg + a12 * byImg + a13 * bzImg\n byHelio = a21 * bxImg + a22 * byImg + a23 * bzImg\n bzHelio = a31 * bxImg + a32 * byImg + a33 * bzImg\n\n return bxHelio,byHelio,bzHelio",
"def parse_azimuth_elevation(filename):\n match = REGEX.match(filename)\n return int(match.group(1)), int(match.group(2))",
"def xywh_to_xyxy(boxes: np.array) -> np.array:\n boxes[..., 0] = boxes[..., 0] - boxes[..., 2]/2\n boxes[..., 1] = boxes[..., 1] - boxes[..., 3]/2\n boxes[..., 2] = boxes[..., 0] + boxes[..., 2]\n boxes[..., 3] = boxes[..., 1] + boxes[..., 3]\n return boxes",
"def extract_wind(source,la,lo,lats,lons,wd,ws):\r\n lat = source[la]\r\n lon = source[lo]\r\n wdir = []\r\n wspd = [] \r\n for coor in zip(lon,lat): \r\n in_lon = coor[0]\r\n in_lat = coor[1]\r\n # since lons are 0 thru 360, convert to -180 thru 180\r\n converted_lons = lons - ( lons.astype(np.int32) / 180) * 360\r\n # get cell of facility\r\n lat_idx = geo_idx(in_lat, lats)\r\n lon_idx = geo_idx(in_lon, converted_lons)\r\n #extract winddirection and wind speed from that cell\r\n d = wd[:,lat_idx,lon_idx][0]\r\n wdir.append(d)\r\n s = ws[:,lat_idx,lon_idx][0]\r\n wspd.append(s)\r\n \r\n return wdir,wspd",
"def tlwh_to_xyah(tlwh):\n ret = np.asarray(tlwh).copy()\n ret[:2] += ret[2:] / 2\n ret[2] /= ret[3]\n return ret",
"def azel2radec(az,el,mjd,lat=47.8781,lon=-87.6298):\n \n T_UT1 = (mjd-51544.5)/36525;\n ThetaGMST = 67310.54841 + (876600*3600 + 8640184.812866)*T_UT1 + \\\n .093104*(T_UT1**2) - (6.2e-6)*(T_UT1**3)\n ThetaGMST = np.mod((np.mod(ThetaGMST,86400*(ThetaGMST/np.abs(ThetaGMST)))/240),360)\n ThetaLST = ThetaGMST + lon\n \n DEC = asind(sind(el)*sind(lat)+cosd(el)*cosd(lat)*cosd(az))\n LHA = atand2(-sind(az)*cosd(el)/cosd(DEC), \n (sind(el)-sind(DEC)*sind(lat))/(cosd(DEC)*cosd(lat)))*(180/np.pi);\n RA = np.mod(ThetaLST-LHA,360);\n \n return RA,DEC",
"def read_affine(file):\n data = open(file, 'r').read()\n data = data.split('\\n')\n for i in range(1, 5):\n data[i] = data[i].split(':')\n int_lon = np.fromstring(data[1][1], dtype='float', sep=',')\n int_lat = np.fromstring(data[2][1], dtype='float', sep=',')\n Nlon = len(int_lon) - 1\n Nlat = len(int_lat) - 1\n data[3][1] = data[3][1].split(',')\n data[4][1] = data[4][1].split(',')\n lon_transform = np.zeros((Nlon, 2))\n lat_transform = np.zeros((Nlat, 2))\n for i in range(Nlon):\n data[3][1][i] = data[3][1][i].split(' ')\n lon_transform[i] = [data[3][1][i][0], data[3][1][i][1]]\n for i in range(Nlat):\n data[4][1][i] = data[4][1][i].split(' ')\n lat_transform[i] = [data[4][1][i][0], data[4][1][i][1]]\n lon_transform = np.array(lon_transform).astype('float')\n lat_transform = np.array(lat_transform).astype('float')\n return int_lon, int_lat, lon_transform, lat_transform",
"def GPSlatlon2XY(data_sheet, origin, theta):\n\n\tlon = np.array([[data_sheet.cell(row = i, column = 1).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tlat = np.array([[data_sheet.cell(row = i, column = 2).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\n\tlon_u = np.array([[data_sheet.cell(row = i, column = 5).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tlat_u = np.array([[data_sheet.cell(row = i, column = 6).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\tUz = np.array([[data_sheet.cell(row = i, column = 4).value] for i in range(2, data_sheet.max_row+1)]).reshape(data_sheet.max_row-1, )\n\n\tlon_in_km = (lon - origin[0])*111*np.cos(lat*np.pi/180)\n\tlat_in_km = (lat - origin[1])*111\n\t\n\trho_u = np.sqrt(np.power(lon_u,2) + np.power(lat_u,2))\n\ttheta_new_u = np.arctan2(lat_u,lon_u) - theta\n\n\trho = np.sqrt(np.power(lon_in_km,2) + np.power(lat_in_km,2))\n\ttheta_new = np.arctan2(lat_in_km,lon_in_km) - theta\n\n\tX, Y = rho*np.cos(theta_new), rho*np.sin(theta_new)\n\tUx, Uy = rho_u*np.cos(theta_new_u), rho_u*np.sin(theta_new_u)\n\n\treturn 1e3*X, 1e3*Y, 1e-3*Ux, 1e-3*Uy, 1e-3*Uz"
] | [
"0.55827683",
"0.544639",
"0.5397533",
"0.53178024",
"0.53166306",
"0.52501696",
"0.5224251",
"0.52204317",
"0.5159298",
"0.5143273",
"0.5093226",
"0.5093226",
"0.5093226",
"0.50815976",
"0.50683326",
"0.506293",
"0.50404876",
"0.5039063",
"0.501149",
"0.5002514",
"0.4954378",
"0.49539453",
"0.49454576",
"0.49377787",
"0.4935175",
"0.49195683",
"0.4906264",
"0.4902345",
"0.4895631",
"0.48829803"
] | 0.6210394 | 0 |
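As a side note, a runnable sketch of the RA/Dec to Alt/Az step that getAltAz above builds on, assuming only astropy; the WCS pixel-to-world conversion and the FITS header are left out, and the site coordinates and time are made up for illustration.

import numpy as np
import astropy.units as u
from astropy.coordinates import AltAz, EarthLocation, SkyCoord
from astropy.time import Time

# Made-up observing site and time, purely for illustration.
site = EarthLocation(lat=28.76 * u.deg, lon=-17.88 * u.deg, height=2300 * u.m)
obstime = Time('2021-06-01T02:30:00')

radec = SkyCoord(ra=np.array([10.0, 11.0]) * u.deg,
                 dec=np.array([41.0, 41.5]) * u.deg, frame='icrs')
altaz = radec.transform_to(AltAz(obstime=obstime, location=site))
print(altaz.alt.deg, altaz.az.deg)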
Rotates the ADP of 'atom' to match the orientation of 'source_atom'. | def rotate_3D(atom, source_atom):
from lauescript.cryst.match import get_transform
lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]
lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]
matrix = get_transform(lst1, lst2, matrix=True)
adp = source_atom.adp['cart_int']
atom.adp['cart_int'] = rotate_adp(adp, matrix) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rotation_alignment(referent_shape, current_shape):\n numerator = 0.\n denominator = 0.\n\n for i in range(len(referent_shape.points)):\n numerator += current_shape.points[i, 0] * referent_shape.points[i, 1] - current_shape.points[i, 1] * referent_shape.points[i, 0]\n denominator += current_shape.points[i, 0] * referent_shape.points[i, 0] + current_shape.points[i, 1] * referent_shape.points[i, 1]\n\n return math.atan2(numerator, denominator)",
"def rotate_ADP_about_axis(ADP, angle, axisDirection):\n adp = get_adp_as_matrix(ADP)\n u, v = np.linalg.eig(adp)\n startPoints = [v[:, i].flatten().tolist()[0] for i in xrange(3)]\n endPoints = [rotate_point_about_axis(point, angle, axisDirection, (0, 0, 0)) for point in startPoints]\n rotMat = get_transform(startPoints, endPoints, matrix=True).transpose()\n newadp = np.dot(rotMat.transpose(), np.dot(adp, rotMat))\n return newadp[0, 0], newadp[1, 1], newadp[2, 2], newadp[0, 1], newadp[0, 2], newadp[1, 2]",
"def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)",
"def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._normal = matrix.dot(self._normal)\n self._position = matrix.dot(self._position)",
"def align(image):\n angle = find_angle(image)\n image = rotate(image, angle)\n return image",
"def setAzEncoderOffset(ant) :\n \n # Retrieve current azimuth offset (arcmin), elevation (degrees) \n # and az encoder pointing offset (implementation specific).\n\n azOffMpName = \"Control.Antenna%d.azimuthOffset\"%ant\n pointingConstants = pointingSetup( ant )\n\n if device.CarmaAnt().isOvro(ant):\n actualElMpName = \"Ovro%d.AntennaCommon.Drive.Track.actualElevation\"%ant\n elif device.CarmaAnt().isBima(ant): \n bimaAntNo = ant - 6\n actualElMpName = \"Bima%d.AntennaCommon.Drive.Track.actualElevation\"%bimaAntNo\n [azOffset,actualEl ] = queryMpValues([azOffMpName, actualElMpName])\n elif device.CarmaAnt().isSza(ant): \n szaAntNo = ant - 15\n actualElMpName = \"Sza%d.AntennaCommon.Drive.Track.actualElevation\"%szaAntNo\n [azOffset,actualEl ] = queryMpValues([azOffMpName, actualElMpName])\n else:\n raise Exception, \"Invalid ant\"\n\n [azOffset,actualEl ] = queryMpValues([azOffMpName, actualElMpName])\n cosEl = math.cos( actualEl * math.pi / 180.0 )\n\n if device.CarmaAnt().isOvro(ant):\n pointingConstants[0] = pointingConstants[0] + azOffset/cosEl\n ovroMountPointingConstants( pointingConstants[0],\n pointingConstants[1],\n pointingConstants[2],\n pointingConstants[3],\n pointingConstants[4], ant )\n elif device.CarmaAnt().isBima(ant): \n pointingConstants[0][0] = pointingConstants[0][0] + azOffset/cosEl\n bimaMountPointingConstants( pointingConstants[0], pointingConstants[1], ant )\n elif device.CarmaAnt().isSza(ant): \n # For SZA, the az zero (term 7 in the pointing constants) is in degrees \n pointingConstants[6] += ( ( azOffset/cosEl ) / 60.0 );\n # Avoid having to spell out all 19 arguments by using the special \n # form '*args' with a list of ordered args.\n args = pointingConstants \n args.append( ant )\n szaMountPointingConstants( *args )\n else:\n raise Exception, \"Invalid ant\"\n\n return offset(0, 0, ant)",
"def _rotate_about_origin(self, angle, axis):\n print 'Invoked abstract {}._rotate_about_origin({}, {})'.format(\n self, angle, axis)\n return",
"def orientate(self, node):\n\t\tfor i in self.SM(node):\n\t\t\tif node in self.E[i]:\n\t\t\t\tself.directArc(i,node)",
"def _rotate(self, angle):\n if self.undobuffer:\n self.undobuffer.push((\"rot\", angle, self._degreesPerAU))\n angle *= self._degreesPerAU\n neworient = self._orient.rotate(angle)\n tracing = self.screen._tracing\n if tracing == 1 and self._speed > 0:\n anglevel = 3.0 * self._speed\n steps = 1 + int(abs(angle)/anglevel)\n delta = 1.0*angle/steps\n for _ in range(steps):\n self._orient = self._orient.rotate(delta)\n self._update()\n self._orient = neworient\n self._update()",
"def addOffsetRotation(self, point):\n\n ox, oy, oz = OpenMaya.MVector(0.0, 0.0, 0.0)\n px, py, pz = point\n\n # Z Rotation\n if self.offset_rotation.z != 0.0:\n point = self.rotateZ(point)\n\n # Y Rotation\n if self.offset_rotation.y != 0.0:\n point = self.rotateY(point)\n\n # X Rotation\n if self.offset_rotation.x != 0.0:\n point = self.rotateX(point)\n\n return point",
"def _rotate_about_origin(self, angle, axis):\n matrix = rotation_matrix(angle, axis)\n self._center = matrix.dot(self._center)",
"def test_align_invert(self):\n al = align(self.amp1, self.amp2, inverse=False)\n\n al_inv = align(self.amp2, self.amp1, inverse=True)\n\n print(al.R)\n print(al_inv.R)\n\n print(al.T)\n print(al_inv.T)",
"def rotate_local(self, angle, axis=(0., 0., 1.)):\n self.rotation *= aa2q(angle, glm.vec3(axis))",
"def move_to_angle(alpha, theta, pan_pos = 0, tilt_pos = 0, base_rate = 500, object_distance_hint = -1):\n\n if abs(alpha) > absolute_pan_limit or abs(theta) > absolute_tilt_limit:\n return (-1, -1)\n\n # First calculate pan movement\n # TODO Account for displacement perpendicular to pan axis.\n # Similar calculation to tilt displacement but will have\n # to take into account left or right of axis.\n pan_steps = int(alpha / pan_deg_per_step) - pan_pos\n\n # Calculate compensation for sensor displacement\n # if object distance hint is specified.\n theta_comp_deg = 0.0\n\n if object_distance_hint > 0:\n # Cannot look \"back\"\n if object_distance_hint < sensor_displacement:\n return (-1, -1, 0, 0)\n # Compute angle compensation and compare to system's step resolution.\n # No need to bother correcting an angle that the motors cannot reach.\n angle_sensitivity = deg_per_step / gear_ratio / micro_steps\n theta_comp = math.asin(sensor_displacement / object_distance_hint)\n theta_comp_deg = theta_comp * 180.0 / math.pi\n #print(f'sensitivity={angle_sensitivity}, comp={theta_comp}[rad]/{theta_comp_deg}[deg]')\n if theta_comp_deg < angle_sensitivity:\n theta_comp_deg = 0.0\n\n # Calculate tilt movement\n tilt_steps = pan_steps + (int(round((theta - theta_comp_deg) / tilt_deg_per_step)) - tilt_pos)\n\n # Calculate relative step rate per motor and output as list\n max_delta = max(abs(pan_steps), abs(tilt_steps))\n\n if max_delta > 0:\n return (abs(pan_steps), abs(tilt_steps), int(round(base_rate * pan_steps / max_delta)), int(round(base_rate * tilt_steps / max_delta)))\n else:\n return (-1, -1, 0, 0)",
"def set_angel(self):\n self.angle = math.degrees(math.atan2(self.next.y - self.y, self.next.x - self.x)\n - math.atan2(self.prev.y - self.y, self.prev.x - self.x))\n\n if self.angle < 0:\n self.angle += 360",
"def rotate(self):\n\n self.pins = self.pins[1:] + list(self.pins[0])\n self.mapping = self.mapping[1:] + list(self.mapping[0])",
"def initRelativeRotation(self):\n self.__relRotationStartValue = self.rotation()",
"def align(self):\n number_of_Xs = 0\n xFront = \"\"\n xEnd = \"\"\n dashFront = \"\"\n dashEnd = \"\"\n\n # Determining if variable amino acids (\"X\") need to be added to the\n\t # beginning of the sequence:\n z = self.hmmStart-self.seqStart\n number_of_Xs = (self.hmmStart-1)-z\n if z > 0:\n dashFront = \"-\"*z\n xFront = \"X\"*number_of_Xs\n elif self.hmmStart-1<=self.seqStart-1:\n xFront = \"X\"*(self.hmmStart-1) \n\n # Determining if variable amino acids (\"X\") need to be added to the \n # end of the sequence:\n number_of_Xs_end = self.hmmLength - self.hmmEnd\n\n # The original sequence length; SPA format includes this\n delimeter = \"|\" #Need to fix can be \"_\" or \"|\" or something else...\n \n distToSeqEnd = self.origSeqLength - seqTo\n if distToSeqEnd >= number_of_Xs_end and number_of_Xs_end != self.hmmLength:\n xEnd = 'X'*number_of_Xs_end\n else:\n if distToSeqEnd < number_of_Xs_end:\n xEnd = 'X'*distToSeqEnd\n \tdashEnd += \"-\"*(number_of_Xs_end-distToSeqEnd)\n \t\n begin = \"{}{}\".format(dashFront, xFront)\n end = \"{}{}\".format(xEnd, dashEnd)\n self.addToFront(begin)\n self.data.extend(end)\n self.original = str(self)",
"def anatomical_reorient_workflow(workflow, resource_pool, config, name=\"_\"):\n\n import nipype.pipeline.engine as pe\n from nipype.interfaces.afni import preprocess\n\n if \"anatomical_scan\" not in resource_pool.keys():\n return workflow, resource_pool\n\n anat_deoblique = pe.Node(interface=preprocess.Refit(),\n name='anat_deoblique%s' % name)\n\n anat_deoblique.inputs.in_file = resource_pool[\"anatomical_scan\"]\n anat_deoblique.inputs.deoblique = True\n\n anat_reorient = pe.Node(interface=preprocess.Resample(),\n name='anat_reorient%s' % name)\n\n anat_reorient.inputs.orientation = 'RPI'\n anat_reorient.inputs.outputtype = 'NIFTI_GZ'\n\n workflow.connect(anat_deoblique, 'out_file', anat_reorient, 'in_file')\n\n resource_pool[\"anatomical_reorient\"] = (anat_reorient, 'out_file')\n\n return workflow, resource_pool",
"def apply_rotation_x(self, eta=0.0 ):\n \n eta = radians(eta)\n new_rotation_matrix = [[ 1 , 0 , 0 ],\n [ 0 , +cos(eta) , -sin(eta) ],\n [ 0 , +sin(eta) , +cos(eta) ]] \n \n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )",
"def orientation_ras_lps(affine: NdarrayTensor) -> NdarrayTensor:\n sr = max(affine.shape[0] - 1, 1) # spatial rank is at least 1\n flip_d = [[-1, 1], [-1, -1, 1], [-1, -1, 1, 1]]\n flip_diag = flip_d[min(sr - 1, 2)] + [1] * (sr - 3)\n if isinstance(affine, torch.Tensor):\n return torch.diag(torch.as_tensor(flip_diag).to(affine)) @ affine # type: ignore\n return np.diag(flip_diag).astype(affine.dtype) @ affine # type: ignore",
"def alignment_start_angle(angle=0.10):\n\n smi = SMI_Beamline()\n yield from smi.modeAlignment()\n\n # Set reflected beam ROI\n yield from smi.setReflectedBeamROI(total_angle=angle, technique=\"gisaxs\")",
"def obInit(position, angle, center):\n\tif angle > 360.0:\n\t angle = angle - 360\n\tif angle < - 360:\n\t angle = -angle - 360\n\tif angle > -360 and angle < 0:\n\t angle = -angle\n\tadjPosition = position - center\n\tnewposition = adjPosition.rotate(angle) + center\n\treturn newposition",
"def rotate(self, increment):\n\n # the aiden rule works for positive OR negative\n adjusted_index = (increment + self.head) % len(self.array)\n self.head = adjusted_index",
"def align_one(ptcl,ref,prefilt,align,aligncmp,ralign,raligncmp):\n\n\tif prefilt : ref=ref.process(\"filter.matchto\",{\"to\":ptcl})\n\n\t# initial alignment\n\tif align!=None :\n\t\tali=ptcl.align(align[0],ref,align[1],aligncmp[0],aligncmp[1])\n\n\t# refine alignment if requested\n\tif ralign!=None:\n\t\tralign[1][\"xform.align2d\"] = ali.get_attr(\"xform.align2d\")\n\t\tali=ptcl.align(ralign[0],ref,ralign[1],raligncmp[0],raligncmp[1])\n\n\treturn ali",
"def rotate(self, angle, reshape=False):\n return IntensityMap.rotate(self, angle, reshape=reshape)",
"def adjustToNewAngle(self):\n\n self.a,self.b,self.c = parametersFromPointAngle( 0.5*(self.point1+self.pointN), self.newAngle)\n\n #print 'adjustToNewAngle ', self, self.angle, self.newAngle\n self.angle = self.newAngle\n self.normalv = numpy.array( [ self.a, self.b ])\n self.unitv = numpy.array( [ self.b, -self.a ])\n if abs(self.angle) > numpy.pi/2 :\n if self.b > 0: self.unitv *= -1\n elif self.b<0 : self.unitv *= -1\n\n self.point1 = self.projectPoint(self.point1) # reset point1 \n if self.next is None or not self.next.isSegment():\n # move the last point (no intersect with next)\n\n pN = self.projectPoint(self.pointN)\n dirN = pN - self.point1 \n lN = length(pN, self.point1)\n self.pointN = dirN/lN*self.length + self.point1\n #print ' ... adjusting last seg angle ',p.dump() , ' normalv=', p.normalv, 'unitv ', p.unitv\n else:\n self.setIntersectWithNext()",
"def do_altangle(self):\n nave = 10000\n x, y, z, angle = cbp.phidget.main(nave)\n current_angle = angle\n #print(current_angle)\n self.altangle = current_angle\n return current_angle",
"def rotate_adp2(adp, rotmat, cell):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmat = np.linalg.inv(rotmat)\n rotmatT = np.transpose(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0],\n [0, 1 / cell[1], 0],\n [0, 0, 1 / cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]",
"def get_inplane_angle(ima,ref, iring=1, fring=-1, ringstep=1, xtransSearch=0, ytransSearch=0, stp=1, center=1):\n\n\tfrom alignment import Numrinit, ringwe, Applyws, ormq\n\tfrom filter import fshift\n\n\tfirst_ring=int(iring); last_ring=int(fring); rstep=int(ringstep); xrng=int(xtransSearch); yrng=int(ytransSearch); step=int(stp)\t\n\tnx=ima.get_xsize()\n\tif(last_ring == -1): last_ring=int(nx/2)-2\n\tcnx = int(nx/2)+1\n \tcny = cnx\n \tmode = \"F\"\n \t#precalculate rings\n\tnumr = Numrinit(first_ring, last_ring, rstep, mode)\n \twr = ringwe(numr, mode)\n\tif(center==1):\n\t\tcs = [0.0]*2 # additio\n\t\tcs = ref.phase_cog()\n\t\tref1 = fshift(ref, -cs[0], -cs[1])\n\t\tcimage=Util.Polar2Dm(ref1, cnx, cny, numr, mode)\n\t\tcs = ima.phase_cog()\n\t\tima1 = fshift(ima, -cs[0], -cs[1])\n\telse:\n\t\tima1=ima.copy()\n\t\tcimage=Util.Polar2Dm(ref, cnx, cny, numr, mode)\n\tUtil.Frngs(cimage, numr)\n\tApplyws(cimage, numr, wr)\n\t[angt, sxst, syst, mirrort, peakt]=ormq(ima1, cimage, xrng, yrng, step, mode, numr, cnx, cny)\n\treturn angt,sxst, syst, mirrort, peakt"
] | [
"0.54740787",
"0.54063845",
"0.53933036",
"0.5287055",
"0.5184366",
"0.5183668",
"0.5156844",
"0.50909144",
"0.50790036",
"0.50740695",
"0.50218683",
"0.50043344",
"0.499874",
"0.49680153",
"0.49113643",
"0.49104938",
"0.4907334",
"0.489211",
"0.48913074",
"0.48873633",
"0.48745838",
"0.48705223",
"0.48688522",
"0.48675933",
"0.4849631",
"0.48493025",
"0.48284987",
"0.48226345",
"0.48057982",
"0.4765026"
] | 0.6927287 | 0 |
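For reference, rotate_adp itself is not shown in the record above; a common convention for rotating a Cartesian ADP tensor is the similarity transform sketched below with numpy, which is an assumption about its behaviour rather than the project's actual implementation.

import numpy as np

def rotate_adp_cartesian(U, R):
    # A symmetric displacement tensor transforms as U' = R U R^T.
    return R @ U @ R.T

theta = np.radians(30.0)
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
U = np.diag([0.02, 0.03, 0.05])  # example ADP in its principal-axis frame
print(rotate_adp_cartesian(U, R))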
Reads the measured ADP from the xd.res file. The parameters are stored in atom.adp['frac_meas'] and atom.adp['cart_meas'] | def read_meas_adp(data, path='xd.res', use='meas'):
use2 = 'frac_' + use
switch = False
filepointer = open(path, 'r')
atomname = None
for line in filepointer:
if switch:
split = [i for i in line.split(' ') if len(i) > 0]
if not len(split) == 6:
                print('WARNING!!! Inconsistent number of floats while '
                      'reading measured ADP.')
data['exp'][atomname].adp[use2] = split
switch = False
if '(' in line:
split = [i for i in line.split(' ') if len(i) > 0]
if split[0][-1] == ')':
switch = True
atomname = split[0]
use = 'cart_' + use
for atom in data['exp'].atoms:
# if use == 'cart_neut': print(atom)
atom.adp[use] = rotate_adp2(atom.adp[use2],
atom.molecule.frac2cartmatrix,
atom.molecule.cell)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def R_adp(data):\n printer('S_adp = ?')\n printer('R_adp = | (U_iso_xxx - U_iso_obs) / U_iso_obs |')\n printer('mean = sum((U_iso_xxx - U_iso_obs) / U_iso_obs) / n')\n printer('abs = sum(R_adp) / n\\n')\n printer('(geometric mean is used)\\n')\n\n printer(' | ADP_calc / ADP_obs | APD_tls / ADP_obs')\n printer(' |--------------------|-------------------')\n printer(' Atom | S_adp | R_adp | S_adp | R_adp')\n printer(' ===============================================')\n S_sum = []\n R_sum = []\n S_sum_tls = []\n R_sum_tls = []\n for atom in data['exp'].atoms:\n if not atom.element == 'H':\n U_rel_calc = cg.Uiso(atom.adp['cart_sum'])\n U_rel_obs = cg.Uiso(atom.adp['cart_meas'])\n R_adp = (U_rel_calc - U_rel_obs) / U_rel_obs\n R_sum.append(R_adp)\n S_adp = ws06(atom.adp['cart_sum'], atom.adp['cart_meas'])\n S_sum.append(S_adp)\n\n U_rel_tls = cg.Uiso(atom.adp['cart_ext'])\n R_tls = (U_rel_tls - U_rel_obs) / U_rel_obs\n R_sum_tls.append(R_tls)\n\n S_tls = ws06(atom.adp['cart_ext'], atom.adp['cart_meas'])\n S_sum_tls.append(S_tls)\n\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format(atom.name,\n S_adp,\n abs(R_adp),\n S_tls,\n abs(R_tls)))\n\n printer(' ------|----------|---------|----------|--------')\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format('mean',\n np.mean(S_sum),\n np.mean(R_sum),\n np.mean(S_sum_tls),\n np.mean(R_sum_tls)))\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format('abs',\n np.mean(S_sum),\n np.mean([abs(i) for i in R_sum]),\n np.mean(S_sum_tls),\n np.mean(\n [abs(i) for i in R_sum_tls])))\n printer(' {0:5s}| {1:4.2f} | {2:4.2f} | {3:4.2f} | {4:4.2f}'.format('SD',\n np.std(S_sum),\n np.std(R_sum),\n np.std(S_sum_tls),\n np.std(R_sum_tls)))\n if config.arg('correlate'):\n printer('\\n\\'mean R_adp (ADP_calc / ADP_obs)\\' can be\\ninterpreted as the ratio 1 - (ADP_int / ADP_obs).')\n else:\n printer('\\n\\'mean R_adp (ADP_tls / ADP_obs)\\' can be\\ninterpreted as the ratio 1 - (ADP_obs / ADP_int).')",
"def readAD(self):\n\n fname = self.ad_file\n print \"reading ad file \", fname, \" curdir = \", os.getcwd()\n try:\n fh = open(fname,'r')\n self.lines_ad = fh.readlines()\n fh.close()\n except:\n sys.stdout.write (\"Error opening {:}\\n\".format(fname))\n return 0\n\n for i in range(len(self.lines_ad)):\n ln = self.lines_ad[i].split() \n if (len(ln) >1):\n if (ln[1] == \"NumFoil\"):\n self.nSeg = int(ln[0])\n break\n if (ln[1] == \"WindFile\" and self.wind_file == None):\n self.wind_file = ln[0][1:-1]\n self.af_dict = {}\n self.af_dict['polar_idx'] = [0]*self.nSeg\n self.af_dict['polar_files'] = [0]*self.nSeg\n print \"ln, nSeg, i\", ln, self.nSeg, i\n for j in range(self.nSeg):\n lnidx = i+1+j\n ln = self.lines_ad[lnidx].split()\n afpath = fix_path(ln[0].strip().strip(\"\\\"\").strip(\"\\'\"))\n ln[0] = \"\\\"%s\\\"\" % afpath\n self.lines_ad[lnidx] = unsplit(ln)\n self.af_dict['polar_idx'][j] = j+1\n self.af_dict['polar_files'][j] = afpath",
"def ADF(self, dP, ax):\n from scipy.special import sph_harm\n ang = self._ang_part(dP)\n #scipy defines their harmonics to have `theta` be azimuthal, which is\n #opposite from physics.\n #we set $m = 0$ so that the azimuthal part doesn't contribute at all.\n result = np.zeros(len(ax))\n for l, p in ang.items():\n Ylm = sph_harm(0, l, 0, ax)*np.sqrt(2*l+1)\n #We are interested in the c* c of this value, which is multiplied\n #together to get pissnnl.\n result += p*np.sqrt(np.absolute(Ylm*Ylm.conjugate()))\n return result",
"def _update_adp_calculation(self, Temp):\n from sys import stdout\n\n self.printer('\\n ...calculating ADPs...\\n')\n\n import time\n\n start = time.time()\n\n daba_counter = 0.\n max_counter = float(len(self.keys()))\n for molecule in self.keys():\n daba_counter += 1.\n\n pstate = daba_counter / max_counter\n pstate = int(58 * pstate)\n bar = '[' + pstate * '#' + (58 - pstate) * '-' + ']'\n print ' | {}\\r'.format(bar),\n stdout.flush()\n\n try:\n self[molecule].get_adp(Temp)\n\n except KeyError:\n self.errorlog.write('Error: No ADP calculated by atom.get_adp() for {}.'.format(molecule))\n end = time.time()\n self.printer('\\n\\n Time used for ADP calculation: {:5.3f} sec on {} CPUs'.format(end - start, 1))",
"def ADP (self):",
"def read_raw_data(self, meas_name=''):\n if meas_name:\n self.selected_measure = meas_name\n else:\n meas_name = self.selected_measure\n\n is_big_endian = self._pna.data_endianess == 'big'\n data_request = 'CALCulate{}:DATA? SDATA'.format(self._channel)\n if self._pna.data_format == 'REAL,+32':\n data = self._pna.query_binary_values(data_request, datatype='f',\n is_big_endian=is_big_endian,\n container=np.ndarray)\n\n elif self._pna.data_format == 'REAL,+64':\n data = self._pna.query_binary_values(data_request, datatype='d',\n is_big_endian=is_big_endian,\n container=np.ndarray)\n\n elif self._pna.data_format == 'ASC,+0':\n data = self._pna.query_ascii_values(data_request, converter='f',\n container=np.ndarray)\n\n else:\n raise InstrIOError(cleandoc('''Agilent PNA did not return the\n channel {} formatted data for meas {}'''.format(\n self._channel, meas_name)))\n\n return data[::2] + 1j*data[1::2]",
"def getMeasurement(self):\n if(ADConverterSettings.useRealAD):\n print \"Real AD not activated\"\n #return self.adcdac.read_adc_voltage(1)\n else:\n return self.__readMeasurementFromFile()",
"def calc_dda(self, feedrate, spm):\n\n second_const = 60\n micro_second_const = 1000000\n #dda = micro_second_const / (feedrate * spm)\n dda = second_const * micro_second_const / (feedrate * spm) #Assuming feedrate in mm/min\n return dda",
"def format_gcc_dsp(self):\n result_x, doa_from_file = self.load_audio()\n\n return result_x, doa_from_file",
"def _transfer_adp(self):\n toleratedAtoms = []\n for atom in self['exp'].atoms:\n tolerated = atom.transfer_adp()\n if tolerated:\n toleratedAtoms.append(tolerated)\n for atom in toleratedAtoms:\n atom.averageADP()",
"def process_point_measurement(procstatus, dscfg, radar_list=None):\n if procstatus != 1:\n return None, None\n\n for datatypedescr in dscfg['datatype']:\n radarnr, datagroup, datatype, dataset, product = get_datatype_fields(\n datatypedescr)\n break\n field_name = get_fieldname_pyart(datatype)\n ind_rad = int(radarnr[5:8])-1\n if ((radar_list is None) or (radar_list[ind_rad] is None)):\n warn('ERROR: No valid radar')\n return None, None\n radar = radar_list[ind_rad]\n\n if field_name not in radar.fields:\n warn('Unable to extract point measurement information. ' +\n 'Field not available')\n return None, None\n\n projparams = dict()\n projparams.update({'proj': 'pyart_aeqd'})\n projparams.update({'lon_0': radar.longitude['data']})\n projparams.update({'lat_0': radar.latitude['data']})\n\n if dscfg['latlon']:\n lon = dscfg['lon']\n lat = dscfg['lat']\n alt = dscfg['alt']\n x, y = pyart.core.geographic_to_cartesian(lon, lat, projparams)\n\n if not dscfg['truealt']:\n ke = 4./3. # constant for effective radius\n a = 6378100. # earth radius\n re = a * ke # effective radius\n\n elrad = dscfg['ele'] * np.pi / 180.\n r_ground = np.sqrt(x ** 2. + y ** 2.)\n r = r_ground / np.cos(elrad)\n alt_radar = radar.altitude['data']+np.sqrt(\n r ** 2. + re ** 2. + 2. * r * re * np.sin(elrad)) - re\n alt_radar = alt_radar[0]\n else:\n alt_radar = dscfg['alt']\n\n r, az, el = pyart.core.cartesian_to_antenna(\n x, y, alt_radar-radar.altitude['data'])\n r = r[0]\n az = az[0]\n el = el[0]\n else:\n r = dscfg['rng']\n az = dscfg['azi']\n el = dscfg['ele']\n\n x, y, alt = pyart.core.antenna_to_cartesian(r, az, el)\n lon, lat = pyart.core.cartesian_to_geographic(x, y, projparams)\n\n d_az = np.min(np.abs(radar.azimuth['data'] - az))\n if d_az > dscfg['AziTol']:\n warn(' No radar bin found for point (az, el, r):(' +\n str(az)+', '+str(el)+', '+str(r) +\n '). Minimum distance to radar azimuth '+str(d_az) +\n ' larger than tolerance')\n return None, None\n\n d_el = np.min(np.abs(radar.elevation['data'] - el))\n if d_el > dscfg['EleTol']:\n warn(' No radar bin found for point (az, el, r):(' +\n str(az)+', '+str(el)+', '+str(r) +\n '). Minimum distance to radar elevation '+str(d_el) +\n ' larger than tolerance')\n return None, None\n\n d_r = np.min(np.abs(radar.range['data'] - r))\n if d_r > dscfg['RngTol']:\n warn(' No radar bin found for point (az, el, r):(' +\n str(az)+', '+str(el)+', '+str(r) +\n '). Minimum distance to radar range bin '+str(d_r) +\n ' larger than tolerance')\n return None, None\n\n ind_ray = np.argmin(np.abs(radar.azimuth['data'] - az) +\n np.abs(radar.elevation['data'] - el))\n ind_r = np.argmin(np.abs(radar.range['data'] - r))\n\n val = radar.fields[field_name]['data'].data[ind_ray, ind_r]\n time = num2date(radar.time['data'][ind_ray], radar.time['units'],\n radar.time['calendar'])\n\n # prepare for exit\n new_dataset = dict()\n new_dataset.update({'value': val})\n new_dataset.update({'datatype': datatype})\n new_dataset.update({'time': time})\n new_dataset.update(\n {'point_coordinates_WGS84_lon_lat_alt': [lon, lat, alt]})\n new_dataset.update({'antenna_coordinates_az_el_r': [az, el, r]})\n new_dataset.update(\n {'used_antenna_coordinates_az_el_r': [radar.azimuth['data'][ind_ray],\n radar.elevation['data'][ind_ray],\n radar.range['data'][ind_r]]})\n\n return new_dataset, ind_rad",
"def read_dip(fname, verbose=None):\n dipole = read_dipole(fname)\n return (dipole.times * 1000., dipole.pos, dipole.amplitude,\n 1e9 * dipole.ori * dipole.amplitude[:, np.newaxis], dipole.gof)",
"def AD(self, using, dx=0.0001, vmin=0.005, vmax=0.995):\n pits = np.array(self.PIT(using=using,dx=dx))\n mask = (pits>vmin) & (pits<vmax)\n ad_result = skgof.ad_test(pits[mask], stats.uniform())\n return ad_result.statistic, ad_result.pvalue",
"def read_formatted_data(self, meas_name=''):\n if meas_name:\n self.selected_measure = meas_name\n else:\n meas_name = self.selected_measure\n\n is_big_endian = self._pna.data_endianess == 'big'\n data_request = 'CALCulate{}:DATA? FDATA'.format(self._channel)\n if self._pna.data_format == 'REAL,+32':\n data = self._pna.query_binary_values(data_request, datatype='f',\n is_big_endian=is_big_endian,\n container=np.ndarray)\n\n elif self._pna.data_format == 'REAL,+64':\n data = self._pna.query_binary_values(data_request, datatype='d',\n is_big_endian=is_big_endian,\n container=np.ndarray)\n\n elif self._pna.data_format == 'ASC,+0':\n data = self._pna.query_ascii_values(data_request, converter='f',\n container=np.ndarray)\n\n else:\n raise InstrIOError(cleandoc('''Agilent PNA did not return the\n channel {} formatted data for meas {}'''.format(\n self._channel, meas_name)))\n\n return data",
"def ADFs(self, resolution=100, catom=False):\n if resolution not in self._adfs:\n self._adfs[resolution] = ADFCollection.from_soap(self, resolution, catom)\n return self._adfs[resolution]",
"def get_adx(data):\n if data is None:\n raise EmptyDataError('[!] Invalid data value')\n\n result = TA.ADX(data)\n if result is None:\n raise IndicatorException\n return result",
"def read_apr(self, lexclude=[], discontinuity=None, rename=None, verbose=False):\n###############################################################################\n \n import pyacs.lib.astrotime\n from pyacs.sol.gpoint import Gpoint\n\n # DEAL WITH RENAME IF PROVIDED\n \n if rename is not None:\n \n if verbose:print(\"-- Rename info provided for apr file: \", self.name)\n\n H_rename = {}\n\n # Case for a CODE rename applying for all SINEX files\n if 'all' in rename:\n \n for (code, new_code) in rename['all']:\n H_rename[code] = new_code\n \n # Case for a CODE rename applying for the current SINEX\n \n if self.name in list(rename.keys()):\n\n for (code, new_code) in rename[self.name]:\n H_rename[code] = new_code\n \n # READING APR FILE\n \n if verbose:\n print('-- Reading Globk apr file ', self.name)\n\n try:\n APR_VALUE = np.genfromtxt(self.name, comments='#', usecols=(1,2,3,4,5,6,7,8,9,10,11,12,12))\n APR_NAME = np.genfromtxt(self.name, comments='#', usecols=(0), dtype=str)\n except:\n print('!!!ERROR: could not read Globk format apr file:' , self.name)\n import sys\n sys.exit()\n \n for i in np.arange( APR_VALUE.shape[0]) :\n print('-- processing ', APR_NAME[i][:4])\n [x,y,z,sx,sy,sz,epoch, vx,vy,vz,svx,svy,svz]= APR_VALUE[i,:]\n M=Gpoint(X=x,Y=y,Z=z,\\\n SX=sx,SY=sy,SZ=sz,\\\n VX=vx,VY=vy,VZ=vz,SVX=svx,SVY=svy,SVZ=svz, \\\n epoch=epoch,code=APR_NAME[i][:4],pt='A',soln=1)\n \n self.estimates[ APR_NAME[i][:4], 1 ] = M",
"def _read_arf(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n\n return data['energ_lo'], data['energ_hi'], data['specresp']",
"def read_vmdas(self,):\n fd = self.f\n # The raw files produced by VMDAS contain a binary navigation data\n # block.\n self.cfg['sourceprog'] = 'VMDAS'\n ens = self.ensemble\n k = ens.k\n if self._source != 1 and self._debug_level >= 1:\n print(' \\n***** Apparently a VMDAS file \\n\\n')\n self._source = 1\n self.vars_read += ['time_gps',\n 'latitude_gps',\n 'longitude_gps',\n 'etime_gps',\n 'elatitude_gps',\n 'elongitude_gps',\n 'flags',\n 'ntime', ]\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[2] + utim[3] * 256, utim[1], utim[0])\n # This byte is in hundredths of seconds (10s of milliseconds):\n time = tmlib.timedelta(milliseconds=(int(fd.read_ui32(1) / 10)))\n fd.seek(4, 1) # \"PC clock offset from UTC\" - clock drift in ms?\n ens.time_gps[k] = tmlib.date2epoch(date + time)[0]\n ens.latitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.longitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.etime_gps[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) * 10)))[0]\n ens.elatitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.elongitude_gps[k] = fd.read_i32(1) * self._cfac\n fd.seek(12, 1)\n ens.flags[k] = fd.read_ui16(1)\n fd.seek(6, 1)\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[0] + utim[1] * 256, utim[3], utim[2])\n ens.ntime[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) / 10)))[0]\n fd.seek(16, 1)\n self._nbyte = 2 + 76",
"def get_all_DLP_measurements(self):\n pass",
"def Getdxdparam(Mda,Mdb,Xa):\n\n Xb = Xa.copy()\n #Xb[iulag] = Xa[iulag] + (1-Xa[iq]*Xa[iM])*(Mdb.ubar-Mda.ubar)\n Xb[Mdb.nX:Mdb.nXY] = Mdb.F(Xb[Mdb.interpstates])\n Xb[Mdb.nXY:] = Mdb.Static(Xb)\n\n if CLArgs.param == \"b\":\n D = Mdb.b() - Mda.b()\n else:\n D = Mdb.tau - Mda.tau\n\n return (Xb[iM] - Xa[iM])/D",
"def explore_FAAM_aerosol_data():\n # -- PCASP\n dsPCASP = get_FAAM_mineral_dust_calibration(instrument='PCASP',\n rtn_values=False)\n # -- CDP\n dsCDP = get_FAAM_mineral_dust_calibration(instrument='CDP',\n rtn_values=False)\n # only consider \"potential dust\" above a certain size?\n # Use 100 um for now",
"def read_adas(self):\n for name in self.files_atte:\n self.beam_atte.append(adas.ADAS21(name))\n for name in self.files_emis:\n self.beam_emis.append(adas.ADAS22(name))",
"def ADF(self, ax, catom=False):\n if catom:\n return self._get_DF(ax, \"cADF\", \"ax\", catom=True)\n else:\n return self._get_DF(ax, \"nADF\", \"ax\", catom=False)",
"def atmparamread(filename):\n f = open(filename, 'r')\n f.readline()\n line = f.readline()\n #Td = float(line.split()[0])\n #Pd = float(line.split()[1])\n #Mc = float(line.split()[2])\n #rc = float(line.split()[3])\n n = int(line.split()[0])\n f.readline()\n atm = 0*numpy.ndarray(shape=(n, ncol), dtype=float)\n S = 0*numpy.ndarray(shape=(n), dtype=float)\n for i in range(n):\n line = f.readline()\n S[i] = float(line.split()[0])\n for j in range(ncol ):\n atm[i, j] = float(line.split()[j+1])\n f.close()\n return atm, S",
"def get_admittance(self, param_name: list = ['Y11', 'Y21']):\n # TODO: move the plot in this analysis module. Renderer should recover the entire data\n return self.renderer.plot_params(param_name)",
"def RDF(self, dP, rx, fast=True):\n parts = np.zeros((len(dP), len(rx)))\n for i, dPi in enumerate(dP):\n w = np.sign(dPi[1])*np.sqrt(np.sqrt(np.abs(dPi[1])))\n parts[i,:] = w*self.apnl(dPi, rx, fast=fast)\n return np.sum(parts, axis=0)",
"def XPLMGetDatad(inDataRef):\n return float",
"def read_results():\r\n with open(\"packing.nfo\", \"r\") as fin:\r\n fin.readline()\r\n fin.readline()\r\n por_theory = float(fin.readline().split()[2])\r\n por_final = float(fin.readline().split()[2])\r\n print('Theoretical porosity:', por_theory)\r\n print('Final porosity:', por_final)\r\n with open(\"packing.xyzd\", \"rb\") as fin:\r\n btxt = fin.read()\r\n txt = list(struct.unpack(\"<\" + \"d\" * (len(btxt) // 8), btxt))\r\n data = array(zip(*[iter(txt)] * 4))\r\n data[:, 3] = data[:, 3] * \\\r\n ((1 - por_final) / (1 - por_theory))**(1 / 3)\r\n return data",
"def handle_dpad(self):\n # pylint: disable=no-member\n x_raw = self.microbit.accelerometer.get_x()\n y_raw = self.microbit.accelerometer.get_y()\n minus_sens = self.sensitivity * -1\n if x_raw < minus_sens:\n x_state = ('Absolute', 0x10, -1)\n elif x_raw > self.sensitivity:\n x_state = ('Absolute', 0x10, 1)\n else:\n x_state = ('Absolute', 0x10, 0)\n\n if y_raw < minus_sens:\n y_state = ('Absolute', 0x11, -1)\n elif y_raw > self.sensitivity:\n y_state = ('Absolute', 0x11, 1)\n else:\n y_state = ('Absolute', 0x11, 1)\n\n return x_state, y_state"
] | [
"0.58027154",
"0.546091",
"0.5428187",
"0.5426959",
"0.5236421",
"0.5206721",
"0.5204981",
"0.5189248",
"0.5178479",
"0.51778173",
"0.51361907",
"0.5132914",
"0.51140034",
"0.51137125",
"0.5094768",
"0.505379",
"0.5024168",
"0.49520048",
"0.49162722",
"0.4861708",
"0.48393014",
"0.4827799",
"0.48126784",
"0.4803462",
"0.47973534",
"0.47912204",
"0.475231",
"0.47431862",
"0.4728082",
"0.4724658"
] | 0.78116286 | 0 |
Returns the ADP after reflection on the plane defined by its normal vector 'planev'. | def reflect_adp(adp, planev):
M = np.identity(4)
M[:3, :3] -= 2.0 * np.outer(planev, planev)
M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev
return rotate_adp(adp, M[:3, :3]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plane(self):\n return Plane(Point(0, self.evaluations.exposedWing.edges[2].point1.y, 0), Vector(0, 1, 0),\n hidden=True)",
"def GetPlane(plane):\r\n pass",
"def get_adp_from_calc(vx, vy, vz):\n ## lx=np.linalg.norm(vx)\n ## ly=np.linalg.norm(vy)\n ## lz=np.linalg.norm(vz)\n lx = vx\n ly = vy\n lz = vz\n L = np.matrix([[lx, 0, 0],\n [0, ly, 0],\n [0, 0, lz]])\n\n\n ## Vx=vx/lx\n ## Vy=vy/ly\n ## Vz=vz/lz\n Vx = np.array([1, 0, 0])\n Vy = np.array([0, 1, 0])\n Vz = np.array([0, 0, 1])\n V = np.matrix([[Vx[0], Vy[0], Vz[0]],\n [Vx[1], Vy[1], Vz[1]],\n [Vx[2], Vy[2], Vz[2]]])\n Vinv = np.linalg.inv(V)\n #print V,Vinv\n M = np.dot(np.dot(Vinv, L), V)\n #print M\n return M",
"def ADP (self):",
"def getPlane(entry):\n\n \n \n a,b,c = getNewLattice(entry,2)\n a_vector = np.linalg.solve(np.array(entry[0].lattice.as_dict()['matrix']).T,a)\n b_vector = np.linalg.solve(np.array(entry[0].lattice.as_dict()['matrix']).T,b)\n fracs = np.cross(a_vector,b_vector)\n fracs /= min([x for x in fracs if abs(x)>1E-4])\n \n return(fracs)",
"def plane(self):\r\n from lsst.analysis import utils\r\n return utils.fitplane(self.points, self.z)",
"def test_antinormal_reflection(self):\n n1 = 1.0\n n2 = 1.5\n normal = (0.0, 0.0, -1.0)\n angle = 0.0\n ray = Ray(position=(0.0, 0.0, 0.0), direction=(0.0, 0.0, 1.0), wavelength=None)\n fresnel = FresnelReflection()\n assert np.isclose(fresnel.reflectivity(angle, n1, n2), 0.04)\n new_ray = fresnel.transform(ray, {\"normal\": normal})\n assert np.allclose(flip(ray.direction), new_ray.direction)",
"def pr(self, vertex):\n log_pr = self.log_pr(vertex)\n return np.exp(log_pr - self.logZ)",
"def plot_plane(unit_normal, x_array, y_array, fore):\n # print'unit normal = ', unit_normal\n z = (((unit_normal[0] * (fore[0] - x_array)) + (unit_normal[1] * (fore[1] - y_array))) / unit_normal[2]) + fore[2]\n # print 'plane numbers\\n', z\n return z",
"def p(self):\n return 'Plane'",
"def project_onto_plane(self,z):\n U=self.U\n Q=self.Q_p\n #print(((z-Q[-2,:,[2]])/P[-2,:,[2]]).T)\n #print(P[-2])\n return ((z-Q[-2,:,[2]])/U[-2,:,[2]]).T*U[-2]+Q[-2]",
"def plane(self):\n return plane(self.N, self.o)",
"def plane_equation(point_a, point_b, point_c):\n v1 = np.subtract(point_a, point_c)\n v2 = np.subtract(point_a, point_b)\n normal = np.cross(v1, v2)\n # print 'b4 norm', normal\n unit_normal = norm_vect(normal)\n # print 'unityyy', unit_normal\n return unit_normal",
"def get_real_pwv(pwv, altitude):\n zenith_angle = 90-altitude\n airmass = 1/np.cos(zenith_angle*np.pi/180)\n return pwv*airmass",
"def project_point_plane(point, plane):\n base, normal = plane\n normal = normalize_vector(normal)\n vector = subtract_vectors(point, base)\n snormal = scale_vector(normal, dot_vectors(vector, normal))\n return subtract_vectors(point, snormal)",
"def reflect_line_plane(line, plane, epsilon=1e-6):\n intx_pt = intersection_line_plane(line, plane, epsilon)\n if not intx_pt:\n return None\n vec_line = subtract_vectors(line[1], line[0])\n vec_reflect = mirror_vector_vector(vec_line, plane[1])\n if angle_smallest_vectors(plane[1], vec_reflect) > 0.5 * pi:\n return None\n return [intx_pt, add_vectors(intx_pt, vec_reflect)]",
"def plane_point_side_v3(p: np.ndarray, v: np.ndarray) -> Any:\n return p[:3].dot(v) + p[3]",
"def plane_distance(p, plane):\n x, y, z = p\n A, B, C, D = plane\n return A*x + B*y + C*z + D",
"def planarize(self):\r\n from lsst.analysis import utils\r\n assert numpy.isfinite(self.z).all()\r\n self.z -= utils.evalplane(self.plane(), self.points)",
"def OAVolterra_direct(p0,wD,dt,Nt):\n # INITIALIZATION ------------------------------------------------------\n pz = np.zeros(Nt) # oa signal at detection point\n K0 = wD # oa propagator: K(0,0) \n K1 = wD*np.exp(-wD*dt) # oa propagator: K(1,0) \n K1_K0 = np.exp(-wD*dt) # quotient: K(i+1)/K(i)\n\n # SOLVE FORWARD PROBLEM VIA RECURRENCE RELATION -----------------------\n I = 0 \n pz[0] = p0[0] \n for i in range(1,Nt):\n I = I*K1_K0 + 0.5*dt*(K1*p0[i-1] + K0*p0[i])\n pz[i] = p0[i] - I\n return pz",
"def distance_from_plane(n,p,r,nnorm=None):\n #return np.abs(np.dot(n,(p-r)))/np.linalg.norm(n)\n #return np.abs(np.dot(n,(p-r)))/nnorm\n # the normal vector is already a unit vector!\n return np.abs(np.dot(n,(p-r)))",
"def extract_phase(eigvector, point_arr=[]):\n pa = point_arr\n if np.size(pa) == 0:\n pa = np.arange(len(evY))\n\n evX = eigvector[2 * pa]\n evY = eigvector[2 * pa + 1]\n phase = np.arctan2(evY.real, evX.real)\n # print 'evY[0] =', evY[0]\n # print 'evX[0] =', evX[0]\n # print 'phase[0] = ', phase[0]\n return phase",
"def VaporPressure(dwpt):\n\n return 611.2*exp(17.67*dwpt/(243.5+dwpt))",
"def test_point_on_plane(self, point, plane):\n _dist = point.dot(plane[:3]) + plane[3]\n if _dist <= epsilon:\n print('OK => point on plane')\n else:\n print('NO => point not on plane')",
"def plane_desc(self) -> str:\n return self.planes[0].join(' ') + self.planes[1].join(' ') + self.planes[2].join(' ')",
"def test_reflection_vector(self):\n\n # A ray approaching at 45 degrees\n v = vectors.Vector(1, -1, 0)\n n = vectors.Vector(0, 1, 0)\n r = v.reflect(n)\n self.assertEqual(r, vectors.Vector(1, 1, 0))\n\n # Ray along an axis hits a surface at an angle\n v = vectors.Vector(0, -1, 0)\n n = vectors.Vector(math.sqrt(2)/2, math.sqrt(2)/2, 0)\n r = v.reflect(n)\n self.assertEqual(r, vectors.Vector(1, 0, 0))",
"def Drepp(self):\n sinE = np.sin(self.E())\n cosE = np.cos(self.E())\n return -self.alpha()*cosE-(self.beta()+self.GAMMA)*sinE",
"def distance_point_plane(point, plane):\n base, normal = plane\n vector = subtract_vectors(point, base)\n return fabs(dot_vectors(vector, normal))",
"def ransac_plane_estimation (numpy_cloud, threshold, fixed_point=None, w = .9, z = 0.95 ):\r\n\r\n # variables\r\n current_consensus = 0 # keeps track of how many points match the current plane\r\n best_consensus = 0 # shows how many points matched the best plane yet\r\n consensus_points = np.array([]) # np.ndarray of points matching the cloud\r\n best_normal_vector = np.array ([]) # current best normal vector\r\n\r\n # determine probabilities and number of draws\r\n b = np.float_power(w, 3 ) # probability that all three observations belong to the model\r\n k = ceil(np.log(1-z ) / np.log(1-b )) # estimated number of draws\r\n\r\n # copy cloud\r\n numpy_cloud = numpy_cloud[:, 0:3].copy ()\r\n\r\n # estimate k * 3 random planes, defined through one normal vector and one plane parameter d, respectively\r\n normal_vectors, plane_parameters_d = random_plane_estimation (numpy_cloud, k * 3, fixed_point )\r\n\r\n # iterate through all planes found to see which one performs best\r\n for (normal_vector, d) in zip (normal_vectors, plane_parameters_d ):\r\n\r\n # count all points that consent with the plane\r\n current_consensus, current_consensus_points = plane_consensus (numpy_cloud, normal_vector, d, threshold )\r\n\r\n # is the current consensus match higher than the previous ones?\r\n if (current_consensus > best_consensus ):\r\n\r\n # keep best consensus set\r\n consensus_points = current_consensus_points\r\n best_normal_vector = normal_vector\r\n best_consensus = current_consensus\r\n\r\n return best_normal_vector, consensus_points",
"def reflect_ghost(self, p0):\n # Instead of self.p1, one could take any point on the line p1--p2.\n dist = self.p1 - p0\n alpha = numpy.einsum(\"ij, ij->i\", dist, self.mirror_edge)\n # q is sits at the perpendicular intersection of the reflection\n q = dist - (alpha / self.beta)[:, None] * self.mirror_edge\n return p0 + 2 * q"
] | [
"0.6131728",
"0.57741225",
"0.56889206",
"0.56324",
"0.55201805",
"0.55160964",
"0.5514133",
"0.5479282",
"0.5462305",
"0.5436542",
"0.52982074",
"0.52627504",
"0.52521896",
"0.5238149",
"0.52197284",
"0.52163255",
"0.51973253",
"0.5194445",
"0.5152425",
"0.5139117",
"0.5137654",
"0.5130722",
"0.51272553",
"0.51077765",
"0.5082127",
"0.5051763",
"0.50444925",
"0.50361395",
"0.50334024",
"0.50302327"
] | 0.6869817 | 0 |
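A minimal usage sketch for the reflect_adp record above. It assumes the six-component ADP order (U11, U22, U33, U12, U13, U23) and that the helper rotate_adp(adp, R), which is not shown in the record, applies the similarity transform U' = R U R^T; both the ordering and the helper behaviour are assumptions, not part of the dataset.

import numpy as np

def rotate_adp_sketch(adp, rotmat):
    # Rebuild the symmetric 3x3 tensor from its six independent components,
    # apply U' = R U R^T, and flatten back to the six-component form.
    U = np.array([[adp[0], adp[3], adp[4]],
                  [adp[3], adp[1], adp[5]],
                  [adp[4], adp[5], adp[2]]], dtype=float)
    Up = rotmat @ U @ rotmat.T
    return [Up[0, 0], Up[1, 1], Up[2, 2], Up[0, 1], Up[0, 2], Up[1, 2]]

planev = np.array([1.0, 0.0, 0.0])                    # unit normal of the mirror plane
R = np.identity(3) - 2.0 * np.outer(planev, planev)   # Householder reflection through x = 0
adp = [0.02, 0.03, 0.04, 0.005, 0.001, 0.002]
print(rotate_adp_sketch(adp, R))                      # U12 and U13 flip sign, the diagonal is unchanged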
Calculates an ADP in its matrix representation from the three principal axes representing the displacement ellipsoid. The three principal axes of the ellipsoid are needed as arguments. A matrix representation of the ADP is returned. | def get_adp_from_calc(vx, vy, vz):
## lx=np.linalg.norm(vx)
## ly=np.linalg.norm(vy)
## lz=np.linalg.norm(vz)
lx = vx
ly = vy
lz = vz
L = np.matrix([[lx, 0, 0],
[0, ly, 0],
[0, 0, lz]])
## Vx=vx/lx
## Vy=vy/ly
## Vz=vz/lz
Vx = np.array([1, 0, 0])
Vy = np.array([0, 1, 0])
Vz = np.array([0, 0, 1])
V = np.matrix([[Vx[0], Vy[0], Vz[0]],
[Vx[1], Vy[1], Vz[1]],
[Vx[2], Vy[2], Vz[2]]])
Vinv = np.linalg.inv(V)
#print V,Vinv
M = np.dot(np.dot(Vinv, L), V)
#print M
return M | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def A_coefficients_ellipsoid(v, DD, bDDisDelta=False):\n #v can be given as an array with X/Y/Z cartesian dimensions being the last.\n #\"\"\"\n if bDDisDelta:\n delta=DD\n else:\n delta=Ddelta_ellipsoid(dd)\n #v=_sanitise_v(v)\n #v2=np.square(v)\n #v4=np.square(v2)\n #fact2=np.multiply(0.75,np.sum(v4))-0.25\n v2 = [ v[i]*v[i] for i in range(3) ]\n v4 = [ v2[i]*v2[i] for i in range(3) ]\n fact2 = 0.25*( 3.0*(v4[0]+v4[1]+v4[2])-1.0)\n fact3 = 1.0/12.0*(delta[0]*(3*v4[0]+6*v2[1]*v2[2]-1) + delta[1]*(3*v4[1]+6*v2[0]*v2[2]-1) + delta[2]*(3*v4[2]+6*v2[0]*v2[1]-1))\n A=np.zeros(5)\n A[0]= 3*v2[1]*v2[2]\n A[1]= 3*v2[0]*v2[2]\n A[2]= 3*v2[0]*v2[1]\n A[3]= fact2-fact3\n A[4]= fact2+fact3\n return A",
"def c1(adp1, adp2):\n\n def get_axis(adp):\n \"\"\"\n Returns ADP as its three principle axis representation.\n :param adp: List/Array type of length 6.\n :returns: List of three arrays of length 3.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n w, v = np.linalg.eig(adp)\n return [np.array((w[j] * v[:, j]).flatten().tolist()[0]) for j \\\n in xrange(3)]\n\n adp1_axis = get_axis(adp1)\n adp2_axis = get_axis(adp2)\n\n val = 0\n for i in xrange(3):\n addval = abs(norm(adp1_axis[i] - adp2_axis[i]))\n addval = addval * abs((1 - abs(np.dot(adp1_axis[i], adp2_axis[i]))))\n val += addval\n return val",
"def __getAVEA(a, b, c, d):\n\tassert(a>-1)\n\tassert(b>-1)\n\tassert(c>-1)\n\tassert(d>-1)\n\tm1i = a+b\n\tm0i = c+d\n\tn1i = a+c\n\tn0i = b+d\n\tni = a+b+c+d\n\tv = 0.0\n\tea = 0.0\n\tif ni<=1:\n\t\t# Avoid divide-by-zero\n\t\tea = n1i*m1i\n\telse:\n\t\tv = n1i*n0i*m1i*m0i/float((ni-1)*ni*ni)\n\t\tea = n1i*m1i/float(ni)\n\treturn (a,v,ea)",
"def calc_main_axis(self):\n #Clarify why the above step has been done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs",
"def reflect_adp(adp, planev):\n M = np.identity(4)\n M[:3, :3] -= 2.0 * np.outer(planev, planev)\n M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev\n\n return rotate_adp(adp, M[:3, :3])",
"def get_A3():\n\n return array([[0.68557183+0.46550108j, 0.12934765-0.1622676j,\n 0.24409518+0.25335939j],\n [0.1531015 + 0.66678983j, 0.45112492+0.18206976j,\n -0.02633966+0.43477693j],\n [-0.10817164-1.16879196j, -0.18446849+0.03755672j,\n 0.06430325-0.44757084j]])",
"def eigenv2tensor(axis):\n vec = np.ones((3, 3))\n vecval = np.ones((3, 3))\n for i in xrange(len(axis)):\n vmag = np.linalg.norm(axis[i])\n v = axis[i] / vmag\n #print v\n vec[:, i] = v\n vecval[:, i] = axis[i]\n adp = np.linalg.solve(vec, vecval)\n return adp",
"def get_axis(adp):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n w, v = np.linalg.eig(adp)\n return [np.array((w[j] * v[:, j]).flatten().tolist()[0]) for j \\\n in xrange(3)]",
"def estimate_affine_matrix_3d_to_2d(X, x):\n assert x.shape[0] == X.shape[0]\n assert x.shape[0] >= 4\n X = X.T # (3, n)\n x = x.T # (2, n)\n n = x.shape[1]\n\n ###---- 1. normalization\n ## 2d points\n mean = np.mean(x, 1) # (2, )\n x = x - np.tile(mean[:, np.newaxis], [1, n]) # (2, n)\n average_norm = np.mean(np.sqrt(np.sum(x ** 2, 0)))\n scale = np.sqrt(2) / average_norm\n x = scale * x\n\n # T = [[scale, 0, -mean * scale], \n # [ 0, scale, -mean * scale], \n # [ 0, 0, 1 ]]\n T = np.zeros((3, 3), dtype=np.float32)\n T[0, 0] = T[1, 1] = scale\n T[:2, 2] = -mean * scale\n T[2, 2] = 1\n\n ## 3d points\n X_homo = np.vstack((X, np.ones((1, n)))) # (4, n)\n mean = np.mean(X, 1) # (3, )\n X = X - np.tile(mean[:, np.newaxis], [1, n]) # (3, n)\n m = X_homo[: 3, :] - X\n average_norm = np.mean(np.sqrt(np.sum(X ** 2, 0)))\n scale = np.sqrt(3) / average_norm\n X = scale * X\n\n U = np.zeros((4, 4), dtype=np.float32)\n U[0, 0] = U[1, 1] = U[2, 2] = scale\n U[: 3, 3] = -mean * scale\n U[3, 3] = 1\n\n ###---- 2. equations\n A = np.zeros((n * 2, 8), dtype=np.float32)\n X_homo = np.vstack((X, np.ones((1, n)))).T\n A[: n, : 4] = X_homo\n A[n: , 4: ] = X_homo\n b = np.reshape(x, [-1, 1]) # (2n, 1)\n\n ###---- 3.solution\n p_8 = np.linalg.pinv(A).dot(b) # (8, 2n) x (2n, 1) -> (8, 1)\n p = np.zeros((3, 4), dtype=np.float32)\n p[0, :] = p_8[:4, 0]\n p[1, :] = p_8[4:, 0]\n p[-1, -1] = 1\n\n ###---- 4. denormalization\n P_Affine = np.linalg.inv(T).dot(p.dot(U))\n return P_Affine",
"def ADF(self, dP, ax):\n from scipy.special import sph_harm\n ang = self._ang_part(dP)\n #scipy defines their harmonics to have `theta` be azimuthal, which is\n #opposite from physics.\n #we set $m = 0$ so that the azimuthal part doesn't contribute at all.\n result = np.zeros(len(ax))\n for l, p in ang.items():\n Ylm = sph_harm(0, l, 0, ax)*np.sqrt(2*l+1)\n #We are interested in the c* c of this value, which is multiplied\n #together to get pissnnl.\n result += p*np.sqrt(np.absolute(Ylm*Ylm.conjugate()))\n return result",
"def matrix_exp_pade3(matrix, multiplication_rule=None):\n b = [120.0, 60.0, 12.0]\n b = [tf.constant(x, matrix.dtype) for x in b]\n ident = tf.linalg.eye(\n tf.shape(matrix)[-2],\n batch_shape=tf.shape(matrix)[:-2],\n dtype=matrix.dtype)\n matrix_2 = tf.linalg.matmul(matrix, matrix)\n tmp = matrix_2 + b[1] * ident\n matrix_u = tf.linalg.matmul(matrix, tmp)\n matrix_v = b[2] * matrix_2 + b[0] * ident\n return matrix_u, matrix_v",
"def get_aa_tpdm(self) -> Tuple['Nparray', 'Nparray']:\n dveca, _ = self.calculate_dvec_spin()\n alpha_opdm = numpy.tensordot(dveca, self.coeff.conj(), axes=2)\n nik_njl_aa = numpy.transpose(numpy.tensordot(dveca.conj(),\n dveca,\n axes=((2, 3), (2, 3))),\n axes=(1, 2, 0, 3))\n for ii in range(nik_njl_aa.shape[1]):\n nik_njl_aa[:, ii, ii, :] -= alpha_opdm\n return alpha_opdm, -nik_njl_aa",
"def to_amdl(self):\n from .adipls import ADIPLSStellarModel\n\n ioff = (0 if self.r[0] < 1e6 else 1) # mimic ADIPLS's FGONG to AMDL script\n A = np.zeros((len(self.data) + ioff, 6))\n\n # we can safely ignore division by 0 here\n with np.errstate(divide='ignore', invalid='ignore'):\n A[ioff:,0] = self.x\n A[ioff:,1] = self.q/self.x**3\n A[ioff:,2] = self.Vg\n A[ioff:,3] = self.Gamma_1\n A[ioff:,4] = self.AA\n A[ioff:,5] = self.U\n\n A[0,0] = 0.\n A[0,1] = 4.*np.pi/3.*self.rho[0]*self.R**3/self.M\n A[0,2] = 0.\n A[0,3] = self.Gamma_1[0]\n A[0,4] = 0.\n A[0,5] = 3.\n\n D = np.zeros(8)\n D[0] = self.M\n D[1] = self.R\n D[2] = self.P[0]\n D[3] = self.rho[0]\n D[4] = 4.*np.pi/3.*self.G*(self.rho[0]*self.R)**2/(self.P[0]*self.Gamma_1[0])\n D[5] = D[4]\n D[6] = -1.0\n D[7] = 0.0\n\n return ADIPLSStellarModel(D, A, G=self.G)",
"def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aqin) / self.aqin.Tcol - \\\n e.potinflayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aqout) / self.aqout.Tcol\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aqin) / self.aqin.T - \\\n e.potentiallayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aqout) / self.aqout.T\n return mat, rhs",
"def __convert(args):\n a, b, zone, ellipsoid, datum, inverse = args\n projection = Proj(\"+proj=utm +zone={}, +ellps={} +datum={} +units=m +no_defs\".format(zone, ellipsoid, datum))\n c, d = projection(a, b, inverse=inverse)\n\n return c, d",
"def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M",
"def e(self):\n return np.matrix([self.y - self.arg[0,0]*self.x**3 - self.arg[1,0]*self.x**2 - self.arg[2,0]*self.x**1 - self.arg[3,0]])",
"def _propagate_A(self):\n A_roots = np.roots(self.A)\n A_roots_norm = [r if np.abs(r) < 1 else 1/np.conj(r) for r in A_roots]\n A_poly = np.poly(A_roots_norm)\n self.alpha_g = -A_poly[1:]\n self.A = np.concatenate([[1], -self.alpha_g])\n\n self.rev_A = self.A[::-1]\n\n self.pie = np.dot(self.M_mu, self.rev_A)\n self.pi = self.pie*self.e\n self.p = self.pi*self.d\n\n\n M_R = np.lib.stride_tricks.as_strided(self.R_pad,\n shape=[self.L_h, self.L_h, self.P+1],\n strides=[self.R_pad.strides[0], self.R_pad.strides[1], self.R_pad.strides[0]])\n self.half_pie_var = np.dot(M_R, self.rev_A)\n self.half_pie_var_pad = np.pad(self.half_pie_var, [(0, 0), (self.P, 0)], 'constant')\n self.M_half_pie_var_pad = np.lib.stride_tricks.as_strided(self.half_pie_var_pad,\n shape=[self.L_h, self.P+1],\n strides=[self.half_pie_var_pad.strides[0]+self.half_pie_var_pad.strides[1], self.half_pie_var_pad.strides[1]])\n\n self.pie_var = np.dot(self.M_half_pie_var_pad, self.rev_A)",
"def get_matrix(adp):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n return adp",
"def project(self, a):\n for g in xrange(0, len(a), 3):\n\n ax = a[g + 0]\n ay = a[g + 1]\n az = a[g + 2]\n anorm = ax ** 2.0 + ay ** 2.0 + az ** 2.0\n i = anorm > 1.0\n\n anorm_i = anorm[i] ** 0.5 # Square root is taken here. Faster.\n ax[i] = np.divide(ax[i], anorm_i)\n ay[i] = np.divide(ay[i], anorm_i)\n az[i] = np.divide(az[i], anorm_i)\n\n a[g + 0] = ax\n a[g + 1] = ay\n a[g + 2] = az\n\n return a",
"def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n # rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs = self.pc.copy()\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n # rhs[istart:istart+self.nlayers] = self.pc[]\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xc[icp], self.yc[icp], self.layers)\n if e == self:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] -= self.resfac[icp]\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xc[icp], self.yc[icp],\n self.layers) # Pretty cool that this works, really\n return mat, rhs",
"def principal_axis(alpha_carbons):\n # alpha carbons coordinates as a numpy array\n coord = numpy.array(alpha_carbons, float)\n\n # get geometrical center\n center = numpy.mean(coord, 0)\n coord = coord - center\n\n # create inertia matrix and extract eigenvectors and values\n inertia = numpy.dot(coord.transpose(), coord)\n e_values, e_vectors = numpy.linalg.eig(inertia)\n\n # sort eigenvalues\n order = numpy.argsort(e_values)\n\n # axis1 is the principal axis with the greatest eigenvalue\n _, _, axis1 = e_vectors[:, order].transpose()\n\n axis_direction = axis1 / numpy.linalg.norm(axis1)\n\n return center, axis_direction",
"def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n # rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs = self.hc.copy()\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n # rhs[istart:istart+self.nlayers] = self.pc[]\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xc[icp], self.yc[icp], self.layers) / self.aq.Tcol[self.layers]\n if e == self:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] -= self.resfac[icp]\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xc[icp], self.yc[icp], self.layers) / self.aq.T[\n self.layers] # Pretty cool that this works, really\n return mat, rhs",
"def Compute3d(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_Compute3d(self, *args)",
"def ap(self, P):\n if P.divides(self.conductor()):\n if (P*P).divides(self.conductor()):\n # It is 0, because the reduction is additive.\n return ZZ(0)\n else:\n # TODO: It is +1 or -1, but I do not yet know how to\n # compute which without using the L-function.\n return '?'\n else:\n return self._S.hecke_matrix(P)[0,0]",
"def adjugate_matrix(self, determinant, transposed_cofactor):\n if transposed_cofactor.__class__.__name__ != \"Matrix3\":\n raise TypeError(self._ERRORS[0])\n\n r1 = transposed_cofactor.row_1\n r2 = transposed_cofactor.row_2\n r3 = transposed_cofactor.row_3\n\n r1[0] /= determinant\n r1[1] /= determinant\n r1[2] /= determinant\n\n r2[0] /= determinant\n r2[1] /= determinant\n r2[2] /= determinant\n\n r3[0] /= determinant\n r3[1] /= determinant\n r3[2] /= determinant\n\n return Matrix3(r1, r2, r3)",
"def anl_solution(self):\r\n\r\n m = float(self.mass) / self.nu_m\r\n qe = 1 / self.nu_m * (self.nu_t * self.nu_t / self.nu_x) * 1.0 \\\r\n / float(self.size_tick * self.size_tick)\r\n print 'qE=', qe\r\n c = self.light_vel\r\n for i in range(0, len(self.obs.obt_g)):\r\n ddt = float(self.obs.obt[i] - self.obs.obt[i - 1])\r\n x = m * c ** 2 / qe * (math.sqrt(1.0 + (qe * self.t[i] / (m\r\n * c)) ** 2) - 1.0)\r\n self.xa_track.append(x)\r\n p = qe * self.t[i]\r\n self.pa.append(p)\r\n v = p / math.sqrt(m ** 2 + (p / c) ** 2)\r\n jv = self.t[i] * qe / (m * c)\r\n v = math.sqrt(jv * jv / (1 + jv * jv)) * c\r\n self.va.append(v)\r\n print 'Analytical solution of the differential equation of motion'",
"def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n rhs[istart:istart + self.nlayers] = self.pc\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n e.potinflayers(self.xc[icp], self.yc[icp], self.layers)\n ieq += e.nunknowns\n else:\n rhs[istart:istart + self.nlayers] -= \\\n e.potentiallayers(self.xc[icp], self.yc[icp],\n self.layers) # Pretty cool that this works, really\n return mat, rhs",
"def equation(self):\n mat = np.zeros((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs[0:self.nlayers - 1] = 0.0\n rhs[self.nlayers - 1] = self.Qc\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n head = e.potinflayers(self.xc, self.yc, self.layers) / self.aq.Tcol[self.layers, :]\n mat[0:self.nlayers - 1, ieq:ieq + e.nunknowns] = head[:-1] - head[1:]\n if e == self:\n for i in range(self.nlayers - 1):\n mat[i, ieq + i] -= self.resfac[i]\n mat[i, ieq + i + 1] += self.resfac[i + 1]\n mat[self.nlayers - 1, ieq:ieq + self.nlayers] = 1.0\n ieq += e.nunknowns\n else:\n head = e.potentiallayers(self.xc, self.yc, self.layers) / self.aq.T[self.layers]\n rhs[0:self.nlayers - 1] -= head[:-1] - head[1:]\n return mat, rhs",
"def equation(self):\n mat = np.empty((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n for icp in range(self.ncp):\n istart = icp * self.nlayers\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n qx, qy = e.disvecinflayers(self.xc[icp], self.yc[icp], self.layers)\n mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = \\\n qx * self.cosnorm[icp] + qy * self.sinnorm[icp] - self.resfac[:, np.newaxis] * \\\n (e.potinflayers(self.xcin[icp], self.ycin[icp], self.layers, aq=self.aq) / self.aq.Tcol[\n self.layers] - \\\n e.potinflayers(self.xcout[icp], self.ycout[icp], self.layers, aq=self.aq) / self.aq.Tcol[\n self.layers])\n ieq += e.nunknowns\n else:\n qx, qy = e.disveclayers(self.xc[icp], self.yc[icp], self.layers)\n rhs[istart:istart + self.nlayers] -= qx * self.cosnorm[icp] + qy * self.sinnorm[icp] + self.resfac * \\\n (e.potentiallayers(self.xcin[icp], self.ycin[icp], self.layers,\n aq=self.aq) / self.aq.T[self.layers] -\n e.potentiallayers(self.xcout[icp], self.ycout[icp],\n self.layers, aq=self.aq) / self.aq.T[\n self.layers])\n return mat, rhs"
] | [
"0.6180545",
"0.5795742",
"0.5749824",
"0.56150705",
"0.55909604",
"0.55889744",
"0.5540044",
"0.5520576",
"0.5389443",
"0.53017646",
"0.52529544",
"0.5236133",
"0.5234051",
"0.52170014",
"0.5192699",
"0.5171544",
"0.51684576",
"0.5142525",
"0.51380336",
"0.5134151",
"0.5129932",
"0.51243186",
"0.5115185",
"0.5096115",
"0.50739235",
"0.50687164",
"0.50564426",
"0.5033324",
"0.5014548",
"0.4993605"
] | 0.59788334 | 1 |
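A hedged sketch related to the get_adp_from_calc record above: one common way to rebuild a displacement tensor from three general principal axes is U = V diag(lambda) V^T, where the columns of V are the unit axis directions and lambda are the axis lengths. The helper name and the example values are illustrative assumptions, not part of the dataset.

import numpy as np

def adp_from_principal_axes(axes):
    # axes: three principal-axis vectors whose lengths give the displacement
    # along them; returns the full 3x3 displacement tensor.
    axes = [np.asarray(a, dtype=float) for a in axes]
    lengths = [np.linalg.norm(a) for a in axes]                   # eigenvalues
    V = np.column_stack([a / l for a, l in zip(axes, lengths)])   # unit eigenvectors as columns
    return V @ np.diag(lengths) @ V.T

# With axes fixed along x, y and z this reduces to diag(vx, vy, vz), which is
# what the simplified branch of get_adp_from_calc above effectively returns.
print(adp_from_principal_axes([[0.02, 0, 0], [0, 0.03, 0], [0, 0, 0.04]]))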
Determines the quaternion representing the best possible transformation of two coordinate systems into each other using a least-squares approach. This function is used by the get_refined_rotation() function. | def get_best_quaternion(coordlist1, coordlist2):
M = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
if len(coordlist1) <= len(coordlist2):
number = len(coordlist1)
else:
number = len(coordlist2)
for i in xrange(number):
aaa = np.matrix(np.outer(coordlist1[i], coordlist2[i]))
M = M + aaa
N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2])
N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2])
N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2])
N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2])
N12 = float(M[1][:, 2] - M[2][:, 1])
N13 = float(M[2][:, 0] - M[0][:, 2])
N14 = float(M[0][:, 1] - M[1][:, 0])
N21 = float(N12)
N23 = float(M[0][:, 1] + M[1][:, 0])
N24 = float(M[2][:, 0] + M[0][:, 2])
N31 = float(N13)
N32 = float(N23)
N34 = float(M[1][:, 2] + M[2][:, 1])
N41 = float(N14)
N42 = float(N24)
N43 = float(N34)
N = np.matrix([[N11, N12, N13, N14],
[N21, N22, N23, N24],
[N31, N32, N33, N34],
[N41, N42, N43, N44]])
values, vectors = np.linalg.eig(N)
w = list(values)
quat = vectors[:, w.index(max(w))]
quat = np.array(quat).reshape(-1, ).tolist()
return quat, max(w) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_best_rotation(q1, q2, allow_reflection = False, only_xy = False):\n if q1.ndim != 2 or q2.ndim != 2:\n raise Exception(\"This only supports curves of shape (N,M) for N dimensions and M samples\")\n\n n = q1.shape[0]\n\n # if only_xy, strip everything but the x and y coordinates of q1 and q2\n if only_xy:\n _q1 = q1[0:2, :]\n _q2 = q2[0:2, :]\n else:\n _q1 = q1\n _q2 = q2\n\n _n = _q1.shape[0]\n A = _q1@_q2.T\n U, s, Vh = svd(A)\n S = eye(_n)\n\n # if reflections are not allowed and the determinant of A is negative,\n # then the entry corresponding to the smallest singular value is negated\n # as in the Kabsch algorithm\n if det(A) < 0 and not allow_reflection:\n S[-1, -1] = -1 # the last entry of the matrix becomes -1\n\n _R = U@S@Vh # optimal\n \n # if only_xy, the top left block of the matrix is _R and the rest is identity matrix\n if only_xy:\n R = eye(n)\n R[0:2, 0:2] = _R\n else:\n R = _R\n \n q2new = R@q2\n\n return (q2new, R)",
"def rotation_only(q_1: Q, h: Q) -> Q:\n h_4_rotation = vector_q(h)\n return rotation_and_or_boost(q_1, h_4_rotation)",
"def next_rotation(q_1: Q, q_2: Q) -> Q:\n q_1.check_representations(q_2)\n\n if not math.isclose(q_1.t, q_2.t):\n raise ValueError(f\"Oops, to be a rotation, the first values must be the same: {q_1.t} != {q_2.t}\")\n\n if not math.isclose(norm_squared(q_1).t, norm_squared(q_2).t):\n raise ValueError(f\"Oops, the norm squared of these two are not equal: {norm_squared(q_1).t} != {norm_squared(q_2).t}\")\n\n next_rot = product(q_1, q_2)\n v_abs_q_1 = abs_of_vector(q_1).t\n next_vector_normalized = normalize(vector_q(next_rot), v_abs_q_1)\n next_vector_normalized.t = q_1.t\n\n return next_vector_normalized",
"def setup_s_matrix(dq_1, dq_2):\n scalar_parts_1 = dq_1.scalar()\n scalar_parts_2 = dq_2.scalar()\n\n assert np.allclose(\n scalar_parts_1.dq, scalar_parts_2.dq,\n atol=5e-2), (\n \"\\ndq1:\\n{},\\nscalar_parts_1:\\n{},\\ndq2:\\n{},\\nscalar_parts_2:\\n{}\\n\"\n \"Scalar parts should always be equal.\".format(dq_1, scalar_parts_1, dq_2,\n scalar_parts_2))\n\n s_matrix = np.zeros([6, 8])\n s_matrix[0:3, 0:3] = skew_from_vector(dq_1.q_rot.q[0:-1] + dq_2.q_rot.q[0:-1])\n s_matrix[0:3, 3] = dq_1.q_rot.q[0:-1] - dq_2.q_rot.q[0:-1]\n s_matrix[3:6, 0:3] = skew_from_vector(dq_1.q_dual.q[0:-1] +\n dq_2.q_dual.q[0:-1])\n s_matrix[3:6, 3] = dq_1.q_dual.q[0:-1] - dq_2.q_dual.q[0:-1]\n s_matrix[3:6, 4:7] = skew_from_vector(dq_1.q_rot.q[0:-1] + dq_2.q_rot.q[0:-1])\n s_matrix[3:6, 7] = dq_1.q_rot.q[0:-1] - dq_2.q_rot.q[0:-1]\n # print(\"S: \\n{}\".format(s_matrix))\n\n rank_s_matrix = np.linalg.matrix_rank(s_matrix)\n assert rank_s_matrix <= 6, s_matrix\n return s_matrix.copy()",
"def next_rotation_randomized(q_1: Q, q_2: Q) -> Q:\n q_1.check_representations(q_2)\n\n if not math.isclose(q_1.t, q_2.t):\n raise ValueError(f\"Oops, to be a rotation, the first values must be the same: {q_1.t} != {q_2.t}\")\n\n if not math.isclose(norm_squared(q_1).t, norm_squared(q_2).t):\n raise ValueError(f\"Oops, the norm squared of these two are not equal: {norm_squared(q_1).t} != {norm_squared(q_2).t}\")\n\n next_rot = product(product(q_1, q_2), qrandom())\n v_abs_q_1 = abs_of_vector(q_1).t\n next_vector_normalized = normalize(vector_q(next_rot), v_abs_q_1)\n next_vector_normalized.t = q_1.t\n\n return next_vector_normalized",
"def quat_diff(q1, q2):\n q1 = np.asarray(q1)\n if np.dot(q1, q2) < 0:\n # Quaternions have opposite handedness, flip q1 since it's already an ndarray\n q1 = -1 * q1\n q_inv = q1 * np.array([1.0, -1.0, -1.0, -1.0])\n q_inv = q_inv / np.dot(q_inv, q_inv)\n\n # We only coare about the scalar component, compose only that\n z0 = q_inv[0] * q2[0] - q_inv[1] * q2[1] - q_inv[2] * q2[2] - q_inv[3] * q2[3]\n return 2 * float(np.arccos(min(1, max(-1, z0))))",
"def slerp(cls, q0, q1, amount=0.5):\n # Ensure quaternion inputs are unit quaternions and 0 <= amount <=1\n q0._fast_normalise()\n q1._fast_normalise()\n amount = np.clip(amount, 0, 1)\n\n dot = np.dot(q0.q, q1.q)\n\n # If the dot product is negative, slerp won't take the shorter path.\n # Note that v1 and -v1 are equivalent when the negation is applied to all four components.\n # Fix by reversing one quaternion\n if (dot < 0.0):\n q0.q = -q0.q\n dot = -dot\n\n # sin_theta_0 can not be zero\n if (dot > 0.9995):\n qr = Quaternion(q0.q + amount*(q1.q - q0.q))\n qr._fast_normalise()\n return qr\n\n theta_0 = np.arccos(dot) # Since dot is in range [0, 0.9995], np.arccos() is safe\n sin_theta_0 = np.sin(theta_0)\n\n theta = theta_0*amount\n sin_theta = np.sin(theta)\n\n s0 = np.cos(theta) - dot * sin_theta / sin_theta_0\n s1 = sin_theta / sin_theta_0\n qr = Quaternion((s0 * q0.q) + (s1 * q1.q))\n qr._fast_normalise()\n return qr",
"def rotation_and_or_boost(q_1: Q, h: Q, verbose=False) -> Q:\n q_1.check_representations(h)\n end_q_type = f\"{q_1.q_type}rotation/boost\"\n\n if not h.is_symbolic():\n\n if (not math.isclose(h.t, 0) and not equal(q0(), vector_q(h))) or equal(h, q0()):\n\n if not math.isclose(square(h).t, 1):\n # The scalar part of h will be used to calculate cosh(h.t) and sinh(h.t)\n # The normalized vector part will point sinh(t) in the direction of vector_q(h)\n h_scalar = scalar_q(h)\n h_nomralized_vector = normalize(vector_q(h))\n\n if np.abs(h_scalar.t) > 1:\n h_scalar = inverse(h_scalar)\n\n h_cosh = product(add(exp(h_scalar), exp(flip_sign(h_scalar))), q1(1.0 / 2.0))\n h_sinh = product(dif(exp(h_scalar), exp(flip_sign(h_scalar))), q1(1.0 / 2.0))\n\n h = add(h_cosh, product(h_nomralized_vector, h_sinh))\n\n if verbose:\n h.print_state(\"To do a Lorentz boost, adjusted value of h so scalar_q(h²) = 1\")\n\n else:\n if not math.isclose(norm_squared(h).t, 1):\n h = normalize(h)\n if verbose:\n h.print_state(\"To do a 3D rotation, adjusted value of h so scalar_q(h h^*) = 1\")\n\n triple_1 = triple_product(h, q_1, conj(h))\n triple_2 = conj(triple_product(h, h, q_1))\n triple_3 = conj(triple_product(conj(h), conj(h), q_1))\n\n triple_23 = dif(triple_2, triple_3)\n half_23 = product(triple_23, Q([0.5, 0, 0, 0], representation=q_1.representation))\n triple_123 = add(triple_1, half_23)\n triple_123.q_type = end_q_type\n triple_123.representation = q_1.representation\n\n return triple_123",
"def compute_error_minimizing_rotation(Points1, Points2):\r\n #TODO: implement me\r\n\r\n H_1_1 = 0\r\n H_1_2 = 0\r\n H_2_1 = 0\r\n H_2_2 = 0\r\n\r\n for t in range(1, len(Points1)):\r\n H_1_1 = H_1_1 + (Points1[t][0] * Points2[t][0])\r\n H_1_2 = H_1_2 + (Points1[t][1] * Points2[t][0])\r\n H_2_1 = H_2_1 + (Points1[t][0] * Points2[t][1])\r\n H_2_2 = H_2_2 + (Points1[t][1] * Points2[t][1])\r\n\r\n H = [[H_1_1,H_1_2],[H_2_1,H_2_2]]\r\n\r\n U, S, V = numpy.linalg.svd(H)\r\n\r\n V = numpy.transpose(V)\r\n\r\n R_1_1 = (U[0][0] * V[0][0]) +((U[0][1] * V[1][0]))\r\n R_1_2 = (U[0][0] * V[0][1]) +((U[0][1] * V[1][1]))\r\n R_2_1 = (U[1][0] * V[0][0]) +((U[1][1] * V[1][0]))\r\n R_2_2 = (U[1][0] * V[0][1]) +((U[1][1] * V[1][1]))\r\n\r\n R = [[R_1_1,R_1_2],[R_2_1,R_2_2]]\r\n\r\n return R",
"def find_rotation_and_seed_q(q1, q2, closed=0, rotation=True, method=\"DP\"):\n\n n, T = q1.shape\n scl = 4.\n minE = 4000\n if closed == 1:\n end_idx = int(floor(T/scl))\n scl = 4\n else:\n end_idx = 0\n \n for ctr in range(0, end_idx+1):\n if closed == 1:\n q2n = shift_f(q2, scl*ctr)\n else:\n q2n = q2\n \n if rotation:\n q2new, R = find_best_rotation(q1, q2n)\n else:\n q2new = q2n.copy()\n R = eye(n)\n\n # Reparam\n if norm(q1-q2new,'fro') > 0.0001:\n gam = optimum_reparam_curve(q2new, q1, 0.0, method)\n gamI = uf.invertGamma(gam)\n q2new = group_action_by_gamma(q2new,gamI)\n if closed == 1:\n q2new = project_curve(q2new)\n else:\n gamI = linspace(0,1,T)\n \n tmp = innerprod_q2(q1,q2new)\n if tmp > 1:\n tmp = 1\n if tmp < -1:\n tmp = -1\n Ec = arccos(tmp)\n if Ec < minE:\n Rbest = R\n q2best = q2new\n gamIbest = gamI\n minE = Ec\n\n return (q2best, Rbest, gamIbest)",
"def is_same_quaternion(q0, q1):\r\n q0 = numpy.array(q0)\r\n q1 = numpy.array(q1)\r\n return numpy.allclose(q0, q1) or numpy.allclose(q0, -q1)",
"def quaternion_product(q1, q2):\r\n Wa = q1[0]\r\n Wb = q2[0]\r\n Xa = q1[1]\r\n Xb = q2[1]\r\n Ya = q1[2]\r\n Yb = q2[2]\r\n Za = q1[3]\r\n Zb = q2[3]\r\n x = Xa * Wb + Ya * Zb - Za * Yb + Wa * Xb\r\n y = -Xa * Zb + Ya * Wb + Za * Xb + Wa * Yb\r\n z = Xa * Yb - Ya * Xb + Za * Wb + Wa * Zb\r\n w = -Xa * Xb - Ya * Yb - Za * Zb + Wa * Wb\r\n return [w, x, y, z]",
"def find_rotation_and_seed_unique(q1, q2, closed=0, lam=0.0, rotation=True, method=\"DP\"):\n\n n, T = q1.shape\n\n scl = 4.\n minE = 1000\n if closed == 1:\n end_idx = int(floor(T/scl))\n scl = 4\n else:\n end_idx = 0\n \n for ctr in range(0, end_idx+1):\n if closed == 1:\n q2n = shift_f(q2, scl*ctr)\n else:\n q2n = q2.copy()\n \n if rotation:\n q2new, R = find_best_rotation(q1, q2n)\n else:\n q2new = q2n\n R = eye(n)\n\n # Reparam\n if norm(q1-q2new,'fro') > 0.0001:\n gam = optimum_reparam_curve(q2new, q1, lam, method)\n gamI = uf.invertGamma(gam)\n p2n = q_to_curve(q2n)\n p2n = group_action_by_gamma_coord(p2n,gamI)\n q2new = curve_to_q(p2n)[0]\n if closed == 1:\n q2new = project_curve(q2new)\n else:\n gamI = linspace(0,1,T)\n \n tmp = innerprod_q2(q1,q2new)\n if tmp > 1:\n tmp = 1\n if tmp < -1:\n tmp = -1\n Ec = arccos(tmp)\n if Ec < minE:\n Rbest = R\n q2best = q2new\n gamIbest = gamI\n minE = Ec\n\n return (q2best, Rbest, gamIbest)",
"def qdist(q1: np.ndarray, q2: np.ndarray) -> float:\n _quaternions_guard_clauses(q1, q2)\n q1, q2 = np.copy(q1), np.copy(q2)\n if q1.ndim == 1:\n q1 /= np.linalg.norm(q1)\n q2 /= np.linalg.norm(q2)\n if np.allclose(q1, q2) or np.allclose(-q1, q2):\n return 0.0\n return min(np.linalg.norm(q1-q2), np.linalg.norm(q1+q2))\n q1 /= np.linalg.norm(q1, axis=1)[:, None]\n q2 /= np.linalg.norm(q2, axis=1)[:, None]\n return np.r_[[np.linalg.norm(q1-q2, axis=1)], [np.linalg.norm(q1+q2, axis=1)]].min(axis=0)",
"def quaternion_multiplication(self, q1, q2):\n\n # Unpack these quaternions\n a_scalar, a_vecx, a_vecy, a_vecz = torch.unbind(q1,\n dim=-1)\n b_scalar, b_vecx, b_vecy, b_vecz = torch.unbind(q2,\n dim=-1)\n\n r_scalar = a_scalar * b_scalar - a_vecx * b_vecx - a_vecy * b_vecy - a_vecz * b_vecz\n r_vecx = a_scalar * b_vecx + a_vecx * b_scalar + a_vecy * b_vecz - a_vecz * b_vecy\n r_vecy = a_scalar * b_vecy + a_vecy * b_scalar + a_vecz * b_vecx - a_vecx * b_vecz\n r_vecz = a_scalar * b_vecz + a_vecz * b_scalar + a_vecx * b_vecy - a_vecy * b_vecx\n\n \"\"\"\n a = torch.randn([2, 3, 4])\n b = torch.randn([2, 3, 4])\n print(a) # 2 matrices of size 3 x 4\n print(b) # 2 matrices of size 3 x 4\n print(torch.stack([a, b])) # 4 matrices of size 3 x 4, first a, then b\n \"\"\"\n return torch.stack(\n [r_scalar, r_vecx, r_vecy, r_vecz],\n dim=-1\n )",
"def compute_subspace_angles(S1, S2):\n # Check the if the input arrays are 1D or 2D\n if S1.ndim == 1:\n # mat1 = np.reshape(S1, (1,S1.size))\n mat1 = np.reshape(S1, (S1.size, 1))\n elif S1.ndim == 2:\n mat1 = S1\n else:\n raise ValueError('The function is intended only to handle 1D and 2D numpy arrays')\n if S2.ndim == 1:\n # mat2 = np.reshape(S2, (1,S2.size))\n mat2 = np.reshape(S2, (S2.size, 1))\n elif S2.ndim == 2:\n mat2 = S2\n else:\n raise ValueError('The function is intended only to handle 1D and 2D numpy arrays')\n\n\n # Do a QR Factorization of S1 and S2\n Q1, R1 = np.linalg.qr(mat1)\n # print('S1 = \\n', S1)\n # print('Q1 = \\n', Q1)\n Q2, R2 = np.linalg.qr(mat2)\n # print('S1 = \\n', S2)\n # print('Q2 = \\n', Q2)\n intmat = np.matmul(Q1.T, Q2)\n # print('intmat = \\n', intmat)\n Y, s, Z = np.linalg.svd(intmat)\n # print('Y = \\n', Y)\n # print('U = \\n', np.matmul(Q1, Y))\n # print('V = \\n', np.matmul(Q2, Y))\n # print('s = \\n', s)\n\n # NaN prevention check\n indices = np.where(s > 1) # Get the indices where the violation exisits\n for entry in indices: # Loop over these indices to fix the violation\n for i in entry:\n if s[i] - 1 < 1.e-13: # This violation limit is pulled out of thin air!\n s[i] = 1.0\n\n s_radians = np.arccos(s)\n\n return s_radians",
"def compare_quaternion_lists(new_quats, ref_quats, tol=0.05):\n nquats = len(ref_quats) # 3 for multiruby case\n\n # FIRST CHECK THAT NUMBER OF ORIENTATIONS MATCHES\n if len(new_quats) != nquats:\n raise RuntimeError(\n \"Incorrect number of orientations found; should be %d\" % nquats\n + \", currently found %d\" % len(new_quats)\n )\n\n # NEXT CHECK THE ACTUAL MISORIENTATIONS\n # !!! order may be different\n for i, nq in enumerate(new_quats):\n ang, mis = misorientation(nq.reshape(4, 1), ref_quats.T)\n if np.min(ang) > np.radians(tol):\n raise RuntimeError(\n \"Misorientation for test orientation %d \" % i\n + \"is greater than threshold\"\n )",
"def qslerp(q, x1, x2, geometric=False, eq_tolerance=1e-12):\n qi = deepcopy(q)\n x2i = deepcopy(x2)\n x1i = deepcopy(x1)\n\n # check that quaternions are consistent with generic quaternion invariants\n qi = qvalidate(qi,'qi','qslerp')\n\n if isinstance(qi, int):\n return qi\n\n # check that input quaternions are unit length\n qn = qnorm(qi)\n\n idx = np.argwhere(np.abs(qn - 1.0) > eq_tolerance).flatten()\n if len(idx) > 0:\n logging.error('At least one input quaternion is not unit length')\n return\n\n if qi.shape[0] != len(x1i):\n logging.error('Number of input abscissa values does not match the number of input quaternions')\n return\n\n # check that input abscissa values are monotonic\n if len(x1i) > 1:\n idx = np.argwhere((x1i[1:len(x1i)]-x1i[0:len(x1i)-1]) < 0)\n if len(idx) > 0:\n logging.error('input abscissa values not monotonic')\n return\n\n # check that output abscissa values are strictly monotonic\n if len(x2i) > 1:\n idx = np.argwhere((x2i[1:len(x2i)]-x2i[0:len(x2i)-1]) < 0)\n if len(idx) > 0:\n logging.error('output abscissa values not monotonic')\n return\n\n # construct the output array\n q_out = np.zeros((len(x2i), 4))\n\n # if output abscissa values are outside of the range of input abscissa\n # values constant extrapolation is used\n idx = np.argwhere(x2i < x1i[0]).flatten()\n\n if len(idx) > 0:\n q_out[idx, :] = np.array(idx.size*[qi[0, :]])\n\n idx = np.argwhere(x2i > x1i[-1]).flatten()\n\n if len(idx) > 0:\n q_out[idx, :] = np.array(idx.size*[qi[-1, :]])\n\n out_idx = np.argwhere((x2i >= x1i[0]) & (x2i <= x1i[-1])).flatten()\n\n if len(out_idx) == 0:\n return q_out.reshape((-1, 4))\n\n x2i = x2i[out_idx]\n\n # construct arguments to the slerp function, this includes the source\n # quaternion list, the target quaternions list, and the proportion of\n # interpolation list for each quaternion pair. 
They should all have\n # the same number of elements as the output abscissa value list\n\n t_temp = interpol(np.arange(qi.shape[0], dtype='float64'), x1i, x2i)\n\n t_list = t_temp % 1.0\n\n q_idx = np.int64(np.floor(t_temp))\n\n # if the last abscissa values are identical,the indexing scheme to\n # generate the q_list could generate an overflow, the two conditionals\n # below prevent this\n idx = np.argwhere(np.abs(t_list) <= eq_tolerance).flatten() # where t_list =~ 0.0\n if len(idx) > 0:\n q_out[out_idx[idx], :] = qi[q_idx[idx], :]\n\n slerp_idx = np.argwhere(np.abs(t_list) > eq_tolerance).flatten() # where t_list !=~ 0.0\n\n # if there is nothing left, then we're done\n if slerp_idx.size == 0:\n return q_out.reshape((-1, 4))\n\n q_idx = q_idx[slerp_idx]\n out_idx = out_idx[slerp_idx]\n t_list = t_list[slerp_idx]\n\n q1_list = qi[q_idx, :]\n\n q2_list = qi[q_idx + 1, :]\n\n # calculate the dot product which is needed to to flip the\n # appropriate quaternions to guarantee interpolation is done along the\n # shortest path\n dotp = qdotp(q1_list, q2_list)\n\n if dotp.ndim == 0 and dotp == -1:\n return -1\n\n # the following code flips quaternions in q2_list to ensure the\n # shortest path is followed\n idx = np.argwhere(dotp < 0.0).flatten()\n\n if idx.size != 0:\n q2_list[idx, :] = -q2_list[idx, :]\n\n # interpolation cannot be performed on colinear quaternions\n # it is assumed that colinear quaternions will be returned unchanged\n # since dotp(q1,q2) = cos(angle between q1,q2) if dotp = 1.0 the\n # quaternions are colinear\n idx = np.argwhere(np.abs(dotp - 1.0) <= eq_tolerance).flatten() # where dotp = 1.0\n\n # store colinear quaternions into output array\n if idx.size != 0:\n q_out[out_idx[idx], :] = q1_list[idx, :]\n\n # copy non-colinear quaternions for processing\n idx = np.argwhere(np.abs(dotp - 1.0) > eq_tolerance).flatten()\n\n if idx.size == 0:\n return q_out.reshape((-1, 4)) # if no non-colinear quaternions are left, we are done\n\n dotp = dotp[idx]\n t_list = t_list[idx]\n q1_list = q1_list[idx, :]\n q2_list = q2_list[idx, :]\n out_idx = out_idx[idx]\n\n # now the actual processing begins\n\n # testing both methods to verify results\n if geometric:\n theta = np.arccos(dotp)\n\n sin_theta = np.sin(theta)\n\n theta_t = theta * t_list\n\n co1 = np.sin(theta - theta_t) / sin_theta\n co2 = np.sin(theta_t) / sin_theta\n\n q_out[out_idx, 0] = co1 * q1_list[:, 0] + co2 * q2_list[:, 0]\n q_out[out_idx, 1] = co1 * q1_list[:, 1] + co2 * q2_list[:, 1]\n q_out[out_idx, 2] = co1 * q1_list[:, 2] + co2 * q2_list[:, 2]\n q_out[out_idx, 3] = co1 * q1_list[:, 3] + co2 * q2_list[:, 3]\n else:\n # slerp will be performed by calculating:\n # ((q2*(q1^-1))^t)*q1\n # since the quaternions are unit q1^-1 = conjugate(q1)\n # exponentiation can be calculated by transforming to\n # polar form cos(theta*t)+v*sin(theta*t)\n # theta = acos(q[0])\n # NOTE: this potentially more numerically stable implementation needs\n # to be verified by comparison to the geometric slerp\n q1_conj = qconj(q1_list)\n\n q2_q1_prod = qdecompose(qmult(q2_list, q1_conj))\n\n if isinstance(q2_q1_prod, int):\n return -1\n\n # sometimes a dimension disappears.\n if q2_q1_prod.ndim == 1 and q2_q1_prod.size == 4:\n q2_q1_prod = q2_q1_prod.reshape((1, 4))\n\n theta_scale = q2_q1_prod[:, 0] * t_list\n\n q_total = qmult(qcompose(q2_q1_prod[:, 1:4], theta_scale), q1_list)\n\n if isinstance(q_total, int):\n return -1\n\n q_out[out_idx, :] = q_total\n\n return qnormalize(q_out)",
"def make_q(v0, v2):\n return (v0.y - v2.y)/(v0.x - v2.x)",
"def test_quaternion_dist_for_almost_identical_rotations():\n random_state = np.random.RandomState(0)\n\n for _ in range(5):\n a = pr.random_axis_angle(random_state)\n q1 = pr.quaternion_from_axis_angle(a)\n r = 1e-4 * random_state.randn(4)\n q2 = -pr.quaternion_from_axis_angle(a + r)\n assert_almost_equal(pr.quaternion_dist(q1, q2), 0.0, places=3)",
"def test_quaternion_dist():\n random_state = np.random.RandomState(0)\n\n for _ in range(5):\n q1 = pr.quaternion_from_axis_angle(pr.random_axis_angle(random_state))\n q2 = pr.quaternion_from_axis_angle(pr.random_axis_angle(random_state))\n q1_to_q1 = pr.quaternion_dist(q1, q1)\n assert_almost_equal(q1_to_q1, 0.0)\n q2_to_q2 = pr.quaternion_dist(q2, q2)\n assert_almost_equal(q2_to_q2, 0.0)\n q1_to_q2 = pr.quaternion_dist(q1, q2)\n q2_to_q1 = pr.quaternion_dist(q2, q1)\n assert_almost_equal(q1_to_q2, q2_to_q1)\n assert_greater(2.0 * np.pi, q1_to_q2)",
"def quaternion_difference(q1, q2):\n q1_abs = np.ndarray(4)\n q1_con = np.ndarray(4)\n q1_inv = np.ndarray(4)\n\n q1_con[0] = q1[0]\n q1_con[1] = -q1[1]\n q1_con[2] = -q1[2]\n q1_con[3] = -q1[3]\n\n functions.mju_mulQuat(q1_abs, q1, q1_con)\n q1_abs[0] += q1_abs[1] + q1_abs[2] + q1_abs[3]\n q1_inv = q1_con / q1_abs[0]\n\n q_diff = np.ndarray(4)\n functions.mju_mulQuat(q_diff, q2, q1_inv)\n\n return q_diff",
"def test_quaternion_dist_for_identical_rotations():\n random_state = np.random.RandomState(0)\n\n for _ in range(5):\n q = pr.quaternion_from_axis_angle(pr.random_axis_angle(random_state))\n assert_array_almost_equal(pr.matrix_from_quaternion(q),\n pr.matrix_from_quaternion(-q))\n assert_equal(pr.quaternion_dist(q, -q), 0.0)",
"def quaternion_subtraction(self, q1, q2):\n\n # Unpack these quaternions\n a_scalar, a_vecx, a_vecy, a_vecz = torch.unbind(q1,\n dim=-1)\n b_scalar, b_vecx, b_vecy, b_vecz = torch.unbind(q2,\n dim=-1)\n\n r_scalar = a_scalar - b_scalar\n r_vecx = a_vecx - b_vecx\n r_vecy = a_vecy - b_vecy\n r_vecz = a_vecz - b_vecz\n\n return torch.stack(\n [r_scalar, r_vecx, r_vecy, r_vecz],\n dim=-1\n )",
"def test_align_sanity(self):\n # QWERTY resemblance matrix:\n R = qwerty_distance()\n diff, u, r = min_difference_align(\"polynomial\", \"exponential\", R)\n # Warning: we may (read: 'will') use another matrix!\n self.assertEqual(diff, 15)\n # Warning: there may be other optimal matchings!\n self.assertEqual(u, '--polyn-om-ial')\n self.assertEqual(r, 'exp-o-ne-ntial')",
"def rotation_matrix_to_quaternion(rotation_matrix):\n trace = np.trace(rotation_matrix)\n\n if trace > 0:\n S = np.sqrt(trace + 1) * 2\n q_w = 0.25 * S\n q_x = (rotation_matrix[2, 1] - rotation_matrix[1, 2]) / S\n q_y = (rotation_matrix[0, 2] - rotation_matrix[2, 0]) / S\n q_z = (rotation_matrix[1, 0] - rotation_matrix[0, 1]) / S\n return np.asarray([q_w, q_x, q_y, q_z])\n\n elif ((rotation_matrix[0, 0] > rotation_matrix[1, 1]) and\n (rotation_matrix[0, 0] > rotation_matrix[2, 2])):\n\n S = np.sqrt(1.0 + rotation_matrix[0, 0] - rotation_matrix[1, 1] -\n rotation_matrix[2, 2]) * 2\n q_w = (rotation_matrix[2, 1] - rotation_matrix[1, 2]) / S\n q_x = 0.25 * S\n q_y = (rotation_matrix[0, 1] + rotation_matrix[1, 0]) / S\n q_z = (rotation_matrix[0, 2] + rotation_matrix[2, 0]) / S\n\n elif rotation_matrix[1, 1] > rotation_matrix[2, 2]:\n\n S = np.sqrt(1.0 + rotation_matrix[1, 1] - rotation_matrix[0, 0] -\n rotation_matrix[2, 2]) * 2\n q_w = (rotation_matrix[0, 2] - rotation_matrix[2, 0]) / S\n q_x = (rotation_matrix[0, 1] + rotation_matrix[1, 0]) / S\n q_y = 0.25 * S\n q_z = (rotation_matrix[1, 2] + rotation_matrix[2, 1]) / S\n\n else:\n S = np.sqrt(1.0 + rotation_matrix[2, 2] - rotation_matrix[0, 0] -\n rotation_matrix[1, 1]) * 2\n q_w = (rotation_matrix[1, 0] - rotation_matrix[0, 1]) / S\n q_x = (rotation_matrix[0, 2] + rotation_matrix[2, 0]) / S\n q_y = (rotation_matrix[1, 2] + rotation_matrix[2, 1]) / S\n q_z = 0.25 * S\n\n if q_w >= 0:\n return np.asarray([q_w, q_x, q_y, q_z])\n else:\n return -1 * np.asarray([q_w, q_x, q_y, q_z])",
"def test_quaternion_rotation_consistent_with_multiplication():\n random_state = np.random.RandomState(1)\n for _ in range(5):\n v = pr.random_vector(random_state)\n q = pr.random_quaternion(random_state)\n v_im = np.hstack(((0.0,), v))\n qv_mult = pr.concatenate_quaternions(\n q, pr.concatenate_quaternions(v_im, pr.q_conj(q)))[1:]\n qv_rot = pr.q_prod_vector(q, v)\n assert_array_almost_equal(qv_mult, qv_rot)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def test_interpolate_quaternion():\n n_steps = 10\n random_state = np.random.RandomState(0)\n a1 = pr.random_axis_angle(random_state)\n a2 = pr.random_axis_angle(random_state)\n q1 = pr.quaternion_from_axis_angle(a1)\n q2 = pr.quaternion_from_axis_angle(a2)\n\n traj_q = [pr.quaternion_slerp(q1, q2, t)\n for t in np.linspace(0, 1, n_steps)]\n traj_R = [pr.matrix_from_quaternion(q) for q in traj_q]\n R_diff = np.diff(traj_R, axis=0)\n R_diff_norms = [np.linalg.norm(Rd) for Rd in R_diff]\n assert_array_almost_equal(R_diff_norms,\n R_diff_norms[0] * np.ones(n_steps - 1))"
] | [
"0.71130097",
"0.6103423",
"0.60989857",
"0.6034236",
"0.5984659",
"0.5808696",
"0.5793859",
"0.5789661",
"0.57519877",
"0.57339966",
"0.5732701",
"0.56862444",
"0.56858575",
"0.56503344",
"0.563665",
"0.5622966",
"0.554343",
"0.5540692",
"0.5537474",
"0.55108356",
"0.55057293",
"0.5496861",
"0.54470605",
"0.54389596",
"0.5432244",
"0.54125637",
"0.53605336",
"0.5350723",
"0.5350723",
"0.53500175"
] | 0.6384881 | 1 |
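A hedged, self-contained check of the eigenvalue step used in get_best_quaternion above (Horn's quaternion method): build the symmetric 4x4 N matrix from the cross-covariance of the two coordinate lists and take the eigenvector of the largest eigenvalue. The function name, the test rotation and the Python 3 / NumPy array style are illustrative assumptions; the record itself uses Python 2 (xrange) and np.matrix.

import numpy as np

def best_quaternion(coords1, coords2):
    # Cross-covariance of the paired coordinates, then Horn's symmetric 4x4 matrix.
    M = sum(np.outer(a, b) for a, b in zip(coords1, coords2))
    N = np.array([
        [M[0, 0] + M[1, 1] + M[2, 2], M[1, 2] - M[2, 1], M[2, 0] - M[0, 2], M[0, 1] - M[1, 0]],
        [M[1, 2] - M[2, 1], M[0, 0] - M[1, 1] - M[2, 2], M[0, 1] + M[1, 0], M[2, 0] + M[0, 2]],
        [M[2, 0] - M[0, 2], M[0, 1] + M[1, 0], -M[0, 0] + M[1, 1] - M[2, 2], M[1, 2] + M[2, 1]],
        [M[0, 1] - M[1, 0], M[2, 0] + M[0, 2], M[1, 2] + M[2, 1], -M[0, 0] - M[1, 1] + M[2, 2]],
    ])
    w, v = np.linalg.eigh(N)
    return v[:, np.argmax(w)]            # unit quaternion (w, x, y, z), defined up to sign

# Recover a 90-degree rotation about z from three paired points:
R = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
pts = np.array([[1., 0., 0.], [0., 1., 0.], [1., 1., 1.]])
print(np.round(best_quaternion(pts, pts @ R.T), 3))   # ~ (0.707, 0, 0, 0.707) up to sign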
Returns the rotation matrix equivalent of the given quaternion. This function is used by the get_refined_rotation() function. | def get_rotation_matrix_from_quaternion(q):
R = np.matrix([[q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3],
2 * (q[1] * q[2] - q[0] * q[3]),
2 * (q[1] * q[3] + q[0] * q[2])],
[2 * (q[2] * q[1] + q[0] * q[3]),
q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3],
2 * (q[2] * q[3] - q[0] * q[1])],
[2 * (q[3] * q[1] - q[0] * q[2]),
2 * (q[3] * q[2] + q[0] * q[1]),
q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3]]])
return R | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def quaternion_to_rotation_matrix(quaternion):\n\n q_w, q_x, q_y, q_z = quaternion\n sqw, sqx, sqy, sqz = np.square(quaternion)\n norm = (sqx + sqy + sqz + sqw)\n rotation_matrix = np.zeros((3, 3))\n\n # division of square length if quaternion is not already normalized\n rotation_matrix[0, 0] = (+sqx - sqy - sqz + sqw) / norm\n rotation_matrix[1, 1] = (-sqx + sqy - sqz + sqw) / norm\n rotation_matrix[2, 2] = (-sqx - sqy + sqz + sqw) / norm\n\n tmp1 = q_x * q_y\n tmp2 = q_z * q_w\n rotation_matrix[1, 0] = 2.0 * (tmp1 + tmp2) / norm\n rotation_matrix[0, 1] = 2.0 * (tmp1 - tmp2) / norm\n\n tmp1 = q_x * q_z\n tmp2 = q_y * q_w\n rotation_matrix[2, 0] = 2.0 * (tmp1 - tmp2) / norm\n rotation_matrix[0, 2] = 2.0 * (tmp1 + tmp2) / norm\n tmp1 = q_y * q_z\n tmp2 = q_x * q_w\n rotation_matrix[2, 1] = 2.0 * (tmp1 + tmp2) / norm\n rotation_matrix[1, 2] = 2.0 * (tmp1 - tmp2) / norm\n return rotation_matrix",
"def quaternion_matrix(quaternion):\r\n q = numpy.array(quaternion, dtype=numpy.float64, copy=True)\r\n n = numpy.dot(q, q)\r\n if n < _EPS:\r\n return numpy.identity(4)\r\n q *= math.sqrt(2.0 / n)\r\n q = numpy.outer(q, q)\r\n return numpy.array([\r\n [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],\r\n [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],\r\n [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],\r\n [ 0.0, 0.0, 0.0, 1.0]])",
"def quat_to_rotmat(quat):\n norm_quat = quat\n norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]\n B = quat.size(0)\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w * x, w * y, w * z\n xy, xz, yz = x * y, x * z, y * z\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat",
"def quat_to_rotmat(quat): \n norm_quat = quat\n norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w*x, w*y, w*z\n xy, xz, yz = x*y, x*z, y*z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,\n 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,\n 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat",
"def quaternion_matrix(quaternion):\n q = np.array(quaternion, dtype=np.float64, copy=True)\n n = np.dot(q, q)\n if n < _EPS:\n return np.identity(4)\n q *= math.sqrt(2.0 / n)\n q = np.outer(q, q)\n return np.array([\n [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],\n [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],\n [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],\n [ 0.0, 0.0, 0.0, 1.0]])",
"def quat_to_rotmat(quat):\n norm_quat = quat\n norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w * x, w * y, w * z\n xy, xz, yz = x * y, x * z, y * z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,\n 2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,\n 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat",
"def quaternion_to_Rot(q: array):\n\n # Create a vector from the quaternion parameters (and check dimensions)\n q = array(q).reshape(4)\n\n # Normalize the quaternion\n q = divide(q, sqrt(sum(power(q, 2))))\n\n # Auxiliary matrix\n q_hat = zeros((3, 3))\n q_hat[0, 1] = -q[3]\n q_hat[0, 2] = q[2]\n q_hat[1, 2] = -q[1]\n q_hat[1, 0] = q[3]\n q_hat[2, 0] = -q[2]\n q_hat[2, 1] = q[1]\n\n # Return the rotation matrix\n return eye(3) + 2 * dot(q_hat, q_hat) + 2 * dot(q[0], q_hat)",
"def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]",
"def quat2mat(quat):\n norm_quat = torch.cat([quat[:, :1].detach() * 0 + 1, quat], dim=1)\n norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w * x, w * y, w * z\n xy, xz, yz = x * y, x * z, y * z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,\n 2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,\n 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat",
"def quat2mat(quat):\r\n norm_quat = torch.cat([quat[:,:1].detach()*0 + 1, quat], dim=1)\r\n norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)\r\n w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]\r\n\r\n B = quat.size(0)\r\n\r\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\r\n wx, wy, wz = w*x, w*y, w*z\r\n xy, xz, yz = x*y, x*z, y*z\r\n\r\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,\r\n 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,\r\n 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).reshape(B, 3, 3)\r\n return rotMat",
"def quat2mat(quat):\n norm_quat = torch.cat([quat[:,:1].detach()*0 + 1, quat], dim=1)\n norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w*x, w*y, w*z\n xy, xz, yz = x*y, x*z, y*z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,\n 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,\n 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).reshape(B, 3, 3)\n return rotMat",
"def quaternion_to_matrix(quaternions):\n r, i, j, k = torch.unbind(quaternions, -1)\n two_s = 2.0 / (quaternions * quaternions).sum(-1)\n\n o = torch.stack(\n (\n 1 - two_s * (j * j + k * k),\n two_s * (i * j - k * r),\n two_s * (i * k + j * r),\n two_s * (i * j + k * r),\n 1 - two_s * (i * i + k * k),\n two_s * (j * k - i * r),\n two_s * (i * k - j * r),\n two_s * (j * k + i * r),\n 1 - two_s * (i * i + j * j),\n ),\n -1,\n )\n return o.reshape(quaternions.shape[:-1] + (3, 3))",
"def _rotate_quaternion(self, q):\n self._normalise()\n return self * q * self.conjugate",
"def quaternion_to_rotation_matrix(q0, q1, q2, q3) -> np:\n\n # First row of the rotation matrix\n r00 = 2 * (q0 * q0 + q1 * q1) - 1\n r01 = 2 * (q1 * q2 - q0 * q3)\n r02 = 2 * (q1 * q3 + q0 * q2)\n\n # Second row of the rotation matrix\n r10 = 2 * (q1 * q2 + q0 * q3)\n r11 = 2 * (q0 * q0 + q2 * q2) - 1\n r12 = 2 * (q2 * q3 - q0 * q1)\n\n # Third row of the rotation matrix\n r20 = 2 * (q1 * q3 - q0 * q2)\n r21 = 2 * (q2 * q3 + q0 * q1)\n r22 = 2 * (q0 * q0 + q3 * q3) - 1\n\n # 3x3 rotation matrix\n rot_matrix = np.array([[r00, r01, r02],\n [r10, r11, r12],\n [r20, r21, r22]])\n\n return rot_matrix",
"def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot",
"def rotation_mat_to_quat(R, q):\n q[0] = np.sqrt(R[0] + R[4] + R[8]) / 2\n q[1] = (R[7] - R[5]) / (4. * q[0])\n q[2] = (R[2] - R[6]) / (4. * q[0])\n q[3] = (R[3] - R[1]) / (4. * q[0])",
"def rotation_matrix_to_quaternion(rotation_matrix):\n trace = np.trace(rotation_matrix)\n\n if trace > 0:\n S = np.sqrt(trace + 1) * 2\n q_w = 0.25 * S\n q_x = (rotation_matrix[2, 1] - rotation_matrix[1, 2]) / S\n q_y = (rotation_matrix[0, 2] - rotation_matrix[2, 0]) / S\n q_z = (rotation_matrix[1, 0] - rotation_matrix[0, 1]) / S\n return np.asarray([q_w, q_x, q_y, q_z])\n\n elif ((rotation_matrix[0, 0] > rotation_matrix[1, 1]) and\n (rotation_matrix[0, 0] > rotation_matrix[2, 2])):\n\n S = np.sqrt(1.0 + rotation_matrix[0, 0] - rotation_matrix[1, 1] -\n rotation_matrix[2, 2]) * 2\n q_w = (rotation_matrix[2, 1] - rotation_matrix[1, 2]) / S\n q_x = 0.25 * S\n q_y = (rotation_matrix[0, 1] + rotation_matrix[1, 0]) / S\n q_z = (rotation_matrix[0, 2] + rotation_matrix[2, 0]) / S\n\n elif rotation_matrix[1, 1] > rotation_matrix[2, 2]:\n\n S = np.sqrt(1.0 + rotation_matrix[1, 1] - rotation_matrix[0, 0] -\n rotation_matrix[2, 2]) * 2\n q_w = (rotation_matrix[0, 2] - rotation_matrix[2, 0]) / S\n q_x = (rotation_matrix[0, 1] + rotation_matrix[1, 0]) / S\n q_y = 0.25 * S\n q_z = (rotation_matrix[1, 2] + rotation_matrix[2, 1]) / S\n\n else:\n S = np.sqrt(1.0 + rotation_matrix[2, 2] - rotation_matrix[0, 0] -\n rotation_matrix[1, 1]) * 2\n q_w = (rotation_matrix[1, 0] - rotation_matrix[0, 1]) / S\n q_x = (rotation_matrix[0, 2] + rotation_matrix[2, 0]) / S\n q_y = (rotation_matrix[1, 2] + rotation_matrix[2, 1]) / S\n q_z = 0.25 * S\n\n if q_w >= 0:\n return np.asarray([q_w, q_x, q_y, q_z])\n else:\n return -1 * np.asarray([q_w, q_x, q_y, q_z])",
"def _cubelet_rotation_matrix(self, cubelet_meta_info, qpos_array):\n euler_angles = qpos_array[cubelet_meta_info[\"euler_qpos\"]]\n return rotation.euler2mat(euler_angles)",
"def rotation_matrix_to_quaternion(rotation_matrix: np) -> object:\n\n cosine_for_pitch = math.sqrt(rotation_matrix[0][0] ** 2 + rotation_matrix[1][0] ** 2)\n is_singular = cosine_for_pitch < 10 ** -6\n if not is_singular:\n yaw = math.atan2(rotation_matrix[1][0], rotation_matrix[0][0])\n pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch)\n roll = math.atan2(rotation_matrix[2][1], rotation_matrix[2][2])\n else:\n yaw = math.atan2(-rotation_matrix[1][2], rotation_matrix[1][1])\n pitch = math.atan2(-rotation_matrix[2][0], cosine_for_pitch)\n roll = 0\n\n e = (yaw, pitch, roll)\n\n return euler_to_quaternion(e)",
"def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = angles[0:3]\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def _get_rotation_matrix(transform):\n # caution: UE4 is using left-hand ortation order\n roll = np.deg2rad(-transform.rotation.roll)\n pitch = np.deg2rad(-transform.rotation.pitch)\n yaw = np.deg2rad(transform.rotation.yaw)\n sr, cr = np.sin(roll), np.cos(roll)\n sp, cp = np.sin(pitch), np.cos(pitch)\n sy, cy = np.sin(yaw), np.cos(yaw)\n rotation_matrix = np.array([[cy * cp, -sy * sr + cy * sp * sr, cy * sp * cr + sy * sr],\n [sy * cp, cy * sp * sr + cy * sr, -cy * sr + sy * sp * cr],\n [-sp, cp * sr, cp * cr]])\n return rotation_matrix",
"def quaternion_to_angle(self, q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw",
"def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)",
"def quat2mat(self,quat):\n\t quat = np.asarray(quat, dtype=np.float64)\n\t assert quat.shape[-1] == 4, \"Invalid shape quat {}\".format(quat)\n\n\t w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]\n\t Nq = np.sum(quat * quat, axis=-1)\n\t s = 2.0 / Nq\n\t X, Y, Z = x * s, y * s, z * s\n\t wX, wY, wZ = w * X, w * Y, w * Z\n\t xX, xY, xZ = x * X, x * Y, x * Z\n\t yY, yZ, zZ = y * Y, y * Z, z * Z\n\n\t mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)\n\t mat[..., 0, 0] = 1.0 - (yY + zZ)\n\t mat[..., 0, 1] = xY - wZ\n\t mat[..., 0, 2] = xZ + wY\n\t mat[..., 1, 0] = xY + wZ\n\t mat[..., 1, 1] = 1.0 - (xX + zZ)\n\t mat[..., 1, 2] = yZ - wX\n\t mat[..., 2, 0] = xZ - wY\n\t mat[..., 2, 1] = yZ + wX\n\t mat[..., 2, 2] = 1.0 - (xX + yY)\n\t return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))",
"def quat2mat(self,quat):\n quat = np.asarray(quat, dtype=np.float64)\n assert quat.shape[-1] == 4, \"Invalid shape quat {}\".format(quat)\n\n w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]\n Nq = np.sum(quat * quat, axis=-1)\n s = 2.0 / Nq\n X, Y, Z = x * s, y * s, z * s\n wX, wY, wZ = w * X, w * Y, w * Z\n xX, xY, xZ = x * X, x * Y, x * Z\n yY, yZ, zZ = y * Y, y * Z, z * Z\n\n mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)\n mat[..., 0, 0] = 1.0 - (yY + zZ)\n mat[..., 0, 1] = xY - wZ\n mat[..., 0, 2] = xZ + wY\n mat[..., 1, 0] = xY + wZ\n mat[..., 1, 1] = 1.0 - (xX + zZ)\n mat[..., 1, 2] = yZ - wX\n mat[..., 2, 0] = xZ - wY\n mat[..., 2, 1] = yZ + wX\n mat[..., 2, 2] = 1.0 - (xX + yY)\n return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi) = angles[0:2]\n omega = np.deg2rad(self.omega)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def make_q_rot_matrix(self, angles):\n #For other instruments, this method may be different.\n (phi, chi, omega) = self.get_phi_chi_omega(angles)\n\n #In Q space, detector coverage rotates OPPOSITE to what the real space rotation is.\n #Because that is where the detectors and incident beam go, AS SEEN BY THE SAMPLE.\n\n #So wee need to invert the sample orientation matrix to find the one that will apply to the Q vector.\n return numpy_utils.opposite_rotation_matrix(phi, chi, omega)",
"def py_rotation_from_matrix(matrix):\n return np.float32(quat2angle_axis(mat2quat(matrix)))"
] | [
"0.8077829",
"0.79751414",
"0.7973847",
"0.797261",
"0.79455817",
"0.79306656",
"0.79097867",
"0.780534",
"0.7727341",
"0.77201724",
"0.77022475",
"0.7419111",
"0.74067664",
"0.7311962",
"0.7208562",
"0.7142397",
"0.71323454",
"0.71113116",
"0.70910096",
"0.7048041",
"0.69940317",
"0.6971428",
"0.6941373",
"0.69386965",
"0.69329625",
"0.6930702",
"0.69224066",
"0.6919321",
"0.6919321",
"0.69137985"
] | 0.8253612 | 0 |
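A short sanity check that applies to any quaternion-to-rotation-matrix routine in the scalar-first (w, x, y, z) convention used above: for a normalized quaternion the result must be orthogonal with determinant +1. The call below assumes the function from this record is importable.

import numpy as np

q = np.random.randn(4)
q /= np.linalg.norm(q)                                 # unit quaternion, scalar first
R = np.asarray(get_rotation_matrix_from_quaternion(q))
assert np.allclose(R @ R.T, np.eye(3), atol=1e-10)     # orthogonal
assert np.isclose(np.linalg.det(R), 1.0)               # proper rotation, no reflection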
Calculates the geometrical center of a set of points. | def get_geom_center(coordlist):
return sum(coordlist) / len(coordlist) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def centre_of_points(list_of_points):\n\n cp = np.average(list_of_points, axis=0)\n return cp",
"def pointcenter(x):\n return point(x)",
"def center(self):\n points = set()\n for face in self._points:\n points.update(face)\n x_points = [point[0] for point in points]\n y_points = [point[1] for point in points]\n z_points = [point[2] for point in points]\n return \\\n (np.average(x_points), np.average(y_points), np.average(z_points))",
"def calculate_center(self):\n return [(self.startX + self.endX) / 2., (self.startY + self.endY) / 2.]",
"def centre(self):\n n = len(self.point)\n return Point(\n sum(map(lambda p: p.x, self.point)) / n,\n sum(map(lambda p: p.y, self.point)) / n\n )",
"def calculateCenter(self):\n y_avg = int(sum(self.points[:,0])/float(len(self.points)))\n x_avg = int(sum(self.points[:,1])/float(len(self.points)))\n self.center = (x_avg, y_avg)\n return(x_avg,y_avg)",
"def center(self):\n if not hasattr(self, '_center'):\n self._center = np.unique(self.points, axis=0).mean(axis=0)\n return self._center",
"def center_point(self) -> tuple:\n return (self.min_lat + self.max_lat) / 2, (self.min_lon + self.max_lon) / 2",
"def get_centroid(points):\n\n xs, ys = points[:, 0], points[:, 1]\n\n a = xs[:-1] * ys[1:]\n b = ys[:-1] * xs[1:]\n\n A = numpy.sum(a - b) / 2.\n\n cx = xs[:-1] + xs[1:]\n cy = ys[:-1] + ys[1:]\n\n Cx = numpy.sum(cx * (a - b)) / (6. * A)\n Cy = numpy.sum(cy * (a - b)) / (6. * A)\n\n return Cx, Cy",
"def get_centre(self):\n # just get the centroid\n # perhaps try something like:\n # https://github.com/mapbox/polylabel/blob/master/polylabel.js\n # in the future\n coords = np.array([(n.x, n.y) for n in self.nodes])\n centre_x = coords[:, 0].mean()\n centre_y = coords[:, 1].mean()\n return centre_x, centre_y",
"def getCenter(self):\n return Point.average(self.points)",
"def calc_centroid(self, points):\n\t\tself.canvas.create_polygon(points)\n\t\tx = [i[0] for i in points] # all the math is wrong :(\n\t\ty = [j[1] for j in points]\n\n\t\tarea = x[0] * (y[0] - y[-1])\n\t\tx_hat = (x[0] ** 2) * (y[0] - y[-1]) / (2) \n\t\ty_hat = -(y[0] ** 2) * (x[0] - x[-1]) / (2)\n\n\t\tfor i in range(1, len(points) - 1):\n\t\t\tdt = length(x[i], y[i], x[i - 1], y[i - 1])\n\t\t\tdy = y[i] - y[i - 1]\n\t\t\tdx = x[i] - x[i - 1]\n\t\t\tarea += 2 * x[i] * dy\n\t\t\tx_hat += (x[i] ** 2) * dy\n\t\t\ty_hat -= (y[i] ** 2) * dx\n\n\t\tarea += x[-1] * (y[-1] - y[-2])\n\t\tx_hat += (x[-1] ** 2) * (y[-1] - y[-2]) / 2\n\t\ty_hat -= (y[-1] ** 2) * (x[-1] - x[-2]) / 2\n\t\tarea /= 2\n\t\tx_hat /=2\n\t\ty_hat /= 2\n\t\tprint(\"Area: %s\\nX: %s\\nY: %s\" % (area, x_hat/area, y_hat/area))\n\t\treturn x_hat/area, y_hat/area",
"def center(x):\n if ispoint(x):\n # return pointcenter(x)\n return point(x)\n elif isline(x):\n return linecenter(x)\n elif isarc(x):\n return arccenter(x)\n elif ispoly(x):\n return polycenter(x)\n elif isgeomlist(x):\n pl = []\n for g in x:\n pl.append(center(g))\n return polycenter(pl)\n else:\n raise ValueError(\"inappropriate type for center(): \",format(x))",
"def getcenter(self):\n return self.centro.cartesianas()",
"def _get_center_pos(self):\n if not hasattr(self, 'lon_center'):\n raise ValueError('ERROR: You need to specify first the center position!')\n d = np.abs((self.x.lon - self.lon_center) ** 2. + (self.x.lat - self.lat_center) ** 2.)\n dmin = d.min()\n m = d == dmin\n\n idx = np.indices(d.shape)\n i = idx[0][m][0]\n j = idx[1][m][0]\n\n if (np.abs(1. - self.x.lon[i, j] / self.lon_center) > 0.05) or (np.abs(1. - self.x.lat[i, j] / self.lat_center) > 0.05): # at least 5% acc.\n print 'lon: ', self.x.lon[i, j], self.lon_center\n print 'lat: ', self.x.lat[i, j], self.lat_center\n i = None\n j = None\n return i, j",
"def CenterOfMass(points):\n A = AreaOfPolygon(points)\n N = len(points)\n cx = 0\n cy = 0\n for i in xrange(0, N):\n x_i = points[i][0]\n y_i = points[i][1]\n x_ip1 = points[(i+1) % N][0]\n y_ip1 = points[(i+1) % N][1]\n part = (x_i * y_ip1 - x_ip1 * y_i)\n cx += ((x_i + x_ip1) * part)\n cy += ((y_i + y_ip1) * part)\n return (cx/(6*A), cy/(6*A), abs(A))",
"def get_center(self):\n lon, lat = self.coordinates\n\n dimx = lon.shape[0]\n dimy = lon.shape[1]\n \n return (lon[dimx/2][dimy/2],lat[dimx/2][dimy/2])",
"def center_coords(self):\n coords = set()\n for x in range(self.radius, self.container.width - self.radius):\n for y in range(self.radius, self.container.height - self.radius):\n coords.add((x, y))\n\n return coords",
"def centroid_of_points(pts):\n xs, ys, zs = 0, 0, 0\n for pt in pts:\n xs += pt[0]\n ys += pt[1]\n if len(pt) > 2:\n zs += pt[2]\n if len(pts) > 0:\n xs /= len(pts)\n ys /= len(pts)\n if len(pts[0]) > 2:\n zs /= len(pts)\n return xs, ys, zs\n return xs, ys",
"def get_center_location(self):\n latitude = 0\n longitude = 0\n for centroid in self.centroids:\n latitude += centroid[0]\n longitude += centroid[1]\n return [latitude / len(self.centroids), longitude / len(self.centroids)]",
"def centroid_points(points):\n p = float(len(points))\n x, y, z = zip(*points)\n return sum(x) / p, sum(y) / p, sum(z) / p",
"def centroid(self):\n x, y = self.coordinates\n A = 0.5 * sum(x[i]*y[i+1] - x[i+1]*y[i] for i in range(-1, len(self)-1))\n cx = sum((x[i] + x[i+1]) * (x[i]*y[i+1] - x[i+1]*y[i])\n for i in range(-1, len(self)-1)) / (6*A)\n cy = sum((y[i] + y[i+1]) * (x[i]*y[i+1] - x[i+1]*y[i])\n for i in range(-1, len(self)-1)) / (6*A)\n return Point((cx, cy), properties=self.properties, crs=self.crs)",
"def get_center_point(self):\n raise NotImplementedError()",
"def get_center(self):\n\n x = np.array(self.x)\n y = np.array(self.y)\n return np.mean(x), np.mean(y)",
"def get_center_coordinates(self):\n totalX = 0\n totalY = 0\n totalZ = 0\n for atom in self.get_atoms():\n totalX += atom.get_x()\n totalY += atom.get_y()\n totalZ += atom.get_z()\n \n xCenter = totalX / len(self.get_atoms())\n yCenter = totalY / len(self.get_atoms())\n zCenter = totalZ / len(self.get_atoms())\n \n return xCenter, yCenter, zCenter",
"def compute_center(self, mole_object):\r\n if mole_object.plugin_type == \"PyMOL\":\r\n sel = PymolPlugin.PymolPlugin().get_model('all')\r\n cnt = len(sel.atom)\r\n\r\n else:\r\n sel = ChimeraPlugin.ChimeraPlugin().select()\r\n cnt = len(ChimeraPlugin.ChimeraPlugin().current_atoms())\r\n\r\n cent_x = 0\r\n cent_y = 0\r\n cent_z = 0\r\n\r\n if cnt == 0:\r\n return 0, 0, 0\r\n\r\n if mole_object.plugin_type == \"PyMOL\":\r\n\r\n for a in sel.atom:\r\n cent_x += a.coord[0]\r\n cent_y += a.coord[1]\r\n cent_z += a.coord[2]\r\n\r\n else:\r\n\r\n for a in ChimeraPlugin.ChimeraPlugin().current_atoms():\r\n cent_x += a.coord()[0]\r\n cent_y += a.coord()[1]\r\n cent_z += a.coord()[2]\r\n\r\n cent_x /= cnt\r\n cent_y /= cnt\r\n cent_z /= cnt\r\n\r\n self.point_x.component('entryfield').setentry(cent_x)\r\n self.point_y.component('entryfield').setentry(cent_y)\r\n self.point_z.component('entryfield').setentry(cent_z)\r\n\r\n self.show_crisscross(mole_object)",
"def ComputeCentroid(self, vtkPoints, int_tuple, p_float=..., p_float=..., p_float=...):\n ...",
"def center(self, obj):\n mn0 = self.master.xy >= obj.center\n mn1 = self.master.xy <= obj.center\n\n point_list = [self.master.xy[mn0], self.master.xy[mn1], self.master.xy[mn0[0], mn1[1]], self.master.xy[mn1[0], mn0[1]]] # 4 physical points near the center coordinate.\n dist_list = []\n idx = 0\n for point in point_list:\n dist_list.append([idx, np.linalg.norm(point - obj.center)]) # Calculate Euclidean distances.\n idx += 1\n dist_sorted = sorted(dist_list, key=lambda distance : distance[1]) # Sort distances in ascending order.\n return self.master.mn(point_list[dist_sorted[0][0]]) # Convert the closest point to abstract coordinate and then return.",
"def compute_platform_center(self):\n base = self.platform_vertices[1] - self.platform_vertices[0] # base of triangle, vector\n x = np.linalg.norm(base) # base length, scalar\n m = self.platform_vertices[0] + base/2 # midpoint on the base, vector\n cm = x/(2*np.sqrt(3)) # length from m to center c, scalar\n cm_dir = self.platform_vertices[2] - m # direction to center from midpoint, vector\n cm_vec = cm_dir*cm/np.linalg.norm(cm_dir) # make cm_dir a unit vector and multiply by the length, vector\n c = m + cm_vec # center position, vector\n return c",
"def find_center(self):\n x = np.int(np.rint((len(self.grid[0][0]))/2))\n center = np.array([x, x, x])\n self.grid[center[0]][center[1]][center[2]] = 1\n return self.grid, center"
] | [
"0.7816311",
"0.7496035",
"0.74138576",
"0.7411869",
"0.735026",
"0.73026866",
"0.72750986",
"0.7227958",
"0.7207048",
"0.7157561",
"0.71556854",
"0.7141065",
"0.71169",
"0.70823437",
"0.7073579",
"0.70721674",
"0.7064834",
"0.7059459",
"0.70201606",
"0.7017879",
"0.7004704",
"0.69872516",
"0.6964822",
"0.6964499",
"0.6910163",
"0.6906823",
"0.687036",
"0.6852701",
"0.68488294",
"0.68258137"
] | 0.75747705 | 1 |
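Assuming coordlist holds equal-length numpy arrays (as frac_to_cart-style helpers typically return), sum(coordlist) / len(coordlist) is just the coordinate-wise mean; a minimal equivalent with explicit stacking:

import numpy as np

coordlist = [np.array([0.0, 0.0, 0.0]),
             np.array([1.0, 0.0, 0.0]),
             np.array([0.0, 3.0, 0.0])]
center = np.mean(np.stack(coordlist), axis=0)   # same result as sum(coordlist) / len(coordlist)
# center -> [0.3333..., 1.0, 0.0]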
Moves the geometrical center of the atoms in atomlist to the given point. | def move_center_to_point(atomlist, point):
for atom in range(len(atomlist)):
atomlist[atom] = atomlist[atom] - point
return atomlist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def recenter(self, point=(0, 0)):\n self.center = Point(*point)",
"def centerOn(self, point):\n rect = self.rect()\n x = point.x() - rect.width() / 2.0\n y = point.y() - rect.height() / 2.0\n \n self.setPos(x, y)",
"def centerOnPoint(self, point):\n\n inClass = point.__class__.__name__.lower()\n # check if we've been passed an OpenCV Point2f object\n if inClass == 'point2f':\n # convert the Point2f object to a simple list\n point = QPointF(point.x, point.y)\n\n # check if we've been passed a list\n elif inClass == 'list':\n # convert the Point2f object to a simple list\n point = QPointF(point[0], point[1])\n\n self.isZooming = True\n self.centerPoint = point\n self.centerOn(self.centerPoint)\n self.isZooming = False",
"def move_to(self, mobject_or_point):\n layer_center = self.surrounding_rectangle.get_center()\n if isinstance(mobject_or_point, Mobject):\n target_center = mobject_or_point.get_center() \n else:\n target_center = mobject_or_point\n\n self.shift(target_center - layer_center)",
"def setCenter(self, center):\n p = center - self.center\n for i in range(len(self.points)):\n self.points[i] += p",
"def __moveCenterTo(self, x, y):\n x0, y0, w, h = self.currentBox\n x2, y2 = x - (w/2), y - (h/2)\n self.__moveTo(x2, y2)",
"def center(self, destination):\n self.move(destination=destination, origin=self.center)",
"def pointcenter(x):\n return point(x)",
"def center(self, obj):\n mn0 = self.master.xy >= obj.center\n mn1 = self.master.xy <= obj.center\n\n point_list = [self.master.xy[mn0], self.master.xy[mn1], self.master.xy[mn0[0], mn1[1]], self.master.xy[mn1[0], mn0[1]]] # 4 physical points near the center coordinate.\n dist_list = []\n idx = 0\n for point in point_list:\n dist_list.append([idx, np.linalg.norm(point - obj.center)]) # Calculate Euclidean distances.\n idx += 1\n dist_sorted = sorted(dist_list, key=lambda distance : distance[1]) # Sort distances in ascending order.\n return self.master.mn(point_list[dist_sorted[0][0]]) # Convert the closest point to abstract coordinate and then return.",
"def set_center(self,structure):\n for i,b in enumerate(self.bfs):\n b.set_center( structure[ self.LIST1[i] ] ) \n return",
"def center_from_tuple(self, center):\n self.center = Point(*center)",
"def position_center(self, x, y):\n self.x = x\n self.y = y\n self.pos[0] = x - self.pos[2]/2\n self.pos[1] = y - self.pos[3]/2",
"def move_center(obj):\n desktop = QApplication.desktop()\n dw = desktop.width()\n dh = desktop.height()\n size = obj.size()\n mw = size.width()\n mh = size.height()\n obj.move(dw/2-mw/2, dh/2-mh/2)",
"def setCenter(self, np):\n p = self.getCenter()\n v = Vector.createFromTwoPoints(p, np)\n for i in range(len(self.points)):\n self.points[i] = v(self.points[i])",
"def setCentroid(self, center):\n p = center - self.centroid\n for i in range(len(self.points)):\n self.points[i] += p",
"def move_to_origin(x):\n # Correct x so it is centered at (0,0)\n tx = np.mean(x[:no_points, :])\n ty = np.mean(x[no_points:, :])\n x[:no_points, :] = (x[:no_points, :] - tx)\n x[no_points:, :] = (x[no_points:, :] - ty)\n return x, tx, ty",
"def center(self, x):\n\n shape = x.shape\n nx = shape[1]\n ny = shape[0]\n hnx = nx // 2\n hny = ny // 2\n\n temp = x[0:hny, 0:hnx].copy()\n x[0:hny, 0:hnx] = x[hny:ny, hnx:nx].copy()\n x[hny:ny, hnx:nx] = temp\n\n temp = x[0:hny, hnx:nx].copy()\n x[0:hny, hnx:nx] = x[hny:ny, 0:hnx].copy()\n x[hny:ny, 0:hnx] = temp",
"def update_center(self): \r\n \r\n self.grfx[0].center = self.center\r\n\r\n self.update_bbox()",
"def centre_of_points(list_of_points):\n\n cp = np.average(list_of_points, axis=0)\n return cp",
"def center(self):\n cp = self.dat.flowsheet.getCenter()\n self.centerOn(cp[0], cp[1])",
"def center_on_spawn(self):\n self.center_on(*self.world.metadata['playerStart'])",
"def set_atomic_position(molecule, atom_index, x_coord, y_coord, z_coord):\n molecule.SetAtomPosition(atom_index, x_coord, y_coord, z_coord)",
"def move_point(start, end, bbox):\n vector = end - start\n shift = calculate_shift(start, vector, bbox)\n if shift is not None and 0 < shift < 1:\n start = start + shift * vector\n return start",
"def centerOnMark(self, mark):\n\n # get the center of the mark\n point = mark.mapToScene(mark.pos())\n\n # and center the view on it\n self.centerOnPoint(point)",
"def setCenter(self, p):\n self.__center = p",
"def _move_actor(self, actor):\n\n actor.center_x = actor.center_x + actor.change_x\n actor.center_y = actor.center_y + actor.change_y",
"def centre(self):\n n = len(self.point)\n return Point(\n sum(map(lambda p: p.x, self.point)) / n,\n sum(map(lambda p: p.y, self.point)) / n\n )",
"def _setCenter(self, value, index):\n item = self.item()\n if item is not None:\n if value == 'Origin':\n value = 0.\n elif value not in self._ROTATION_CENTER_OPTIONS:\n value = float(value)\n else:\n value = value.lower()\n\n center = list(item.getRotationCenter())\n center[index] = value\n item.setRotationCenter(*center)",
"def update_to_coord(self, point):\r\n if self._index_of_sel_point != -1 and self._index_of_sel_point <= len(self.points)-1:\r\n self._command_stack.do(model.structure.UpdatePoint(\r\n self._structure, self._index_of_sel_point, round(point[0]), round(point[1])))\r\n elif self._index_of_sel_point == len(self.points) or not self.points:\r\n self._command_stack.do(model.structure.AddPoint(\r\n self._structure, self._index_of_sel_point+1, round(point[0]), round(point[1])))\r\n if self._index_of_sel_point+1 >= len(self.points):\r\n self.winfo_toplevel().update()\r\n self._index_of_sel_point = len(self.points)\r\n else:\r\n self._set_selection(self._index_of_sel_point+1)\r\n self.winfo_toplevel().update()",
"def center(self, center):\n if not isinstance(center, Point):\n raise TypeError(\"The center must be a Point!\")\n self._center = center"
] | [
"0.70135695",
"0.6748774",
"0.6072873",
"0.60708416",
"0.60040206",
"0.59520334",
"0.5908273",
"0.5892884",
"0.57873654",
"0.5775457",
"0.57008934",
"0.56635845",
"0.5663209",
"0.5656083",
"0.5635438",
"0.5621651",
"0.5585128",
"0.5584832",
"0.5545488",
"0.55017734",
"0.5447869",
"0.544242",
"0.5438934",
"0.54364634",
"0.5425978",
"0.5415568",
"0.53619677",
"0.5353338",
"0.53324926",
"0.5298903"
] | 0.8722849 | 0 |
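A hedged usage sketch tying the two helpers above together. Note that move_center_to_point subtracts the given point from every atom, so passing the current geometrical center recentres the set at the origin; the function names follow the snippets in this file.

import numpy as np

atoms = [np.array([1.0, 2.0, 3.0]), np.array([3.0, 2.0, 1.0]), np.array([2.0, 2.0, 2.0])]
center = get_geom_center(atoms)               # -> array([2., 2., 2.])
atoms = move_center_to_point(atoms, center)   # translate so the old centroid sits at the origin
assert np.allclose(get_geom_center(atoms), np.zeros(3))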
Rotates the adp with its corresponding rotation matrix. | def rotate_adp(adp, rotmat):
adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],
[float(adp[3]), float(adp[1]), float(adp[5])],
[float(adp[4]), float(adp[5]), float(adp[2])]])
rotmatT = np.transpose(rotmat)
adp = np.dot(rotmatT, adp)
adp = np.dot(adp, rotmat)
# print '=\n',adp,'\n-------------------------------------------------\n\n\n\n\n\n'
adp = np.array(adp).flatten().tolist()
return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))",
"def rotate_adp2(adp, rotmat, cell):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmat = np.linalg.inv(rotmat)\n rotmatT = np.transpose(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0],\n [0, 1 / cell[1], 0],\n [0, 0, 1 / cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]",
"def rotate_adp3(adp, rotmat, cell):\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmati = np.matrix(rotmat)\n rotmatiT = np.transpose(rotmati)\n rotmat = np.linalg.inv(rotmat)\n\n Nmat = np.matrix([[1 / cell[0], 0, 0],\n [0, 1 / cell[1], 0],\n [0, 0, 1 / cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n adp = np.dot(rotmati, adp)\n adp = np.dot(adp, rotmatiT)\n\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]",
"def rotate_adp_reverse(adp, rotmat):\n\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]",
"def rotate(mat,angle):\n return np.dot(Jones.rotator(angle), np.dot(mat, Jones.rotator(-angle)))",
"def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot",
"def _rotate(self, affine):\n dims = affine.shape[0]\n if not np.isscalar(self.rotation):\n raise Exception('this class requires exactly one entry for rotation!')\n theta = (self.deformrandomstate.rand() - 0.5) * 2 * self.rotation\n if dims == 4:\n\n # sample unit vector:\n u = np.random.random(3)\n u /= np.sqrt(np.sum([uu ** 2 for uu in u]) + 1e-8)\n ct = np.cos(theta)\n st = np.sin(theta)\n rot = np.eye(4)\n rot[:3, :3] = [\n [ct + u[0] ** 2 * (1 - ct), u[0] * u[1] * (1 - ct) - u[2] * st, u[0] * u[2] * (1 - ct) + u[2] * st],\n [u[1] * u[0] * (1 - ct) + u[2] * st, ct + u[1] ** 2 * (1 - ct), u[1] * u[2] * (1 - ct) - u[0] * st],\n [u[2] * u[0] * (1 - ct) - u[1] * st, u[2] * u[1] * (1 - ct) + u[0] * st, ct + u[2] ** 2 * (1 - ct)]]\n\n elif dims == 3:\n rot = np.eye(3)\n rot[:2, :2] = np.asarray([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]])\n else:\n raise Exception(\n 'implement this for each dimension, since not yet implemented for dimension {}'.format(dims))\n\n return np.matmul(rot, affine)",
"def rotate_z(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [math.cos(a),math.sin(a),0,0],\n [-math.sin(a),math.cos(a),0,0],\n [0,0,0,0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p",
"def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)",
"def rotate(self, a):\n ca = cos(a)\n sa = sin(a)\n rM = Matrix([\n [ca, -sa],\n [sa, ca]\n ])\n p0 = self.p0\n self.c = p0 + rM @ (self.c - p0)\n dp = p0 - self.c\n self.a0 = atan2(dp.y, dp.x)\n return self",
"def rotate(self, angle):\n\t\tif not isinstance(angle, Angle):\n\t\t\tangle = Angle(angle)\n\t\treturn angle.matrix() * self",
"def rotate(self,X):\n alpha = random.rand() * 2*pi\n beta = self.beta_sample()\n R = Rotator.rotation_matrix(alpha,beta,0.0)\n X = np.dot(R, X)\n if self.random_flip and (random.rand() > 0.5):\n X[2,:] = -X[2,:]\n X[1,:] = -X[1,:]\n return X",
"def apply_rotation(self, eta=0.0, phi=0.0, theta=0.0):\n \n new_rotation_matrix = self.rotation_elements( eta, phi, theta )\n \n #self.rotation_matrix_exp = np.dot( self.rotation_matrix_exp , new_rotation_matrix )\n self.rotation_matrix_exp = np.dot( new_rotation_matrix, self.rotation_matrix_exp )",
"def rotate(p,q,A,V): \n n = A.shape[0]\n App, Aqq, Apq = A[p,p], A[q,q], A[p,q] #Initial values\n phi = 0.5*math.atan2(2*Apq, Aqq-App) #Find the rotation value\n c, s = math.cos(phi), math.sin(phi) #Calculate sin and cos\n\n #Update the matrix diagonal elements\n A[p,p] = c*c*App + s*s*Aqq - 2*s*c*Apq \n A[q,q] = s*s*App + c*c*Aqq + 2*s*c*Apq\n A[p,q] = 0 #This is zero by construction\n \n \n #Iterate over and update remaining off-diagonal elements\n for i in range(p):\n Aip, Aiq = A[i,p], A[i,q]\n A[i,p] = c*Aip - s*Aiq\n A[i,q] = c*Aiq + s*Aip\n \n for i in range(p+1,q):\n Api, Aiq = A[p,i], A[i,q]\n A[p,i] = c*Api - s*Aiq\n A[i,q] = c*Aiq + s*Api\n \n for i in range(q+1,n):\n Api, Aqi = A[p,i], A[q,i]\n A[p,i] = c*Api - s*Aqi\n A[q,i] = c*Aqi + s*Api\n \n #Update eigenvectors in matrix V\n for i in range(n):\n Vip, Viq = V[i,p], V[i,q]\n V[i,p] = c*Vip - s*Viq\n V[i,q] = s*Vip + c*Viq\n \n return A, V",
"def rotate_x(p, a=0):\n # turn value to radians\n a = math.radians(a)\n translation_mat = np.matrix([\n [1,0,0,0],\n [0,math.cos(a),math.sin(a),0],\n [0,-math.sin(a),math.cos(a),0],\n [0,0,0,1],\n ], dtype=\"float32\")\n\n new_p = p @ translation_mat\n\n return new_p",
"def rotate(self, angle, reshape=False):\n return IntensityMap.rotate(self, angle, reshape=reshape)",
"def rotate(self, angle):\n perp = TwoDV(-self[1], self[0])\n angle = angle * math.pi / 180.0\n c, s = math.cos(angle), math.sin(angle)\n return TwoDV(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s)",
"def rotation(self):\n\t\treturn self.piv.a.rotate.v",
"def rotate_3D(atom, source_atom):\n from lauescript.cryst.match import get_transform\n\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n\n matrix = get_transform(lst1, lst2, matrix=True)\n\n adp = source_atom.adp['cart_int']\n\n atom.adp['cart_int'] = rotate_adp(adp, matrix)",
"def _rotate(self, angle):\n if self.undobuffer:\n self.undobuffer.push((\"rot\", angle, self._degreesPerAU))\n angle *= self._degreesPerAU\n neworient = self._orient.rotate(angle)\n tracing = self.screen._tracing\n if tracing == 1 and self._speed > 0:\n anglevel = 3.0 * self._speed\n steps = 1 + int(abs(angle)/anglevel)\n delta = 1.0*angle/steps\n for _ in range(steps):\n self._orient = self._orient.rotate(delta)\n self._update()\n self._orient = neworient\n self._update()",
"def rotate_ADP_about_axis(ADP, angle, axisDirection):\n adp = get_adp_as_matrix(ADP)\n u, v = np.linalg.eig(adp)\n startPoints = [v[:, i].flatten().tolist()[0] for i in xrange(3)]\n endPoints = [rotate_point_about_axis(point, angle, axisDirection, (0, 0, 0)) for point in startPoints]\n rotMat = get_transform(startPoints, endPoints, matrix=True).transpose()\n newadp = np.dot(rotMat.transpose(), np.dot(adp, rotMat))\n return newadp[0, 0], newadp[1, 1], newadp[2, 2], newadp[0, 1], newadp[0, 2], newadp[1, 2]",
"def rotation(self, p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])",
"def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)",
"def _rotate(self, angles, dj_matrix=None):\n if dj_matrix is None:\n dj_matrix = djpi2(self.lmax + 1)\n self.coeffs = SHRotateRealCoef(self.coeffs, angles, dj_matrix)",
"def rotate(self):\n pass",
"def _rotate(self, arr, theta):\n # Rotation Matrix R\n R = [[np.cos(theta), -np.sin(theta)], \n [np.sin(theta), np.cos(theta)]]\n\n return np.matmul(R, arr.T).T",
"def rotate(self, a):\n ca = cos(a)\n sa = sin(a)\n self.v = Matrix([\n [ca, -sa],\n [sa, ca]\n ]) @ self.v\n return self",
"def rotate(self,r):\n return r.hprod( self.hprod( r.inv() ) )",
"def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r",
"def rotate(self,X):\n alpha = random.rand() * 2*pi\n R = Rotator.rotation_matrix(alpha,0.0,0.0)\n return np.dot(R,X)"
] | [
"0.73167443",
"0.73074406",
"0.7229753",
"0.7127177",
"0.7113275",
"0.7051355",
"0.6992302",
"0.6686556",
"0.66458803",
"0.66007537",
"0.657009",
"0.6538239",
"0.6492567",
"0.64686793",
"0.6467943",
"0.6442462",
"0.64174455",
"0.63961196",
"0.6388264",
"0.63866466",
"0.63760054",
"0.6368504",
"0.63672537",
"0.63672537",
"0.6365199",
"0.63406956",
"0.63355124",
"0.6325487",
"0.63123417",
"0.6312132"
] | 0.79376036 | 0 |
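The six-component ADP (U11, U22, U33, U12, U13, U23) packed above is a symmetric 3x3 tensor, and the rotation amounts to the congruence transform U' = R^T U R; since R is orthogonal this is a similarity transform, so the eigenvalues (the principal mean-square displacements) are preserved. A standalone sketch of that invariant, using an illustrative z-axis rotation:

import numpy as np

def rotate_symmetric_tensor(U, R):
    # Congruence transform of a symmetric 3x3 tensor by a rotation matrix R.
    return R.T @ U @ R

theta = np.deg2rad(30.0)
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
U = np.array([[0.020, 0.003, 0.001],
              [0.003, 0.030, 0.002],
              [0.001, 0.002, 0.015]])
U_rot = rotate_symmetric_tensor(U, R)
assert np.allclose(np.linalg.eigvalsh(U), np.linalg.eigvalsh(U_rot))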
Returns the normal vector of a plane defined by the points p1, p2 and p3. | def get_normal_vector_of_plane(p1, p2, p3):
v12 = np.array(p1) - np.array(p2)
v13 = np.array(p1) - np.array(p3)
nvec = np.cross(v12, v13)
## print 'norm: '+str(np.linalg.norm(nvec))
return nvec / np.linalg.norm(nvec) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def normal(self) -> Vec:\n # The three points are in clockwise order, so compute differences\n # in the clockwise direction, then cross to get the normal.\n point_1 = self.planes[1] - self.planes[0]\n point_2 = self.planes[2] - self.planes[1]\n\n return Vec.cross(point_1, point_2).norm()",
"def normal_vector_3p(a: Vector, b: Vector, c: Vector) -> Vector:\n return (b - a).cross(c - a).normalize()",
"def get_normal_vectors(self, p, x1, y1, z1, x2, y2, z2, x3, y3, z3):\n x1.value, y1.value, z1.value, x2.value, y2.value, z2.value, x3.value, y3.value, z3.value = self._get_normal_vectors(p, x1.value, y1.value, z1.value, x2.value, y2.value, z2.value, x3.value, y3.value, z3.value)",
"def plane_equation(point_a, point_b, point_c):\n v1 = np.subtract(point_a, point_c)\n v2 = np.subtract(point_a, point_b)\n normal = np.cross(v1, v2)\n # print 'b4 norm', normal\n unit_normal = norm_vect(normal)\n # print 'unityyy', unit_normal\n return unit_normal",
"def _normal_vector(o, p0_3d, p1_3d):\n # The vector between middle point of v1-v2 and object center location\n # is the normal vector I'm looking for\n vn = p0_3d.lerp(p1_3d, 0.5) - o.matrix_world.translation\n # normalize so I can to length computation on it\n vn.normalize()\n return vn",
"def PlaneNormalVector(h, k, l):\r\n vec = np.array([h, k, l])\r\n return vec/np.linalg.norm(vec)",
"def normal(self) -> Vector:\n return normalize(cross(self.d1, self.d2))",
"def uVectNorm(x1,y1,z1, # P\n x2,y2,z2, # Q\n x3,y3,z3): # R\n p1 = np.array([x1,y1,z1])\n p2 = np.array([x2,y2,z2])\n p3 = np.array([x3,y3,z3])\n\n v1 = p3-p1\n v2 = p2-p1\n\n cp = np.cross(v1,v2)\n a,b,c = cp\n\n d = np.dot(cp, p3)\n\n print(a,b,c)",
"def normal(point_one, point_two):\n return numpy.array([point_one[1] - point_two[1], point_two[0] - point_one[0]])",
"def GetNormal(self, *args):\n return _itkLineSpatialObjectPointPython.itkLineSpatialObjectPoint3_GetNormal(self, *args)",
"def surface_norm(self, pt):\n\n return self.normal.normalize()",
"def plane_equation(p1, p2, p3):\n a1 = p2[0] - p1[0]\n b1 = p2[1] - p1[1]\n c1 = p2[2] - p1[2]\n a2 = p3[0] - p1[0]\n b2 = p3[1] - p1[1]\n c2 = p3[2] - p1[2]\n a = b1 * c2 - b2 * c1\n b = a2 * c1 - a1 * c2\n c = a1 * b2 - b1 * a2\n # Points are collinear\n if (abs(a) < 1e-6) and (abs(b) < 1e-6) and (abs(c) < 1e-6):\n return None\n # All clear\n d = (- a * p1[0] - b * p1[1] - c * p1[2])\n return a, b, c, d",
"def compute_normalvect(self):\n normvect = np.zeros((len(self.tri_pnts),3,3))\n zvec = np.array([0, 0, 1])\n for itri, tri in enumerate(self.tri_pnts):\n #import pdb; pdb.set_trace()\n tri0, tri1, tri2 = tri\n x1,y1 = self.points[tri1]-self.points[tri0]\n v1 = np.array([x1,y1,0])\n x2,y2 = self.points[tri2]-self.points[tri1]\n v2 = np.array([x2,y2,0])\n x3,y3 = self.points[tri0]-self.points[tri2]\n v3 = np.array([x3,y3,0])\n v1 = v1/np.linalg.norm(v1)\n v2 = v2/np.linalg.norm(v2)\n v3 = v3/np.linalg.norm(v3)\n #import pdb; pdb.set_trace()\n normvect[itri,:,:] = np.cross(v1,zvec), np.cross(v2,zvec), np.cross(v3,zvec)\n #import pdb; pdb.set_trace()\n return normvect",
"def normal(self,points):\n ez=np.array([[0,0,1]])\n v=((points-self.pos()*ez)*self.C-ez)\n return (v/np.linalg.norm(v,axis=1)[:,np.newaxis])#*np.sign(self.C)",
"def plane_point_side_v3(p: np.ndarray, v: np.ndarray) -> Any:\n return p[:3].dot(v) + p[3]",
"def unit_normals(p,q,r): \n vx1 = p[0] - r[0] # x1 - x3. \n vy1 = p[1] - r[1] # y1 - y3. \n vz1 = p[2] - r[2] # z1 - z3. \n\n vx2 = q[0] - r[0] # x2 - x3. \n vy2 = q[1] - r[1] # y2 - y3. \n vz2 = q[2] - r[2] # z2 - z3. \n\n vnx = vy1*vz2 - vz1*vy2 \n vny = vz1*vx2 - vx1*vz2 \n vnz = vx1*vy2 - vy1*vx2 \n\n len_vn = math.sqrt(vnx*vnx + vny*vny + vnz*vnz) \n vnx = vnx/len_vn \n vny = vny/len_vn \n vnz = vnz/len_vn \n\n return vnx, vny, vnz",
"def normal(self, t=0):\n n = Line3d()\n n.p = self.lerp(t)\n n.v = self.cross\n return n",
"def normal(self) -> Vec:\n return abs(self.up_axis.cross(self.forward()))",
"def normal_triangle(triangle, unitized=True):\n assert len(triangle) == 3, \"Three points are required.\"\n a, b, c = triangle\n ab = subtract_vectors(b, a)\n ac = subtract_vectors(c, a)\n n = cross_vectors(ab, ac)\n if not unitized:\n return n\n lvec = length_vector(n)\n return n[0] / lvec, n[1] / lvec, n[2] / lvec",
"def generate_normals(v1, v2, v3, normalize_result=True):\n # make vectors relative to v2\n # we assume opengl counter-clockwise ordering\n a = v1 - v2\n b = v3 - v2\n n = cross(b, a)\n if normalize_result:\n n = normalize(n)\n return n",
"def normal_vector(origin, vectors):\n return np.cross(vectors[0] - origin, vectors[1] - origin)",
"def getNormalizedNormalVec(self):\n TriPos = self.position\n # calc normalized normal vecor for Tri\n # get vectors Vert1Vert2 & Vert2Vert3\n TriVectors = np.subtract(TriPos[1:],TriPos[:-1])\n # get crossproduct of Vert1Vert2 & Vert2Vert3 (= surface normal)\n TriNorm = np.cross(TriVectors[0],TriVectors[1])+0.0\n # get length of surface normal\n length = np.linalg.norm(TriNorm)\n # divide each component of surface normal by length (= normalized surface normal)\n NormalizedNormalVec = np.around(TriNorm / length, decimals=5) # rounded, otherwise different values, equals not found\n # create string of tuple for segment dict \n #SegmDict = str(tuple(NormalizedNormalVec))\n return NormalizedNormalVec.tolist()",
"def twoDNormal(self):\n return vector((-1) * self.y, self.x, 0)",
"def normal_vector(self, facet):\n assert len(facet) == 3\n pos = self.cluster.get_positions()\n v1 = pos[facet[1], :] - pos[facet[0], :]\n v2 = pos[facet[2], :] - pos[facet[0], :]\n n = np.cross(v1, v2)\n length = np.sqrt(np.sum(n**2))\n return n / length",
"def project_3d_points_to_plane(points, p1, p2 ,p3, numpoints):\n p1 = np.asarray(p1)\n p2 = np.asarray(p2)\n p3 = np.asarray(p3)\n\n # get vectors in plane\n v1 = p3 - p1\n v2 = p2 - p1\n\n # compute cross product\n cp = np.cross(v1, v2)\n a, b, c = cp # normal to plane is ax + by + cz\n\n # evaluate d\n d = np.dot(cp, p3)\n\n # thus, normal is given by\n plane = vtk.vtkPlane()\n origin = p1\n normal = normalize(np.array([a,b,c]))\n plane.SetOrigin(p1)\n plane.SetNormal(normal)\n\n if numpoints == 1:\n proj = [0,0,0]\n plane.ProjectPoint(points, origin, normal, proj)\n return proj\n else:\n projected_pts = np.zeros((numpoints, 3), dtype=float)\n\n for i in range(numpoints):\n proj = [0,0,0]\n plane.ProjectPoint(points[i], origin, normal, proj)\n projected_pts[i] = proj\n\n return projected_pts",
"def normal(self, u, v):\n result = np.cross(self.du(u, v), self.dv(u, v))\n result = result / np.sqrt(vectordot(result, result))[:, None]\n return result",
"def normalize(x: float, y: float, z: float) -> Point3D:\n mag = math.sqrt(x*x + y*y + z*z)\n return x/mag, y/mag, z/mag",
"def calcular_norma_r3():\n x, y, z = carga_vector()\n norma = math.sqrt(x**2 + y**2 + z**2)\n print('\\nLa norma del vector ({},{},{}) es: {}'.format(x, y, z, norma))",
"def normal(vx,vy,n):\n if vx==0:\n if vy==0: \n return (0,0)\n else:\n return (0,n)\n elif vy==0:\n return (n,0)\n else:\n return (n/sqrt(1+(vy/vx)**2),n/sqrt(1+(vx/vy)**2))",
"def normalVect(self, n=2):\n L = len(self.vertices)\n normals = []\n while len(normals) < n:\n j = randrange(L)\n v0 = vector(self.vertices[j].coords())\n v1 = vector(self.vertices[int(j + L / 3) % L].coords())\n v2 = vector(self.vertices[int(j + 2 * L / 3) % L].coords())\n try:\n normals.append(((v1 - v0) * (v2 - v0)).normalize())\n except ValueError:\n pass\n return (1 / len(normals)) * sum(normals, vector(0, 0, 0))"
] | [
"0.7993883",
"0.75612503",
"0.7551127",
"0.75036335",
"0.73087484",
"0.70266545",
"0.6976466",
"0.69476783",
"0.69432104",
"0.68638694",
"0.6826915",
"0.6760271",
"0.67263806",
"0.670698",
"0.66756666",
"0.66190445",
"0.65483314",
"0.6541516",
"0.6535791",
"0.6529659",
"0.6512842",
"0.6507131",
"0.64994264",
"0.6490822",
"0.6474776",
"0.64673924",
"0.6458305",
"0.6434979",
"0.64240444",
"0.6362717"
] | 0.88373834 | 0 |
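A quick check for a plane-normal routine like the one above: the returned vector must have unit length and be orthogonal to both in-plane edge vectors. The call assumes the function defined in this record.

import numpy as np

p1, p2, p3 = np.array([0.0, 0.0, 0.0]), np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])
n = get_normal_vector_of_plane(p1, p2, p3)
assert np.isclose(np.linalg.norm(n), 1.0)       # unit length
assert np.isclose(np.dot(n, p2 - p1), 0.0)      # orthogonal to one edge
assert np.isclose(np.dot(n, p3 - p1), 0.0)      # orthogonal to the other
# for these points n is the z axis up to sign: [0., 0., 1.]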
Returns a list where every element is a list of three atom names. The second and third names are the closest neighbours of the first name. The arguments are a list as returned by frac_to_cart and the number of neighbours to be returned. | def get_closest_neighbours(atomlist, neighbours=2):
print('atomlist', atomlist)
neighbourlist = []
for atom in atomlist:
listline = [atom[0][0]]
dists = []
distsc = []
for partner in atomlist:
dists.append(np.linalg.norm(atom[1] - partner[1]))
distsc.append(np.linalg.norm(atom[1] - partner[1]))
dists.remove(min(dists))
for _ in range(neighbours):
if min(dists) < 2.5:
listline.append(atomlist[distsc.index(min(dists))][0][0])
dists.remove(min(dists))
#listline.append(atomlist[distsc.index(min(dists))][0][0])
neighbourlist.append(listline)
return neighbourlist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_CX_neighbours(list_of_atoms, atom_list):\n my_list = []\n atom_numbers = []\n for atom in list_of_atoms:\n for element in identify_bonds(atom, atom_list):\n if (((element[0].atom_name == \"CX\") or (element[0].atom_name == \"CY\")) and (element[0].atom_number not in atom_numbers)):\n my_list.append(element[0])\n atom_numbers.append(element[0].atom_number)\n return my_list",
"def neighbours(number: int, number_sectors: int) -> [int, int, int, int]:\n col = number % number_sectors\n row = number // number_sectors\n\n nieg = [number - number_sectors, number + number_sectors, number - 1, number + 1]\n\n if row == 0:\n nieg[0] = -1\n if row == number_sectors - 1:\n nieg[1] = -1\n if col == 0:\n nieg[2] = -1\n if col == number_sectors - 1:\n nieg[3] = -1\n return nieg",
"def compose_listofr(atom_name, listofn):\n c = 1.06\n c2 = 1.4\n listofr = []\n for x in range(len(listofn)):\n if (atom_name[0] == \"N\"):\n if (listofn[x].atom_name[0] == \"H\"):\n listofr.append(1.010*c)\n if (listofn[x].atom_name[0] == \"O\"):\n listofr.append(1.060*c)\n if (listofn[x].atom_name[0] == \"C\"):\n listofr.append(1.475*c)\n if (listofn[x].atom_name[0] == \"N\"):\n listofr.append(1.450*c)\n if (atom_name[0] == \"O\"):\n if (listofn[x].atom_name[0] == \"H\"):\n listofr.append(0.970*c)\n if (listofn[x].atom_name[0] == \"O\"):\n listofr.append(1.490*c)\n if (listofn[x].atom_name[0] == \"C\"):\n listofr.append(1.160*c)\n if (listofn[x].atom_name[0] == \"N\"):\n listofr.append(1.060*c)\n if (atom_name[0] == \"C\"):\n if (listofn[x].atom_name[0] == \"H\"):\n listofr.append(1.090*c)\n if (listofn[x].atom_name[0] == \"O\"):\n listofr.append(1.160*c)\n if (listofn[x].atom_name[0] == \"C\"):\n listofr.append(1.540*c)\n if (listofn[x].atom_name[0] == \"N\"):\n listofr.append(1.475*c)\n if (atom_name[0] == \"H\"):\n if (listofn[x].atom_name[0] == \"H\"):\n listofr.append(0.740*c2)\n if (listofn[x].atom_name[0] == \"O\"):\n listofr.append(0.970*c2)\n if (listofn[x].atom_name[0] == \"C\"):\n listofr.append(1.090*c2)\n if (listofn[x].atom_name[0] == \"N\"):\n listofr.append(1.010*c2)\n return listofr",
"def get_framework_neighbours(atom, useH=True):\n neighbourlist = []\n for atom2 in atom.partner[:5]:\n #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(\n covalence_radius[atom2.element]) + .1:\n if not 'H' == atom2.element or useH:\n neighbourlist.append(atom2)\n return neighbourlist",
"def define_neighbors(x: int, y: int, z: int) -> list:\n diffs = range(-1, 2)\n coords = []\n # might need to add some if guards (if x > 0) (if x < len(blah) etc)\n xdiffs = (x + diff for diff in diffs)\n ydiffs = (y + diff for diff in diffs)\n zdiffs = (z + diff for diff in diffs)\n neighbors = product(xdiffs, ydiffs, zdiffs)\n for index, neighbor in enumerate(neighbors):\n if neighbor != (x, y, z) and all(c >= 0 for c in neighbor):\n coords.append(neighbor)\n return coords",
"def get_neighbours(lat, long):\n # ns = north east, ew = east west (ratio between 1 feet and degree) \n # its different on diferent places on earth (sphere)!!\n ns = 0.0025\n ew = 0.0025\n walk = []\n for i in range(-2, 3):\n for j in range(-2, 3):\n thiscell = CellId.from_lat_lng(LatLng.from_degrees(lat + ns*i, long + ew*j)).parent(S2_CELL_LEVEL)\n if abs(i * j) < 4:\n walk.append(thiscell.id())\n return sorted(walk)",
"def rankNeighbors(Data):\r\n strokeDist = []\r\n for i in range(len(Data)):\r\n strokeDist.append([])\r\n index = 0\r\n for point1 in Data:\r\n dist = []\r\n index1=0\r\n for point2 in Data:\r\n #dist.append(math.sqrt((center1[0]-center2[0])**2+(center1[1]-center2[1])**2))\r\n dist.append((index1,math.sqrt((point1[0]-point2[0])**2+(point1[1]-point2[1])**2+(point1[2]-point2[2])**2)))\r\n index1+=1\r\n #x = copy.deepcopy(dist)\r\n #print(x)\r\n dist.sort(key= lambda x:x[1])\r\n #print(x)\r\n # Get rank for each element\r\n idx1 =0\r\n for e in dist:\r\n #i = x.index(e)\r\n strokeDist[index].append(e)\r\n idx1 +=1\r\n index+=1\r\n return strokeDist",
"def lonlat(n_lon: int, n_lat: int) -> List[Tuple[float, float]]:\n grid = []\n for lon in np.linspace(0, 360.0 - 360.0 / n_lon, n_lon):\n for lat in np.linspace(-90, 90, n_lat):\n grid.append((lon, lat))\n return grid",
"def get_neighbours(pos):\n neighbours = {tuple(sum(x) for x in zip(pos, offset)) for offset in relative_positions}\n return neighbours",
"def computeNearestNeighbor(itemName, itemVector, items):\n # \"Chris Cagle/ I Breathe In. I Breathe Out\" [1, 5, 2.5, 1, 1, 5, 1]\n distances = []\n for otherItem in items:\n if otherItem != itemName:\n # print('itemVector =>', itemVector)\n # print('items[otherItem] =>', items[otherItem])\n distance = manhattan(itemVector, items[otherItem])\n distances.append((distance, otherItem))\n # sort based on distance -- closest first\n distances.sort()\n return distances",
"def get_all_neighbor_coords(tiles):\n return [add(tile, neighbor) for tile in tiles for neighbor in NEIGHBORS]",
"def getDistancesWithNames(twoDList):\n matrix = []\n for i in range(0,len(twoDList)):\n for j in range(len(twoDList) - len(twoDList) + i):\n SD = determineIdenticalBases(data[i][1], data[j][1])\n temp = []\n if SD[1] != 0:\n p = calculateP(SD[0]+SD[1], SD[1])\n temp.append(data[i][0])\n temp.append(data[j][0]) \n temp.append(estimateMutationsPerSite(p))\n matrix.append(temp)\n return matrix",
"def getNeighborNodes(self, signature):\n x, y, z = signature[0], signature[1], signature[2]\n return [(x+1, y+1, z+1), (x+1, y, z+1), (x+1, y-1, z+1),\n (x, y+1, z+1), (x, y, z+1), (x, y-1, z+1),\n (x-1, y+1, z+1), (x-1, y, z+1), (x-1, y-1, z+1),\n (x+1, y+1, z-1), (x+1, y, z-1), (x+1, y-1, z-1),\n (x, y+1, z-1), (x, y, z-1), (x, y-1, z-1),\n (x-1, y+1, z-1), (x-1, y, z-1), (x-1, y-1, z-1),\n (x+1, y+1, z), (x+1, y, z), (x+1, y-1, z),\n (x, y+1, z), (x, y, z), (x, y-1, z),\n (x-1, y+1, z), (x-1, y, z), (x-1, y-1, z)]",
"def get_contour(atom_list):\n initial = [atom for atom in atom_list if ((0 < len(identify_bonds(atom, atom_list)) < 3) and (check_connected(atom, identify_bonds(atom, atom_list)) == False))]\n \n extra_1 = []\n for atom in atom_list:\n neighbours = [bond[0] for bond in identify_bonds(atom, atom_list)]\n for i in neighbours:\n neighbours2 = [bond[0] for bond in identify_bonds(i, atom_list)]\n for j in neighbours2:\n if j in initial:\n extra_1.append(atom)\n\n extra_2 = []\n for atom in atom_list:\n neighbours = [bond[0] for bond in identify_bonds(atom, atom_list)]\n check = 0\n for i in neighbours:\n if i in initial:\n check += 1\n if ((check == 2) and (atom not in initial)):\n extra_2.append(atom) \n return (initial + extra_1 + extra_2)",
"def calc_distances(marker_list, rf_pairs):\n final_distance = [[marker_list[0], 0]]\n\n for i in range(1, len(marker_list)):\n cur_markers = [marker_list[i-1], marker_list[i]]\n for rf_pair in rf_pairs:\n if rf_pair[0] in cur_markers and rf_pair[1] in cur_markers:\n final_distance.append([cur_markers[1], rf_pairs[rf_pair]])\n break\n return final_distance",
"def makeNewickList(distancesWithNames):\n i = 0\n oldDistance = 0\n while len(distancesWithNames) > 1:\n smallestindex = findSmallest(distancesWithNames)\n distancesWithNames, oldDistance = newMatrixWithSmallest(distancesWithNames, smallestindex, beforeDistance=oldDistance)\n i+=1\n retString = \"(\" + distancesWithNames[0][0] + \",\" + distancesWithNames[0][1] + \");\"\n return retString",
"def find_ngrams(input_list, n=3):\n return zip(*[input_list[i:] for i in range(n)])",
"def calculate_distance_matrix(atomlist):\n distlist = []\n for atom in atomlist:\n atomdict = {}\n for partner in atomlist:\n if not str(int(partner[0][1])) in atomdict.keys():\n atomdict[str(int(partner[0][1]))] = []\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n else:\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n atomdict[str(int(partner[0][1]))].sort()\n\n distlist.append(atomdict)\n\n return distlist",
"def get_neighbors(n):\n if n < 3:\n return ValueError(\"Integer must be greater than 3.\")\n p = generate()\n q = []\n l = 0\n g = 0\n while g <= n:\n q = next(p)\n g = q[-1]\n if q[-1] == n:\n l = q[0][-2]\n q = next(p)\n g = q[-1]\n elif q[-1] > n:\n l = q[0][-3]\n return l, g",
"def nine_to_3x3(listy):\n new_side = []\n k = int(len(listy) / 3)\n \n for i in range(k):\n intermediate = []\n for j in range(3):\n intermediate.append(listy.pop(0))\n \n new_side.append(intermediate)\n return new_side",
"def n_closest_waters(coordinates, atom, oxygens, n):\n\n waters = []\n for i in range(n):\n index = find_closest_atom(atom, oxygens)\n closest_oxygen = oxygens[index]\n if closest_oxygen in coordinates:\n oxygen_index = coordinates.index(closest_oxygen)\n OT = coordinates[oxygen_index]\n HT1 = coordinates[oxygen_index+1]\n HT2 = coordinates[oxygen_index+2]\n water = [OT, HT1, HT2]\n waters.append(water)\n oxygens = remove_atom(oxygens, index)\n return waters",
"def get_influence_atoms(atomlist):\n enviromentlist = []\n trunclist = []\n neighbourlist = get_closest_neighbours(atomlist, 4)\n for neighbours in neighbourlist:\n if neighbours[0][0] == \"H\":\n neighbours = neighbours[:2]\n if neighbours[0][0] == \"O\":\n neighbours = neighbours[:3]\n trunclist.append(neighbours)\n for atom in trunclist:\n newatom = []\n for atom1partner in atom[1:]:\n for partner in trunclist:\n if partner[0] == atom1partner:\n counter = 0\n\n for atomi in partner:\n if atomi[0] == 'H':\n counter += 1\n\n if counter < 2 or (partner[0] in atom and atom[0][0] == 'H'):\n newatom += atom + partner[1:]\n\n newatom = make_list_unique(newatom)\n newatom.sort()\n enviromentlist.append(newatom)\n return enviromentlist",
"def calcDistanceList(work_list):\n distance_list = []\n for swap in work_list: # for every work item find distance\n distance_list.append(Cluster.calcDistance(*swap))\n return distance_list",
"def neighbor_list(i, j, k, nx):\n left_center = (i-1, j, k)\n right_center = (i+1, j, k)\n top_center = (i, j+1, k)\n bottom_center = (i, j-1, k)\n left_up = (i, j, k + 1)\n left_down = (i, j, k -1)\n return np.mod([left_center, right_center, top_center, bottom_center, left_up, left_down], nx)",
"def GetBonds(Bonds):\n b = sorted([(min(x), max(x)) for x in Bonds])\n Bonds13, Bonds14 = [], []\n for (a1,b1) in b:\n #check for bonds with a1 at the center of a 1-3 interaction,\n #letting b1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == a1 and b2 < b1] + \\\n [a2 for (a2,b2) in b if b2 == a1 and a2 < b1]\n Bonds13.extend([(min(c,b1), max(c,b1)) for c in clist])\n #check for bonds with b1 at the center of a 1-3 interaction,\n #letting a1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == b1 and b2 < a1] + \\\n [a2 for (a2,b2) in b if b2 == b1 and a2 < a1]\n Bonds13.extend([(min(c,a1), max(c,a1)) for c in clist])\n #find atoms connected to a1\n clist = [b2 for (a2,b2) in b if a1==a2 and not b1==b2] +\\\n [a2 for (a2,b2) in b if a1==b2 and not b1==a2]\n #find atoms connected to b1\n dlist = [a2 for (a2,b2) in b if b1==b2 and not a1==a2] +\\\n [b2 for (a2,b2) in b if b1==a2 and not a1==b2]\n Bonds14.extend([(min(c,d), max(c,d)) for c in clist for d in dlist])\n Bonds1213 = b + Bonds13\n #sort\n Bonds1213.sort()\n Bonds14.sort()\n #get unique values in case of loops\n Bonds1213 = [x for (i,x) in enumerate(Bonds1213) if i == 0 or x != Bonds1213[i-1]]\n Bonds14 = [x for (i,x) in enumerate(Bonds14) if i == 0 or x != Bonds14[i-1]]\n #convert to arrays \n Bonds1213 = array(Bonds1213, int)\n Bonds14 = array(Bonds14, int)\n return Bonds1213, Bonds14",
"def construct_graph_connection(coord_list, radie):\n\n connection_distance = []\n connection = []\n for j, data in enumerate(coord_list):\n '''Calculate the relative distance of the nodes'''\n distance = np.hypot(coord_list[:,0]-data[0], coord_list[:,1]-data[1])\n '''save nodes which are in range'''\n #for i, data in enumerate(distance):\n for i in range(j+1, len(distance)):\n data = distance[i]\n if data < radie:\n connection.append([j, i])\n connection_distance.append(data)\n\n\n connection_distance = np.array(connection_distance)\n connection = np.array(connection)\n return connection, connection_distance",
"def get_framework_neighbors(atom, useH=True):\n neighborlist = []\n for atom2 in atom.partner[:5]:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(\n covalence_radius[atom2.element]) + .1:\n if not 'H' == atom2.element or useH:\n neighborlist.append(atom2)\n return neighborlist",
"def moore_neighbourhood(self, grid_position: tuple, radius: int) -> list:\n result = []\n u = [grid_position[0] - radius, grid_position[1] - radius]\n for i in range(2 * radius + 1):\n for j in range(2 * radius + 1):\n # This does not make much sense, since u is a list and i and j are integers\n result.append([u + i, u + j])\n return result",
"def neighbors(districts, r, c):\r\n n_list = []\r\n if r>0:\r\n n_list += [districts[r-1,c]]\r\n if r<4:\r\n n_list += [districts[r+1,c]]\r\n if c>0:\r\n n_list += [districts[r,c-1]]\r\n if c<4:\r\n n_list += [districts[r,c+1]]\r\n return n_list",
"def _generate_immediate_neighbours(pattern: str) -> list:\n generated = []\n for i in range(len(pattern)):\n if pattern[i] == 'A':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_A])\n elif pattern[i] == 'C':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_C])\n elif pattern[i] == 'T':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_T])\n elif pattern[i] == 'G':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_G])\n\n return generated"
] | [
"0.5994788",
"0.56360227",
"0.56093794",
"0.55970883",
"0.54962254",
"0.54728734",
"0.5409299",
"0.53933924",
"0.5324893",
"0.53109133",
"0.5306111",
"0.5260367",
"0.5244169",
"0.5237841",
"0.5218403",
"0.5203575",
"0.5183413",
"0.5180326",
"0.5171642",
"0.5169028",
"0.5147983",
"0.5126392",
"0.51196516",
"0.5116443",
"0.5109819",
"0.51059335",
"0.5102457",
"0.51004213",
"0.50574064",
"0.50545406"
] | 0.67386407 | 0 |
Calculates for every atom the distances to all other atoms in atomlist. Returns a list where every element is a list of all distances. | def calculate_distance_matrix(atomlist):
distlist = []
for atom in atomlist:
atomdict = {}
for partner in atomlist:
if not str(int(partner[0][1])) in atomdict.keys():
atomdict[str(int(partner[0][1]))] = []
atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))
else:
atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))
atomdict[str(int(partner[0][1]))].sort()
distlist.append(atomdict)
return distlist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _compute_distances(self, atoms: List[CellAtom]):\n muon = self._cell_atoms[self._muon_index]\n\n for atom in atoms:\n atom.distance_from_muon = np.linalg.norm(muon.position - atom.position)",
"def calcDistanceList(work_list):\n distance_list = []\n for swap in work_list: # for every work item find distance\n distance_list.append(Cluster.calcDistance(*swap))\n return distance_list",
"def _calculate_distances(self):\n all_dists = []\n for ref in range(len(self.atoms)):\n if self.atoms[ref].symbol in self.exclude:\n continue\n indices = list(range(ref+1, len(self.atoms)))\n indices = self._filter_excluded(indices)\n if len(indices) == 0:\n continue\n dists = self.atoms.get_distances(ref, indices, mic=True)\n all_dists += list(dists)\n \n # Normalize by the mean distance\n return np.array(all_dists)/np.mean(all_dists)",
"def distance_list(coordinate_list):\n for item1 in coordinate_list:\n L2 = []\n d_list.append(L2)\n for item2 in coordinate_list:\n if item1 != item2:\n distance = math.sqrt((item2[1] - item1[1]) ** 2 \\\n + (item2[2] - item1[2]) ** 2)\n L2.append((item2[0], distance))\n return d_list",
"def _get_distances(self):\n for molecule in self.values():\n molecule.get_distances()\n\n # for atom in self.atoms:\n # atom.get_distances()",
"def get_distances(self, crds):\n self.all_dist = np.zeros((self.natom, self.natom))\n # Loop over upper triangle of atom pairs\n for iat in range(self.natom-1):\n # Get the atom indices\n at_inds = np.arange(len(crds))\n\n # Calc distances between atoms (only upper triangle though)\n at_msk = at_inds > iat\n all_ut_dist = crds[at_msk] - crds[iat]\n all_ut_dist = np.linalg.norm(all_ut_dist, axis=1)\n\n self.all_dist[iat, iat+1:] = all_ut_dist\n\n # Get lower triangle indices\n self.all_dist = self.all_dist + self.all_dist.T",
"def calculate_distances(data_point, centroids):\n distances = []\n for centroid_index, centroid_value in enumerate(centroids):\n distances.append(distance(data_point, centroid_value))\n return distances",
"def compute_distances(Ls):\n if not isinstance(Ls, list):\n Ls = [Ls]\n\n dists = []\n for L in Ls:\n N,D = L.shape\n # 1xNxD - Nx1xD (L1 distance)\n dist = (np.abs(L[None,:,:] - L[:,None,:])).sum(axis=2)\n dists.append(dist)\n\n return dists",
"def get_closest_neighbours(atomlist, neighbours=2):\n print('atomlist', atomlist)\n neighbourlist = []\n for atom in atomlist:\n listline = [atom[0][0]]\n dists = []\n distsc = []\n for partner in atomlist:\n dists.append(np.linalg.norm(atom[1] - partner[1]))\n distsc.append(np.linalg.norm(atom[1] - partner[1]))\n dists.remove(min(dists))\n for _ in range(neighbours):\n if min(dists) < 2.5:\n listline.append(atomlist[distsc.index(min(dists))][0][0])\n dists.remove(min(dists))\n #listline.append(atomlist[distsc.index(min(dists))][0][0])\n neighbourlist.append(listline)\n return neighbourlist",
"def calcDistortionList(work_list):\n distortion_list = []\n for swap in work_list:\n distortion_list.append(Cluster.calcDistortion(*swap)) # call calcDistortion with tuple expansion as args\n return distortion_list",
"def get_all_distances(cls, indices, dist_mat):\n distances = []\n for i, j in combinations(indices, 2):\n distances.append(cls.get_dist(dist_mat, i, j))\n return distances",
"def measure_distance(self, mat):\n if len(mat) == 1:\n print(\"chain has only one CAatom\")\n return\n self.dists = []\n for num in range(0, len(mat)):\n if num + 1 <= len(mat) - 1:\n c1 = mat[num]\n c2 = mat[num + 1]\n d = c2 - c1\n self.dists.append(math.sqrt(np.sum(d * d)))\n return self.dists",
"def link_atoms_by_distance(distlist1, atomlist1, distlist2, atomlist2, keys):\n hitlist = []\n\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n partnervalue += abs(atom[key][element] - partner[key][element])\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)",
"def get_distances(self):\n N = len(self.cells) # Number of cells\n distances = np.zeros([N, N]) # distances between cells\n positions = self.position_matrix() # positions of cells \n \n # get distances between cells (exploit symmetry between upper and lower triangular form)\n for i, position in enumerate(positions[:-1, :]): # Iterate matrix except the last one\n directions = positions[i+1:, :] - position # direction from i to j > i\n distances[i, i+1:] = np.linalg.norm(directions, axis=1) # length of directions\n \n return distances + distances.T # Add lower triangle of matrix to upper ",
"def calc_distances(marker_list, rf_pairs):\n final_distance = [[marker_list[0], 0]]\n\n for i in range(1, len(marker_list)):\n cur_markers = [marker_list[i-1], marker_list[i]]\n for rf_pair in rf_pairs:\n if rf_pair[0] in cur_markers and rf_pair[1] in cur_markers:\n final_distance.append([cur_markers[1], rf_pairs[rf_pair]])\n break\n return final_distance",
"def distances(self):\n self._sort_measurements()\n return self._distances",
"def link_atoms_by_distance_diff(distlist1, atomlist1, distlist2, atomlist2, keys):\n hitlist = []\n\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n value = abs(atom[key][element] - partner[key][element])\n partnervalue += value\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)",
"def pairwise_distances(data):\n distances = []\n for x in data:\n distances_row = []\n for y in data:\n distances_row.append(metric(x, y)**2)\n distances.append(distances_row)\n return distances",
"def _calculate_distance(self, ordered_list):\r\n\r\n total_distance = 0\r\n previous_point = None\r\n for point in ordered_list:\r\n if previous_point is not None:\r\n angle, distance = previous_point.angleAndDistanceTo(point) # geodesic distance in meters\r\n total_distance += distance\r\n previous_point = point\r\n\r\n return total_distance",
"def _set_distances(results: List[(Place, float)]) -> List[Place]:\n all_entities = []\n\n for entity, distance in results:\n entity.distance = distance\n all_entities.append(entity)\n\n return all_entities",
"def find_CX_neighbours(list_of_atoms, atom_list):\n my_list = []\n atom_numbers = []\n for atom in list_of_atoms:\n for element in identify_bonds(atom, atom_list):\n if (((element[0].atom_name == \"CX\") or (element[0].atom_name == \"CY\")) and (element[0].atom_number not in atom_numbers)):\n my_list.append(element[0])\n atom_numbers.append(element[0].atom_number)\n return my_list",
"def filter_carbon_atoms(atom_list, rings):\n list_3 = []\n list_2 = []\n list_2n = []\n for atom in atom_list:\n if (check_connected(atom, identify_bonds(atom, atom_list)) == False):\n if (len(identify_bonds(atom, atom_list)) == 3):\n list_3.append(atom)\n elif (len(identify_bonds(atom, atom_list)) == 2):\n list_2.append(atom)\n for neighbour in identify_bonds(atom, atom_list):\n if (len(identify_bonds(neighbour[0], atom_list)) == 2):\n for ring in rings:\n if( (atom in ring) and (neighbour[0] in ring)):\n list_2n.append(atom) \n return list_3, list_2, list_2n",
"def _euclidian_distances(stop_list):\n e_dists2 = [transitfeed.approximate_distance_between_stops(stop, tail) for\n (stop, tail) in zip(stop_list, stop_list[1:])]\n\n return e_dists2",
"def calculate_distances(coords: List[Tuple[float, float]]) -> List[Dict]:\n miles = 0\n od = []\n for idx in range(len(coords)):\n if idx == 0:\n continue\n dist = distance(coords[idx], coords[idx - 1]).miles\n miles = miles + dist\n od.append(\n {\n \"start\": coords[idx - 1],\n \"stop\": coords[idx],\n \"distance\": dist,\n \"total\": miles,\n }\n )\n return od",
"def subtree_distances(self, root):\r\n\r\n nodes = root.get_terminals()\r\n nodes.reverse()\r\n node_pairs = itertools.ifilter(\r\n lambda (a1, a2): a1.name < a2.name,\r\n itertools.product(nodes, nodes))\r\n\r\n distances = [self._node_distance(pair[0], pair[1])\r\n for pair in node_pairs]\r\n\r\n return distances",
"def get_distances_list(mid_points):\n n = len(mid_points)\n dist_list = np.zeros((n,n))\n\n for i in range(n):\n for j in range(i+1, n):\n dist_list[i][j] = compute_distance(mid_points[i], mid_points[j])\n \n return dist_list",
"def get_influence_atoms(atomlist):\n enviromentlist = []\n trunclist = []\n neighbourlist = get_closest_neighbours(atomlist, 4)\n for neighbours in neighbourlist:\n if neighbours[0][0] == \"H\":\n neighbours = neighbours[:2]\n if neighbours[0][0] == \"O\":\n neighbours = neighbours[:3]\n trunclist.append(neighbours)\n for atom in trunclist:\n newatom = []\n for atom1partner in atom[1:]:\n for partner in trunclist:\n if partner[0] == atom1partner:\n counter = 0\n\n for atomi in partner:\n if atomi[0] == 'H':\n counter += 1\n\n if counter < 2 or (partner[0] in atom and atom[0][0] == 'H'):\n newatom += atom + partner[1:]\n\n newatom = make_list_unique(newatom)\n newatom.sort()\n enviromentlist.append(newatom)\n return enviromentlist",
"def atomList(self):\n\n\t\tal = []\t\n\t\tfor chain in self.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\tfor atom in res.atom:\n\t\t\t\t\tal.append(atom)\n\n\t\treturn al",
"def total_cost_2D(self, final_list):\n total_cost = 0\n for i in range(len(final_list) - 1):\n temp = self.pairwise_distance(final_list[i], final_list[i + 1])\n total_cost = total_cost + temp\n print(\"Total distance: \" + str(total_cost))",
"def _calc_distance(self, X):\n distances = np.zeros((X.shape[0], self.n_clusters))\n print(distances.shape)\n for i, centroid in enumerate(self.centroids):\n distances[:, i] = np.linalg.norm(X - centroid, axis=1)\n return distances"
] | [
"0.73647845",
"0.7127275",
"0.7080569",
"0.67939436",
"0.66801125",
"0.66210407",
"0.6395463",
"0.6352536",
"0.60293406",
"0.59641975",
"0.5953034",
"0.5883599",
"0.5874572",
"0.5861031",
"0.5844821",
"0.5841001",
"0.5830898",
"0.5818882",
"0.5806686",
"0.57949203",
"0.5784324",
"0.5754123",
"0.5749961",
"0.5739553",
"0.57180345",
"0.56448495",
"0.562841",
"0.5622595",
"0.5604913",
"0.55522674"
] | 0.78830993 | 0 |
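A minimal, self-contained sketch of the distance-matrix pattern paired in the row above, assuming the atom-list layout these snippets use (each entry is `[[name, atomic_number], position]` with a NumPy position vector). The sample atoms and the helper name `distance_matrix_by_element` are hypothetical, not part of the dataset.

```python
import numpy as np

# Hypothetical atom list in the layout these snippets assume:
# each entry is [[atom_name, atomic_number], cartesian_position].
atomlist = [
    [["C1", 6], np.array([0.000, 0.000, 0.000])],
    [["O1", 8], np.array([1.220, 0.000, 0.000])],
    [["H1", 1], np.array([-0.540, 0.940, 0.000])],
]

def distance_matrix_by_element(atomlist):
    """For every atom, collect its distances to all atoms, keyed by the
    partner's atomic number (as a string), each list sorted ascending."""
    distlist = []
    for atom in atomlist:
        atomdict = {}
        for partner in atomlist:
            key = str(int(partner[0][1]))
            atomdict.setdefault(key, []).append(
                float(np.linalg.norm(atom[1] - partner[1])))
        for key in atomdict:
            atomdict[key].sort()
        distlist.append(atomdict)
    return distlist

print(distance_matrix_by_element(atomlist)[0])
# e.g. {'6': [0.0], '8': [1.22], '1': [1.084...]}
```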
Calls read_coordinates and frac_to_cart for every path=name in fragmentnames and returns a dictionary where every return value of frac_to_cart is keyed to its fragment name. | def read_multiple_coordinates(fragmentnames):
fragdict = {}
for name in fragmentnames:
path = name + '/'
cell, pos = read_coordinates(path)
atomlist = frac_to_cart(cell, pos)
atomdict = {}
for atom in atomlist:
atomdict[atom[0][0]] = atom[1]
fragdict[name] = atomlist
return fragdict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_files():\n with open(\"CvixLerC9.loc\") as loc, open(\"CvixLerC9.qua\") as qua:\n qua_file = (qua.read().split('\\n'))\n qua_file = qua_file[8:-1]\n new_qua = []\n for q in qua_file:\n new_qua.append(q.split('\\t')) # [['1', '1.279502474'], ['3', '0.303712231']....]\n\n new_loc = {}\n header = ''\n read = False\n for i in loc:\n i = i.replace(\"\\n\", '')\n if read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n if \"(a,b)\" in i:\n header = i\n read = True\n else:\n read = False\n\n elif read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n\n return new_loc, new_qua",
"def readSurfaceGeo(b18path):\n if not os.path.isfile(b18path):\n print(\"b18 building file not found! Please check!\")\n pass\n else:\n b18file = open(b18path,\"r\")\n b18data = b18file.readlines()\n srfGeoBlock = getDataParagraph(\"_EXTENSION_BuildingGeometry_START_\", \"_EXTENSION_BuildingGeometry_END_\", b18data)\n #now get vertex's coordinate xyz\n vertexdict = dict() #{vertexID:[x,y,z]}\n srfbasicinfo = dict() #{surfaceID:[vertexID]}\n srfInfo = dict() #{surfaceID:[vertices coordinate]}\n for line in srfGeoBlock:\n dline = line.split()\n if \"vertex\" in dline:\n vertexdict[int(dline[1])] = [float(xyz) for xyz in dline[2:]] #{vertexID:[x,y,z]}\n if \"wall\" in dline or \"window\" in dline or \"floor\" in dline or \"ceiling\" in dline or \"roof\" in dline:\n srfbasicinfo[int(dline[1])] = [[int(nrID) for nrID in dline[2:]],dline[0]] #{surfaceID:[[vertexID],construction]}\n #print srfbasicinfo[int(dline[1])]\n for key in srfbasicinfo.keys():\n srfInfo[key] = []\n for vertices in srfbasicinfo[key][0]:\n srfInfo[key].append(vertexdict[vertices])\n b18file.close()\n return srfInfo,vertexdict,srfbasicinfo\n #actually only need srfInfo\n #just getting everything out for now, incase will need to use those",
"def _get_positions(self):\n position_map = dict()\n # Assumes that the positions are indexed in the order of Row-->Well-->FOV\n for well in self.wells:\n for pos in self.store[well].attrs.get('well').get('images'):\n pos_name = pos['path']\n # pos name is 'Pos_xxx'\n pos_idx = int(pos_name.split('_')[-1])\n position_map[pos_idx] = {'name': pos_name, 'well': well}\n return position_map",
"def load_fragGC_pickle(inFH):\n fojb = pickle.load(inFH)\n\n d = dict()\n for x in fojb:\n taxon_name = x[0]\n d[taxon_name] = dict()\n d[taxon_name]['fragLength'] = []\n d[taxon_name]['fragGC'] = []\n \n for scaf,v in x[1].items(): \n for z in v:\n # fragStart, fragLength, fragGC\n d[taxon_name]['fragLength'].append(z[1])\n d[taxon_name]['fragGC'].append(z[2]) \n return d",
"def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions",
"def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions",
"def _calculate_fragmentation(buddyinfo_output):\n\n frag_dict = {}\n \n for line in buddyinfo_output:\n node, frag_info = line.split(',')\n zone, free_pages = frag_info.split()[1], frag_info.split()[2:]\n\n # Convert all the strings to ints\n free_pages = map(int, free_pages)\n\n frag_dict.setdefault(node, {})\n frag_dict[node][zone] = {}\n\n total_free_pages = 0\n\n for order, free_count in enumerate(free_pages):\n total_free_pages += (2**order) * free_count\n\n for order, free_count in enumerate(free_pages):\n frag_pct = 0\n\n # really inefficient, but who cares\n for _order, _free_count in enumerate(free_pages[order:]):\n frag_pct += (2**(_order + order)) * _free_count\n \n frag_pct = float(total_free_pages - frag_pct)/total_free_pages\n \n frag_dict[node][zone][order] = (free_count, frag_pct)\n\n return frag_dict",
"def build_dict(infile):\n\n coords = {}\n sizes = {}\n\n for line in infile:\n fields = line.split()\n ref_st, ref_end, qry_st, qry_end = map(int, fields[0:4])\n qry_chr, qry_size = fields[14], int(fields[8])\n if qry_chr not in coords:\n coords[qry_chr] = {0:[], 1:[]} # 0=ref; 1=qry\n sizes[qry_chr] = qry_size\n coords[qry_chr][0].append([ref_st, ref_end])\n coords[qry_chr][1].append(sorted([qry_st, qry_end]))\n \n return coords, sizes",
"def create_chunks(file_names):\n\n\tnew_chunks = []\n\n\tfor name in file_names:\n\n\t\t# Find the .inf file and read the details stored within\n\t\ttry:\n\t\t\tdetails = open(name + suffix + 'inf', 'r').readline()\n\t\texcept IOError:\n\n\t\t\ttry:\n\t\t\t\tdetails = open(name + suffix + 'INF', 'r').readline()\n\t\t\texcept IOError:\n\t\t\t\tprint(\"Couldn't open information file, %s\" % name+suffix+'inf')\n\t\t\t\tsys.exit()\n\n\t\t# Parse the details\n\t\tdetails = [string.rstrip(details)]\n\n\t\tsplitters = [' ', '\\011']\n\n\t\t# Split the details up where certain whitespace characters occur\n\t\tfor s in splitters:\n\n\t\t\tnew_details = []\n\n\t\t\t# Split up each substring (list entry)\n\t\t\tfor d in details:\n\n\t\t\t\tnew_details = new_details + string.split(d, s)\n\n\t\t\tdetails = new_details\n\n\t\t# We should have details about the load and execution addresses\n\n\t\t# Open the file\n\t\ttry:\n\t\t\tin_file = open(name, 'rb')\n\t\texcept IOError:\n\t\t\tprint(\"Couldn't open file, %s\" % name)\n\t\t\tsys.exit()\n\n\t\t# Find the length of the file (don't rely on the .inf file)\n\t\tin_file.seek(0, 2)\n\t\tlength = in_file.tell()\n\t\tin_file.seek(0, 0)\n\n\t\t# Examine the name entry and take the load and execution addresses\n\t\tdot_at = string.find(details[0], '.')\n\t\tif dot_at != -1:\n\t\t\treal_name = details[0][dot_at+1:]\n\t\t\tload, exe = details[1], details[2]\n\t\telse:\n\t\t\treal_name = get_leafname(name)\n\t\t\tload, exe = details[0], details[1]\n\n\t\tload = hex2num(load)\n\t\texe = hex2num(exe)\n\n\t\tif load == None or exe == None:\n\t\t\tprint('Problem with %s: information is possibly incorrect.' % name+suffix+'inf')\n\t\t\tsys.exit()\n\n\t\t# Reset the block number to zero\n\t\tblock_number = 0\n\n\t\t# Long gap\n\t\tgap = 1\n\t\n\t\t# Write block details\n\t\twhile True:\n\t\t\tblock, last = write_block(in_file, real_name, load, exe, length, block_number)\n\n\t\t\tif gap == 1:\n\t\t\t\tnew_chunks.append((0x110, number(2,0x05dc)))\n\t\t\t\tgap = 0\n\t\t\telse:\n\t\t\t\tnew_chunks.append((0x110, number(2,0x0258)))\n\n\t\t\t# Write the block to the list of new chunks\n\n\t\t\t# For old versions, just write the block\n\t\t\tif UEF_major == 0 and UEF_minor < 9:\n\t\t\t\tnew_chunks.append((0x100, block))\n\t\t\telse:\n\t\t\t\tnew_chunks.append((0x100, block))\n\n\t\t\tif last == 1:\n\t\t\t\tbreak\n\n\t\t\t# Increment the block number\n\t\t\tblock_number = block_number + 1\n\n\t\t# Close the input file\n\t\tin_file.close()\n\n\t# Write some finishing bytes to the list of new chunks\n#\tnew_chunks.append((0x110, number(2,0x0258)))\n#\tnew_chunks.append((0x112, number(2,0x0258)))\n\n\t# Return the list of new chunks\n\treturn new_chunks",
"def fragment_to_keys(fragment):\n return fragment.strip(\"#\").strip(\"/\").split(\"/\")",
"def extract_segment_props(self):\n props = {}\n num_segments = int(self.general['force-scan-series.force-segments.count'])\n for segment in range(num_segments):\n segment_props = ForceArchive(self.file_path).read_properties(\n 'segments/{}/segment-header.properties'.format(segment))\n # noinspection SpellCheckingInspection\n name_jpk = segment_props['force-segment-header.name.name'].replace('-cellhesion200', '')\n normal_name = self.convert_segment_name(name_jpk)\n props[normal_name] = segment_props\n props[normal_name][\"name_jpk\"] = name_jpk\n props[normal_name][\"name\"] = normal_name\n props[normal_name][\"segment_number\"] = str(segment)\n\n return props",
"def load_store(filename):\n result = {}\n # Open file\n with open(filename, 'r') as file:\n # Read first character\n char = file.read(1)\n while char:\n # ; defines a new point\n if char == \";\":\n # The next characters are of the form (x,y,e)\n char = file.read(1) # left bracket\n\n char = file.read(1) # x\n x = char\n char = file.read(1) # comma or second digit\n\n # This means x is a two digit number\n if char != ',':\n # Add the second digit and then cast\n x += char\n x = int(x)\n char = file.read(1) # Now read the comma\n else:\n # One digit number so just cast\n print(char)\n x = int(x)\n \n # Follow a similar process for y and e\n char = file.read(1) # y\n\n y = char\n char = file.read(1) # comma or second digit\n if char != ',':\n y += char\n y = int(y)\n char = file.read(1)\n else:\n y = int(y)\n\n char = file.read(1) # encoded product\n e = char\n char = file.read(1)\n if char != ')':\n e += char\n e = int(e)\n char = file.read(1)\n else:\n e = int(e)\n \n # Add to the dictionary\n coords = (x,y)\n result[(x,y)] = e\n\n char = file.read(1)\n return result",
"def read_data(self, path, **kwargs):\n\n from glob import glob\n import os\n sc = self.sc\n pdt_lc = np.dtype([('pos', 'f4', 3),('vel', 'f4', 3)])\n\n blockids = kwargs['blockids']\n\n def set_particle_IDs_partition(index, iterator): \n \"\"\"\n Use the aggregate partition counts to set monotonically increasing \n particle indices\n \"\"\"\n p_counts = partition_counts.value\n local_index = 0\n start_index = sum([p_counts[i] for i in range(index)])\n for arr in iterator:\n arr['iOrder'] = range(start_index + local_index, start_index + local_index + len(arr))\n arr['iGroup'] = loc_to_glob_map_b.value[index]\n local_index += len(arr)\n yield arr\n \n def read_file(index, i, chunksize=102400): \n for part,filename in i:\n timein = time.time()\n with open(filename,'rb') as f: \n header = f.read(62500)\n while True:\n chunk = f.read(chunksize*24)\n if len(chunk): \n p_arr = np.frombuffer(chunk, pdt_lc)\n new_arr = np.zeros(len(p_arr), dtype=pdt)\n new_arr['pos'] = p_arr['pos']\n yield new_arr\n else: \n t_elapsed = time.time()-timein\n rate = os.path.getsize(filename)/1e6/t_elapsed\n print 'spark_fof: reading %s took %d seconds in partition %d, %f MB/sec'%(filename, t_elapsed, index, rate)\n break\n \n # determine which files to read\n get_block_ids = re.compile('blk\\.(\\d+)\\.(\\d+)\\.(\\d+)?')\n\n if blockids is None: \n files = glob(os.path.join(self.path,'*/*'))\n else: \n files = []\n for dirname, subdirlist, filelist in os.walk(path):\n try: \n dirnum = int(os.path.basename(dirname))\n if dirnum in blockids: \n for f in filelist:\n ids = get_block_ids.findall(f)\n if len(ids) > 0:\n if all(int(x) in blockids for x in ids[0]):\n files.append(os.path.join(dirname,f))\n except ValueError: \n pass\n\n files.sort()\n nfiles = len(files) \n self.nPartitions = nfiles\n\n print 'spark_fof: Number of input files: ', nfiles\n\n # get particle counts per partition\n nparts = {i:_get_nparts(filename,62500,pdt_lc.itemsize) for i,filename in enumerate(files)}\n\n print 'spark_fof: Total number of particles: ', np.array(nparts.values()).sum()\n \n # set up the map from x,y,z to partition id \n ids = map(lambda x: tuple(map(int, get_block_ids.findall(x)[0])), files)\n ids_map = {x:i for i,x in enumerate(ids)}\n self.ids_map = ids_map\n loc_to_glob_map_b = self.local_to_global_map\n \n ids_map_b = sc.broadcast(ids_map)\n loc_to_glob_map_b = sc.broadcast(loc_to_glob_map_b)\n\n partition_counts = sc.broadcast(nparts)\n\n rec_rdd = (sc.parallelize(zip(ids,files), numSlices=self.nPartitions)\n .map(lambda (id,filename): (ids_map_b.value[id],filename))\n .partitionBy(self.nPartitions).cache()\n .mapPartitionsWithIndex(read_file, preservesPartitioning=True)\n .mapPartitionsWithIndex(set_particle_IDs_partition, \n preservesPartitioning=True))\n \n return rec_rdd",
"def parse_triangle_files(self):\n nodes = {}\n boundary_nodes = []\n\n # parse node file into nodes\n with open(self.files['node']) as node_file:\n header = True\n for line in node_file:\n if header:\n header = False\n continue\n content = list(filter(lambda a: bool(a), line.split(' '))) # pylint: disable=W0108\n if not '#' in content[0]:\n is_boundary = content[3] == '1\\n'\n nodes[int(content[0])] = {\n 'id': int(content[0]),\n 'coords': [int(content[1]), int(content[2])],\n 'distance': 0 if is_boundary else None,\n 'relations': [],\n 'level_cycles': [], # ids of any level cycles this node is a part of\n 'level_paths': [], # ids of any level paths this node is a part of\n 'is_root_element': False,\n 'betweener_paths': []\n }\n if is_boundary:\n boundary_nodes.append(int(content[0]))\n node_file.close()\n\n # parse edge files into node relations\n with open(self.files['edge']) as edge_file:\n header = True\n for line in edge_file:\n if header:\n header = False\n continue\n content = list(filter(bool, line.split(' ')))\n if not '#' in content[0]:\n nodes[int(content[1])]['relations'].append(int(content[2]))\n nodes[int(content[2])]['relations'].append(int(content[1]))\n edge_file.close()\n\n # with open(self.files['ele']) as ele_file:\n # header = True\n # for line in edge_file:\n # if header:\n # header = False\n # continue\n # content = list(filter(bool, line.split(' ')))\n # if not '#' in content[0]:\n # nodes[int(content[1])]['relations'].append(int(content[2]))\n # nodes[int(content[2])]['relations'].append(int(content[1]))\n # edge_file.close()\n\n # sorts relations clockwise\n for node_id, node in nodes.items():\n nodes[node_id]['relations'] = sorted(node['relations'], key=(\n lambda related_node_id: (\n self.calculate_clockwise_angle_and_distance(node, nodes.get(related_node_id)) # pylint: disable=W0640\n )\n ))\n\n levels = self.get_levels(nodes, boundary_nodes)\n\n for level in levels:\n for node_id in level['node_ids']:\n self.identify_special_nodes(nodes, node_id)\n\n return nodes, boundary_nodes, levels",
"def _txt_to_basis_dict(basis_txt):\n\n symbol = basis_txt[0].split()[0]\n\n def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n basis_pure = basis_txt[1:]\n\n section_marks = []\n for i, line in enumerate(basis_pure):\n if not is_number(line.split()[0]):\n section_marks.append(i)\n\n shells = []\n for i in section_marks[:-1]:\n type, n_func, _ = basis_pure[i].split()\n n_func = int(n_func)\n\n if type.upper() in ['SP']:\n p_exponent, con_coefficients, p_con_coefficients = np.array([line.split()\n for line in basis_pure[i + 1:i + n_func + 1]],\n dtype=float).T\n else:\n p_exponent, con_coefficients = np.array([line.split()\n for line in basis_pure[i + 1:i + n_func + 1]],\n dtype=float).T\n p_con_coefficients = np.zeros_like(p_exponent)\n\n\n shells.append({'shell_type': type,\n 'p_exponents': list(p_exponent),\n 'con_coefficients': list(con_coefficients),\n 'p_con_coefficients': list(p_con_coefficients)})\n\n return {'symbol': symbol,\n 'shells': shells}",
"def get_expressions( useful_genes, expr_file):\n\n\t#open expressions file\n\texpression_stream = gzip.open(expr_file, \"r\")\n \n\t#reset line number\n\tlinenum = 0\n\n\texpressions_dict = {}\n\n\texpressions_header = [] \n\n\t#initialize progress bar\n\tfor line in expression_stream:\n\n\t\tlinenum += 1\n \n\t\t#skip first line, as those are the labels\n\n\n\t\tif isinstance(line, bytes) and not isinstance(line, str):\n\n\t\t\t\t\tline = line.decode()\n\t\tif line[0] != \"#\":\n\n\t\t\t#parse line\n\t\t\tline_content = line.rstrip().split(\",\")\n\t\t\t#if variant pos and gene match some value\n\t\t\tif line_content[0].split(\".\")[0] in useful_genes :\n\n\t\t\t\t#save the expression data for all the samples\n\n\t\t\t\tvar_expr = line_content[1:]\n\t\t\t\texpressions_dict[line_content[0].split(\".\")[0]] = var_expr\n\t\t\t\t#processed another variant\n\n\n\n\n\t\t\telif line.split(',')[0] == 'Name':\n \n\t\t\t\t#this is our header\n\t\t\t\texpressions_header = line.replace(\"\\n\",\"\").split(',')\n\n\treturn [expressions_dict, expressions_header]",
"def _read_expression_direct(cls):\n\n expression_data = {}\n expression_columns = cls._get_columns(EXPRESSION_MANIFEST)\n expression_psvs = cls._get_component_psvs(EXPRESSION_MANIFEST)\n\n for expression_psv in expression_psvs:\n for row in gzip.GzipFile(fileobj=io.BytesIO(cls._read_s3_url(expression_psv))):\n row_dict = dict(zip(expression_columns, row.strip().split(b'|')))\n expression_data.setdefault(\n row_dict[\"cellkey\"].decode(), {})[row_dict[\"featurekey\"].decode()] = \\\n float(row_dict[\"exrpvalue\"])\n\n return expression_data",
"def extract_energies(self):\n path2save = 'Analysis/energies.pkl'\n #check, if I have to extract them, or they are already extracted. This the latter case, load them.\n if os.path.exists(path2save):\n print(\"extraction of the polarizaion has already been done. Loading polarizations from from pkl\")\n # TODO delete to check if exists above and do load without doing\n with open('Analysis/energies.pkl', 'rb') as fid:\n [self.E0_plus, self.E0_0, self.E0_minus,\n self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.n_mols] \\\n = pickle.load(fid)\n else:\n print('Energies are being extracting and will be saved to pkl')\n for i, radius in enumerate(self.radii):\n self.E_sd_plus[radius] = {}\n self.E_sd_0[radius] = {}\n self.E_sd_minus[radius] = {}\n\n self.E_sum_env_plus[radius] = {}\n self.E_sum_env_0[radius] = {}\n self.E_sum_env_minus[radius] = {}\n\n self.V0_plus[radius] = {}\n self.V0_0[radius] = {}\n self.V0_minus[radius] = {}\n\n self.E_env_plus[radius] = {}\n self.E_env_0[radius] = {}\n self.E_env_minus[radius] = {}\n\n self.V_env_plus[radius] = {}\n self.V_env_0[radius] = {}\n self.V_env_minus[radius] = {}\n\n self.n_mols[radius] = {}\n\n for j, core_id in enumerate(self.core_ids):\n #path2file_ip = \\\n # 'Analysis/' + self.dict_radii_folder_IP[radius] + '/Matrix-analysis-IP_' \\\n # + self.mol_name + '-Mol_' + str(core_id) + '_C_1.yml'\n\n path2file_ip = \\\n 'Analysis/IP_by_radius/' + self.dict_radii_folder_IP[radius]\\\n + '/Matrix-analysis-IP_' + self.mol_name + '.yml' # new\n path2file_ea = \\\n 'Analysis/EA_by_radius/' + self.dict_radii_folder_EA[radius]\\\n + '/Matrix-analysis-EA_' + self.mol_name + '.yml'\n\n # IP. Charged states: \"+\" and \"0\"\n with open(path2file_ip) as fid:\n ip_dict = yaml.load(fid, Loader=yaml.SafeLoader)\n with open(path2file_ea) as fid:\n ea_dict = yaml.load(fid, Loader=yaml.SafeLoader)\n\n\n # number of mols extraction\n self.n_mols[radius][core_id] = len(ip_dict[int(core_id)]['energies'])\n\n # sd extraction. 
E_sd = E_0 + V_0\n self.E_sd_plus[radius][core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged'] #new\n self.E_sd_0[radius][core_id] = ip_dict[core_id]['energies'][int(core_id)]['total_e_uncharged']\n self.E_sd_minus[radius][core_id] = ea_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged']\n # E_0\n self.E0_plus[core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged_vacuum']\n self.E0_0[core_id] = ip_dict[int(core_id)]['energies'][int(core_id)]['total_e_uncharged_vacuum']\n self.E0_minus[core_id] = ea_dict[int(core_id)]['energies'][int(core_id)]['total_e_charged_vacuum']\n # # E_0_vacuum\n # self.E0_plus_vacuum[core_id] =\n # self.E0_0_vacuum[core_id] =\n # self.E0_minus_vacuum[core_id] =\n\n\n # V_0\n self.V0_plus[radius][core_id] = self.E_sd_plus[radius][core_id] - self.E0_plus[core_id]\n self.V0_0[radius][core_id] = self.E_sd_0[radius][core_id] - self.E0_0[core_id]\n self.V0_minus[radius][core_id] = self.E_sd_minus[radius][core_id] - self.E0_minus[core_id]\n\n # E_sum_env = \\sum_i\\ne 0 E_i \\sum_{j=0}^{N} V_{ij}\n ip_env_sub_dict = ip_dict[int(core_id)]['energies']#new\n del ip_env_sub_dict[int(core_id)]\n # del ip_env_sub_dict['info'] # TODO: do I need to dlt this?\n\n\n ea_env_sub_dict = ea_dict[int(core_id)]['energies'] # new\n del ea_env_sub_dict[int(core_id)]\n # del ea_env_sub_dict['info'] # TODO: do I need to dlt this?\n\n # tmp = ip_env_sub_dict['energies'][]\n\n list_total_e_env_plus = [ip_env_sub_dict[env_id]['total_e_charged'] for env_id in ip_env_sub_dict]\n self.E_sum_env_plus[radius][int(core_id)] = np.sum(list_total_e_env_plus) if not list_total_e_env_plus == [] else 0.0\n list_total_e_env_0 = [ip_env_sub_dict[env_id]['total_e_uncharged'] for env_id in ip_env_sub_dict]\n self.E_sum_env_0[radius][int(core_id)] = np.sum(list_total_e_env_0) if not list_total_e_env_0 == [] else 0.0\n list_total_e_env_minus = [ea_env_sub_dict[env_id]['total_e_charged'] for env_id in ea_env_sub_dict]\n self.E_sum_env_minus[radius][int(core_id)] = np.sum(list_total_e_env_minus) if not list_total_e_env_minus == [] else 0.0\n\n # E_env = \\sum_i \\ne 0 E_i. sum of DFT env energies.\n list_vacuum_env_e_plus = [ip_env_sub_dict[env_id]['total_e_charged_vacuum'] for env_id in ip_env_sub_dict]\n self.E_env_plus[radius][int(core_id)] = np.sum(list_vacuum_env_e_plus) if not list_vacuum_env_e_plus == [] else 0.0\n list_vacuum_env_e_0 = [ip_env_sub_dict[env_id]['total_e_uncharged_vacuum'] for env_id in ip_env_sub_dict]\n self.E_env_0[radius][int(core_id)] = np.sum(list_vacuum_env_e_0) if not list_vacuum_env_e_0 == [] else 0.0\n list_vacuum_env_e_minus = [ea_env_sub_dict[env_id]['total_e_charged_vacuum'] for env_id in ea_env_sub_dict]\n self.E_env_minus[radius][int(core_id)] = np.sum(list_vacuum_env_e_minus) if not list_vacuum_env_e_minus == [] else 0.0\n\n # V_env = 0.5 (\\sum_{i=1} \\sum_{j=1} V_{ij}). classical interaction of env. 
mols\n self.V_env_plus[radius][core_id] = 0.5 * (self.E_sum_env_plus[radius][core_id]\n - self.E_env_plus[radius][core_id]\n - self.V0_plus[radius][core_id])\n\n self.V_env_0[radius][core_id] = 0.5 * (self.E_sum_env_0[radius][core_id]\n - self.E_env_0[radius][core_id]\n - self.V0_0[radius][core_id])\n\n self.V_env_minus[radius][core_id] = 0.5 * (self.E_sum_env_minus[radius][core_id]\n - self.E_env_minus[radius][core_id]\n - self.V0_minus[radius][core_id])\n\n\n append_dict_with_mean(self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.E0_plus, self.E0_0, self.E0_minus,\n self.n_mols) # compute and add \"mean\" to all mentioned dicts\n\n with open('Analysis/energies.pkl', 'wb') as fid:\n pickle.dump([self.E0_plus, self.E0_0, self.E0_minus,\n self.V0_plus, self.V0_0, self.V0_minus,\n self.V_env_plus, self.V_env_0, self.V_env_minus,\n self.E_env_plus, self.E_env_0, self.E_env_minus,\n self.n_mols],\n fid)\n print(\"Energies are extracted and dumped to pkl\")",
"def get_positions(self) -> Dict[str, int]:\n\n with self._lock:\n return {\n name: self._return_factor * i\n for name, i in self._current_positions.items()\n }",
"def read_geometry(filepath, read_metadata=False, read_stamp=False):\n volume_info = OrderedDict()\n\n TRIANGLE_MAGIC = 16777214\n QUAD_MAGIC = 16777215\n NEW_QUAD_MAGIC = 16777213\n with open(filepath, \"rb\") as fobj:\n magic = _fread3(fobj)\n if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC): # Quad file\n nvert = _fread3(fobj)\n nquad = _fread3(fobj)\n (fmt, div) = (\">i2\", 100.) if magic == QUAD_MAGIC else (\">f4\", 1.)\n coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float) / div\n coords = coords.reshape(-1, 3)\n quads = _fread3_many(fobj, nquad * 4)\n quads = quads.reshape(nquad, 4)\n #\n # Face splitting follows\n #\n faces = np.zeros((2 * nquad, 3), dtype=np.int)\n nface = 0\n for quad in quads:\n if (quad[0] % 2) == 0:\n faces[nface] = quad[0], quad[1], quad[3]\n nface += 1\n faces[nface] = quad[2], quad[3], quad[1]\n nface += 1\n else:\n faces[nface] = quad[0], quad[1], quad[2]\n nface += 1\n faces[nface] = quad[0], quad[2], quad[3]\n nface += 1\n\n elif magic == TRIANGLE_MAGIC: # Triangle file\n create_stamp = fobj.readline().rstrip(b'\\n').decode('utf-8')\n fobj.readline()\n vnum = np.fromfile(fobj, \">i4\", 1)[0]\n fnum = np.fromfile(fobj, \">i4\", 1)[0]\n coords = np.fromfile(fobj, \">f4\", vnum * 3).reshape(vnum, 3)\n faces = np.fromfile(fobj, \">i4\", fnum * 3).reshape(fnum, 3)\n\n if read_metadata:\n volume_info = _read_volume_info(fobj)\n else:\n raise ValueError(\"File does not appear to be a Freesurfer surface\")\n\n coords = coords.astype(np.float) # XXX: due to mayavi bug on mac 32bits\n\n ret = (coords, faces)\n if read_metadata:\n if len(volume_info) == 0:\n warnings.warn('No volume information contained in the file')\n ret += (volume_info,)\n if read_stamp:\n ret += (create_stamp,)\n\n return ret",
"def loci_parsed(loci_file):\n #\n ga_list = [\"Ang_30\",\"Ang_29\"]\n\n gb_list = [\"Ang_67\", \"Ang_21\"]\n\n cc_list = [\"Cg12063\", \"Cg125212\", \"Cg126212\", \"Cg12758\", \"Cg_432\"]\n\n loci_dic = {}\n\n loci_list = {\"ga\": None, \"gb\": None, \"cc\": None}\n\n\n\n for files in loci_file:\n\n name= files.strip().split (\"/\")\n name_loci = name[12].split(\"_\")\n name_loci_1 = name_loci[1].split(\".\")\n real_name_loci = name_loci_1[0]\n\n loci_file = open(files)\n\n\n for line in loci_file:\n\n if line[:1] in \"0123456789\":\n pass\n else:\n\n line_information = line.strip().split()\n isolate = line_information[0]\n sequence = line_information [1]\n\n # if \"-\" in sequence:\n # sequence = sequence.replace (\"-\", \"\")\n\n if isolate in ga_list and loci_list[\"ga\"] == None:\n loci_list[\"ga\"] = sequence\n if isolate in gb_list and loci_list[\"gb\"] == None:\n loci_list[\"gb\"] = sequence\n if isolate in cc_list and loci_list[\"cc\"] == None:\n loci_list[\"cc\"] = sequence\n loci_dic[real_name_loci] = loci_list\n\n\n\n loci_list = {\"ga\": None, \"gb\": None, \"cc\": None}\n\n return loci_dic",
"def cereal_protein_fractions(cereals):\n result = {}\n for cereal in cereals:\n total_grams = float(cereal[\"weight\"]) * 28.35\n result[cereal[\"name\"]] = float(cereal[\"protein\"]) / total_grams\n\n return result",
"def read_zp(file):\n with open(file) as f_in:\n head = f_in.readline()\n units = f_in.readline()\n for line in f_in:\n try:\n zpWave[line.split(' ')[0].replace('\"', '')] = float(line.split(' ')[1])\n zpF0[line.split(' ')[0].replace('\"', '')] = float(line.split(' ')[2])\n \n except NameError:\n zpWave = {line.split(' ')[0].replace('\"', '') : float(line.split(' ')[1])}\n zpF0 = {line.split(' ')[0].replace('\"', '') : float(line.split(' ')[2])}\n \n return zpWave, zpF0",
"def get_shape_dicts(route_short_name, septa_fn):\n \n #modify this path to a sqlite file with\n #the gtfs data in it. \n #to create this file, i used\n #https://github.com/jarondl/pygtfs.git\n e = create_engine(septa_fn)\n Session = sessionmaker(bind = e)\n s = Session()\n\n route_block_to_shape = {}\n q = \"SELECT routes.route_short_name, trips.block_id, trips.shape_id \\\n FROM routes INNER JOIN trips \\\n ON routes.route_id == trips.route_id \\\n WHERE routes.route_short_name == :rsn \\\n GROUP BY trips.block_id\"\n results = s.execute(q, {\"rsn\":route_short_name})\n \n for r in results:\n route_block_to_shape[(r.route_short_name, r.block_id)] = r.shape_id\n\n s_ids = set(route_block_to_shape.values())\n shape_to_path = {}\n for s_id in s_ids:\n q = \"SELECT shapes.shape_pt_lat, shapes.shape_pt_lon \\\n FROM shapes \\\n WHERE shapes.shape_id == :s_id\"\n\n results = s.execute(q, {'s_id':s_id})\n path = [tuple(r) for r in results]\n shape_to_path[s_id] = path\n \n s.close()\n\n return route_block_to_shape, shape_to_path",
"def test_split_str_zmat(self):\n zmat_str_1 = \"\"\" C\n H 1 R1\n H 1 R1 2 A1\n H 1 R1 2 A1 3 D1\n H 1 R1 2 A1 3 D2\nVariables:\nA1=109.4712\nD1=120.0000\nD2=240.0000\nR1=1.0912\n\"\"\"\n coords, vars_ = converter.split_str_zmat(zmat_str_1)\n expected_coords = \"\"\" C\n H 1 R1\n H 1 R1 2 A1\n H 1 R1 2 A1 3 D1\n H 1 R1 2 A1 3 D2\"\"\"\n expected_vars = \"\"\"A1=109.4712\nD1=120.0000\nD2=240.0000\nR1=1.0912\"\"\"\n self.assertEqual(coords, expected_coords)\n self.assertEqual(vars_, expected_vars)\n\n zmat_str_2 = \"\"\"A1=109.4712\nD1=120.0000\nD2=240.0000\nR1=1.0912\n\n C\n H, 1, R1\n H, 1, R1, 2, A1\n H, 1, R1, 2, A1, 3, D1\n H, 1, R1, 2, A1, 3, D2\n\"\"\"\n coords, vars_ = converter.split_str_zmat(zmat_str_2)\n expected_coords = \"\"\" C\n H, 1, R1\n H, 1, R1, 2, A1\n H, 1, R1, 2, A1, 3, D1\n H, 1, R1, 2, A1, 3, D2\"\"\"\n expected_vars = \"\"\"A1=109.4712\nD1=120.0000\nD2=240.0000\nR1=1.0912\"\"\"\n self.assertEqual(coords, expected_coords)\n self.assertEqual(vars_, expected_vars)\n\n zmat_str_3 = \"\"\" C\n H 1 R1\n H 1 R1 2 A1\n H 1 R1 2 A1 3 D1\n H 1 R1 2 A1 3 D2\n\nA1 109.4712\nD1 120.0000\nD2 240.0000\nR1 1.0912\n\"\"\"\n coords, vars_ = converter.split_str_zmat(zmat_str_3)\n expected_coords = \"\"\" C\n H 1 R1\n H 1 R1 2 A1\n H 1 R1 2 A1 3 D1\n H 1 R1 2 A1 3 D2\"\"\"\n expected_vars = \"\"\"A1 109.4712\nD1 120.0000\nD2 240.0000\nR1 1.0912\"\"\"\n self.assertEqual(coords, expected_coords)\n self.assertEqual(vars_, expected_vars)\n\n zmat_str_4 = \"\"\" C\n H 1 R1\n H 1 R1 2 A1\n H 1 R1 2 A1 3 D1\n H 1 R1 2 A1 3 D2\n\nA1=109.4712\nD1=120.0000\nD2=240.0000\nR1=1.0912\n\"\"\"\n coords, vars_ = converter.split_str_zmat(zmat_str_4)\n expected_coords = \"\"\" C\n H 1 R1\n H 1 R1 2 A1\n H 1 R1 2 A1 3 D1\n H 1 R1 2 A1 3 D2\"\"\"\n expected_vars = \"\"\"A1=109.4712\nD1=120.0000\nD2=240.0000\nR1=1.0912\"\"\"\n self.assertEqual(coords, expected_coords)\n self.assertEqual(vars_, expected_vars)\n\n zmat_str_5 = \"\"\" C\n H 1 1.0912\n H 1 1.0912 2 109.4712\n H 1 1.0912 2 109.4712 3 120.0000\n H 1 1.0912 2 109.4712 3 240.0000\n\"\"\"\n coords, vars_ = converter.split_str_zmat(zmat_str_5)\n self.assertEqual(coords, zmat_str_5)\n self.assertIsNone(vars_)",
"def frac_to_cart(cell, positions):\n atomlist = []\n counter = 1\n a, b, c = cell[0], cell[1], cell[2]\n alpha, beta, gamma = cell[3] / 180 * np.pi, cell[4] / 180 * np.pi, cell[5] / 180 * np.pi\n v = np.sqrt(1 - np.cos(alpha) * np.cos(alpha) - np.cos(beta) * np.cos(beta) - np.cos(gamma) * np.cos(gamma) \\\n + 2 * np.cos(alpha) * np.cos(beta) * np.cos(gamma))\n transmatrix = np.matrix([[a, b * np.cos(gamma), c * np.cos(beta)],\n [0, b * np.sin(gamma), c * (np.cos(alpha) - np.cos(beta) * np.cos(gamma)) / np.sin(gamma)],\n [0, 0, c * v / np.sin(gamma)]])\n\n for atom in positions:\n coordmatrix = np.dot(transmatrix, positions[str(atom)])\n coordmatrix = np.array(coordmatrix).flatten().tolist()\n atomlist.append([])\n atomlist[-1].append([atom, atomtable[atom[0]]])\n counter += 1\n atomlist[-1].append(np.array(coordmatrix))\n return atomlist",
"def read(self, filePath):\n \n result = {\n 'coordinates': {\n 'count': 0,\n 'nodes': []\n },\n 'element_groups': { \n 'number_of_elements': 0,\n 'count': 0,\n 'groups': []\n },\n 'bars': [],\n 'materials': {\n 'count': 0,\n 'materials': []\n },\n 'geometric_properties': {\n 'count': 0\n },\n 'bcnodes': {\n 'count': 0\n },\n 'loads': {\n 'count': 0\n }\n }\n # print(result['coordinates']['nodes'])\n \n with open(filePath,'r') as f:\n lines = f.readlines()\n elementCounter = 0\n groupCounter = 0\n geometricCounter = 0\n\n for line in lines:\n line = line.strip()\n el = line.split(' ')\n \n if len(line) == 0:\n continue\n\n if len(line) != 0 and line[0] == \"*\":\n section = line[1:].lower()\n continue\n \n if section == 'coordinates':\n if len(el) == 1 :\n result[section]['count'] = el[0]\n else:\n result[section]['nodes'].append(Node(int(el[0]), float(el[1]), float(el[2])))\n \n elif section == 'element_groups':\n if len(line) == 1:\n result[section]['count'] = int(el[0])\n else: \n result[section]['groups'].append(Group(el[0], el[1], el[2]))\n result[section]['number_of_elements'] += int(el[1])\n\n elif section == 'incidences':\n groups = result['element_groups']['groups']\n nodes = result['coordinates']['nodes']\n print(el)\n\n currentGroup = groups[groupCounter]\n if (currentGroup.amount == 0):\n groupCounter += 1\n currentGroup = groups[groupCounter]\n \n print(\"Group n: {} count: {}\".format(currentGroup.n, currentGroup.amount))\n \n bar = Bar(el[0], nodes[int(el[1])-1], nodes[int(el[2])-1], groups[groupCounter])\n print(\n \"\"\"\n Bar {} created \n Start node: {} End Node: {} Group: {}\n \"\"\".format(bar.id, bar.startNode.n, bar.endNode.n, bar.group))\n result['bars'].append(bar)\n currentGroup.amount -= 1\n \n elif section == 'materials':\n if len(el) == 1:\n result[section]['count'] = el[0]\n groupCounter = 0\n else:\n material = Material(el[0], el[1], el[2])\n result[section]['materials'].append(material)\n result['element_groups']['groups'][groupCounter].setMaterial(material)\n groupCounter += 1\n\n elif section == 'geometric_properties':\n if geometricCounter == 0:\n result[section]['count'] = el[0]\n else:\n result['element_groups']['groups'][geometricCounter - 1].setSectionArea(\n el[0]\n )\n geometricCounter += 1\n\n elif section == 'bcnodes':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n nodeIndex = next((e for e, item in enumerate(\n result['coordinates']['nodes']) if item.n == int(el[0])), None\n )\n result['coordinates']['nodes'][nodeIndex].setRestriction(int(el[1]))\n\n elif section == 'loads':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n load = Load(el[1], el[2])\n nodeIndex = next((e for e, item in enumerate(\n result['coordinates']['nodes']) if item.n == int(el[0])), None\n )\n result['coordinates']['nodes'][nodeIndex].addLoad(load)\n\n for bar in result['bars']:\n bar.createLocalArray()\n\n print('---------- Parsing complete! ----------')\n pprint(result)\n print('---------------------------------------')\n\n return result",
"def parse_gff3(filename):\n genes = OrderedDict()\n transcript_to_locus = {}\n\n count_per_transcript = defaultdict(lambda: 1)\n\n with open(filename) as gff_in:\n for line in gff_in:\n # Skip comments\n if not line.strip()[0] == '#':\n line_data = parse_line(line)\n\n # Parts (e.g. CDS or Exon) might not have an ID. One will be added here\n if ID_ATTRIBUTE not in line_data['attributes'].keys() and line_data['feature'] in PARTS_FEATURES:\n if PARENT_ATTRIBUTE in line_data['attributes'].keys():\n counter_id = line_data['attributes'][PARENT_ATTRIBUTE] + '.' + line_data['feature'] + '.'\n new_id = counter_id + str(count_per_transcript[counter_id])\n count_per_transcript[counter_id] += 1\n line_data['attributes'][ID_ATTRIBUTE] = new_id\n\n # Every line needs a valid ID\n if ID_ATTRIBUTE in line_data['attributes'].keys():\n\n if line_data['feature'] in LOCUS_FEATURES:\n genes[line_data['attributes'][ID_ATTRIBUTE]] = {\n 'data': line_data,\n 'transcripts': OrderedDict()\n }\n\n elif line_data['feature'] in TRANSCRIPT_FEATURES:\n if PARENT_ATTRIBUTE in line_data['attributes'].keys():\n parent_id = line_data['attributes'][PARENT_ATTRIBUTE]\n\n if parent_id in genes.keys():\n genes[parent_id]['transcripts'][line_data['attributes'][ID_ATTRIBUTE]] = {\n 'data': line_data,\n 'parts': []\n }\n\n transcript_to_locus[line_data['attributes'][ID_ATTRIBUTE]] = \\\n line_data['attributes'][PARENT_ATTRIBUTE]\n\n elif line_data['feature'] in PARTS_FEATURES:\n\n if PARENT_ATTRIBUTE in line_data['attributes'].keys():\n parent_id = line_data['attributes'][PARENT_ATTRIBUTE]\n grandparent_id = transcript_to_locus[parent_id]\n\n genes[grandparent_id]['transcripts'][parent_id]['parts'].append(line_data)\n\n return genes",
"def load_n3d_coords(file_path): \n \n import core.nuc_io as io\n\n seq_pos_dict = {}\n coords_dict = {} \n \n with io.open_file(file_path) as file_obj:\n chromo = None\n \n for line in file_obj:\n \n data = line.split()\n n_items = len(data)\n \n if not n_items:\n continue\n \n elif data[0] == '#':\n continue\n \n elif n_items == 3:\n chromo, n_coords, n_models = data\n \n #if chromo.lower()[:3] == 'chr':\n # chromo = chromo[3:]\n \n if chromo in coords_dict:\n raise Exception('Duplicate chromosome \"%s\" records in file %s' % (chromo, file_path))\n \n n_coords = int(n_coords)\n n_models = int(n_models)\n \n chromo_seq_pos = np.empty(n_coords, int)\n chromo_coords = np.empty((n_models, n_coords, 3), float)\n \n coords_dict[chromo] = chromo_coords\n seq_pos_dict[chromo] = chromo_seq_pos\n \n check = (n_models * 3) + 1\n i = 0\n \n elif not chromo:\n raise Exception('Missing chromosome record in file %s' % file_path)\n \n elif n_items != check:\n msg = 'Data size in file %s does not match Position + Models * Positions * 3'\n raise Exception(msg % file_path)\n \n else:\n chromo_seq_pos[i] = int(data[0])\n \n coord = [float(x) for x in data[1:]]\n coord = np.array(coord).reshape(n_models, 3)\n chromo_coords[:,i] = coord\n i += 1\n \n return seq_pos_dict, coords_dict",
"def get_phi_comps_from_recfile(recfile):\n iiter = 1\n iters = {}\n f = open(recfile, \"r\")\n while True:\n line = f.readline()\n if line == \"\":\n break\n if (\n \"starting phi for this iteration\" in line.lower()\n or \"final phi\" in line.lower()\n ):\n contributions = {}\n while True:\n line = f.readline()\n if line == \"\":\n break\n if \"contribution to phi\" not in line.lower():\n iters[iiter] = contributions\n iiter += 1\n break\n raw = line.strip().split()\n val = float(raw[-1])\n group = raw[-3].lower().replace('\"', \"\")\n contributions[group] = val\n return iters"
] | [
"0.5323081",
"0.511978",
"0.50651133",
"0.50311476",
"0.4981396",
"0.4981396",
"0.49392763",
"0.49328926",
"0.49008498",
"0.48904055",
"0.48638704",
"0.48584062",
"0.48568657",
"0.48379332",
"0.4833694",
"0.48149553",
"0.47972798",
"0.47770527",
"0.47520956",
"0.47463682",
"0.47423008",
"0.47419065",
"0.47095552",
"0.46859553",
"0.46523467",
"0.46411744",
"0.46396038",
"0.46207905",
"0.4617517",
"0.461293"
] | 0.7859618 | 0 |
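As a worked companion to the row above: `read_multiple_coordinates` relies on a fractional-to-Cartesian conversion, and the `frac_to_cart` negative quoted earlier spells out the transformation matrix built from the cell parameters a, b, c, alpha, beta, gamma. The sketch below reproduces that matrix in isolation; the sample cell and fractional positions are hypothetical.

```python
import numpy as np

def frac_to_cart_matrix(a, b, c, alpha, beta, gamma):
    """3x3 matrix mapping fractional to Cartesian coordinates (angles in degrees)."""
    al, be, ga = np.radians([alpha, beta, gamma])
    v = np.sqrt(1 - np.cos(al)**2 - np.cos(be)**2 - np.cos(ga)**2
                + 2 * np.cos(al) * np.cos(be) * np.cos(ga))
    return np.array([
        [a, b * np.cos(ga), c * np.cos(be)],
        [0, b * np.sin(ga), c * (np.cos(al) - np.cos(be) * np.cos(ga)) / np.sin(ga)],
        [0, 0, c * v / np.sin(ga)],
    ])

# Hypothetical cell (a, b, c in angstroms; angles in degrees) and fractional positions.
cell = (10.0, 12.0, 15.0, 90.0, 95.0, 90.0)
positions = {"C1": np.array([0.10, 0.25, 0.50]),
             "O1": np.array([0.40, 0.25, 0.50])}

M = frac_to_cart_matrix(*cell)
cartesian = {name: M @ frac for name, frac in positions.items()}
print(cartesian["C1"])  # Cartesian position of C1
```

Keyed per fragment, the dictionary returned by the row's `read_multiple_coordinates` simply maps each fragment name to atom lists converted this way.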
Returns the compound name and the cell parameters from an xd.mas style file specified by 'path'. | def read_xd_master_file(path, errorpointer):
filepointer = open(path, 'r')
for line in filepointer.readlines():
if 'TITLE' in line:
compound_name = line.partition('!')[2].lstrip().rstrip()
if 'CELL' in line:
cell = [float(i) for i in line.split(" ") if '.' in i]
break
filepointer.close()
try:
return compound_name, cell
except:
errorpointer.write(path + '\n')
return None, None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_compound_properties(path):\n filepointer = open(path)\n charge = None\n NE = None\n E_HF = None\n dipole = None\n read_dipole = False\n for line in filepointer:\n if read_dipole:\n read_dipole = False\n dipole = [float(value) for value in line.split(' ') if '.' in value]\n dipole = np.linalg.norm(dipole)\n elif 'Charge' in line and not charge:\n charge = line.split(' ')[-1].rstrip('\\n')\n elif 'Number of electrons' in line and not NE:\n NE = line.split(' ')[-1].rstrip('\\n')\n elif 'Total Energy' in line and not E_HF:\n E_HF = line.split(' ')[-1].rstrip('\\n')\n elif 'Dipole Moment' in line and not dipole:\n read_dipole = True\n if charge and NE and E_HF and dipole:\n break\n return [charge, NE, dipole, E_HF]",
"def readParams(path):\n tiles = open(path, \"r\")\n #--- Starting date\n tiles.readline()\n index = tiles.readline()[:-1]\n \n #--- Starting date\n tiles.readline()\n B_date = tiles.readline()[:-1]\n \n #--- Stopping date\n tiles.readline()\n E_date = tiles.readline()[:-1]\n \n #--- DATA \n tiles.readline()\n DATA_path = tiles.readline()[:-1]\n \n #--- Csv \n tiles.readline()\n out = tiles.readline()[:-1]\n \n #--- Shapefile\n tiles.readline()\n shp = tiles.readline()[:-1]\n \n #--- Water mask\n water = DATA_path + '/waterMask'\n \n return index, B_date, E_date, DATA_path, out, shp, water",
"def load_from_file(self, path):\n structure = None\n if re.search(\".pdb\", path):\n parser = PDBParser()\n else:\n parser = MMCIFParser()\n\n path = path.strip()\n model_id = os.path.basename(path)\n #if os.path.basename(path).split('.')[-1] == 'gz':\n # GZ = gzip.open(path, 'rb')\n # GZ.close()\n #else :\n\n structure = parser.get_structure(model_id, open_file( path ))\n header = parser.get_header()\n\n return structure, header",
"def read_coordinates(path='', sort=True):\n maspointer = open(path + 'xd.mas', 'r')\n respointer = open(path + 'xd.res', 'r')\n\n positions = {}\n keylist = [] #Needed to keep the atomlist order. This is important for the frequency read function.\n for line in maspointer.readlines():\n if 'CELL ' in line:\n cell = [float(i) for i in line.split(\" \") if '.' in i]\n break\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(\" \") if '.' in i]\n coords = coords[:-1]\n key = line.split(\" \")[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return cell, positions, sortkeylist",
"def process(path):\n # get parameter value:\n with open('config.cym', 'r') as f:\n line = f.readline()\n #print(line)\n pam = float(line[1:])\n f.close()\n # get position of aster:\n with open('aster.txt', 'r') as f:\n for line in f:\n if len(line)>3 and not line[0]=='%':\n #print(line)\n val = line.split()\n x = float(val[2])\n y = float(val[3])\n #z = float(val[4])\n #pos = math.sqrt(x*x+y*y+z*z)\n pos = math.sqrt(x*x+y*y)\n\n f.close()\n return (pam, pos)",
"def get_new_cell(path: tuple(str, ...)) -> dict[str, Any]:\n return {\n \"path\": path,\n \"type\": \"cell\",\n # \"celltype\": \"structured\", # or \"text\" for help cells\n \"datatype\": \"mixed\",\n \"hash_pattern\": None,\n \"UNTRANSLATED\": True,\n }",
"def extract_geometry(file_path):\r\n file_path = Path(file_path)\r\n \r\n \"\"\"singleSlash = \"\\\\\" # WOW THIS IS INCREDIBLE FRUSTRATING--I think the tests folder might need to be capatilized...\r\n doubleSlash = \"\\\\\\\\\"---THE FILEPATH NEEDS TO BE SINGLE FORWARD SLASHES FOR THE PATH FUNCTION TO WORK\r\n file_path_geom = file_path.replace(singleSlash,doubleSlash)\"\"\"\r\n \r\n \"\"\" Going to put the conversion functionality in the parse arguments section\"\"\"\r\n \r\n workbook = xlrd.open_workbook(file_path)\r\n worksheet = workbook.sheet_by_name('Outputs')\r\n pt1x = worksheet.cell(1,2).value\r\n pt1z = worksheet.cell(1,3).value\r\n pt1y = worksheet.cell(1,4).value\r\n pt2x = worksheet.cell(2,2).value\r\n pt2z = worksheet.cell(2,3).value\r\n pt2y = worksheet.cell(2,4).value\r\n pt3x = worksheet.cell(3,2).value\r\n pt3z = worksheet.cell(3,3).value\r\n pt3y = worksheet.cell(3,4).value\r\n pt4x = worksheet.cell(4,2).value\r\n pt4z = worksheet.cell(4,3).value\r\n pt4y = worksheet.cell(4,4).value\r\n pt5x = worksheet.cell(5,2).value\r\n pt5z = worksheet.cell(5,3).value\r\n pt5y = worksheet.cell(5,4).value\r\n pt6x = worksheet.cell(6,2).value\r\n pt6z = worksheet.cell(6,3).value\r\n pt6y = worksheet.cell(6,4).value\r\n pt7x = worksheet.cell(7,2).value\r\n pt7z = worksheet.cell(7,3).value\r\n pt7y = worksheet.cell(7,4).value\r\n pt8x = worksheet.cell(8,2).value\r\n pt8z = worksheet.cell(8,3).value\r\n pt8y = worksheet.cell(8,4).value\r\n pt9x = worksheet.cell(9,2).value\r\n pt9z = worksheet.cell(9,3).value\r\n pt9y = worksheet.cell(9,4).value\r\n pt10x = worksheet.cell(10,2).value\r\n pt10z = worksheet.cell(10,3).value\r\n pt10y = worksheet.cell(10,4).value\r\n pt11x = worksheet.cell(11,2).value\r\n pt11z = worksheet.cell(11,3).value\r\n pt11y = worksheet.cell(11,4).value\r\n pt12x = worksheet.cell(12,2).value\r\n pt12z = worksheet.cell(12,3).value\r\n pt12y = worksheet.cell(12,4).value\r\n pt13x = worksheet.cell(13,2).value\r\n pt13z = worksheet.cell(13,3).value\r\n pt13y = worksheet.cell(13,4).value\r\n pt14x = worksheet.cell(14,2).value\r\n pt14z = worksheet.cell(14,3).value\r\n pt14y = worksheet.cell(14,4).value\r\n pt15x = worksheet.cell(15,2).value\r\n pt15z = worksheet.cell(15,3).value\r\n pt15y = worksheet.cell(15,4).value\r\n pt16x = worksheet.cell(16,2).value\r\n pt16z = worksheet.cell(16,3).value\r\n pt16y = worksheet.cell(16,4).value\r\n #U_100x = worksheet.cell(17,2).value\r\n #U_100z = worksheet.cell(17,3).value # Not really using the other 2-dimensions for now\r\n #U_100y = worksheet.cell(17,4).value\r\n \r\n if pt16z == 0:\r\n print(\"Top point has a 0 height value--error in data import\")\r\n return pt1x, pt1z, pt1y, pt2x, pt2z, pt2y, pt3x, pt3z, pt3y, pt4x, pt4z, pt4y, pt5x, pt5z, pt5y, pt6x, pt6z, pt6y, pt7x, pt7z, pt7y, pt8x, pt8z, pt8y, pt9x, pt9z, pt9y, pt10x, pt10z, pt10y, pt11x, pt11z, pt11y, pt12x, pt12z, pt12y, pt13x, pt13z, pt13y, pt14x, pt14z, pt14y, pt15x, pt15z, pt15y, pt16x, pt16z, pt16y",
"def extract(path,\n top_flag='CARTESIAN COORDINATES',\n bottom_flag='Empirical Formula:',\n pattern=r'([\\d]*)\\s+([a-zA-Z]*)\\s+(\\-?\\d+\\.\\d+)\\s+(\\-?\\d+\\.\\d+)\\s+(\\-?\\d+\\.\\d+)'):\n top = tuple()\n bottom = tuple()\n rows = list()\n\n with open(path, 'r') as p:\n for num, line in enumerate(p, 1):\n if top_flag in line:\n top = num\n if bottom_flag in line:\n bottom = num\n\n with open(path, 'r') as p:\n for num, line in enumerate(p, 1):\n if (top <= num) and (bottom >= num):\n rows += re.findall(pattern, line)\n\n return pandas.DataFrame(rows, columns=['num', 'atom_name', 'x', 'y', 'z'])",
"def format_script_for_cell(path):\n header = '\\n# Cell content replaced by load magic replacement.\\n'\n with open(str(path), encoding='utf8') as f:\n solution = f.read()\n if not solution:\n raise RuntimeError('Solution {} has no content.'.format(path))\n return header + solution",
"def load_params_from_file(path):\n save_dict = mx.nd.load(path)\n arg_params = {}\n aux_params = {}\n for k, v in save_dict.items():\n tp, name = k.split(':', 1)\n if tp == 'arg':\n arg_params[name] = v\n if tp == 'aux':\n aux_params[name] = v\n return arg_params, aux_params",
"def from_file(path):\n\n filename = os.path.basename(path)\n\n base, suffix = os.path.splitext(filename);\n\n if suffix == '.bin':\n g = bgy3d.from_file(path)\n elif suffix == '.m':\n g = contf.m2dat(path)\n else:\n print 'Unknown file suffix.'\n exit()\n\n return g",
"def readFile(file_name):\n if file_name.split('.')[-1] == 'thid':\n x,m,w = readThid(file_name)\n e = np.empty_like(x)\n e[:] = np.nan\n return x,m,w,e\n else:\n return readParams(file_name)",
"def readfile(path):\n with open(path, 'r', encoding='utf-8') as f:\n param = tuple(f.readlines())\n return param",
"def Read_CSSR(filename):\n f = open(filename)\n#\n# First read unit cell\n#\n tokens = f.readline().split()\n if len(tokens) != 3: \n print \"Format mismatch -- first cell line\"\n sys.exit(1)\n a, b, c = map(float,tokens[:])\n tokens = f.readline().split()\n if len(tokens) < 3: \n print \"Format mismatch -- second cell line\"\n sys.exit(1)\n alpha, beta, gamma = map(float,tokens[0:3])\n\n cell = N.zeros((3,3),N.Float)\n\n alpha, beta, gamma = map(lambda x: x*pi/180.0, (alpha,beta,gamma))\n va = N.array((a,0.0,0.0),N.Float)\n vb = N.array((b*cos(gamma), b*sin(gamma), 0.0),N.Float)\n xxx = (cos(alpha)-cos(beta)*cos(gamma)) / sin(gamma)\n vc = N.array((c*cos(beta), c*xxx, c*sqrt(sin(beta)**2 - xxx**2)),N.Float)\n\n cell[0,:] = va[:]\n cell[1,:] = vb[:]\n cell[2,:] = vc[:]\n\n#\n# Now the atoms\n#\n tokens = f.readline().split()\n natoms = int(tokens[0])\n f.readline() # empty line\n\n crystal = Structure([])\n import re\n p = re.compile(\"[A-z]+\")\n for a in range(natoms):\n tokens = f.readline().split()\n number, tag, x, y, z = tokens[0:5]\n m = p.match(tag)\n if m:\n symbol = m.group()\n else:\n print \"Cannot match \", tag \n crystal.append(Atom(symbol, [float(x), float(y), float(z)]))\n\n crystal.SetUnitCell(cell)\n crystal.SetBoundaryConditions(periodic=True)\n\n return crystal",
"def _load_template(self, path):\n mol = Chem.RWMol()\n extension = os.path.basename(path).split(\".\")[1]\n\n if extension == \"sdf\":\n mol = Chem.MolFromMolFile(path, sanitize=True, removeHs=True)\n elif extension == \"pdb\":\n mol = Chem.MolFromPDBFile(path, sanitize=True, removeHs=True)\n else:\n raise ValueError(\"Unsupported molecule type '{}'\".format(extension))\n\n p = Chem.AdjustQueryParameters()\n p.makeAtomsGeneric = True\n p.makeBondsGeneric = True\n\n mol = Chem.AdjustQueryProperties(mol, p)\n\n return mol",
"def read_stellar_properties(path = os.path.join(HERE, '../inputs/stellar_properties.txt')):\n\n # Read in table of stellar types\n data = np.loadtxt(path, skiprows=19, dtype = str)\n\n # Parse\n stypes = data[:,0]\n masses = np.array(data[:,1], dtype=float)\n lums = np.array(data[:,2], dtype=float)\n rads = np.array(data[:,3], dtype=float)\n temps = np.array(data[:,4], dtype=float)\n mvs = np.array(data[:,6], dtype=float)\n\n # Construct dictionary\n dic = {\n \"stypes\" : stypes,\n \"masses\" : masses,\n \"lums\" : lums,\n \"rads\" : rads,\n \"temps\" : temps,\n \"mvs\" : mvs\n }\n\n return dic",
"def readCrystParam(crystfile):\n \n # Default values\n ccell1 = np.eye(3)\n ccell2 = np.eye(3)\n planehkl = [1,0,0]\n diruvw = [0,1,0]\n \n try:\n with open(crystfile,\"r\") as f:\n content = f.readlines()\n except FileNotFoundError:\n content = []\n\n for l in content:\n if l[0].rstrip() == \"#\":\n continue\n line = l.split('=')\n if len(line) == 2:\n if line[0].rstrip()==\"ccell1\":\n ccell1 = eval(line[1].rstrip())\n elif line[0].rstrip()==\"ccell2\":\n ccell2 = eval(line[1].rstrip())\n elif line[0].rstrip()==\"planehkl\":\n planehkl = eval(line[1].rstrip())\n elif line[0].rstrip()==\"diruvw\":\n diruvw = eval(line[1].rstrip())\n else:\n print(\"WARNING: %s is not a supported input\"%(line[0].rstrip()))\n elif len(line) > 2:\n raise SyntaxError(l)\n\n return ccell1, ccell2, planehkl, diruvw",
"def read(path):",
"def get_parameters(path):\n f = open(path, \"r\")\n line = f.readline()\n line = line.strip('\\n')\n values = line.split(',')\n parameter = [[0 for i in range(len(values))] for i in range(6)]\n row = 0\n while line:\n line = line.strip('\\n')\n values = line.split(',')\n for i in range(len(values)):\n if row != 0:\n parameter[row][i] = (float)(values[i])\n else:\n parameter[row][i] = values[i]\n row += 1\n line = f.readline()\n f.close()\n return parameter",
"def load_a_couple(self, path):\n return pd.read_hdf(path[0], key='s'), np.load(path[1])",
"def parse_dem_header(path):\n lookup = _parse_header(path)\n\n # NB: many lookup fields have multiple elements, eg ['1000', 'Hz']\n subset = {ifc.PYRATE_NCOLS: int(lookup[GAMMA_WIDTH][0]), ifc.PYRATE_NROWS: int(lookup[GAMMA_NROWS][0])}\n\n expected = ['decimal', 'degrees']\n for k in [GAMMA_CORNER_LAT, GAMMA_CORNER_LONG, GAMMA_X_STEP, GAMMA_Y_STEP]:\n units = lookup[GAMMA_CORNER_LAT][1:]\n if units != expected: # pragma: no cover\n msg = \"Unrecognised units for GAMMA %s field\\n. Got %s, expected %s\"\n raise GammaException(msg % (k, units, expected))\n\n subset[ifc.PYRATE_LAT] = float(lookup[GAMMA_CORNER_LAT][0])\n subset[ifc.PYRATE_LONG] = float(lookup[GAMMA_CORNER_LONG][0])\n subset[ifc.PYRATE_Y_STEP] = float(lookup[GAMMA_Y_STEP][0])\n subset[ifc.PYRATE_X_STEP] = float(lookup[GAMMA_X_STEP][0])\n subset[ifc.PYRATE_DATUM] = \"\".join(lookup[GAMMA_DATUM])\n subset[ifc.PYRATE_INSAR_PROCESSOR] = GAMMA\n return subset",
"def open_file(path):\n book = xlrd.open_workbook(path)\n # print number of sheets\n #print book.nsheets\n # print sheet names\n #print book.sheet_names()\n # get the first worksheet\n first_sheet = book.sheet_by_index(0)\n # read a row\n #print first_sheet.row_values(0)\n # read a cell\n cell = first_sheet.cell(1,0)\n #print cell\n #print cell.value\n # read a row slice\n #print first_sheet.row_slice(rowx=0,start_colx=0,end_colx=2)\n\n \"\"\"\n if Junipter.search_junipter_rule(first_sheet,1) == 0:\n print \"Juniper rule doesn't match\"\n else:\n print \"Juniper rule match\"\n \"\"\"\n\n \"\"\"\n if Mitac.search_mitac_rule(first_sheet,1) == 0:\n print \"Mitac rule doesn't match\"\n else:\n print \"Mitac rule match\"\n \"\"\"\n\n if Fabrinet.search_fabrinet_rule(first_sheet,3) == 0:\n print \"fabrinet rule doesn't match\"\n else:\n print \"fabrinet rule match\"",
"def parse_geometry(path: str) -> Optional[Dict[str, tuple]]:\n if not os.path.isfile(path):\n raise InputError(f'Could not find file {path}')\n if path.endswith('.yml'):\n content = read_yaml_file(path)\n if isinstance(content, dict):\n if 'xyz' in content.keys():\n return content['xyz'] if isinstance(content['xyz'], dict) else str_to_xyz(content['xyz'])\n elif 'opt_xyz' in content.keys():\n return content['opt_xyz'] if isinstance(content['opt_xyz'], dict) else str_to_xyz(content['opt_xyz'])\n software = identify_ess(path)\n xyz_str = ''\n if software == 'xtb':\n lines = _get_lines_from_file(path)\n final_structure, coord, first_line = False, False, True\n for line in lines:\n if '$' in line or 'END' in line or len(line.split()) < 10:\n coord = False\n if coord:\n splits = line.split()\n xyz_str += f'{qcel.periodictable.to_E(splits[3])} {splits[0]} {splits[1]} {splits[2]}\\n'\n if final_structure and ('$coord' in line or len(line.split()) > 15):\n coord = True\n if len(line.split()) > 15 and first_line:\n splits = line.split()\n xyz_str += f'{qcel.periodictable.to_E(splits[3])} {splits[0]} {splits[1]} {splits[2]}\\n'\n first_line = False\n if 'final structure:' in line:\n final_structure = True\n return str_to_xyz(xyz_str)\n\n log = ess_factory(fullpath=path, check_for_errors=False)\n try:\n coords, number, _ = log.load_geometry()\n except LogError:\n logger.debug(f'Could not parse xyz from {path}')\n\n # Try parsing Gaussian standard orientation instead of the input orientation parsed by Arkane.\n lines = _get_lines_from_file(path)\n for i in range(len(lines)):\n if 'Standard orientation:' in lines[i]:\n xyz_str = ''\n j = i\n while len(lines) and not lines[j].split()[0].isdigit():\n j += 1\n while len(lines) and '-------------------' not in lines[j]:\n splits = lines[j].split()\n xyz_str += f'{qcel.periodictable.to_E(int(splits[1]))} {splits[3]} {splits[4]} {splits[5]}\\n'\n j += 1\n break\n\n if xyz_str:\n return str_to_xyz(xyz_str)\n return None\n\n return xyz_from_data(coords=coords, numbers=number)",
"def get_gml_data(file_path):\n\n bbox = (2.34592e7,100+6.704e6,2.34603e7,700+6.704e6)\n return gpd.read_file(file_path, bbox=bbox)",
"def read_xyz(filename):\n\n config = {}\n\n with open(filename, 'r') as f:\n # number of atoms (spins)\n config['nat'] = int(re.findall('\\S+', f.readline())[0])\n\n # box parameters (type, dimension, shape, periodicity)\n sarr = re.findall('\\S+', f.readline())\n config['latt_type'] = sarr[0]\n dims = list(map(int, sarr[1:4]))\n config['latt_box'] = np.array(dims)\n config['box'] = np.diag(dims)\n config['pbc'] = list(map(int, sarr[4:7]))\n if len(sarr) > 7:\n dim_intra = len(sarr) - 7\n\n atom_types = []\n xyz = []\n config['latt_i'] = np.zeros(dims, dtype=int)\n config['latt_atoms'] = np.zeros(dims, dtype=int)\n config['latt_intra'] = np.zeros(tuple(dims) + (dim_intra,), dtype='float64')\n for i in range(config['nat']):\n sarr = re.findall('\\S+', f.readline())\n t = int(sarr[0])\n r = tuple(map(int, sarr[1:4]))\n\n atom_types.append(t)\n xyz.append(r)\n\n config['latt_i'][r] = i\n config['latt_atoms'][r] = t\n\n for j in range(dim_intra):\n ci = float(sarr[4 + j])\n config['latt_intra'][r[0], r[1], r[2], j] = ci\n\n config['atom_types'] = np.array(atom_types)\n config['xyz'] = np.array(xyz)\n \n return config",
"def readfile(self, path, filename):\n # The DataStudio software uses ISO-8859-1 encoding (especially for the degree sign in temperature files)\n file = open(path + filename, encoding=\"iso-8859-1\")\n rowlist = file.readlines()\n\n title = rowlist[0].strip(\"\\n\")\n labels = rowlist[1].strip(\"\\n\").split(sep=\"\\t\")\n\n data = np.zeros((len(rowlist)-2, 2))\n\n for i in range(2, len(rowlist)):\n columns = rowlist[i].split(sep=\"\\t\")\n data[i-2, 0] = float(columns[0].replace(\",\", \".\"))\n data[i-2, 1] = float(columns[1].replace(\",\", \".\"))\n\n return data, title, labels",
"def ReadAtomParameter(AtomParameterPath):\r\n\r\n AtomParameter=os.path.join(AtomParameterPath,'AtomParameter')\r\n\r\n Key1,Key2,Key3=False,False,False\r\n MaterialAtomDictionary,GasAtomDictionary,MassDictionary={},{},{}\r\n SpecialPair,SpecialPairList=[],[]\r\n\r\n with open(AtomParameter, 'r') as File:\r\n for Line in File.readlines():\r\n if Line.strip():\r\n WordList=Line.strip().split()\r\n if WordList[0]=='#':\r\n continue\r\n elif WordList[0]=='MaterialAtom:':\r\n Key1=True\r\n elif WordList[0]=='GasAtom:':\r\n Key1=False\r\n Key2=True\r\n elif WordList[0]=='SpecialPair:':\r\n Key2=False\r\n Key3=True\r\n\r\n # MaterialAtom\r\n elif Key1==True and WordList[0]!='Number':\r\n MaterialAtomDictionary[WordList[1]]=WordList[2:4]\r\n MassDictionary[WordList[1]] = WordList[5]\r\n elif Key2==True and WordList[0]!='Number':\r\n GasAtomDictionary[WordList[1]]=WordList[2:4]\r\n MassDictionary[WordList[1]] = WordList[4]\r\n elif Key3==True and WordList[0]!='Number':\r\n SpecialPair.append(WordList[1:3])\r\n SpecialPair.append(WordList[3:5])\r\n\r\n SpecialPairList.append(SpecialPair)\r\n\r\n return MaterialAtomDictionary,GasAtomDictionary,SpecialPairList,MassDictionary",
"def parse_info_from_file(path):\n try:\n filename = os.path.split(path)[1]\n filename = os.path.splitext(filename)[0]\n age, gender, race, _ = filename.split('_')\n\n return int(age), dataset_dict['gender_id'][int(gender)], dataset_dict['race_id'][int(race)]\n except Exception as ex:\n return None, None, None",
"def macro(path):\n import pandas as pd\n path = os.path.expanduser(path)\n filename = 'macro.csv'\n if not os.path.exists(os.path.join(path, filename)):\n url = 'http://dustintran.com/data/r/Zelig/macro.csv'\n maybe_download_and_extract(path, url,\n save_file_name='macro.csv',\n resume=False)\n\n data = pd.read_csv(os.path.join(path, filename), index_col=0,\n parse_dates=True)\n x_train = data.values\n metadata = {'columns': data.columns}\n return x_train, metadata",
"def load_data(path):\n data = loadmat(path)\n return data['X'], data['y']"
] | [
"0.6235567",
"0.61867106",
"0.5885496",
"0.5595712",
"0.5569049",
"0.5382577",
"0.5362518",
"0.53287804",
"0.5324073",
"0.53078204",
"0.52826846",
"0.52695364",
"0.52454114",
"0.51977813",
"0.5176313",
"0.5159904",
"0.51186925",
"0.50939804",
"0.5091242",
"0.5027156",
"0.49919435",
"0.4978368",
"0.4972508",
"0.4964574",
"0.4943375",
"0.49425912",
"0.49364504",
"0.49329692",
"0.49271902",
"0.49262348"
] | 0.65298444 | 0 |
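A minimal usage sketch for the read_xd_master_file entry above (illustrative only: the xd.mas path and the error-log filename are placeholders, and the function is assumed to already be in scope):

# Hypothetical call to read_xd_master_file from the entry above.
# 'example/xd.mas' and 'xd_read_errors.log' are placeholder paths.
with open('xd_read_errors.log', 'w') as errlog:
    compound_name, cell = read_xd_master_file('example/xd.mas', errlog)
if compound_name is not None:
    # cell holds the floats parsed from the CELL line
    # (typically a, b, c, alpha, beta, gamma)
    print(compound_name, cell)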
Reads the cell parameters from an 'xd.mas' file and the atomic positions from an 'xd.res' file. The function returns a list with the cell parameters and a dictionary that maps each atom name to its fractional coordinates. | def read_coordinates(path='', sort=True):
maspointer = open(path + 'xd.mas', 'r')
respointer = open(path + 'xd.res', 'r')
positions = {}
keylist = [] #Needed to keep the atomlist order. This is important for the frequency read function.
for line in maspointer.readlines():
if 'CELL ' in line:
cell = [float(i) for i in line.split(" ") if '.' in i]
break
for line in respointer.readlines():
if '(' in line and not '!' in line:
coords = [float(i) for i in line.split(" ") if '.' in i]
coords = coords[:-1]
key = line.split(" ")[0]
keylist.append(key)
positions[key] = coords
if sort:
sortkeylist = []
for i in xrange(len(keylist)):
j = i + 1
for key in keylist:
number = get_number(key)
if j == int(number):
sortkeylist.append(key)
else:
sortkeylist = keylist
return cell, positions, sortkeylist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_xyz(filename):\n\n config = {}\n\n with open(filename, 'r') as f:\n # number of atoms (spins)\n config['nat'] = int(re.findall('\\S+', f.readline())[0])\n\n # box parameters (type, dimension, shape, periodicity)\n sarr = re.findall('\\S+', f.readline())\n config['latt_type'] = sarr[0]\n dims = list(map(int, sarr[1:4]))\n config['latt_box'] = np.array(dims)\n config['box'] = np.diag(dims)\n config['pbc'] = list(map(int, sarr[4:7]))\n if len(sarr) > 7:\n dim_intra = len(sarr) - 7\n\n atom_types = []\n xyz = []\n config['latt_i'] = np.zeros(dims, dtype=int)\n config['latt_atoms'] = np.zeros(dims, dtype=int)\n config['latt_intra'] = np.zeros(tuple(dims) + (dim_intra,), dtype='float64')\n for i in range(config['nat']):\n sarr = re.findall('\\S+', f.readline())\n t = int(sarr[0])\n r = tuple(map(int, sarr[1:4]))\n\n atom_types.append(t)\n xyz.append(r)\n\n config['latt_i'][r] = i\n config['latt_atoms'][r] = t\n\n for j in range(dim_intra):\n ci = float(sarr[4 + j])\n config['latt_intra'][r[0], r[1], r[2], j] = ci\n\n config['atom_types'] = np.array(atom_types)\n config['xyz'] = np.array(xyz)\n \n return config",
"def read_xyz(filename):\n #print('Reading geom from:'),filename\n atoms = []\n coordinates = []\n\t\n xyz = open(filename)\n n_atoms = int(xyz.readline())\n title = xyz.readline()\n for line in xyz:\n\tif len(line.strip()) == 0:\n\t\tpass\n\t\tbreak\t\n\tatom,x,y,z = line.split()\n\tatoms.append(atom)\n\tcoordinates.append([float(x), float(y), float(z)])\n xyz.close()\n coordinates = [[w * angtobh for w in ww] for ww in coordinates] #ang to bh\n\n if n_atoms != len(coordinates):\n \tprint('Number of atoms in xyz file doesnt equal to the number of lines.')\n\tsys.exit(1)\n \n return atoms, coordinates",
"def ReadAtomParameter(AtomParameterPath):\r\n\r\n AtomParameter=os.path.join(AtomParameterPath,'AtomParameter')\r\n\r\n Key1,Key2,Key3=False,False,False\r\n MaterialAtomDictionary,GasAtomDictionary,MassDictionary={},{},{}\r\n SpecialPair,SpecialPairList=[],[]\r\n\r\n with open(AtomParameter, 'r') as File:\r\n for Line in File.readlines():\r\n if Line.strip():\r\n WordList=Line.strip().split()\r\n if WordList[0]=='#':\r\n continue\r\n elif WordList[0]=='MaterialAtom:':\r\n Key1=True\r\n elif WordList[0]=='GasAtom:':\r\n Key1=False\r\n Key2=True\r\n elif WordList[0]=='SpecialPair:':\r\n Key2=False\r\n Key3=True\r\n\r\n # MaterialAtom\r\n elif Key1==True and WordList[0]!='Number':\r\n MaterialAtomDictionary[WordList[1]]=WordList[2:4]\r\n MassDictionary[WordList[1]] = WordList[5]\r\n elif Key2==True and WordList[0]!='Number':\r\n GasAtomDictionary[WordList[1]]=WordList[2:4]\r\n MassDictionary[WordList[1]] = WordList[4]\r\n elif Key3==True and WordList[0]!='Number':\r\n SpecialPair.append(WordList[1:3])\r\n SpecialPair.append(WordList[3:5])\r\n\r\n SpecialPairList.append(SpecialPair)\r\n\r\n return MaterialAtomDictionary,GasAtomDictionary,SpecialPairList,MassDictionary",
"def read(self, FN, natoms=None, return_title=False, \\\n multiplier=None, trajectory=False):\n if not os.path.isfile(FN):\n raise Exception('Coordinate file %s does not exist!' % FN)\n if FN.endswith('.gz'):\n import gzip\n F = gzip.open(FN, 'r')\n else:\n F = open(FN, 'r')\n dat = F.read().strip().split('\\n')\n F.close()\n\n title = dat.pop(0) # Title\n\n if len(dat[0].split()) > 1:\n # VMD format (does not specify number of atoms)\n crd = []\n for line in dat:\n crd = crd + [float(x) for x in line.split()]\n crd = np.resize(crd, (len(crd) / 3, 3))\n else:\n # AMBER format\n file_natoms = int(dat.pop(0)) # Number of atoms\n if (natoms is not None) and (file_natoms != natoms):\n print \"Incorrect number of atoms in crd file\"\n return np.array([])\n\n if trajectory:\n w = 8 # For mdcrd\n else:\n w = 12 # For inpcrd\n crd = []\n for line in dat:\n crd = crd + [float(line[x:x + w]) for x in range(0, len(line), w)]\n crd = np.resize(crd, (len(crd) / 3, 3))\n\n if multiplier is not None:\n crd = multiplier * crd\n if (natoms is not None):\n crd = np.vsplit(crd, crd.shape[0] / natoms)\n print \" read %d configurations from %s\" % (len(crd), FN)\n\n if return_title:\n return (crd, title)\n else:\n return crd",
"def rd_xyz(self):\n nmol = self.__rd_xyz_nmol()\n fpin = open(self.config['xyzfile'], \"r\")\n tmol = self.template['molspec']['atoms']\n ntatom = self.template['molspec']['n_atoms']\n mol = []\n for i in range(nmol):\n # number of atom,\n line = fpin.readline().strip()\n natom = int(line)\n line = fpin.readline()\n\n jobname = \"%s\" % line[:-1]\n atom = []\n\n if ntatom != natom:\n print \"geometry data in template file is not consistant with xyz file. check the template.\"\n for j in range(natom):\n line = fpin.readline()\n rec = line.split()\n if len(rec) == 5:\n atomname, x, y, z, imove = rec\n elif len(rec) == 4:\n atomname, x, y, z = rec\n else:\n print \"nothing to do...\"\n exit(1)\n frg = tmol[j]['frg']\n record = {'name': atomname, 'coord': [float(x),float(y),float(z)], 'frg':frg}\n atom.append(record)\n onemol = {'natom': natom, 'jobname': jobname, 'info': '', 'atom':atom}\n mol.append(onemol)\n self.model['mol'] = mol\n fpin.close()\n return",
"def read_parameters_diff_file(coords):\n param_map = hp.read_map(source +\n \"kids_data/\"\n \"COM_CompMap_Compton-SZMap-milca-\"\n \"ymaps_2048_R2.00.fits\")\n params = []\n for point in coords:\n ra, dec = point\n index = declratoindex(dec, ra)\n params.append(param_map[index])\n return params",
"def extended_xyz_parse(xyz_d):\n \n s_properties = ['rot_A', \n 'rot_B', \n 'rot_C', \n 'dipole', \n 'polarizability', \n 'homo', \n 'lumo', \n 'band_gap', \n 'ese', \n 'zpe', \n 'u_0K', \n 'u_298.15K', \n 'h_298.15K', \n 'f_298.15K', \n 'cp_298.15K']\n\n mol_properties = {}\n\n\n lines = xyz_d.replace('*^','e').splitlines()\n \n r_no_atoms = lines[0]\n no_atoms = int(r_no_atoms)\n\n r_scalars = lines[1]\n mol_id = r_scalars.split()[:2]\n scalar_properties = np.array(r_scalars.split()[2:], np.float32)\n\n r_mcoords = lines[2:2+no_atoms]\n symbols = [m.split()[0] for m in r_mcoords]\n coords = np.array([m.split()[1:4] for m in r_mcoords], dtype=np.float32)\n \n charges = np.array([m.split()[4] for m in r_mcoords], dtype=np.float32)\n\n r_vibfreqs = lines[2+ no_atoms]\n vib_freqs = np.array([float(freq) for freq in r_vibfreqs.split()], dtype=np.float32)\n\n smiles = lines[3+no_atoms].split()\n inchi = lines[4+no_atoms].split()\n\n mol_properties['no_atoms'] = no_atoms\n mol_properties['mol_id'] = mol_id\n \n for i, p in enumerate(s_properties):\n mol_properties[p] = scalar_properties[i]\n\n mol_properties['symbols'] = symbols\n mol_properties['coords'] = coords\n mol_properties['charges'] = charges\n mol_properties['vib_freqs'] = vib_freqs\n mol_properties['smiles'] = smiles\n mol_properties['inchi'] = inchi\n \n return mol_properties",
"def read_xd_master_file(path, errorpointer):\n filepointer = open(path, 'r')\n for line in filepointer.readlines():\n if 'TITLE' in line:\n compound_name = line.partition('!')[2].lstrip().rstrip()\n if 'CELL' in line:\n cell = [float(i) for i in line.split(\" \") if '.' in i]\n break\n filepointer.close()\n try:\n return compound_name, cell\n except:\n errorpointer.write(path + '\\n')\n return None, None",
"def setupdict(parfile):\n pardict = {}\n with open(parfile,'r+') as f:\n for line in f:\n flags = line[56:65].split(' ')\n try:\n flags = [int(f) for f in flags]\n except:\n continue\n # if we found res pars\n if( all(flags) <= 3 ):\n # if any varied pars\n if( any(flags) > 0 ):\n # energies are dict keys\n estring = endf_float_str(float(line[0:11]))\n pardict[estring] = []\n pars = [float(line[0+11*i:11+11*i]) for i in range(len(flags))]\n for i,flag in enumerate(flags):\n if( flag > 0 ):\n pardict[estring].append((i,pars[i]))\n return pardict",
"def readResiduals(in_c_file):\n\n DataDict = {}\n in_mjd, in_res, in_reserr, in_orbphs = [], [], [], []\n\n for line in open(in_c_file, \"r\").readlines():\n \n if ('#' not in line):\n elements = line.split()\n \n in_mjd.append(float(elements[6]))\n in_res.append(float(elements[2]))\n in_reserr.append(float(elements[3]))\n in_orbphs.append(float(elements[5]))\n \n # store as dictionary.\n DataDict['mjd'] = np.array(in_mjd)\n DataDict['residuals'] = np.array(in_res)\n DataDict['residuals_err'] = np.array(in_reserr)\n DataDict['orbital_phase'] = np.array(in_orbphs)\n\n return DataDict",
"def get_coordinates_xyz(filename):\n\n f = open(filename, 'r')\n V = list()\n atoms = list()\n n_atoms = 0\n\n # Read the first line to obtain the number of atoms to read\n try:\n n_atoms = int(f.readline())\n except ValueError:\n print(\"Could not obtain the number of atoms in the .xyz file. \"+filename)\n return None\n\n # Skip the title line\n f.readline()\n\n # Use the number of atoms to not read beyond the end of a file\n for lines_read, line in enumerate(f):\n\n if lines_read == n_atoms:\n break\n\n atom = re.findall(r'[a-zA-Z]+', line)[0]\n # atom = atom.upper()\n\n numbers = re.findall(r'[-]?\\d+\\.\\d*(?:[Ee][-\\+]\\d+)?', line)\n numbers = [float(number) for number in numbers]\n\n # The numbers are not valid unless we obtain exacly three\n if len(numbers) == 3:\n V.append(np.array(numbers))\n atoms.append(atom)\n else:\n exit(\"Reading the .xyz file failed in line {0}. Please check the format.\".format(lines_read + 2))\n\n f.close()\n atoms = np.array(atoms)\n V = np.array(V)\n return atoms, V",
"def getParams(self, resname, atomname):\n charge = None\n radius = None\n\n # print self.map.keys()\n\n if resname in self.map:\n resid = self.map[resname]\n if resid.hasAtom(atomname):\n atom = resid.atoms[atomname]\n charge = atom.charge\n radius = atom.radius\n\n return charge, radius",
"def load_xyz(filename):\n periodic = load_periodic()\n #read molecule\n with open(filename) as f:\n size = int(next(f))\n title = next(f).strip()\n molecule = Molecule(title,size)\n for _ in range(size):\n row = next(f).split()\n tag = row[0]\n element = periodic[tag]\n coordinate = []\n for j in range(3):\n coordinate.append(float(row[j+1]))\n atom = Atom(element,coordinate)\n\n molecule.append(atom)\n f.close()\n \n return molecule",
"def read_multiple_coordinates(fragmentnames):\n fragdict = {}\n for name in fragmentnames:\n path = name + '/'\n cell, pos = read_coordinates(path)\n atomlist = frac_to_cart(cell, pos)\n atomdict = {}\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n fragdict[name] = atomlist\n return fragdict",
"def readParams(file_name):\n try:\n info = np.load(file_name,allow_pickle=True)[()]\n except FileNotFoundError:\n if file_name.split('/')[-2] == 'checkpoint':\n lfc_id_dir = '/expres/extracted/lfc_cal/lfc_id/'\n file_name = lfc_id_dir + os.path.basename(file_name)\n info = np.load(file_name,allow_pickle=True)[()]\n else:\n raise FileNotFoundError\n # Assemble information into \"fit-able\" form\n num_orders = len(info['params'])\n lines = [p[:,1] for p in info['params'] if p is not None]\n errs = [np.sqrt(cov[:,1,1]) for cov in info['cov'] if cov is not None]\n ordrs = [o for o in np.arange(86) if info['params'][o] is not None]\n waves = [w for w in info['wvln'] if w is not None]\n # I believe, but am not sure, that the wavelengths are multiplied by order\n # to separate them from when orders overlap at the edges\n waves = [wvln for order, wvln in zip(ordrs,waves)]\n ordrs = [np.ones_like(x) * m for m,x in zip(ordrs, lines)]\n\n x = np.concatenate(lines)\n y = np.concatenate(ordrs)\n e = np.concatenate(errs)\n w = np.concatenate(waves)\n # Note: default of pipeline includes ThAr lines, which we're not including here\n \n return (x,y,w,e)",
"def read_params(fname):\n f = open(fname, 'r')\n par = {} #output\n for i in range(10): # esta dentro de las primeras 10 lineas\n l = f.readline().split()\n #print \" ---> \", l\n number = u'%s' % l[-1] # presumably a number\n if not number.replace('.','').replace('-','').isnumeric():\n if l[0]=='#####':\n break\n else:\n continue # we proceed ONLY IF this is numeric string\n #print ' FIRST: ', l[0]\n if l[0]=='#####':\n #print \"IM I HERE????\"\n break # end of header\n\n name = l[1][:-1] # l[0] es '#', y -1 para comernos el \":\"\n value = np.float(l[2]) # l[2] es el valor\n par[name] = value\n\n return par",
"def _read_xyz(ds,datafile,long_format=False):\n\n from cheml.io.xyz import get_molecules\n\n ds.list_of_mol = get_molecules(datafile,ds.nmol,long_format)\n ds.nmol = len(ds.list_of_mol)",
"def read_xyz(self, filename):\n # first line contains number of atoms\n self.numatom = int(filename.readline().split()[0])\n # second line contains a comment\n self.comment = filename.readline()[:-3]\n # rest of the lines contain coordinates structured Element X Y Z\n string = \"Element X Y Z \\n\" + filename.read()\n self.contents = pd.read_table(StringIO(string), sep=r'\\s+')",
"def _load_dat(self):\n modelfile = self.filename\n with open(modelfile) as f:\n content = f.readlines()\n\n self.comment = content.pop(0) # Comment line\n content = [x for x in content if not x.startswith('#')]\n\n for line in content:\n if('atoms' in line): self.natoms = int(line.split()[0])\n if('xlo' in line and 'xhi' in line):\n self.xsize = abs(float(line.split()[0])) + abs(float(line.split()[1]))\n if('ylo' in line and 'yhi' in line):\n self.ysize = abs(float(line.split()[0])) + abs(float(line.split()[1]))\n if('zlo' in line and 'zhi' in line):\n self.zsize = abs(float(line.split()[0])) + abs(float(line.split()[1]))\n if('atom types' in line): nelems = int(line.split()[0])\n if('Masses' in line): mflag = content.index(line) + 1\n if('Atoms' in line): aflag = content.index(line) + 1\n try:\n mflag\n except NameError:\n raise Exception(\"ERROR! You need to define the masses in the .dat file.\")\n atomtypes = {}\n while(nelems > 0):\n if(len(content[mflag].split()) == 2):\n atomtypes[int(content[mflag].split()[0])] = masses.get_znum(float(content[mflag].split()[1]))\n nelems -= 1\n mflag += 1\n self.atoms = []\n natoms = self.natoms\n while(natoms > 0):\n sline = content[aflag].split()\n if(len(sline) >= 5):\n # We found an atom\n id = int(sline[0])\n type = int(sline[1])\n x = float(sline[2])\n y = float(sline[3])\n z = float(sline[4])\n znum = atomtypes[type]\n # Add it to the model\n self.atoms.append(Atom(id,znum,x,y,z))\n natoms -= 1\n aflag += 1",
"def process(path):\n # get parameter value:\n with open('config.cym', 'r') as f:\n line = f.readline()\n #print(line)\n pam = float(line[1:])\n f.close()\n # get position of aster:\n with open('aster.txt', 'r') as f:\n for line in f:\n if len(line)>3 and not line[0]=='%':\n #print(line)\n val = line.split()\n x = float(val[2])\n y = float(val[3])\n #z = float(val[4])\n #pos = math.sqrt(x*x+y*y+z*z)\n pos = math.sqrt(x*x+y*y)\n\n f.close()\n return (pam, pos)",
"def readFile(file_name):\n if file_name.split('.')[-1] == 'thid':\n x,m,w = readThid(file_name)\n e = np.empty_like(x)\n e[:] = np.nan\n return x,m,w,e\n else:\n return readParams(file_name)",
"def parse_params(filename):\n\n all_dicts = []\n\n with open(filename) as f:\n\n for line in f:\n\n params = line.strip().split()\n\n temp_dict = {\"die\": float(params[0])}\n\n temp_dict.update({i: float(params[i]) for i in range(1, 7)})\n\n all_dicts.append(temp_dict)\n\n f.close()\n\n return all_dicts",
"def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions",
"def get_electrode_positions():\n positions = dict()\n with io.open(\"electrode_positions.txt\", \"r\") as pos_file:\n for line in pos_file:\n parts = line.split()\n positions[parts[0]] = tuple([float(part) for part in parts[1:]])\n return positions",
"def load_params_from_file(path):\n save_dict = mx.nd.load(path)\n arg_params = {}\n aux_params = {}\n for k, v in save_dict.items():\n tp, name = k.split(':', 1)\n if tp == 'arg':\n arg_params[name] = v\n if tp == 'aux':\n aux_params[name] = v\n return arg_params, aux_params",
"def read_gbvi_parameters(filename):\n\n parameters = dict()\n \n infile = open(filename, 'r')\n for line in infile:\n # Strip trailing comments\n index = line.find('%')\n if index != -1:\n line = line[0:index] \n\n # Parse parameters\n elements = line.split()\n if len(elements) == 3:\n [atomtype, radius, gamma] = elements\n parameters['%s_%s' % (atomtype,'radius')] = float(radius)\n parameters['%s_%s' % (atomtype,'gamma')] = float(gamma)\n\n return parameters",
"def read_abinit(filename='abinit.in'):\n\n from ase import Atoms, units\n\n if isinstance(filename, str):\n f = open(filename)\n else: # Assume it's a file-like object\n f = filename\n\n lines = f.readlines()\n if type(filename) == str:\n f.close()\n\n full_file = ''\n for line in lines:\n if '#' in line:\n meat, comment = line.split('#')\n else:\n meat = line\n full_file = full_file + meat + ' '\n\n full_file.strip()\n tokens = full_file.lower().split()\n\n # note that the file can not be scanned sequentially\n\n index = tokens.index(\"acell\")\n unit = 1.0\n if(tokens[index+4].lower()[:3] != 'ang'):\n unit = units.Bohr\n acell = [unit*float(tokens[index+1]),\n unit*float(tokens[index+2]),\n unit*float(tokens[index+3])]\n\n index = tokens.index(\"natom\")\n natom = int(tokens[index+1])\n\n index = tokens.index(\"ntypat\")\n ntypat = int(tokens[index+1])\n\n index = tokens.index(\"typat\")\n typat = []\n for i in range(natom):\n typat.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"znucl\")\n znucl = []\n for i in range(ntypat):\n znucl.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"rprim\")\n rprim = []\n for i in range(3):\n rprim.append([acell[i]*float(tokens[index+3*i+1]),\n acell[i]*float(tokens[index+3*i+2]),\n acell[i]*float(tokens[index+3*i+3])])\n\n # create a list with the atomic numbers\n numbers = []\n for i in range(natom):\n ii = typat[i] - 1\n numbers.append(znucl[ii])\n\n # now the positions of the atoms\n if \"xred\" in tokens:\n index = tokens.index(\"xred\")\n xred = []\n for i in range(natom):\n xred.append([float(tokens[index+3*i+1]),\n float(tokens[index+3*i+2]),\n float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, scaled_positions=xred, numbers=numbers,\n pbc=True)\n else:\n if \"xcart\" in tokens:\n index = tokens.index(\"xcart\")\n unit = units.Bohr\n elif \"xangst\" in tokens:\n unit = 1.0\n index = tokens.index(\"xangst\")\n else:\n raise IOError(\n \"No xred, xcart, or xangs keyword in abinit input file\")\n\n xangs = []\n for i in range(natom):\n xangs.append([unit*float(tokens[index+3*i+1]),\n unit*float(tokens[index+3*i+2]),\n unit*float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, positions=xangs, numbers=numbers, pbc=True)\n \n try:\n i = tokens.index('nsppol')\n except ValueError:\n nsppol = None\n else:\n nsppol = int(tokens[i + 1])\n\n if nsppol == 2:\n index = tokens.index('spinat')\n magmoms = [float(tokens[index + 3 * i + 3]) for i in range(natom)]\n atoms.set_initial_magnetic_moments(magmoms)\n\n return atoms",
"def getCoords(file):\n global demag\n name = file.split('.')[0]\n name = name.split('_')\n x = int(name[2])//demag\n y = int(name[3])//demag\n return(int(x),int(y))",
"def load_mxm2msd():\n res = {}\n with open(mxm2msd()) as f:\n for line in f:\n mxm, msd = line.strip().split(',')\n res[mxm] = msd\n return res",
"def _read_dx(self, FN):\n if FN.endswith('.dx'):\n F = open(FN, 'r')\n else:\n import gzip\n F = gzip.open(FN, 'r')\n\n # Read the header\n line = F.readline()\n while line.find('object') == -1:\n line = F.readline()\n header = {}\n header['counts'] = [int(x) for x in line.split(' ')[-3:]]\n for name in ['origin', 'd0', 'd1', 'd2']:\n header[name] = [float(x) for x in F.readline().split(' ')[-3:]]\n F.readline()\n header['npts'] = int(F.readline().split(' ')[-3])\n\n # Test to make sure the grid type is okay.\n # These conditions are not absolultely essential,\n # but they reduce the number of subtraction operations.\n if not (header['d0'][1] == 0 and header['d0'][2] == 0\n and header['d1'][0] == 0 and header['d1'][2] == 0\n and header['d2'][0] == 0 and header['d2'][1] == 0):\n raise Exception('Trilinear grid must be in original basis')\n if not (header['d0'][0] > 0 and header['d1'][1] > 0\n and header['d2'][2] > 0):\n raise Exception('Trilinear grid must have positive coordinates')\n\n # Read the data\n vals = np.ndarray(shape=header['npts'], dtype=float)\n index = 0\n while index < header['npts']:\n line = F.readline()[:-1]\n items = [float(item) for item in line.split()]\n vals[index:index + len(items)] = items\n index = index + len(items)\n F.close()\n\n data = {\n 'origin':np.array(header['origin']), \\\n 'spacing':np.array([header['d0'][0],header['d1'][1],header['d2'][2]]), \\\n 'counts':np.array(header['counts']), \\\n 'vals':vals}\n return data"
] | [
"0.62354904",
"0.6009959",
"0.57815313",
"0.57030374",
"0.5664513",
"0.5592775",
"0.5527532",
"0.5496724",
"0.5496124",
"0.5451055",
"0.5448297",
"0.53548825",
"0.5279107",
"0.52720135",
"0.526696",
"0.5173193",
"0.5169754",
"0.5153554",
"0.5111259",
"0.51051176",
"0.5093884",
"0.5085157",
"0.50798553",
"0.50798553",
"0.50619435",
"0.5043534",
"0.5043248",
"0.50341827",
"0.50300485",
"0.5029082"
] | 0.68994904 | 0 |
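A minimal usage sketch for the read_coordinates entry above (illustrative only: 'mydata/' is a placeholder directory assumed to contain xd.mas and xd.res, and the function is assumed to be in scope):

# Hypothetical call to read_coordinates from the entry above.
# The path must end with a separator, since the function concatenates it
# directly with 'xd.mas' and 'xd.res'.
cell, positions, atom_order = read_coordinates(path='mydata/', sort=True)
for name in atom_order:
    # positions[name] holds the fractional coordinates read from xd.res
    print(name, positions[name])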
Returns the number in the brackets of an atomname. | def get_number(atomname):
switch = False
number = ''
for char in atomname:
if char == ')':
switch = False
if switch:
number += char
if char == '(':
switch = True
return number | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def atomic_number(name):\n try:\n return symbols.index(name.capitalize()) + 1\n except ValueError:\n return lower_names.index(name.lower()) + 1",
"def getNameIndex(name):\n try:\n location = len(name) - \"\".join(reversed(name)).index(\".\")\n index = int(name[location:])\n except Exception:\n index = 0\n return index",
"def get_num(elem):\n if isinstance(elem, str):\n return _find_index(elem)\n else:\n for atm in elem:\n if atm not in sym and atm[0] not in ['X', 'D']:\n raise ValueError('Unrecognized atomic symbol \\'' + atm +\n '\\'. Use X prefix for dummy atoms.')\n return np.array([_find_index(atm) for atm in elem])",
"def value(name):\r\n return sum(alpha.index(str(l)) + 1 for l in name)",
"def atomic_number(self, element_name):\n return self.GetAtomicNumber(element_name)",
"def _name2idx(name):\n match = re.search(r\"eth(\\d+)\", name, re.I)\n if not match:\n raise exception.CloudbaseInitException(\n \"invalid NetworkDetails name {!r}\"\n .format(name)\n )\n return int(match.group(1))",
"def get_amount_of_digits(self, name: str):\n x = -1\n while name[x - 1].isdigit():\n x -= 1\n if name[:x].endswith(\"/streaming/p\"):\n return x",
"def _get_natom(self, file):\n f = open_general(file)\n tmptxt = f.readlines()\n f.close()\n itmp = search_string('NATOM is', tmptxt)\n natom = int(tmptxt.pop(itmp).split()[-1])\n return natom",
"def get_atom_intention(self, atom_name):\n source, _clone = self._atomdetail_by_name(atom_name)\n return source.intention",
"def getbarvarnameindex(self,somename_):\n if isinstance(somename_,unicode):\n somename_ = somename_.encode(\"utf-8\",errors=\"replace\")\n asgn_ = ctypes.c_int32()\n index_ = ctypes.c_int32()\n res = __library__.MSK_XX_getbarvarnameindex(self.__nativep,somename_,ctypes.byref(asgn_),ctypes.byref(index_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n asgn_ = asgn_.value\n _asgn_return_value = asgn_\n index_ = index_.value\n _index_return_value = index_\n return (_asgn_return_value,_index_return_value)",
"def getResidueNumber(self, iAtom):\n return self._getResiduePointer(iAtom)+1",
"def number(self):\n return re.match(r'^.*?([0-9]+)$', self._alias).groups()[0]",
"def _get_cindex(circ, name, index):\n ret = 0\n for reg in circ.cregs:\n if name != reg.name:\n ret += reg.size\n else:\n return ret + index\n return ret + index",
"def get_number_from_symbol(symbol):\n return elements[symbol]['number']",
"def item_no(self, name):\n sources = self.sources(self._maskname_from_item(name))\n return sources.index(name) + 1",
"def get_atomic_number(molecule, atom_index):\n return molecule.GetAtomAtomicNumber(atom_index)",
"def number(cls, tileName):\n return TILENAMEMAP[tileName]['Number'] if tileName in TILENAMEMAP else None",
"def annulus_ident(self) -> int:\n return self._ann_ident",
"def atom(token):\n if REGEX_INTEGER.match(token):\n return int(token)\n else:\n return token",
"def extract_journal(name):\n match = re.search(\"\\d+\", name)\n if match != None: \n return name[:match.start()], int(name[match.start(): match.end()])\n else: \n return \"\", 0",
"def getNameNum(name):\n dicto = {'a':1,'b':2,'c':3,'d':4,'e':5,'f':6,'g':7,'h':8,'i':9,'j':10,'k':11,'l':12,'m':13,'n':14,'o':15,'p':16,'q':17,'r':18,'s':19,'t':20,'u':21,'v':22,'w':23,'x':24,'y':25,'z':26}\n summ = 0\n for letter in name:\n summ += dicto.get(letter.lower())\n return summ",
"def getBranchIndex(self):\n\n data = self.name.split('-')\n return int(data[2])",
"def getOqiNameIndx( self, name ):\n \n if not self.oqiNames:\n self.getOqiNames( )\n\n if name in self.oqiNames:\n return self.oqiNames[ name ]\n elif name in self.oqiNames.values():\n return name\n else:\n return -1",
"def get_index(s):\n return int(s[s.find(\"[\")+1:s.find(\"]\")])",
"def parse_num(path):\n nbasename = path.basename.lower()\n if nbasename.startswith(nprefix):\n try:\n return int(nbasename[len(nprefix) :])\n except ValueError:\n pass",
"def getOthNameIndx( self, name ):\n \n if not self.othNames:\n self.getOthNames( )\n\n if name in self.othNames:\n return self.othNames[ name ]\n elif name in self.othNames.values():\n return name\n else:\n return -1",
"def pname(name):\n ranks = list(reversed(name.split(';')))\n for i, rank in enumerate(ranks):\n if rank in ['Others', 'Unassigned']:\n return rank\n if rank == '__':\n continue\n if rank.split('__')[1] is '':\n return ranks[i+1] + ';' + rank\n return rank",
"def _get_freq(name):\n try:\n counts = int(name.split(\"_x\")[1])\n except:\n return 0\n return counts",
"def _parse_atom_index(index):\n try:\n return int(index)\n except:\n return int(index, 16) - 0xA0000 + 100000",
"def get_natom(self):\n return"
] | [
"0.7131723",
"0.6602477",
"0.6242266",
"0.6130583",
"0.6037743",
"0.5923889",
"0.58768123",
"0.5873613",
"0.5861094",
"0.5821463",
"0.58060014",
"0.5772363",
"0.5768761",
"0.5733707",
"0.5732828",
"0.5719784",
"0.56835663",
"0.56269145",
"0.5624526",
"0.5605099",
"0.5601558",
"0.5591843",
"0.55889714",
"0.5549441",
"0.55389607",
"0.5518706",
"0.55121636",
"0.5510011",
"0.55072707",
"0.5506972"
] | 0.78789806 | 0 |
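A quick illustration of the get_number entry above (the atom labels are made up; the function is assumed to be in scope):

# Hypothetical examples for get_number from the entry above.
print(get_number('C(12)'))  # prints '12'
print(get_number('O(3)'))   # prints '3'
print(get_number('H1A'))    # prints '' because no parentheses are present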
Create and initialize a Conv1d layer with spectral normalization | def _conv1d_spect(ni, no, ks=1, stride=1, padding=0, bias=False):
conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias)
nn.init.kaiming_normal_(conv.weight)
if bias: conv.bias.data.zero_()
return spectral_norm(conv) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def V1_init(layer, size, spatial_freq, center=None, scale=1., bias=False, seed=None, tied=False):\n classname = layer.__class__.__name__\n assert classname.find('Conv2d') != -1, 'This init only works for Conv layers'\n\n out_channels, in_channels, xdim, ydim = layer.weight.shape\n data = layer.weight.data.numpy().copy()\n # same weights for each channel\n if tied:\n W = V1_weights(out_channels, (xdim, ydim),\n size, spatial_freq, center, scale, seed=seed)\n for chan in range(in_channels):\n if not tied:\n W = V1_weights(out_channels, (xdim, ydim),\n size, spatial_freq, center, scale, seed=seed)\n data[:, chan, :, :] = W.reshape(out_channels, xdim, ydim)\n data = Tensor(data)\n with torch.no_grad():\n layer.weight.copy_(data)\n\n if bias == False:\n layer.bias = None",
"def __init__(self,\n channels: int,\n kernel_size: int=15,\n activation: nn.Layer=nn.ReLU(),\n norm: str=\"batch_norm\",\n causal: bool=False,\n bias: bool=True,\n adaptive_scale: bool=False,\n init_weights: bool=False):\n assert check_argument_types()\n super().__init__()\n self.bias = bias\n self.channels = channels\n self.kernel_size = kernel_size\n self.adaptive_scale = adaptive_scale\n if self.adaptive_scale:\n ada_scale = self.create_parameter(\n [1, 1, channels], default_initializer=I.Constant(1.0))\n self.add_parameter('ada_scale', ada_scale)\n ada_bias = self.create_parameter(\n [1, 1, channels], default_initializer=I.Constant(0.0))\n self.add_parameter('ada_bias', ada_bias)\n\n self.pointwise_conv1 = Conv1D(\n channels,\n 2 * channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias_attr=None\n if bias else False, # None for True, using bias as default config\n )\n\n # self.lorder is used to distinguish if it's a causal convolution,\n # if self.lorder > 0:\n # it's a causal convolution, the input will be padded with\n # `self.lorder` frames on the left in forward (causal conv impl).\n # else: it's a symmetrical convolution\n if causal:\n padding = 0\n self.lorder = kernel_size - 1\n else:\n # kernel_size should be an odd number for none causal convolution\n assert (kernel_size - 1) % 2 == 0\n padding = (kernel_size - 1) // 2\n self.lorder = 0\n\n self.depthwise_conv = Conv1D(\n channels,\n channels,\n kernel_size,\n stride=1,\n padding=padding,\n groups=channels,\n bias_attr=None\n if bias else False, # None for True, using bias as default config\n )\n\n assert norm in ['batch_norm', 'layer_norm']\n if norm == \"batch_norm\":\n self.use_layer_norm = False\n self.norm = BatchNorm1D(channels)\n else:\n self.use_layer_norm = True\n self.norm = LayerNorm(channels)\n\n self.pointwise_conv2 = Conv1D(\n channels,\n channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias_attr=None\n if bias else False, # None for True, using bias as default config\n )\n self.activation = activation\n\n if init_weights:\n self.init_weights()",
"def conv_init(conv, act='linear'):\r\n n = conv.kernel_size[0] * conv.kernel_size[1] * conv.out_channels\r\n conv.weight.data.normal_(0, math.sqrt(2. / n))",
"def test_conv1d():\n filters = 3\n kernel_size = 2\n strides = 1\n batch_size = 2\n in_channels = 3\n input_size = 5\n input_shape = (batch_size, input_size, in_channels)\n\n keras_layer = keras.layers.Conv1D(filters=filters, kernel_size=kernel_size, strides=strides, use_bias=True, bias_initializer=\"ones\")\n input_layer = keras.Input(batch_shape=input_shape)\n keras_model = keras.models.Model(input=input_layer, outputs=keras_layer(input_layer))\n\n new_weights = np.arange(18).reshape(2, 3, 3)\n keras_layer.set_weights([new_weights, keras_layer.get_weights()[1]])\n\n kinput = np.arange(batch_size * input_size * in_channels).reshape(input_shape)\n kout = keras_model.predict(kinput)\n\n torch_model, _ = translate.translate_layer(keras_layer)\n tinput = torch.Tensor(kinput).permute(0, 2, 1)\n tout = torch_model(tinput).permute(0, 2, 1)\n assert np.isclose(kout, tout.cpu().data.numpy()).all()",
"def __init__(self, in_channels, out_channels):\n super(CNN, self).__init__()\n self.conv1 = nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=5, padding=1)",
"def dir_conv_layer(model, nb_filters, rate):\n\n model = Conv1D(filters=nb_filters, kernel_size=3, padding='causal', dilation_rate=rate, activation='relu')(model)\n model = BatchNormalization()(model)\n\n # exponentially increase dilated convolution receptive field\n # receptive field size loops back around when rate = 16 to create [1...8] block\n rate *= 2\n if rate == 16:\n rate = 1\n return model, rate",
"def conv_init(m):\r\n\r\n classname = m.__class__.__name__\r\n if classname.find('Conv') != -1:\r\n init.xavier_uniform_(m.weight, gain = np.sqrt(2))\r\n elif classname.find('BatchNorm') != -1:\r\n init.constant_(m.weight, 1)\r\n init.constant_(m.bias, 0)",
"def SNConv2d(*args, **kwargs):\n return spectral_norm(nn.Conv2d(*args, **kwargs))",
"def Linear1d(\n in_channels: int,\n out_channels: int,\n stride: int = 1,\n bias: bool = True,\n) -> torch.nn.Module:\n return nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=stride, bias=bias)",
"def __init__(self, dim_in, dim_out):\n super(Conv1D, self).__init__()\n self.dim_in = dim_in\n self.dim_out = dim_out\n w = torch.empty(dim_in, dim_out)\n nn.init.normal_(w, std=0.02)\n self.w = nn.Parameter(w)\n self.b = nn.Parameter(torch.zeros(dim_out))",
"def sn_conv1x1(x, output_dim, training=True, name='sn_conv1x1'):\n with tf.variable_scope(name, custom_getter=sn_gettr(training=training)):\n w = tf.get_variable(\n 'weights', [1, 1, x.get_shape()[-1], output_dim],\n initializer=tf.keras.initializers.VarianceScaling(\n scale=1.0, mode='fan_avg', distribution='uniform'))\n conv = tf.nn.conv2d(\n input=x, filters=w, strides=[1, 1, 1, 1], padding='SAME')\n return conv",
"def add_conv_type1(model, depth, input_shape=None):\n if input_shape is not None:\n model.add(Convolution2D(depth, 5, 5, subsample=(2, 2), \\\n input_shape=input_shape))\n else:\n model.add(Convolution2D(depth, 5, 5, subsample=(2, 2), \\\n activation='relu', W_regularizer=l2(0.05)))",
"def _first_conv(x: tf.Tensor) -> tf.Tensor:\n with slim.arg_scope([slim.conv2d], activation_fn=None, normalizer_fn=None):\n x = ResNet._conv2d_same(x, 64, 7, stride=2, scope='conv1')\n return slim.max_pool2d(x, [3, 3], stride=2, scope='pool1')",
"def resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n batch_normalization=False,\n conv_first=True):\n # conv = Conv1D(num_filters,\n # kernel_size=kernel_size,\n # strides=strides,\n # padding='same',\n # kernel_initializer='he_normal',\n # kernel_regularizer=l2(1e-4))\n conv = Conv1D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n )\n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n return x",
"def _init_layers(self) -> None:\n super()._init_layers()\n self.controller = nn.Conv2d(\n self.feat_channels, self.num_params, 3, padding=1)",
"def build_2l_conv1d(input_shape, # type: tuple\n n_outputs, # type: int\n filters=64, # type: int\n kernel_size=3, # type: int\n dropout_rate=0.5, # type: float\n max_pool_size=2 # type: int\n ):\n # type: (...) -> Sequential\n model = Sequential(name='fd2lcov1dnet')\n model.add(Conv1D(filters=filters, kernel_size=kernel_size, activation='relu', input_shape=input_shape))\n model.add(Conv1D(filters=filters, kernel_size=kernel_size, activation='relu', input_shape=input_shape))\n\n return __add_model_tail(model, n_outputs, dropout_rate, max_pool_size)",
"def __init__(self, n_filters = 64,\n n_kernels = 3,\n n_outputs = 10,\n inp_shape = (28,28),\n residual=True,\n regularizer = None,\n intializer = None,\n use_pool= False,\n use_dropout = False,\n use_batchnorm = False\n ):\n super(CNNModel, self).__init__()\n self.conv_dim = len(inp_shape)-1\n self.n_filters = n_filters\n self.initializer = intializer\n self.n_kernels = n_kernels\n self.projection = 3\n self.n_outputs = n_outputs\n self.num_layers = 1\n self.inp_shape = inp_shape\n self.regularizer = regularizer\n self.use_pool = use_pool\n self.residual = residual\n self.use_dropout = use_dropout\n self.use_batchnorm = use_batchnorm\n\n kernel_initializer = initializers.RandomNormal(mean=0.0, stddev=0.05)\n\n if self.conv_dim == 1:\n self.input_layer = layers.Conv1D(self.n_filters, (self.projection),\n activation = \"linear\",\n input_shape = self.inp_shape,\n name ='cnn_input',\n padding = 'same',\n kernel_regularizer = self.regularizer,\n bias_regularizer = self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n self.output_layer = layers.Conv1D(self.n_kernels, (self.projection),\n activation=\"linear\",\n input_shape=(None, self.inp_shape[0], self.n_filters),\n name='cnn_output',\n padding = 'same',\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n if self.use_pool:\n self.pool = layers.MaxPool1D()\n elif self.conv_dim == 2:\n self.input_layer = layers.Conv2D(self.n_filters, (self.projection,self.projection),\n activation=\"linear\",\n input_shape=self.inp_shape,\n name='cnn_input',\n padding = 'same',\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n self.output_layer = layers.Conv2D(self.n_kernels, (self.projection, self.projection),\n activation= \"linear\",\n input_shape=(None, self.inp_shape[0],self.inp_shape[1], self.n_filters),\n name=\"cnn_output\",\n padding = 'same',\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n if self.use_pool:\n self.pool = layers.MaxPool2D()\n self.list_cnn = [self.input_layer]\n self.flatten = layers.Flatten()\n\n #compute input shape after flatten for the dense layer\n if not self.use_pool:\n self.class_inp = np.prod(self.inp_shape[:-1])*self.n_kernels\n else:\n self.class_inp = np.prod(self.inp_shape[:-1])*self.n_kernels//(2**self.conv_dim)\n # self.classify = MyDenseLayer(\n # self.n_outputs,shape = (None,self.class_inp),\n # layer_name = 'classify',\n # initializer = \"RandomNormal\")\n self.classify = layers.Dense(units = self.n_outputs,\n activation = 'softmax', use_bias = True,\n input_shape = self.class_inp,\n kernel_initializer = kernel_initializer, bias_initializer=initializers.get(\"zeros\"),\n name = 'classification_layer')",
"def _strict_conv1d(x, h):\n with ops.name_scope('strict_conv1d', values=[x, h]):\n x = array_ops.reshape(x, (1, -1, 1, 1))\n h = array_ops.reshape(h, (-1, 1, 1, 1))\n result = nn_ops.conv2d(x, h, [1, 1, 1, 1], 'SAME')\n return array_ops.reshape(result, [-1])",
"def _make_conv_level(in_channels, out_channels, num_convs, norm_func,\n stride=1, dilation=1):\n layers = []\n for i in range(num_convs):\n layers.extend([\n nn.Conv2D(in_channels, out_channels, kernel_size=3,\n stride=stride if i == 0 else 1,\n padding=dilation, bias_attr=False, dilation=dilation),\n norm_func(out_channels),\n nn.ReLU()])\n\n in_channels = out_channels\n\n return nn.Sequential(*layers)",
"def shallow_CNN(num_bands = None, k_1 = None, k_2 = None, k_3 = None):\n active = 'relu'\n active2 = 'tanh'\n active3 = 'linear'\n inp = Input(shape=(None, None, num_bands))\n# bn = BatchNormalization()(inp)\n l1 = Conv2D(64, kernel_size=k_1, activation= active, padding='same', kernel_initializer='he_normal' )(inp)\n l2 = Conv2D(48, kernel_size=k_2, activation=active, padding='same', kernel_initializer='he_normal')(l1)\n l3 = Conv2D(32, kernel_size=k_3, activation=active, padding='same', kernel_initializer='he_normal')(l2)\n l4 = Conv2D(1, kernel_size=k_3, activation=active2, padding='same', kernel_initializer='he_normal',name=\"details\")(l3)\n# l4= Conv2D(1, kernel_size=k_3, activation=active2, padding='same', kernel_initializer='he_normal')(l3)\n# inp2 = Input(shape=(None, None, 1))\n inp1 = Input(shape=(None, None, 1))\n out = Add(name=\"band\")([l4, inp1])\n out1 = Conv2D(1, kernel_size=k_3, activation=active3, padding='same', kernel_initializer='he_normal',name=\"struct\")(out)\n out2 = Conv2D(1, kernel_size=k_3, activation=active3, padding='same', kernel_initializer='he_normal',name=\"TV\")(out)\n model = Model([inp, inp1], [out, out1, out2], name='shallow_CNN')\n \n# out= Conv2D(1, kernel_size=k_3, activation='relu', padding='same', kernel_initializer='he_normal',name=\"nothing\")(out1)\n# model = Model(inp, l4, name='shallow_CNN')\n return model",
"def conv1x1(in_channels, out_channels, groups=1):\n return nn.Conv2d(\n in_channels, \n out_channels, \n kernel_size=1, \n groups=groups,\n stride=1)",
"def __init__(self, momentum: float = .5):\n super(VanillaEncoder, self).__init__()\n self.conv1 = PointNetConv2Layer(64, momentum)\n self.conv2 = PointNetConv2Layer(64, momentum)\n self.conv3 = PointNetConv2Layer(64, momentum)\n self.conv4 = PointNetConv2Layer(128, momentum)\n self.conv5 = PointNetConv2Layer(1024, momentum)",
"def Conv1dWrapper(generated, *args, **kwargs):\n if generated:\n return Conv1dGenerated(*args, **kwargs)\n else:\n return Conv1dStatic(*args, **kwargs)",
"def __init__(self, filter1x1, ker_size, filters):\n super(reduce, self).__init__()\n self.con1 = layers.Conv2D(\n filter1x1, kernel_size=1, padding=\"same\", activation=\"relu\"\n )\n self.conv = layers.Conv2D(\n filters, kernel_size=ker_size, padding=\"same\", activation=\"relu\"\n )",
"def LinearizedConv1d(\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n dropout: float=0,\n **kwargs,\n) -> nn.Module:\n m = fairseq_linear_conv(in_channels, out_channels, kernel_size, **kwargs)\n std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))\n nn.init.normal_(m.weight, mean=0, std=std)\n nn.init.constant_(m.bias, 0)\n return nn.utils.weight_norm(m, dim=2)",
"def time_conv_layer(model, nb_filters):\n\n model = Conv1D(filters=nb_filters, kernel_size=3, padding='causal', activation='relu')(model)\n model = BatchNormalization()(model)\n return model",
"def __init__(self, z_dim, initailize_weights=True):\n super().__init__()\n self.z_dim = z_dim\n\n self.frc_encoder = nn.Sequential(\n CausalConv1D(6, 16, kernel_size=2, stride=2),\n nn.LeakyReLU(0.1, inplace=True),\n CausalConv1D(16, 32, kernel_size=2, stride=2),\n nn.LeakyReLU(0.1, inplace=True),\n CausalConv1D(32, 64, kernel_size=2, stride=2),\n nn.LeakyReLU(0.1, inplace=True),\n CausalConv1D(64, 128, kernel_size=2, stride=2),\n nn.LeakyReLU(0.1, inplace=True),\n CausalConv1D(128, 2 * self.z_dim, kernel_size=2, stride=2),\n nn.LeakyReLU(0.1, inplace=True),\n )\n\n if initailize_weights:\n init_weights(self.modules())",
"def apply(self, input):\n\n # input.unsqueeze(1) changes dim from (minibatch_size, sequence_length) to\n # (minibatch_size, num_channels=1, sequence_length)\n # the final squeeze(1) removes the num_channels=1 axis\n return torch.nn.functional.conv1d(input.unsqueeze(1), self.filt.type_as(input),\n padding=self.padding).squeeze(1)",
"def resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n batch_normalization=True,\n conv_first=True):\n conv = Conv2D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4))\n\n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n return x",
"def resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n batch_normalization=True,\n conv_first=True):\n conv = Conv2D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4))\n\n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n return x"
] | [
"0.6902533",
"0.6492409",
"0.6420767",
"0.6415407",
"0.63521636",
"0.62814677",
"0.62803644",
"0.6270748",
"0.6245793",
"0.6226488",
"0.607607",
"0.6073126",
"0.5975672",
"0.59554344",
"0.5911507",
"0.59033775",
"0.58964217",
"0.58664775",
"0.5841798",
"0.58339804",
"0.5821849",
"0.5810137",
"0.58017254",
"0.5754241",
"0.5749667",
"0.5731316",
"0.5727349",
"0.5721029",
"0.5706489",
"0.5706489"
] | 0.74178046 | 0 |
Helper function that returns the dedicated directory for Post media. This organizes user-uploaded Post content and is used by `ministry.models.Post.attachment` to save uploaded content. | def post_media_dir(instance, filename, prepend=settings.MEDIA_ROOT):
if instance.ministry:
_ministry = instance.ministry
elif instance.campaign:
_ministry = instance.campaign.ministry
else:
e = 'There was an unknown error finding a dir for %s' % instance.title
raise AttributeError(e)
return path.join(generic_media_dir(_ministry, prepend=prepend),
'post_media', filename) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upload_dir(self):\n return os.path.join(settings.MEDIA_ROOT,self.upload_dir_rel())",
"def get_media_directory():\n\treturn _paths[_MEDIA_DIRECTORY_KEY]",
"def public_upload_dir(self):\n return os.path.join(settings.MEDIA_ROOT,\n self.public_upload_dir_rel())",
"def create_news_post_dir(instance, prepend=settings.MEDIA_ROOT):\n for _ in (post_media_dir,):\n _path = path.split(_(instance, \"\", prepend=prepend))[0]\n try:\n mkdir(_path)\n except FileExistsError:\n pass\n except FileNotFoundError:\n if instance.ministry:\n _ministry = instance.ministry\n elif instance.campaign:\n _campaign = instance.campaign\n _ministry = _campaign.ministry\n else:\n e = 'There was an unknown error finding a dir for %s' % instance.name\n raise AttributeError(e)\n\n # NOTE: this is infinitely recursive if `prepend` does not lead to correct directory\n create_news_post_dir(instance, prepend=prepend)",
"def path_media(self) -> Path:\n return self.path_supervisor / MEDIA_DATA",
"def get_project_data_folder(self):\n return os.path.join(settings.MEDIA_ROOT,self.short_name)",
"def mediaGenerator(request):\n folder = 'content/' + request\n mediaPaths = glob(folder + '/*')\n return random.choice(mediaPaths)",
"def get_media_path(self, filename):\n return join(settings.CMS_PAGE_MEDIA_PATH, \"%d\" % self.id, filename)",
"def get_media_dir(self):\n dir_path = _paths.concat(self._gnbase, _DIRNAME_GNMEDIA)\n if not _os.path.isdir(dir_path):\n raise OSError('GEONIS media directory {!r} does not exist'.format(dir_path))\n return dir_path",
"def get_full_folder_path(self):\n data_dir_path = os.path.join(settings.MEDIA_ROOT,self.folder)\n return data_dir_path",
"def full_path(self):\n return os.path.join(settings.MEDIA_ROOT, self.path)",
"def get_gallery(self):\n return os.path.join(self.directory, GALLERY_DIR)",
"def media_path(self):\n return self._path",
"def upload_dir_rel(self):\n return os.path.join(self.short_name,\"uploads\")",
"def create_media_path(custom_path=''):\n def generate_path(instance, filename):\n if hasattr((instance), 'name'):\n return os.path.join(\n custom_path,\n instance.name,\n filename\n )\n\n return os.path.join(\n custom_path,\n filename\n )\n\n return generate_path",
"def prepare_media_url(self, object):\n if object.media is not None:\n return os.path.join(settings.MEDIA_URL, object.media.media_file.name)\n else:\n return ''",
"def get_upload_path(instance, filename):\n from os import path\n from django.conf import settings\n from django.template.defaultfilters import slugify\n \n if hasattr(settings, 'MEDIA_BROWSER_UPLOAD_BASE'):\n base = settings.MEDIA_BROWSER_UPLOAD_BASE\n else:\n base = 'media_browser_uploads'\n type = slugify(instance._meta.verbose_name_plural)\n upload_path = path.join(base, type)\n # If MEDIA_BROWSER_ORGANIZE_BY_DATE is not set or is False, return \n # current path:\n if not hasattr(settings, 'MEDIA_BROWSER_ORGANIZE_BY_DATE') \\\n or settings.MEDIA_BROWSER_ORGANIZE_BY_DATE:\n return path.join(upload_path, filename)\n # Otherwise, put in dated subfolders:\n else:\n return path.join(upload_path, \"%Y\", \"%m\", \"%d\", filename)",
"def setup_local_storage(mod, media_type, media_id, id=None):\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n if mod == 'post':\n mod = 'posts'\n path = os.path.join(BASE_DIR, 'save', mod, str(media_id))\n if id:\n path = os.path.join(BASE_DIR, 'save', mod, str(id))\n name = media_type.lower()\n try:\n os.mkdir(path)\n except FileExistsError as e:\n timestamp = time.strftime('%Y%m%d-%H%M%S')\n name += f\"_{timestamp}\"\n except OSError as e:\n raise InvalidUsage(\"OSError in setup_local_storage. \", status_code=501, payload=e)\n filename = f\"{str(path)}/{name}\"\n return path, filename",
"def get_data_folder_path(challenge_short_name):\n return safe_join(settings.MEDIA_ROOT, challenge_short_name)",
"def _UploadFile(self, media_source, title, category):\n media_entry = gdata.GDataEntry()\n media_entry.title = atom.Title(text=title)\n media_entry.category.append(category)\n media_entry = self.Post(media_entry, '/feeds/documents/private/full',\n media_source = media_source,\n extra_headers = {'Slug' : media_source.file_name })\n\n return media_entry",
"def media_folder_name(self):\n raise NotImplementedError",
"def directory(self) -> Path:\n (directory := Path(\"markdown\").resolve(strict=False)).mkdir(exist_ok=True, parents=True)\n return directory",
"def public_upload_dir_rel(self):\n return os.path.join(self.short_name,settings.COMIC_PUBLIC_FOLDER_NAME)",
"def content_media_urls(*paths):\n from mezzanine.conf import settings\n media_url = settings.CONTENT_MEDIA_URL.strip(\"/\")\n return [\"/%s/%s\" % (media_url, path) for path in paths]",
"def get_directory(self, subdir=None):\n path = settings.SUBMISSION_DIR / str(self.assignment.id) / str(self.id)\n if subdir:\n path = path / subdir\n\n return path",
"def get_thumbnails_directory():\n\treturn _paths[_THUMBNAILS_DIRECTORY_KEY]",
"def return_directory(path):\n files = os.listdir(path)\n content = \"\"\n for file in files:\n content = content + f\"{file}\\n\"\n content = content.encode()\n mime_type = b\"text/plain\"\n return content, mime_type",
"def prepare_media(self, object):\n if object.media is not None:\n #return object.media.media_file.name\n return '/api/v1/media/{0}/'.format(object.media.id)\n else:\n return ''",
"def savePost(post, save_folder, header=\"\", save_file=None):\n\n\tslug = post[\"url-with-slug\"].rpartition(\"/\")[2]\n\tdate_gmt = post[\"date-gmt\"]\n\tdate = date_gmt[:-7]\n\n\tslug = byte_truncate(slug)\n\tfile_name = os.path.join(save_folder, date +\" \"+ slug + \".html\")\n\tf = codecs.open(file_name, \"w\", encoding=ENCODING)\n\n\t#\tDate info for all posts\n\tf.write('<article>\\n\\t<time datetime>' + date + '</time>\\n\\t')\n\n#\tPOST KINDS\t:\n\n#\tText\n\n\tif post[\"type\"] == \"regular\":\n\t\ttitle = \"\"\n\t\ttitle_tag = post.find(\"regular-title\")\n\t\tif title_tag:\n\t\t\ttitle = unescape(title_tag.string)\n\t\tbody = \"\"\n\t\tbody_tag = post.find(\"regular-body\")\n\t\tif body_tag:\n\t\t\tbody = unescape(body_tag.string)\n\n\t\tif title:\n\t\t\tf.write(\"<h3>\" + title + \"</h3>\\n\\t\")\n\t\tif body:\n\t\t\tf.write(body)\n\n#\tPhoto\n\n\tif post[\"type\"] == \"photo\":\n\t\tcaption = \"\"\n\t\tcaption_tag = post.find(\"photo-caption\")\n\t\tif caption_tag:\n\t\t\tcaption = unescape(caption_tag.string)\n\t\timage_url = post.find(\"photo-url\", {\"max-width\": \"1280\"}).string\n\n\t\timage_filename = image_url.rpartition(\"/\")[2].encode(ENCODING)\n\t\timage_folder = os.path.join(save_folder, \"../images\")\n\t\tif not os.path.exists(image_folder):\n\t\t\tos.mkdir(image_folder)\n\t\tlocal_image_path = os.path.join(image_folder, image_filename)\n\n\t\tif not os.path.exists(local_image_path):\n\t\t\t# only download images if they don't already exist\n\t\t\tprint \"Downloading a photo. This may take a moment.\"\n\t\t\ttry:\n\t\t\t\timage_response = urllib2.urlopen(image_url)\n\t\t\t\timage_file = open(local_image_path, \"wb\")\n\t\t\t\timage_file.write(image_response.read())\n\t\t\t\timage_file.close()\n\t\t\texcept urllib2.HTTPError, e:\n\t\t\t\tlogging.warning('HTTPError = ' + str(e.code))\n\t\t\texcept urllib2.URLError, e:\n\t\t\t\tlogging.warning('URLError = ' + str(e.reason))\n\t\t\texcept httplib.HTTPException, e:\n\t\t\t\tlogging.warning('HTTPException')\n\t\t\texcept Exception:\n\t\t\t\timport traceback\n\t\t\t\tlogging.warning('generic exception: ' + traceback.format_exc())\n\n\t\tf.write(caption + '<img alt=\"' + caption.replace('\"', '"') + '\" src=\"images/' + image_filename + '\" />')\n\n#\tQuote\n\n\tif post[\"type\"] == \"quote\":\n\t\tquote = \"\"\n\t\tquote_tag = post.find(\"quote-text\")\n\t\tif quote_tag:\n\t\t\tquote = unescape(quote_tag.string)\n\t\tsource = \"\"\n\t\tsource_tag = post.find(\"quote-source\")\n\t\tif source_tag:\n\t\t\tsource = unescape(source_tag.string)\n\n\t\tif quote:\n\t\t\tf.write(\"<blockquote>\\n\\t\\t<p>\" + quote + \"</p>\\n\\t\\t\")\n\t\t\tif source:\n\t\t\t\tf.write('<cite>' + source + '</cite>\\n\\t')\n\t\tif quote:\n\t\t\tf.write(\"</blockquote>\")\n\n#\tFooter for all posts\n\n\tf.write(\"\\n</article>\")\n\tf.close()",
"def get_posts(path, which=None, verbose=True):\n \"\"\" Here we conditionally define include_test depending on what type of\n object 'which' is.\"\"\"\n\n class_ = type(which)\n if class_ == str:\n def include_test(name):\n if name == which:\n return True\n else:\n return False\n elif class_ in {list, tuple, set}:\n def include_test(name):\n if name in which:\n return True\n else:\n return False\n else:\n def include_test(name):\n return True\n\n \"\"\" Loop through all files in the posts directory and add each file format\n e.g. 'json', 'html', etc as keys for a subdictionary. The base dictionary\n is keyed by the file name (excluding the extension).\n\n example:\n >>> output = {\n ... 'myfile':{\n ... 'html':'/path/to/posts/myfile.html',\n ... 'json':'/path/to/posts/myfile.json'\n ... }\n ...}\n \"\"\"\n all_files = defaultdict(dict)\n for file_ in listdir(path):\n if isfile(join(path, file_)):\n name = file_.split('.')[0]\n type_ = splitext(file_)[1].strip('.')\n file_path = join(path, file_)\n all_files[name][type_] = file_path\n\n posts = list()\n required_keys = {'html', 'title', 'date'}\n for key, value in all_files.items():\n \"\"\" If include_test says that we don't need this file, we skip the\n rest of the current iteration and continue with the next key, value\n pair. \"\"\"\n if not include_test(key):\n continue\n value['id_'] = key\n if 'json' in value:\n with open(value['json'], \"rU\") as json_handle:\n value.update(\n json.load(json_handle, object_hook=json_date_parser)\n )\n elif 'yaml' in value:\n with open(value['yaml'], 'rU') as yaml_handle:\n value.update(yaml.load(yaml_handle))\n elif 'yml' in value:\n with open(value['yml'], 'rU') as yaml_handle:\n value.update(yaml.load(yaml_handle))\n\n \"\"\" If some required keys are missing then we skip the rest of the\n current iteration and continue with the next key, value pair.\n If verbose is True we print which keys were missing.\"\"\"\n if required_keys.intersection(value) != required_keys:\n if verbose:\n d = required_keys.difference(required_keys.intersection(value))\n print(\n \"Excluded {} from posts because it did not \".format(key) +\n \"have all of the required information. The field(s) \" +\n \"'{}' was/were missing.\".format(\"', '\".join(list(d)))\n )\n continue\n\n \"\"\" Everything is cool, add the post to the list.\"\"\"\n posts.append(value)\n\n \"\"\" We could run into problems here when dates aren't parsed as datetime\n objects. I might need to figure out a better way of ordering posts by date\n in the future.\"\"\"\n posts.sort(key=lambda d: d['date'])\n return posts"
] | [
"0.6631069",
"0.6379418",
"0.63036174",
"0.61629796",
"0.6003821",
"0.5945193",
"0.58830917",
"0.58586687",
"0.5771684",
"0.5700648",
"0.5641766",
"0.56303585",
"0.55782616",
"0.5443347",
"0.543865",
"0.53991973",
"0.5380151",
"0.5379563",
"0.53412217",
"0.5281436",
"0.5271819",
"0.5268363",
"0.525724",
"0.52451295",
"0.523664",
"0.5236475",
"0.51891416",
"0.51837957",
"0.5172882",
"0.5165145"
] | 0.6963208 | 0 |
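The record above documents a Django-style `upload_to` callable, and its query names `ministry.models.Post.attachment` as the consumer. The wiring below is a minimal sketch under assumptions: only the `Post.attachment` name comes from the record, `post_media_dir` is assumed importable from the record's module, and `prepend=''` is passed so the stored path is intended to stay relative to `MEDIA_ROOT`.

from django.db import models

def post_attachment_path(instance, filename):
    # wrapper passing prepend='' so the returned path is meant to stay relative to MEDIA_ROOT
    return post_media_dir(instance, filename, prepend='')

class Post(models.Model):
    # Django calls the upload_to callable with (instance, filename) and stores the returned path
    attachment = models.FileField(upload_to=post_attachment_path)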
Utility function that creates a dedicated directory for Post media. | def create_news_post_dir(instance, prepend=settings.MEDIA_ROOT):
for _ in (post_media_dir,):
_path = path.split(_(instance, "", prepend=prepend))[0]
try:
mkdir(_path)
except FileExistsError:
pass
except FileNotFoundError:
if instance.ministry:
_ministry = instance.ministry
elif instance.campaign:
_campaign = instance.campaign
_ministry = _campaign.ministry
else:
e = 'There was an unknown error finding a dir for %s' % instance.name
raise AttributeError(e)
# NOTE: this is infinitely recursive if `prepend` does not lead to correct directory
create_news_post_dir(instance, prepend=prepend) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post_media_dir(instance, filename, prepend=settings.MEDIA_ROOT):\n if instance.ministry:\n _ministry = instance.ministry\n elif instance.campaign:\n _ministry = instance.campaign.ministry\n else:\n e = 'There was an unknown error finding a dir for %s' % instance.title\n raise AttributeError(e)\n\n return path.join(generic_media_dir(_ministry, prepend=prepend),\n 'post_media', filename)",
"def setup_local_storage(mod, media_type, media_id, id=None):\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n if mod == 'post':\n mod = 'posts'\n path = os.path.join(BASE_DIR, 'save', mod, str(media_id))\n if id:\n path = os.path.join(BASE_DIR, 'save', mod, str(id))\n name = media_type.lower()\n try:\n os.mkdir(path)\n except FileExistsError as e:\n timestamp = time.strftime('%Y%m%d-%H%M%S')\n name += f\"_{timestamp}\"\n except OSError as e:\n raise InvalidUsage(\"OSError in setup_local_storage. \", status_code=501, payload=e)\n filename = f\"{str(path)}/{name}\"\n return path, filename",
"def make_directory(scripts):\n if not os.path.exists(os.path.join(os.path.dirname(__file__), 'Uploads')):\n os.makedirs(os.path.join(os.path.dirname(__file__), 'Uploads'))\n for script_object in scripts:\n if script_object.type is None:\n continue\n path = script_object.type.split('::')\n path = os.path.join(os.path.dirname(__file__), \"/\".join(path[:-1]))\n if not os.path.exists(path):\n os.makedirs(path)",
"def mkdir(path):",
"def _create_folders(self):\n if not os.path.exists(os.path.join(BASE_DIR, DIR)):\n os.mkdir(os.path.join(BASE_DIR, DIR))\n directory = os.path.join(BASE_DIR, DIR, self.title)\n if not os.path.exists(directory):\n os.mkdir(directory)\n return directory",
"def mkdir(self, *args):\n p = self.join(*args)\n error.checked_call(os.mkdir, os.fspath(p))\n return p",
"def create_media_path(custom_path=''):\n def generate_path(instance, filename):\n if hasattr((instance), 'name'):\n return os.path.join(\n custom_path,\n instance.name,\n filename\n )\n\n return os.path.join(\n custom_path,\n filename\n )\n\n return generate_path",
"def _create_dir(filename):\n head = os.path.dirname(filename)\n if head != '' and not os.path.isdir(head):\n os.makedirs(head)",
"def create_directory(tracking_id):\n upload_path = os.path.join(app.config['DRS_UPLOADS'], '{0}'.format(tracking_id))\n if not os.path.isdir(upload_path):\n os.mkdir(upload_path)",
"def prepare_url(self, url, **kwargs):\n (self.base_path / url).mkdir(mode=kwargs.get(\"dir_mode\", 0o755), parents=True)",
"def create_dir(cls, relpath):\r\n safe_mkdir(os.path.join(cls.build_root, relpath))",
"def create_dir(dir_type, base_path):\n\n path = os.path.join(base_path, dir_type)\n if not os.path.exists(path):\n os.mkdir(path)\n print('Created directory {!r}'.format(path))\n else:\n print('Found directory {!r}'.format(path))\n\n\n if dir_type.find('figure') != -1:\n sc.settings.figdir = path\n scv.settings.figdir = path\n\n return path",
"def prep_folder(args):\n if(args.save_folder[-1]!='/'):\n args.save_folder += '/'\n if(not os.path.isdir(args.save_folder)):\n os.mkdir(args.save_folder)",
"def create_directory():\n try:\n if os.path.isdir(\"./imagesFromTweets\") != True:\n os.makedirs(\"./imagesFromTweets\")\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise",
"def mkDir(contentDirPath):\n\tif os.path.isdir(contentDirPath):\n\t\tprint \"Directory %s already exists.\" % contentDirPath\n\t\tclearLocalDir(contentDirPath)\n\t\treturn;\n\telse:\n\t\tos.mkdir(contentDirPath)\n\t\tprint \"Created directory %s.\" % contentDirPath",
"def dirmaker(dirp):\n try:\n if not os.path.exists(dirp):\n os.makedirs(dirp)\n except:\n pass",
"def create_gallery(jekyll_site_path, gallery_name):\n\n gallery_path = os.path.join(jekyll_site_path, 'images', 'galleries', gallery_name)\n\n if not os.path.exists(gallery_path):\n os.makedirs(gallery_path)\n\n print(f\"Created gallery path {gallery_path}\")\n\n return gallery_path",
"def create_project_dir():\r\n with settings(warn_only=True):\r\n run('mkdir -p %s/packages' % (env.path,))\r\n run('mkdir %s/log' % (env.path,))\r\n run('mkdir -p %s/media/uploads' % (env.path,))\r\n run('mkdir -p %s/collected_static' % (env.path,))\r\n # change permissions for writable folder\r\n cmd = env.host_settings.get('make_folder_world_writeable','chown -R www-data:www-data')\r\n if cmd:\r\n run('%s %s/media' % (cmd, env.path))\r\n run('%s %s/collected_static' % (cmd, env.path))",
"def directory(self) -> Path:\n (directory := Path(\"markdown\").resolve(strict=False)).mkdir(exist_ok=True, parents=True)\n return directory",
"def make_dir(url):\n parts = url.strip('/').split('/')\n done = []\n for part in parts:\n path = os.path.join(STORAGE_PATH, '/'.join(done), part)\n if not os.path.exists(path):\n os.mkdir(path)\n done.append(part)",
"def CreateDirs(self):\n# First, create a list of directories.\n dnames = []\n tags = ['', '_m', '_mf']\n for entry in self.info.keys():\n if self.info[entry]['type'] == 'epi':\n for tag in tags:\n fname = self.info[entry].get('imgfile%s' % tag, None)\n if fname is not None:\n dnames.append(os.path.dirname(fname))\n else:\n if self.info[entry].get('outdir',None) is not None:\n dnames.append(self.info[entry]['outdir'])\n\n# Create them if they don't already exist.\n for dname in dnames:\n if not os.path.exists(dname):\n self.MakeDir(dname)\n if self.verbose:\n print 'mkdir %s' % dname",
"def create_directory(self):\n dirname = self.name+\"_distillates\"\n i = 1\n while True:\n try:\n mkdir(dirname)\n return dirname\n except OSError:\n dirname = self.name+\"_distillates_{0}\".format(i)\n i += 1",
"def create_directory():\r\n\r\n # Create directory for all lyrics\r\n try:\r\n os.mkdir(markovDir)\r\n except FileExistsError:\r\n pass",
"def create_files(save_dir, vid_name):\n file_name = vid_name.split('/')[-1].split('.')[0]\n if not os.path.isdir(os.path.join(save_dir, file_name)):\n os.makedirs(os.path.join(save_dir, file_name))\n return file_name",
"def prepare_destination(self):\n self.movie_root_path = self.config.share_movie_root_path % (\n self.share_path, self.title)\n\n if os.path.isdir(self.movie_root_path):\n if self.capacity_reached():\n Logger.log(\n '[!] Capacity reached. Skipping adding movie %s.' % self.title)\n else:\n if not os.path.isdir(self.movie_root_path):\n Logger.log('[+] Adding Movie: %s' % self.title)\n os.mkdir(self.movie_root_path)",
"def MakeDir(self, path: str) -> None:\n ...",
"def createDir(self, dir_name):\n os.mkdir(os.path.join(self.user[\"Save\"], dir_name))",
"def create_directory_structure():\n\n def ensure_directory(path):\n try:\n os.makedirs(path)\n\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n ensure_directory('./out/textures')\n ensure_directory('./out/data')",
"def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)",
"def make_dir(d):\n if not os.path.exists(d):\n os.makedirs(d)"
] | [
"0.7129349",
"0.6599995",
"0.65848666",
"0.6278641",
"0.6270782",
"0.62637275",
"0.621008",
"0.6079701",
"0.60781497",
"0.6029266",
"0.6018568",
"0.6013729",
"0.5994422",
"0.5988125",
"0.5982215",
"0.59778285",
"0.5950593",
"0.59488165",
"0.59399545",
"0.59338474",
"0.5907977",
"0.58873296",
"0.5882072",
"0.58646166",
"0.5859473",
"0.58408856",
"0.58262074",
"0.5810219",
"0.5789764",
"0.5789764"
] | 0.73629373 | 0 |
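A hedged sketch of where a directory-creation helper such as `create_news_post_dir` is typically invoked. The `NewsPost` model and the signal wiring are assumptions, not taken from the record.

from django.db.models.signals import post_save
from django.dispatch import receiver

@receiver(post_save, sender=NewsPost)  # hypothetical model exposing .ministry / .campaign
def ensure_post_media_dir(sender, instance, created, **kwargs):
    if created:
        create_news_post_dir(instance)  # builds the post_media/ directory under the owning ministry's media dir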
Decrypts input ciphertext using a symmetric CryptoKey. | def decrypt_symmetric(self, ciphertext):
from google.cloud import kms_v1
# Creates an API client for the KMS API.
client = kms_v1.KeyManagementServiceClient()
# The resource name of the CryptoKey.
name = client.crypto_key_path_path(self.project_id, self.location_id, self.key_ring_id,
self.crypto_key_id)
# Use the KMS API to decrypt the data.
response = client.decrypt(name, ciphertext)
return response.plaintext | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decrypt_symmetric(secret_key, ciphertext, ttl=None):\n f = Fernet(secret_key)\n # fernet requires the ciphertext to be bytes, it will raise an exception\n # if it is a string\n return f.decrypt(bytes(ciphertext), ttl)",
"def decrypt(priv_key, ciphertext):\n pk_encrypted_secret_key = ciphertext['pk_encrypted_secret_key']\n sym_encrypted_data = ciphertext['sym_encrypted_data']\n # TODO: secure delete\n secret_key = decrypt_pk(priv_key, pk_encrypted_secret_key)\n encoded_string = decrypt_symmetric(secret_key, sym_encrypted_data)\n return decode_data(encoded_string)",
"def decrypt(self, ciphertext, key):\n iv = ciphertext[:AES.block_size]\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n plaintext = cipher.decrypt(ciphertext[AES.block_size:])\n return self.pkcs7_unpad(plaintext)",
"def decrypt(key, ciphertext):\n data = fk(keyGen(key)[1], ip(ciphertext))\n return fp(fk(keyGen(key)[0], swapNibbles(data)))",
"def decrypt(self, ciphertext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass",
"def decrypt(private_key, ciphertext):\n if len(ciphertext) < 512 + 16:\n return None\n msg_header = ciphertext[:512]\n msg_iv = ciphertext[512:512+16]\n msg_body = ciphertext[512+16:]\n try:\n symmetric_key = PKCS1_OAEP.new(private_key).decrypt(msg_header)\n except ValueError:\n return None\n if len(symmetric_key) != 32:\n return None\n return AES.new(symmetric_key,\n mode=AES.MODE_CFB,\n IV=msg_iv).decrypt(msg_body)",
"def decrypt(ciphertext: str, key: str) -> str:\n return encrypt(ciphertext, key)",
"def decrypt(ciphertext, key):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\n\tif not isPython2():\n\t\tif isString(ciphertext):\n\t\t\tciphertext = ciphertext.encode(\"latin-1\")\n\t\tif isString(key):\n\t\t\tkey = key.encode(\"latin-1\")\n\t\t\n\tiv = ciphertext[:AES.block_size]\n\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\tplaintext = cipher.decrypt(ciphertext[AES.block_size:])\n\treturn plaintext",
"def sym_dec(self, ciph, passphrase):\n (rfd, wfd) = xpipe()\n os.write(wfd, passphrase + '\\n')\n plain = xsystem([self.sslname, self.symmetric, '-d', '-pass',\n 'fd:' + str(rfd)], ciph)\n xclose(wfd)\n xclose(rfd)\n if not plain:\n warning('keymanagement: Unable to decrypt because %s does not exist\\n' %(self.sslname))\n return None\n\n return plain",
"def decrypt(ciphertext):\n # AES decrypt\n iv = ciphertext[:16]\n ciphertext = ciphertext[16:]\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(aes.decrypt(ciphertext))",
"def decrypt_block(self, ciphertext):\n assert len(ciphertext) == 16\n\n cipher_state = bytes2matrix(ciphertext)\n\n add_round_key(cipher_state, self._key_matrices[-1])\n inv_shift_rows(cipher_state)\n inv_sub_bytes(cipher_state)\n\n for i in range(self.n_rounds - 1, 0, -1):\n add_round_key(cipher_state, self._key_matrices[i])\n inv_mix_columns(cipher_state)\n inv_shift_rows(cipher_state)\n inv_sub_bytes(cipher_state)\n \n add_round_key(cipher_state, self._key_matrices[0])\n\n return matrix2bytes(cipher_state)",
"def decrypt_cbc(key, ciphertext):\n\tmessage = ''\n\tfor i in range(0, len(ciphertext)/16 - 1):\n\t\tiv = ciphertext[i*16:(i+1)*16]\n\t\tinputblock = ciphertext[(i+1)*16:(i+2)*16]\n\t\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\t\tmessage +=cipher.decrypt(inputblock)\n\tif ord(message[-1]) <=16:\n\t\tmessage = message[:-ord(message[-1])]\n\treturn message",
"def decrypt(cypher, priv_key):\n\n if not isinstance(priv_key, key.PrivateKey):\n raise TypeError(\"You must use the private key with decrypt\")\n\n return gluechops(cypher, priv_key.d, priv_key.n, decrypt_int)",
"def decryptEncryptionKey(cipherString, key):\n\tencryptionType, iv, cipherText, mac = decodeCipherString(cipherString)\n\t# log.debug(\"mac:%s\", mac)\n\t# log.debug(\"iv:%s\", iv)\n\t# log.debug(\"ct:%s\", cipherText)\n\tassert mac is None\n\tif encryptionType != 0:\n\t\traise UnimplementedError(\"can not decrypt type:%s\" % encryptionType)\n\tcipher = cryptography.hazmat.primitives.ciphers.Cipher(\n\t algorithms.AES(key), modes.CBC(iv), backend=default_backend())\n\tdecryptor = cipher.decryptor()\n\tplainText = decryptor.update(cipherText) + decryptor.finalize()\n\t# log.debug(\"mackey before unpad:%s\", plainText[32:])\n\treturn plainText[:32], plainText[32:64]",
"def _decrypt(data):\n cipher = AES.new(bytes(_AES_KEY), AES.MODE_CBC, bytes(_AES_IV))\n return cipher.decrypt(data)",
"def decrypt(self, cypher):\n\n if self.crypt_private == \"\":\n raise ValueError(\"Error decrypting: No private encryption key found for {}\".format(self))\n\n key_private = RsaPrivateKey.Read(self.crypt_private)\n return key_private.Decrypt(cypher)",
"def decrypt_ctr(key, ciphertext):\n\tmessage = ''\n\tiv = ciphertext[0:16]\n\tfor i in range(16, len(ciphertext), 16):\n\t\tinputblock = ciphertext[i:i+16]\n\t\tcipher = AES.new(key, AES.MODE_ECB)\n\t\txorkey = cipher.encrypt(long_to_bytes(bytes_to_long(iv)+(i/16-1)))\n\t\tif len(inputblock) == 16:\n\t\t\tmessage += strxor(inputblock, xorkey)\n\t\telse:\n\t\t\tmessage += strxor(inputblock, xorkey[:len(inputblock)])\n\treturn message",
"def decrypt(self, ciphertext):\n return self._transform(ciphertext, self._backward)",
"def aes_decrypt(encrypted_data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = cipher.decrypt(encrypted_data)\r\n return unpad(padded_data)",
"def decrypt(ciphertext, key, iv):\n cipher = AES.new(key, AES.MODE_CFB, iv)\n msg = cipher.decrypt(ciphertext)\n return msg",
"def asym_dec(self, ciph, keyfile):\n ciph = ciph.split('\\0')\n ciphkey_len = int(ciph[0])\n ciph = '\\0'.join(ciph[1:])\n ciphkey = ciph[:ciphkey_len]\n ciph = ciph[ciphkey_len:]\n\n passphrase = xsystem([self.sslname, 'rsautl', '-decrypt', '-inkey',\n keyfile], ciphkey)\n if not passphrase:\n warning('keymanagement: Unable to perform asymmetric decryption\\n')\n return None\n\n return self.sym_dec(ciph, passphrase)",
"def Decrypt(self, input_bytes):\n data_bytes = input_bytes[keyczar.HEADER_SIZE:] # remove header\n if len(data_bytes) < self.block_size + util.HLEN: # IV + sig\n raise errors.ShortCiphertextError(len(data_bytes))\n\n iv_bytes = data_bytes[:self.block_size] # first block of bytes is the IV\n ciph_bytes = data_bytes[self.block_size:-util.HLEN]\n sig_bytes = data_bytes[-util.HLEN:] # last 20 bytes are sig\n if not self.hmac_key.Verify(input_bytes[:-util.HLEN], sig_bytes):\n raise errors.InvalidSignatureError()\n\n plain = AES.new(self.key_bytes, AES.MODE_CBC, iv_bytes).decrypt(ciph_bytes)\n return self.__UnPad(plain)",
"def decrypt(data, key, iv):\n decryptor = AES.new(key, AES.MODE_CBC, iv=iv)\n return decryptor.decrypt(data)",
"def rsa_decrypt(cypher, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.decrypt)",
"def decrypt(\r\n key: bytes,\r\n cipher_text: bytes,\r\n) -> str:\r\n block_size = 16\r\n iv = cipher_text[:block_size]\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n plain_text = cipher.decrypt(cipher_text[block_size:]).decode('utf-8')\r\n return _unpad(plain_text)",
"def decrypt(algorithm, key, encrypted_data, associated_data):\n decryptor = Decryptor(algorithm, key, associated_data, encrypted_data.iv, encrypted_data.tag)\n return decryptor.update(encrypted_data.ciphertext) + decryptor.finalize()",
"def decrypt():\n plaintext = \"\"\n i = 0\n while i < len(ciphertext):\n if i%2==1:\n try:\n plaintext += key[ ciphertext[i-1]+ciphertext[i] ]\n except KeyError:\n plaintext += ciphertext[i-1]+ciphertext[i]\n i += 1\n return plaintext",
"def decrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n decrypted = aes.decrypt(text)\r\n return decrypted",
"def decrypt(self, cypher):\n\n cypher = b64decode(cypher)\n key_private = RsaPrivateKey.Read(self.crypt_private)\n return key_private.Decrypt(cypher)",
"def decrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_decrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())"
] | [
"0.78190506",
"0.76649165",
"0.73512477",
"0.7301102",
"0.72868747",
"0.7224076",
"0.7158116",
"0.7080847",
"0.7073697",
"0.70620877",
"0.7024779",
"0.70223695",
"0.70039576",
"0.7003598",
"0.6956859",
"0.6855221",
"0.6831517",
"0.68168175",
"0.6813381",
"0.679899",
"0.6788728",
"0.67878425",
"0.6756865",
"0.6752442",
"0.6739608",
"0.6719981",
"0.6691214",
"0.6678506",
"0.66594785",
"0.66182584"
] | 0.8391707 | 0 |
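The decrypt call above is bound to a class that keeps the KMS identifiers on `self`. The free-function sketch below reuses exactly the client calls shown in the record, so it assumes the same pre-2.0 `google-cloud-kms` client surface (later releases changed these method names).

from google.cloud import kms_v1

def decrypt_with_kms(project_id, location_id, key_ring_id, crypto_key_id, ciphertext):
    # Same calls as the record: build the CryptoKey resource name, then ask KMS to decrypt the bytes.
    client = kms_v1.KeyManagementServiceClient()
    name = client.crypto_key_path_path(project_id, location_id, key_ring_id, crypto_key_id)
    return client.decrypt(name, ciphertext).plaintext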
Method that decrypts a file using the decrypt_symmetric method and writes the decrypted output to a file named gcp-key.json | def decrypt_from_file(self, file_path):
# open and decrypt byte file
f = open(file_path, "rb").read()
decrypted = self.decrypt_symmetric(f)
json_string = decrypted.decode("utf-8")
# write string to json file
destination_file_name = Path("downloaded-key/gcp-key.json")
destination_file_name.touch(exist_ok=True) # creates file if it does not yet exist
destination_file_name.write_text(json_string) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decrypt_symmetric(self, ciphertext):\n from google.cloud import kms_v1\n\n # Creates an API client for the KMS API.\n client = kms_v1.KeyManagementServiceClient()\n\n # The resource name of the CryptoKey.\n name = client.crypto_key_path_path(self.project_id, self.location_id, self.key_ring_id,\n self.crypto_key_id)\n # Use the KMS API to decrypt the data.\n response = client.decrypt(name, ciphertext)\n return response.plaintext",
"def decrypt(self, filename):\n\t f = Fernet(self.key)\n\t with open(filename, \"rb\") as file:\n\t # read the encrypted data\n\t encrypted_data = file.read()\n\t # decrypt data\n\t decrypted_data = f.decrypt(encrypted_data)\n\t # write the original filename\n\t return decrypted_data",
"def decrypt(project_id, location_id, key_ring_id, crypto_key_id,\n ciphertext_file_name, plaintext_file_name):\n\n # Creates an API client for the KMS API.\n kms_client = googleapiclient.discovery.build('cloudkms', 'v1')\n\n # The resource name of the CryptoKey.\n name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(\n project_id, location_id, key_ring_id, crypto_key_id)\n\n # Read encrypted data from the input file.\n with io.open(ciphertext_file_name, 'rb') as ciphertext_file:\n ciphertext = ciphertext_file.read()\n\n # Use the KMS API to decrypt the data.\n crypto_keys = kms_client.projects().locations().keyRings().cryptoKeys()\n request = crypto_keys.decrypt(\n name=name,\n body={'ciphertext': base64.b64encode(ciphertext).decode('ascii')})\n response = request.execute()\n plaintext = base64.b64decode(response['plaintext'].encode('ascii'))\n\n # Write the decrypted data to a file.\n with io.open(plaintext_file_name, 'wb') as plaintext_file:\n plaintext_file.write(plaintext)\n\n print('Saved plaintext to {}.'.format(plaintext_file_name))",
"def _decrypt(self):\n self._outfile = os.path.join(self.dest, self.plain_file)\n self._infile = self.encrypted_file\n self._log.info(\"Decrypting file '%s' to '%s'\", self.encrypted_file, self._outfile)\n with open(self.encrypted_file, \"rb\") as enc_file:\n openssl(\n \"enc\",\n \"-aes-256-cbc\",\n \"-d\",\n \"-pass\",\n \"file:{secret}\".format(secret=self.secret.keyfile),\n _in=enc_file,\n _out=self._outfile,\n )\n self._log.info(\"File '%s' decrypted to '%s'\", self.encrypted_file, self._outfile)\n return True",
"def _decrypt(self, src_filepath, dest_filepath):\r\n self.log.info(\"Decrypting file {0} to {1}.\".format(src_filepath, dest_filepath))\r\n\r\n gpg = gnupg.GPG(options=self.gpg_options)\r\n key_data = open(self.key_file, mode='rb').read()\r\n import_result = gpg.import_keys(key_data)\r\n self.log.info(\"Key import results: {0}\".format(import_result.results))\r\n\r\n with open(src_filepath, 'rb') as f:\r\n status = gpg.decrypt_file(f,\r\n passphrase=self._passphrase,\r\n output=dest_filepath)\r\n self.log.info(\"ok: {0}, status:{1}, stderr: {2}\".format(status.ok, status.status, status.stderr))\r\n\r\n if status.ok and self.remove_encrypted:\r\n os.remove(src_filepath)\r\n\r\n if not status.ok:\r\n raise AirflowException(\"Failed to decrypt file {0}: {1}\"\r\n .format(src_filepath, status.stderr))\r\n\r\n self.log.info(\"Completed file decryption.\")",
"def decrypt_file(open_name:str, write_name:str, key:str):\n\n\n with open(write_name, \"wb\") as f:\n key = key.encode()\n for (i, part_of_picture) in enumerate(PictureSlices(open_name)):\n round_key = b\"%d%s randominfix %d\" % (i, key, i)\n decrypted_part = xor(part_of_picture, sha512(round_key).digest())\n f.write(decrypted_part)",
"def asym_dec(self, ciph, keyfile):\n ciph = ciph.split('\\0')\n ciphkey_len = int(ciph[0])\n ciph = '\\0'.join(ciph[1:])\n ciphkey = ciph[:ciphkey_len]\n ciph = ciph[ciphkey_len:]\n\n passphrase = xsystem([self.sslname, 'rsautl', '-decrypt', '-inkey',\n keyfile], ciphkey)\n if not passphrase:\n warning('keymanagement: Unable to perform asymmetric decryption\\n')\n return None\n\n return self.sym_dec(ciph, passphrase)",
"def decrypt_file(self, file_name, key):\n with open(file_name, 'rb') as fo:\n try:\n ciphertext = fo.read()\n except:\n print \"[-] Error opening file {0} for reading.\".format(file_name)\n return\n try:\n dec = self.decrypt(ciphertext, key)\n except:\n print \"[-] Decryption failed.\"\n return\n\n with open(file_name[:-4], 'wb') as fo:\n try:\n fo.write(dec)\n except:\n print \"[-] Error writing out file {0}\".format(file_name[:-4])\n return\n\n os.chmod(file_name[:-4], 0600)\n return file_name[:-4]",
"def decrypt_file(filename, key):\n f = Fernet(key)\n with open(filename, \"rb\") as file:\n # read the encrypted data\n encrypted_data = file.read()\n # decrypt data\n decrypted_data = f.decrypt(encrypted_data)\n # delete file\n remove(filename)\n # generate new filename\n new_filename = generate_new_filename(filename, key, False)\n # write the encrypted file\n with open(new_filename, \"wb\") as file:\n print(\"Decrypted: \" + new_filename)\n file.write(decrypted_data)\n\n return new_filename",
"def decrypt_file(self, input_file_name='', output_file_name=''):\n\n # Checking if input and output files selected right\n assert input_file_name and isfile(input_file_name), \"Input file wasn't selected!\"\n assert output_file_name, \"Output file wasn't selected!\"\n\n with open(output_file_name, 'wb') as output_file:\n # To iterate file as int values, I'm using generator\n input_file = self._open_file_longint(input_file_name)\n try:\n alpha = input_file.__next__()\n beta = input_file.__next__()\n except StopIteration:\n raise AssertionError(\"Input file is empty! Nothing to decrypt.\")\n\n x = self.keys['private']\n p = self.keys['public']['p']\n\n while alpha and beta:\n message_byte = bytes(chr((beta % p * (pow(alpha, (p - 1 - x), p))) % p), \"ascii\")\n output_file.write(message_byte)\n try:\n alpha = input_file.__next__()\n beta = input_file.__next__()\n except StopIteration:\n alpha = 0\n beta = 0\n return 1",
"def decrypt_file(path_to_enc_file, target_ext):\n\t\tencrypted_string = EncryptDecrypt.file_to_string(path_to_enc_file)\n\t\tdecrypted_string = EncryptDecrypt.hex_to_ascii_string(encrypted_string)\n\t\tenc_file_name, _ = os.path.splitext(path_to_enc_file)\n\t\twith open(enc_file_name+\".\"+target_ext, \"w+\") as df:\n\t\t\tdf.write(decrypted_string)\n\t\t#os.remove(path_to_enc_file)",
"def main():\n # file = None\n # for arg in sys.argv:\n # if \".txt\" in arg or \".py\" not in arg or \".log\" not in arg:\n # file = arg\n\n file = input(\"Enter a file: \")\n\n file_data = Cryptography()\n file_data.file = file\n\n crypt_type = input(\"Please enter 'E' to encrypt or 'D' to decrypt\\n>> \")\n file_data.crypt_type = crypt_type\n\n crypt_type = \"encrypt\" if crypt_type == 'E' else \"decrypt\"\n\n file_data.crypt_method = file_data.crypt_method\n\n key = input(\"Please enter a key for your data\\n>> \")\n file_data.key = key\n\n print(f\"crypt_method: {file_data.crypt_method}\")\n new_data = file_data.crypt_methods[file_data.crypt_method]()\n\n crypt_methods = defaultdict(str,\n {'C': \"Caesar\",\n 'M': \"Monoalphabetic\",\n 'P': \"Polyalphabetic\"})\n\n if DEBUG is False:\n crypt_method = crypt_methods[file_data.crypt_method]\n new_file_name = f\"{crypt_method}_{crypt_type.capitalize()}ed.txt\"\n logger.info(f\"{type(new_data)}: {new_data}\")\n Cryptography.write(new_file_name, new_data)\n print(f\"Your new {crypt_type}ed file has been created as \" +\n f\"{new_file_name}.\")",
"def decrypt_file(self, key):\n k, iv, meta_mac = MegaCrypto.get_cipher_key(key)\n ctr = Crypto.Util.Counter.new(\n 128, initial_value=(\n (iv[0] << 32) + iv[1]) << 64)\n cipher = Crypto.Cipher.AES.new(\n MegaCrypto.a32_to_str(k),\n Crypto.Cipher.AES.MODE_CTR,\n counter=ctr)\n\n self.pyfile.setStatus(\"decrypting\")\n self.pyfile.setProgress(0)\n\n file_crypted = encode(self.last_download)\n file_decrypted = file_crypted.rsplit(self.FILE_SUFFIX)[0]\n\n try:\n f = open(file_crypted, \"rb\")\n df = open(file_decrypted, \"wb\")\n\n except IOError, e:\n self.fail(e.message)\n\n encrypted_size = os.path.getsize(file_crypted)\n\n checksum_activated = self.config.get(\n \"activated\", default=False, plugin=\"Checksum\")\n check_checksum = self.config.get(\n \"check_checksum\", default=True, plugin=\"Checksum\")\n\n cbc_mac = MegaCrypto.Checksum(\n key) if checksum_activated and check_checksum else None\n\n progress = 0\n for chunk_start, chunk_size in MegaCrypto.get_chunks(encrypted_size):\n buf = f.read(chunk_size)\n if not buf:\n break\n\n chunk = cipher.decrypt(buf)\n df.write(chunk)\n\n progress += chunk_size\n self.pyfile.setProgress(int((100.0 / encrypted_size) * progress))\n\n if checksum_activated and check_checksum:\n cbc_mac.update(chunk)\n\n self.pyfile.setProgress(100)\n\n f.close()\n df.close()\n\n self.log_info(_(\"File decrypted\"))\n os.remove(file_crypted)\n\n if checksum_activated and check_checksum:\n file_mac = cbc_mac.digest()\n if file_mac == meta_mac:\n self.log_info(_('File integrity of \"%s\" verified by CBC-MAC checksum (%s)') %\n (self.pyfile.name.rsplit(self.FILE_SUFFIX)[0], meta_mac))\n else:\n self.log_warning(_('CBC-MAC checksum for file \"%s\" does not match (%s != %s)') %\n (self.pyfile.name.rsplit(self.FILE_SUFFIX)[0], file_mac, meta_mac))\n self.checksum_failed(\n file_decrypted, _(\"Checksums do not match\"))\n\n self.last_download = decode(file_decrypted)",
"def decrypt_text_file(self):\r\n\t\t#Ensures that the file has something that can be decrypted.\r\n\t\tfile_contains_message = True\r\n\t\twhile file_contains_message:\r\n\t\t\tfile_exists = True\r\n\t\t\t#Checks to see if the file exists.\r\n\t\t\twhile file_exists:\r\n\t\t\t\tself.text_file_name = input(\"Please enter the name of the text file you wish to decrypt in format |file_name.txt|.--> \")\r\n\t\t\t\tif \".txt\" in self.text_file_name:\r\n\t\t\t\t\tfile_exists = Doc_Control().check_for_file(self.text_file_name)\r\n\t\t\t\telse: \r\n\t\t\t\t\tcontinue\r\n\t\t\t#Decrypts message but verifys correct key before giving user their decrypted message.\r\n\t\t\twhile True: \r\n\t\t\t\tself.message = Doc_Control().open_file(self.text_file_name)\r\n\t\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\t\tfile_contains_message = False\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"Your file does not contain an encryptable message.\")\r\n\t\t\t\t\tbreak\r\n\t\tself.right_key = True\r\n\t\twhile self.right_key:\r\n\t\t\tself.setup_key_decrypt()\r\n\t\t\tself.my_code = Decryptor(self.message, self.key).transfer_decrypt()\r\n\t\t\tself.verify_decrypt_key()\r\n\t\tself.output_file = Doc_Control().assign_output_file()\r\n\t\toutput_file_obj = open(self.output_file, 'w')\r\n\t\toutput_file_obj.write(self.my_code)\r\n\t\toutput_file_obj.close()\t\t\r\n\t\tprint(\"\\nYour file has been decrypted.\")",
"def decrypt_csv_file(encrypted_csv_path, decrypted_csv_path, password):\n csv_list = decrypt_csv(encrypted_csv_path, password)\n write_csv(csv_list, decrypted_csv_path)",
"def decrypt(self, path):\n with open(path, \"rb\") as fileh:\n gpg = qpgpg.GPG()\n try:\n decrypted = gpg.decrypt_file(fileh)\n except qpgpg.GPG.DecryptionException:\n raise\n else:\n return decrypted",
"def decrypt_file(filename, auth_tag, bytestring):\n\treceived_nonce = bytestring[:16]\n\tciphertext = bytestring[16:]\n\tcipher = AES.new(my_privaeskey, AES.MODE_GCM, received_nonce)\n\tplaintext = cipher.decrypt_and_verify(ciphertext, auth_tag)\n\tf = open(filename, 'w')\n\tf.write(plaintext.decode('ascii'))",
"def decryptFile(files, key):\n\tfrom os.path import splitext\n\tfrom os import unlink\n\tfrom tarfile import open as openTar\n\t\n\tif isString(files):\n\t\tfiles = [files]\n\n\tfor filename in files:\n\t\tif splitext(filename)[1][1:].upper() == ENCRYPTED_EXTENSION:\n\t\t\twith open(filename, 'rb') as fo:\n\t\t\t\tcyphered = fo.read()\n\t\t\tcontent = BytesIO(decrypt(cyphered, key))\n\t\t\ttarFilename = splitext(filename)[0]+\".TAR\"\n\t\t\ttarCopy = open(tarFilename,\"wb\")\n\t\t\ttarCopy.write(content.getvalue())\n\t\t\ttarCopy.close()\n\t\t\twith openTar(fileobj=content, mode=\"r\") as fo:\n\t\t\t\tfo.extractall(splitext(filename)[0])\n\t\t\tunlink(tarFilename)\n\t\t\ttry:\n\t\t\t\tunlink(filename)\n\t\t\texcept:\n\t\t\t\tpass",
"def decryptor(file_name, key):\n\twith open(file_name, 'rb') as dfile:\n\t\tciphertext = dfile.read()\n\t\tdec = decrypt(key, ciphertext)\n\t\tdfile.close()\n\t\tdtext = \"The encrypted file was opened by macupdate.py by the user: \"\n\t\tcreateLog(dtext, 'logs/macupdate.log')\n\t\treturn dec",
"def do_android_decryption(self):\r\n self.aes_decryption_key = self.extract_aes_key()\r\n self.decrypt_device_file()\r\n # join is optimized and does not cause O(n^2) total memory copies.\r\n self.decrypted_file = b\"\\n\".join(self.good_lines)",
"def decrypt(fileLocation):\n key = load_key()\n f = Fernet(key)\n with open(fileLocation, \"rb\") as file:\n # read the encrypted data\n encrypted_data = file.read()\n # decrypt data\n decrypted_data = f.decrypt(encrypted_data)\n # write the original file\n with open(fileLocation, \"wb\") as file:\n file.write(decrypted_data)",
"def decrypt_file(file, key_str):\n encrypt_data = read_raw(file)\n key = fe.key_encode(key_str)\n iv = encrypt_data[:fe.AES.block_size]\n cipher = fe.create_cipher(key, iv)\n image = cipher.decrypt(encrypt_data[fe.AES.block_size:])\n return image",
"def decrypt(self, input_file, output_file):\n self.key %= 26\n plaintext = \"\"\n with open(input_file) as encrypted_text:\n self.text = encrypted_text.read()\n for char in self.text:\n if char.isalpha():\n if 65 <= ord(char) <= 90: #char is between A and Z\n if ord(char) - self.key >= 65: #65 = ord('A')\n plaintext += chr(ord(char) - self.key)\n elif ord(char) - self.key < 65:\n plaintext += chr(ord(char) - self.key + 26)\n if 97 <= ord(char) <= 122:\n if ord(char) - self.key >= 97:\n plaintext += chr(ord(char) - self.key)\n elif ord(char) - self.key < 97:\n plaintext += chr(ord(char) - self.key + 26)\n else:\n plaintext += char\n\n decrypted_file = open(output_file, 'w')\n decrypted_file.write(plaintext)\n print \"Created file: ces-decrypted.txt\"",
"def decrypt(outfile, keyfile):\n decrypted = \"\"\n for index, o in enumerate(outfile):\n mod = index % 7\n k = keyfile[mod]\n d = decrypt_char(o, k)\n d_ord = ord(d)\n d_hex = hex(d_ord)\n o_repr = repr(o)\n print(f\"{index:2d} {mod:2d} {o_repr: >7s} {k: >2s} {d: >2s} {d_ord:3d} {d_hex: >5s}\")\n decrypted += d\n return decrypted",
"def crypt_file(self, file_path, encrypted=False):\n\n with open(file_path, 'rb+') as f:\n _data = f.read()\n\n if not encrypted:\n## print(f'File contents pre encryption: {_data}')\n data = self.cryptor.encrypt(_data)\n## print(f'File contents post encryption: {data}')\n else:\n data = self.cryptor.decrypt(_data)\n## print(f'File content post decryption: {data}')\n\n file=open(file_path,'wb')\n file.write(data)",
"def __output_encrypted(self, data, key_len, filename, iv):\n with open(filename, \"w\") as f:\n f.write(START_HEADER + \"\\n\")\n\n key = \"Description\"\n val = \"Crypted file\"\n f.write(self.gen_key_val(key, val))\n\n key = \"Method\"\n val = \"AES\"\n f.write(self.gen_key_val(key, val))\n\n key = \"File name\"\n val = filename\n f.write(self.gen_key_val(key, val))\n\n key = \"IV\"\n val = binascii.hexlify(iv)\n f.write(self.gen_key_val(key, val))\n\n key = \"Data\"\n val = base64.b64encode(data)\n # val = data\n f.write(self.gen_key_val(key, val))\n\n f.write(END_HEADER + \"\\n\")",
"def decrypt(path, key):\n key = load_key(key)\n\n if p.isdir(path):\n # encrypt a directory\n return decrypt_dir(path, key)\n # decrypt a file\n path = decrypt_file(path, key)\n # check if file contains suffix\n if \"-encrypted.zip\" in path:\n return decrypt_dir(path, key)\n return",
"def encrypt_file(file, target_path, key):\n file_name = file.split('/')[-1] + '.enc'\n image = convert_content(file, key)\n write_raw(image, os.path.join(target_path, file_name))",
"def decrypt_using_gpg(self, gpg_file, extract_target=None):\n if not os.path.isfile(f\"{gpg_file}.gpg\"):\n os.symlink(gpg_file, f\"{gpg_file}.gpg\")\n\n gpg_file_link = f\"{gpg_file}.gpg\"\n tar_fn = f\"{gpg_file}.tar.gz\"\n try:\n cmd = [\n \"gpg\",\n \"--verbose\",\n \"--batch\",\n \"--yes\",\n f\"--output={tar_fn}\",\n \"--pinentry-mode\",\n \"loopback\",\n f\"--passphrase-file={env.GPG_PASS_FILE}\",\n \"--decrypt\",\n gpg_file_link,\n ]\n run(cmd, suppress_stderr=True)\n log(f\"#> GPG decrypt {ok()}\")\n _remove(gpg_file)\n os.unlink(gpg_file_link)\n except Exception as e:\n print_tb(e)\n raise e\n # finally:\n # os.unlink(gpg_file_link)\n\n if extract_target:\n try:\n untar(tar_fn, extract_target)\n except Exception as e:\n raise Exception(\"Could not extract the given tar file\") from e\n finally:\n cmd = None\n _remove(f\"{extract_target}/.git\")\n _remove(tar_fn)",
"def _write_encrypted_pem(self, passphrase, tmpfile):\n key = PKey()\n key.generate_key(TYPE_RSA, 1024)\n pem = dump_privatekey(FILETYPE_PEM, key, \"blowfish\", passphrase)\n with open(tmpfile, \"w\") as fObj:\n fObj.write(pem.decode(\"ascii\"))\n return tmpfile"
] | [
"0.64325106",
"0.63213474",
"0.622638",
"0.61633515",
"0.61598235",
"0.6144704",
"0.608841",
"0.60840195",
"0.6062278",
"0.5996128",
"0.5985062",
"0.59499764",
"0.59409845",
"0.59369785",
"0.5880555",
"0.5879515",
"0.5877775",
"0.58735037",
"0.5852493",
"0.58381975",
"0.58349544",
"0.5832787",
"0.58187425",
"0.5814456",
"0.58140635",
"0.58129454",
"0.5790948",
"0.5751469",
"0.5715876",
"0.5712323"
] | 0.801987 | 0 |
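A follow-up that is not part of the record: once the plaintext is written to `downloaded-key/gcp-key.json`, a typical next step is to confirm it parses as JSON before pointing client libraries at it. A small stdlib-only sketch:

import json
import os
from pathlib import Path

key_path = Path("downloaded-key/gcp-key.json")
json.loads(key_path.read_text())  # raises a ValueError if decryption produced something other than JSON
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = str(key_path)  # standard variable read by Google client libraries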
Downloads the encrypted key for the configured service account from the bucket's generated-keys/ folder and stores it locally under downloaded-key/ | def download_key_from_blob(self):
source_blob_name = "generated-keys/{}".format(self.service_account_email)
destination_name = self.service_account_email
# generate destination folder and file if they do not yet exist
Path("downloaded-key/").mkdir(parents=True, exist_ok=True) # creates folder if not exists
folder = Path("downloaded-key/") # folder where all the newly generated keys go
destination_file_name = folder / "{}".format(destination_name) # file named after service-account name
destination_file_name.touch(exist_ok=True)
# download the file and store it locally
storage_client = storage.Client()
bucket = storage_client.get_bucket(self.bucket_name)
blob = bucket.blob(source_blob_name)
blob.download_to_filename(destination_file_name)
# prints source and destination indicating successful download
    print('Encrypted key {} downloaded to -----> \n {}.'.format(
        source_blob_name,
        destination_file_name))
return destination_file_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_key():\n data = check_args(('cloudProvider', ))\n provider = jobs.init_provider(data, True)\n key = encrypt_key(provider.get_key(), data['username'])\n return make_response(keyName=provider.keyname, key=key)",
"def generate_key():\r\n # generating key\r\n key = Fernet.generate_key()\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n # writing key in file\r\n with open(key_dir, \"wb\") as keyFile:\r\n keyFile.write(key)",
"def generate_key():\n key = Fernet.generate_key()\n with open(\"Secret.key\",\"wb\")as key_file:\n key_file.write(key)",
"def _keypath(self) -> pathlib.Path:\n home = pathlib.Path.home()\n keyfile = home / \".cmdc\" / \"apikey\"\n keyfile.parent.mkdir(parents=True, exist_ok=True)\n return keyfile",
"def generate_key(self):\n self.key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(self.key)",
"def generate_key():\n key = Fernet.generate_key()\n with open(\"pass.key\", \"wb\") as key_file:\n key_file.write(key)",
"def setup_keys():\n if os.path.isfile(\"key.txt\"):\n message = \"Key already generated\"\n else:\n secret = secrets.token_urlsafe(64)\n message = \"Secret generated and saved in key.txt\"\n with open(\"key.txt\", \"w\") as fd:\n fd.write(secret)\n return json.dumps({'message': message})",
"def util_generate_key(conf_file=None):\n keyname = DebRepo(**config(conf_file=conf_file)).generate_key()\n print(keyname)",
"def generate_key():\n key = Fernet.generate_key()\n with open(\"secret.key\", \"wb\") as key_file:\n key_file.write(key)",
"def load_key():\n return open(\"Secret.key\",\"rb\").read()",
"def private_key():\n return \"Toholampi summer festival 2017 has the most harcore rock bands\"",
"def apikey(serv):\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n '{0}.key'.format(serv))\n key = open(path, \"r\").read().rstrip()\n return key",
"def generate_key(domain_name):\n key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n backend=default_backend()\n )\n\n #storing client's private key\n with open(domain_name + \".key\", \"wb\") as f:\n f.write(key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption(),\n ))\n\n return key",
"def CreateKeyFile():\n keyfile = tempfile.mkstemp()[1]\n cmd = [\n 'openssl',\n 'genrsa',\n '-out', keyfile,\n '2048'\n ]\n _RunCommand(cmd)\n return keyfile",
"def create_key ():",
"def private_key(self):",
"def gen_tlsauth_key():\n cmd = ['/usr/sbin/openvpn', '--genkey', 'secret', 'ta.tmp']\n ret = subprocess.check_call(cmd)\n with open('ta.tmp') as key:\n key = key.read()\n os.remove('ta.tmp')\n return key",
"def load_key():\n return open(\"secret.key\", \"rb\").read()",
"def write_key(api_key, output_path, client_module=pyalveo):\n client = client_module.Client(api_key, API_URL, use_cache=False)\n outfile = open(output_path, 'w')\n outfile.write(api_key)\n outfile.close()",
"def get_api_key ():\n PROJECT_PATH = os.path.abspath(os.path.dirname(__name__))\n key_file = open(PROJECT_PATH + \"/key_api.txt\", \"r\")\n return (key_file.read()).rstrip('\\n')",
"def read_key():\n path = os.path.join(os.path.dirname(__file__), 'data')\n f = open(os.path.join(path, 'credential.txt'), 'r')\n key = f.read()\n f.close()\n return key",
"def write_key():\n key = fernet.Fernet.generate_key()\n keyfile = open(KEY_PATH,'wb')\n keyfile.write(key)\n keyfile.close()",
"def generate_key(self):\n key = rsa.generate_private_key(\n public_exponent=self.settings['key_public_exponent_size'],\n key_size=self.settings['key_size'],\n backend=default_backend()\n )\n return key",
"def _set_keystore_path(self) -> None:\n response = self.single_call(\"hmy keys location\").strip()\n if not os.path.exists(response):\n os.mkdir(response)\n self.keystore_path = response",
"def generate_secret_key(self, server_name: str) -> str:\n if self.config_in_use():\n raise BaseConfigInUseError()\n\n signing_key_path = join(self.config_dir, server_name + \".signing.key\")\n subprocess.run([\"generate_signing_key.py\", \"-o\", signing_key_path])\n with open(signing_key_path, \"r\") as f:\n return f.read()",
"def _get_api_key():\n api_key_directory = os.getenv(\"KOKORO_GFILE_DIR\")\n api_key_file = os.path.join(api_key_directory, \"resultstore_api_key\")\n assert os.path.isfile(api_key_file), (\n \"Must add --api_key arg if not on \"\n \"Kokoro or Kokoro environment is not set up properly.\"\n )\n with open(api_key_file, \"r\") as f:\n return f.read().replace(\"\\n\", \"\")",
"def get_access_key(self, keyfile):\n my_key = AccessKey.create_key_from_file(keyfile)\n my_key.store_keys()\n return my_key.key",
"def download_data_key(self, name):\n temp_data_key = self._get_data_key(name)\n # File wasn't found on s3 so we return.\n if not temp_data_key:\n return\n\n output_file = \"/dev/shm/\" + name + \".tmp.key\"\n\n try:\n file = open(output_file, \"w\")\n except Exception as e:\n print \"[-] Error opening /dev/shm for writing.\"\n return\n\n file.write(temp_data_key)\n os.chmod(output_file, 0600)\n\n print \"[+] {0} data key saved to {1}\".format(name, output_file)",
"def get_private_key(self):\n# _log.debug(\"get_private_key: node_name={}\".format(self.node_name))\n with open(os.path.join(self.runtime_dir, \"private\", \"private.key\"), 'rb') as f:\n return f.read()",
"def getLocalKey(cmd, path):\n\n executeCmd(cmd)\n out = subprocess.Popen(\"cat\" + \" \" + path, shell=True,\n stdout=subprocess.PIPE)\n key = out.stdout.read().rstrip('\\n')\n logging.debug(\"Local key has been generated successfully : %s \", key)\n return key"
] | [
"0.7312609",
"0.6830154",
"0.6603424",
"0.6562619",
"0.6516955",
"0.6498773",
"0.6481392",
"0.64763385",
"0.64518285",
"0.6347369",
"0.6296295",
"0.6269412",
"0.62465113",
"0.6237751",
"0.61916953",
"0.61603403",
"0.6143181",
"0.6117199",
"0.61056596",
"0.6093765",
"0.6090465",
"0.6089385",
"0.60882056",
"0.6084267",
"0.60782576",
"0.60771036",
"0.6071095",
"0.60572505",
"0.6015782",
"0.599508"
] | 0.803482 | 0 |
Calculate overlap among trajectories | def trajectory_overlap(gt_trajs, pred_traj):
max_overlap = 0
max_index = 0
for t, gt_traj in enumerate(gt_trajs):
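        # vIoU of the predicted subject/object tracks against this ground-truth pair;
        # the pair-level overlap is the weaker (min) of the two scores.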
s_viou = viou_sx(gt_traj['sub_traj'], gt_traj['duration'], pred_traj['sub_traj'], pred_traj['duration'])
o_viou = viou_sx(gt_traj['obj_traj'], gt_traj['duration'], pred_traj['obj_traj'], pred_traj['duration'])
so_viou = min(s_viou, o_viou)
if so_viou > max_overlap:
max_overlap = so_viou
max_index = t
return max_overlap, max_index | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def overlap_cost(track_a, track_b):\n return 1 - overlap(track_a.bbox, track_b.bbox)",
"def overlap_with(self, other):",
"def poverlap(t1, t2, size1, size2):\n x0 = t1[0]\n y0 = t1[1]\n x1 = t1[0] + size1[0]\n y1 = t1[1] + size1[1]\n\n x2 = t2[0]\n y2 = t2[1]\n x3 = t2[0] + size2[0]\n y3 = t2[1] + size2[1]\n \n ol = max(0, min(x1, x3) - max(x0, x2)) * max(0, min(y1, y3) - max(y0, y2))\n\n return ol / float(2*(size2[0]*size2[1]) - ol)",
"def findOverlap( columns, t, minOverlap ):\n for c in columns:\n c.setOverlap() # defaults to 0.0\n for s in c.getConnectedSynapses():\n c.setOverlap( c.getOverlap() + s.getSourcetInput( t ) )\n\n if c.getOverlap() < minOverlap:\n c.setOverlap()\n else:\n c.boostOverlap()",
"def listOfOverlappingTTPairs():\n listOfHalfModules = listOfTTHalfModules()\n ttmap = TTModulesMap_instance\n pairs = []\n regions = {'A':1, 'B':2, 'C':3}\n print \"Overlapping TT half modules:\"\n for hm1 in listOfHalfModules:\n for hm2 in listOfHalfModules:\n # they must be different\n if hm1 == hm2: continue\n # they must be both on top or both on bottom\n if locateTTHalfModule(hm1)[3] != locateTTHalfModule(hm2)[3]: continue\n # they must be on the same layer\n if locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0]: continue\n # avoid duplicates\n if (hm1, hm2) in pairs: continue\n if (hm2, hm1) in pairs: continue\n # they must be contiguous:\n if (locateTTHalfModule(hm1)[1] == locateTTHalfModule(hm2)[1]):\n if (abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) == 1):\n pairs.append( (hm1, hm2) )\n else:\n num1 = locateTTHalfModule(hm1)[2]\n num2 = locateTTHalfModule(hm2)[2]\n max1 = ttmap.numberOfModules[locateTTHalfModule(hm1)[0]]['Region'+locateTTHalfModule(hm1)[1]] - 1\n max2 = ttmap.numberOfModules[locateTTHalfModule(hm2)[0]]['Region'+locateTTHalfModule(hm2)[1]] - 1\n nreg1 = regions[locateTTHalfModule(hm1)[1]]\n nreg2 = regions[locateTTHalfModule(hm2)[1]]\n if ( (num1==max1 and num2==0 and nreg2-nreg1==1) or (num2==max2 and num1==0 and nreg1-nreg2==1) ):\n pairs.append( (hm1, hm2) )\n print '\\t', hm1, hm2\n ## - same region\n #if ((abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) != 1)\n # and (locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])): continue\n ## - or neighbouring region\n #elif not ((locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])\n # and ( ( (ttmap.numberOfModules[locateTTHalfModule(hm1)[0]] == locateTTHalfModule(hm1)[2]+1 )\n # and (locateTTHalfModule(hm2)[2] == 0) )\n # or ( (ttmap.numberOfModules[locateTTHalfModule(hm2)[0]] == locateTTHalfModule(hm2)[2]+1 )\n # and (locateTTHalfModule(hm1)[2] == 0) ) ) ): continue\n ## append to list of pairs\n #pairs.append( (hm1, hm2) )\n print\n return pairs",
"def getOverlap(self):\n return 0.5",
"def overlap(table1, table2):\n out = np.zeros(np.size(table1, axis=0), dtype='bool')\n for i in range(np.size(table1, axis=0)):\n s1_s2 = table1[i, 0] < table2[:, 0] \n s1_e2 = table1[i, 0] <= table2[:, 1]\n e1_s2 = table1[i, 1] < table2[:, 0]\n e1_e2 = table1[i, 1] < table2[:, 1]\n # no overlap occurs when all four parameters above either == 0 or 1\n sum_params = np.sum(np.array([s1_s2, s1_e2, e1_s2, e1_e2]), axis=0)\n olap = (sum_params == 1) | (sum_params == 2) | (sum_params == 3)\n out[i] = np.any(olap)\n return out",
"def overlap(t1start, t1end, t2start, t2end):\n\n return (t1start <= t2start <= t1end) or (t2start <= t1start <= t2end)",
"def overlap(list1,list2):\n \n coord=[]\n for pos1 in list1:\n #print 'pos in list1 is', pos1\n coord.append(('S',int(pos1.split('-')[0]), 'l1'))\n #print 'S is ', pos1.split('-')[0]\n coord.append(('E',int(pos1.split('-')[1]),'l1'))\n #print 'E is ', pos1.split('-')[1]\n #print coord \n for pos2 in list2:\n #print 'pos in list2 is', pos2\n coord.append(('S',int(pos2.split('-')[0]),'l2'))\n #print 'S is ', pos2.split('-')[0]\n coord.append(('E', int(pos2.split('-')[1]),'l2'))\n #print 'E is ', pos2.split('-')[1]\n #print coord\n \n coord.sort(key = lambda x : x[0], reverse = True)\n #print 'coord after first sort \\n', coord\n coord.sort(key = lambda x : x[1])\n #print 'coord after 2nd sort by number \\n', coord\n # PART 1: SEARCHES FOR OVERLAPS BETWEEN 2 HISTONE MARKS\n new_coord_list = [] #initialize new list to which to move all those that don't overlap\n #index = 0 #position in list \n spos=0 # start pos initialized \n ct=0\n ovl=[]\n for pos in coord:\n new_coord_list.append(pos)\n #print pos, 'doesn\\'t overlap'\n index = int(new_coord_list.index(pos)) \n if pos[0]=='S':\n ct+=1\n if ct==2:\n spos=pos[1]\n if pos[0]=='E':\n ct-=1\n if ct==1:\n if not spos==pos[1]:\n #print spos, '-', pos[1], 'overlap'\n ovl.append(('ovl', spos, pos[1])) # add to overlap vector the positions that overlap\n #print 'overlap found! :', [str(spos),str(pos[1]),'ovl']\n #print 'removing ', new_coord_list[index]\n del new_coord_list[index]\n #print 'removing', new_coord_list[index-1]\n del new_coord_list[index-1]\n \n # \n new_coord_list.sort(key = lambda x : x[0], reverse = True)\n start=0\n end = 0\n two_hist_away_from_cent_of_peak = 0\n two_hist_away_list = []\n for nc_pos in new_coord_list:\n if nc_pos[0]=='S':\n if (start<=two_hist_away_from_cent_of_peak) and (two_hist_away_from_cent_of_peak !=0) and (end!=0): \n #if center_of_peak <= two_hist_away_from_cent_of_peak and (two_hist_away_from_cent_of_peak !=0):\n two_hist_away_list.append('-'.join([str(start),str(end), 'tha']))\n start= nc_pos[1]\n if nc_pos[0]=='E':\n end = nc_pos[1]\n center_of_peak= (start+nc_pos[1])/2\n two_hist_away_from_cent_of_peak = center_of_peak + 300\n # print 'new_coord_list: ', new_coord_list\n return ovl, new_coord_list",
"def pred_overlap(t, h):\n a_set = set(get_pred(t))\n b_set = set(get_pred(h))\n return len(a_set&b_set)/float(len(a_set|b_set))",
"def overlap(t1, t2):\n t1 = dict(min=np.min(t1), max=np.max(t1))\n t2 = dict(min=np.min(t2), max=np.max(t2))\n for t in (t1, t2):\n t['dur'] = t['max'] - t['min']\n\n # Ensure t1 min < t2 min\n if t2['min'] < t1['min']:\n print('t2 starts earlier')\n t1, t2 = t2, t1\n \n # var names wrt t2\n min_inside = t2['min'] >= t1['min'] and t2['min'] <= t1['max']\n max_inside = t2['max'] <= t1['max']\n if min_inside and max_inside:\n # t2 completely contained by t1\n return (t2['min'], t2['max'])\n elif min_inside:\n # t2 partially contained by t1\n return (t2['min'], t1['max'])\n else:\n # no overlap\n return (None, None)",
"def calculate_overlaps(drives, dist_tol, time_tol):\n \n for i1 in range(len(drives)-1):\n d1 = drives[i1]\n \n for i2 in range(i1+1, len(drives)):\n d2 = drives[i2]\n \n #stop trying if d1 ends more than time_tol before d2 starts\n #note that drives are chronologically ordered\n if d2.coords[0].time - d1.coords[-1].time > time_tol:\n break\n \n overlap = ol.compute_overlap(d1, d2, dist_tol, time_tol)\n if overlap:\n ol1 = ol.Overlap(d1, d2, overlap[0], overlap[1])\n d1.append_overlap(ol1)\n ol2 = ol.Overlap(d2, d1, overlap[2], overlap[3])\n d2.append_overlap(ol2)",
"def overlap(x,y):\n if (x[0]<=y[-1] and x[-1]>y[0]) or (y[0]<=x[-1] and y[-1]>x[0]):\n return 1\n else: return 0",
"def cal_overlaps(boxes1, boxes2):\n area1 = (boxes1[:, 0] - boxes1[:, 2]) * (boxes1[:, 1] - boxes1[:, 3]) # (Nsample, 1)\n area2 = (boxes2[:, 0] - boxes2[:, 2]) * (boxes2[:, 1] - boxes2[:, 3]) # (Msample, 1)\n\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) # (Nsample, Msample)\n\n # calculate the intersection of boxes1(anchor) and boxes2(GT box)\n for i in range(boxes1.shape[0]):\n overlaps[i][:] = cal_iou(boxes1[i], area1[i], boxes2, area2)\n\n return overlaps",
"def test_compute_overlap(self):\n # box1 contained in box2\n box1 = ((1, 2), (1, 2), (1, 2))\n box2 = ((1, 3), (1, 3), (1, 3))\n mapping = {box1: [1, 2, 3, 4], box2: [1, 2, 3, 4, 5]}\n # box1 in box2, so complete overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box1, box2), 1)\n # 4/5 atoms in box2 in box1, so 80 % overlap\n np.testing.assert_almost_equal(\n dc.dock.binding_pocket.compute_overlap(mapping, box2, box1), .8)",
"def calc_overlap(self, start, stop):\n\n overlaps = []\n for s in self.map:\n e = self.map[s]\n if s >= start or s <= stop:\n # We found an overlap\n if e <= stop:\n overlaps.append({\"start\": s, \"stop\": e})\n else:\n overlaps.append({\"start\": s, \"stop\": stop})\n elif e >= start or e <= stop:\n if s >= start:\n overlaps.append({\"start\": s, \"stop\": e})\n else:\n overlaps.append({\"start\": start, \"stop\": e})\n return overlaps",
"def get_overlap(self, transposon):\n return max(0, min(self.last-transposon.first,\n transposon.last-self.first,\n len(self), len(transposon)))",
"def overlap(line1, line2):\n\tx1, x2 = line1\n\tx3, x4 = line2\n\tonLeft = min(x1, x2) <= min(x3, x4)\n\tif onLeft:\n\t\treturn max(max((x1, x2)) - min((x3, x4)), 0) > 0\n\treturn max(max((x3, x4)) - min((x1, x2)),0) > 0",
"def overlap(component1, component2):\n if component1[0].start <= component2[0].stop and component2[0].start <= component1[0].stop:\n if component1[1].start <= component2[1].stop and component2[1].start <= component1[1].stop:\n return True\n return False",
"def get_overlap(a, b):\n return max(0, min(a[1], b[1]) - max(a[0], b[0]))",
"def count_overlap(self, time, other_object, other_time):\n ti = np.where(time == self.times)[0][0]\n ma = np.where(self.masks[ti].ravel() == 1)\n oti = np.where(other_time == other_object.times)[0]\n obj_coords = np.zeros(self.masks[ti].sum(), dtype=[('x', int), ('y', int)])\n other_obj_coords = np.zeros(other_object.masks[oti].sum(), dtype=[('x', int), ('y', int)])\n obj_coords['x'] = self.i[ti].ravel()[ma]\n obj_coords['y'] = self.j[ti].ravel()[ma]\n other_obj_coords['x'] = other_object.i[oti][other_object.masks[oti] == 1]\n other_obj_coords['y'] = other_object.j[oti][other_object.masks[oti] == 1]\n return float(np.intersect1d(obj_coords,\n other_obj_coords).size) / np.maximum(self.masks[ti].sum(),\n other_object.masks[oti].sum())",
"def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps",
"def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps",
"def compute_overlaps(boxes1, boxes2):\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)\n return overlaps",
"def overlap(path1, path2):\n DataL1 = BedTool(path1).sort()\n DataL2 = BedTool(path2).sort()\n overlap = DataL1.intersect(DataL2, wao=True)\n Overlap_df = overlap.to_dataframe()\n Strand1 = list(Overlap_df.iloc[:, 5])\n Strand2 = list(Overlap_df.iloc[:, 11])\n p_p, m_m, p_m, m_p, same_strand, opposite_strand, convergent, divergent = orientation(Strand1, Strand2)\n return p_p, m_m, p_m, m_p, same_strand, opposite_strand, convergent, divergent",
"def compute_overlap(self, skymap1, skymap2, single_skymap1, single_skymap2):\n from ligo.skymap.postprocess.crossmatch import crossmatch\n from astropy.coordinates import SkyCoord\n ra, dec = self.get_ra_dec_from_skymap(single_skymap1)\n coord = SkyCoord(ra, dec, unit=\"rad\")\n result = crossmatch(skymap2, coord)\n searched_prob_1 = np.min([result.searched_prob, 1.0])\n ra, dec = self.get_ra_dec_from_skymap(single_skymap2)\n coord = SkyCoord(ra, dec, unit=\"rad\")\n result = crossmatch(skymap1, coord)\n searched_prob_2 = np.min([result.searched_prob, 1.0])\n return np.max([1-searched_prob_1, 1-searched_prob_2])",
"def define_overlap_operations(self):\n self._d_i = lambda q:np.roll(q,-1,axis=-1) - q\n self._d_j = lambda q:np.roll(q,-1,axis=-2) - q",
"def overlap(self, *args, type='bbox'):\n return self.phy2abs.overlap(*args, type=type)",
"def check_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_overlaps - something went wrong... no data?\")\n pass",
"def create_overlap_metric(anchor_boxes):\n y, x, h, w = np.transpose(anchor_boxes)\n ab_area = w * h\n y0 = y - h // 2\n x0 = x - w // 2\n y1 = y + h // 2\n x1 = x + w // 2\n\n def overlap(gt_boxes):\n overlaps = []\n for gt_box in gt_boxes:\n gt_y0, gt_x0, gt_y1, gt_x1 = gt_box\n int_y0 = np.maximum(gt_y0, y0)\n int_x0 = np.maximum(gt_x0, x0)\n int_y1 = np.minimum(gt_y1, y1)\n int_x1 = np.minimum(gt_x1, x1)\n int_area = np.maximum(0, int_x1 - int_x0) * np.maximum(0, int_y1 - int_y0)\n overlaps.append(int_area / ab_area)\n overlaps = np.transpose(overlaps)\n gt_indices = np.argmax(overlaps, axis=1)\n overlaps = np.squeeze(np.take_along_axis(overlaps, gt_indices[:, np.newaxis], axis=1))\n gt_boxes = np.take(gt_boxes, gt_indices, axis=0)\n return overlaps, gt_boxes\n return overlap"
] | [
"0.70222414",
"0.6725737",
"0.66626245",
"0.6478671",
"0.6425438",
"0.6360582",
"0.63547766",
"0.63308674",
"0.63305753",
"0.6300792",
"0.6285758",
"0.6278362",
"0.6266364",
"0.62542456",
"0.62484795",
"0.6247598",
"0.614467",
"0.613606",
"0.61254627",
"0.6118002",
"0.6102571",
"0.59964085",
"0.59964085",
"0.59964085",
"0.5995658",
"0.5987914",
"0.59875274",
"0.5986674",
"0.5967984",
"0.59564435"
] | 0.68951005 | 1 |
Decodes a single string to a list of strings. | def decode(self, s):
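        # chr(257) is presumably the delimiter used by the matching encode and is
        # assumed never to occur in the stored strings; "null" marks an empty list.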
if s == "null": return []
return s.split(chr(257)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decode(self, s):\n lststr = s.split(',')\n if s=='': return []\n rst = []\n for i in range(len(lststr)):\n rst.append(lststr[i])\n return rst",
"def parse_string_list(data):\n txt = data.decode()\n x = ast.literal_eval(txt)\n return x",
"def _decode_list(data: BencodedString) -> list:\n result_list = []\n data.del_prefix(1)\n\n while True:\n if data.bytes:\n if data.bytes[0] != END_MARKER:\n result_list.append(_decode(data))\n else:\n data.del_prefix(1)\n break\n else:\n raise ValueError(\n \"Cannot decode a list, reached end of the bencoded string \"\n \"before the end marker was found. Most likely the bencoded \"\n \"string is incomplete or incorrect.\"\n )\n\n return result_list",
"def _strings_to_list(one_or_more_strings):\n if isinstance(one_or_more_strings, str):\n return [one_or_more_strings]\n else:\n return list(one_or_more_strings)",
"def decode(self, s):\n res = []\n i, j, length = 0, 0, len(s)\n while i < length:\n if s[j] == ':':\n num = int(s[i:j])\n res.append('' + s[j+1:j+1+num])\n i = j+1+num\n j = j+1+num\n else:\n j+=1\n return res",
"def str2list(input):\n if isinstance(input, str):\n return [input]\n\n else:\n return input",
"def string_to_list(string):\r\n al_dict = {'a':0, 'b':1, 'c':2, 'd':3, 'e':4, 'f':5, 'g':6, 'h':7, 'i':8, \r\n 'j':9, 'k':10, 'l':11, 'm':12, 'n':13, 'o':14, 'p':15, 'q':16, \r\n 'r':17, 's':18, 't':19, 'u':20, 'v':21, 'w':22, 'x':23, 'y':24, \r\n 'z':25}\r\n \r\n out = list(string)\r\n \r\n for i in range(len(string)):\r\n out[i] = al_dict[out[i]]\r\n \r\n return out",
"def str2list(string):\n return [string[i:i + 2] for i in range(0, len(string), 2)]",
"def string_to_list(string: str, sep: str):\n return string.strip(\"][\").split(sep)",
"def string_list(s):\n\n if not isinstance(s, str):\n raise ValueError(f\"Not a string: {s!r}\")\n return [p for p in [part.strip() for part in s.split(\",\")] if p]",
"def test_string_to_list_string(self):\n assert_equals(\n str_to_list('a, b, c'),\n ['a', 'b', 'c']\n )",
"def _convert_str_to_list(cls, v: Union[List[str], str]) -> List[str]:\n if isinstance(v, str):\n return v.split(\",\")\n return v # cov: ignore",
"def test_string_to_list_string(self):\n\n assert_equals(\n str_to_list('a, b, c'),\n ['a', 'b', 'c']\n )",
"def decode(self, s):\n i = 0\n strs = []\n while i < len(s):\n l = int(s[i:i+8], 16)\n strs.append(s[i+8:i+8+l])\n i += 8+l\n return strs",
"def from_str(cls, string):\n # If quotes are found, parse it as a Python string literal after adding\n # brackets around\n if '\"' in string or \"'\" in string:\n string = '[' + string + ']'\n l = ast.literal_eval(string)\n return [str(x) for x in l]\n # Otherwise, just split on commas\n else:\n return string.split(',')",
"def explode(_string):\n if not _string or not isinstance(_string, str):\n return _string\n else:\n return list(_string)",
"def convert_string_to_list(string_val):\n result_list = []\n\n list_string = string_val.split(',')\n for val in list_string:\n val = str(val.strip())\n val = val.replace(\"(\", \"\")\n val = val.replace(\")\", \"\")\n val = val.replace(\"L\", \"\")\n val = val.replace(\"[\", \"\")\n val = val.replace(\"]\", \"\")\n if val not in (\"\", \"None\"):\n result_list.append(int(val))\n\n return result_list",
"def string_to_list(s):\n return list(filter(lambda x: x, s.strip().split(' ')))",
"def decode_chain_list(in_bytes):\n bstrings = numpy.frombuffer(in_bytes, numpy.dtype('S' + str(mmtf.utils.constants.CHAIN_LEN)))\n return [s.decode(\"ascii\").strip(mmtf.utils.constants.NULL_BYTE) for s in bstrings]",
"def cs_string_to_typed_list(cs_str: str, sep=\",\", type_conv_fcn=float):\n try:\n list_strings = cs_str.split(sep)\n if all(map(lambda s: s.strip() == '', cs_str.split(sep))):\n # we are getting a list of empty strings we return [] and do not print warning\n return []\n return list([type_conv_fcn(x) for x in list_strings])\n except:\n warnings.warn('Could not convert string {s} to a typed list'.format(s=cs_str))\n return []",
"def __ui_convert_ids_string_to_list(string_of_ids):\n if string_of_ids == \"\":\n return []\n string_of_ids = string_of_ids.strip()\n string_of_ids = string_of_ids.replace(\",\", \" \")\n\n done = False\n while not done:\n if string_of_ids.find(\" \") == -1:\n done = True\n else:\n string_of_ids = string_of_ids.replace(\" \", \" \")\n list_of_ids = string_of_ids.split(\" \")\n for id_index in range(len(list_of_ids)):\n list_of_ids[id_index] = int(list_of_ids[id_index])\n return list_of_ids",
"def format_string_to_list(self, avi_string):\n\n repls = ('[', ''), (']', ''), (\"'\", \"\")\n avi_string = reduce(lambda a, kv: a.replace(*kv), repls, avi_string)\n return avi_string.split(',')",
"def decode_list(data):\n return_value = []\n for item in data:\n if isinstance(item, unicode):\n item = item.encode('utf-8')\n elif isinstance(item, list):\n item = decode_list(item)\n elif isinstance(item, dict):\n item = decode_dict(item)\n return_value.append(item)\n return return_value",
"def test_string_to_list(self):\n self.assertEqual([1, 2, 3], string_to_list('1-3'))\n self.assertEqual([6, 7, 8, 9, 10], string_to_list('6-10'))",
"def decode(string,root):\n ## split the string into a list\n ## then copy the elements of the list one by one.\n answer = []\n clist = list( string )\n ## start from root\n currentnode = root\n for c in clist:\n if ( c=='\\n' ): continue ## special case for newline characters\n assert ( c == '0' )or( c == '1')\n currentnode = currentnode[int(c)]\n if isinstance( currentnode , str ) :\n answer.append( currentnode )\n currentnode = root\n pass\n assert (currentnode == root) ## if this is not true then we have run out of characters and are half-way through a codeword\n return answer",
"def decode1(s):\n rv = []\n idx = 0\n item = ''\n while True:\n try:\n if s[idx:idx+2] == '+,':\n rv.append(item)\n item = ''\n idx += 2\n elif s[idx:idx+2] == '++':\n item += '+'\n idx += 2\n else:\n item += s[idx]\n idx += 1\n except IndexError:\n rv.append(item)\n break\n return rv",
"def decode_list(as_bytes: typing.List[int], inner_decoder: typing.Callable) -> list:\n raise NotImplementedError()",
"def strToStrList(x):\n if type(x)==str:\n return x[2:-2].split(\"', '\")",
"def string_to_list(value: str, intify: bool = False) -> Union[List[str], List[int]]:\n if not value:\n return [] # type: ignore[return-value]\n if value.startswith(\"[\") and value.endswith(\"]\"):\n value = value[1:-1]\n result = []\n for p in value.split(\",\"):\n p = p.strip()\n if p.startswith(\"'\") and p.endswith(\"'\"):\n p = p[1:-1]\n if p.startswith('\"') and p.endswith('\"'):\n p = p[1:-1]\n p = p.strip()\n if intify:\n p = int(p) # type: ignore[assignment]\n result.append(p)\n return result",
"def _string_to_list(self, string):\n try:\n new_value = literal_eval(string)\n if isinstance(new_value, tuple):\n new_value = list(new_value)\n elif not isinstance(new_value, list):\n raise SyntaxError\n if not all(isinstance(i, int) for i in new_value):\n raise SyntaxError\n except (SyntaxError, ValueError):\n raise InvalidFieldValueError(\n f\"Value of field {self.field_nickname} must be a list of integers, e.g. [1, 2, 3, ...]\"\n )\n return new_value"
] | [
"0.7429849",
"0.67665714",
"0.6603133",
"0.6484686",
"0.64099807",
"0.63982195",
"0.63764083",
"0.63426924",
"0.63409954",
"0.6335773",
"0.62704915",
"0.62629604",
"0.6239383",
"0.6189524",
"0.6171236",
"0.6159162",
"0.6110307",
"0.6100137",
"0.6022035",
"0.60078084",
"0.59908146",
"0.5987798",
"0.5965413",
"0.5963164",
"0.59271646",
"0.5915528",
"0.5890541",
"0.5882655",
"0.588065",
"0.5850877"
] | 0.7317169 | 1 |
sort list of objects randomly then update everything in this world | def update(self, dt):
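        # shuffle so the per-frame update order does not consistently favour any object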
random.shuffle(self.gameObjects)
for item in self.gameObjects:
description = item.update(dt) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dispatch_items_randomly(self, level):\n for item in self.list:\n item.position = Item.define_random_position(item, level)",
"def populate_objects(self):\n if not self._random_object: # only populate the first object\n U.spawn_object(self.object_list[0], self.object_initial_position)\n else:\n rand_x = np.random.uniform(low=-0.35, high=0.35, size=(len(self.object_list),))\n rand_y = np.random.uniform(low=2.2, high=2.45, size=(len(self.object_list),))\n for idx, obj in enumerate(self.object_list):\n box_pos = Pose(position=Point(x=rand_x[idx],\n y=rand_y[idx],\n z=1.05))\n U.spawn_object(obj, box_pos)",
"def _randomly_place_objects(self, urdfList):\n\n # Randomize positions of each object urdf.\n objectUids = []\n for urdf_name in urdfList:\n xpos = 0.35 + self._blockRandom * random.random()\n ypos = 0.28 + self._blockRandom * (random.random() - .5)\n angle = np.pi / 2 + self._blockRandom * np.pi * random.random()\n orn = p.getQuaternionFromEuler([0, 0, angle])\n urdf_path = os.path.join(self._urdfRoot, urdf_name)\n uid = p.loadURDF(urdf_path, [xpos, ypos, .05],\n [orn[0], orn[1], orn[2], orn[3]])\n objectUids.append(uid)\n # Let each object fall to the tray individual, to prevent object\n # intersection.\n for _ in range(300):\n p.stepSimulation()\n return objectUids",
"def update(self):\n for object in reversed(self.addList):\n self.objects.append(object)\n self.addList.remove(object)\n\n for object in reversed(self.removeList):\n self.objects.remove(object)\n self.removeList.remove(object)\n\n self.objects = sorted(self.objects,key=priority)\n\n for object in self.objects:\n object.update()",
"def randomize_herbs(self):\n random.shuffle(self.herbivores)",
"def place_objects(self):\n placed_objects = []\n index = 0\n np.random.seed(300)\n # place objects by rejection sampling\n for _, obj_mjcf in self.mujoco_objects.items():\n horizontal_radius = obj_mjcf.get_horizontal_radius()\n bottom_offset = obj_mjcf.get_bottom_offset()\n success = False\n for _ in range(5000): # 5000 retries\n bin_x_half = self.bin_size[0] / 2 - horizontal_radius - 0.05\n bin_y_half = self.bin_size[1] / 2 - horizontal_radius - 0.05\n object_x = np.random.uniform(high=bin_x_half, low=-bin_x_half)\n object_y = np.random.uniform(high=bin_y_half, low=-bin_y_half)\n\n # make sure objects do not overlap\n object_xy = np.array([object_x, object_y, 0])\n pos = self.bin_offset - bottom_offset + object_xy\n location_valid = True\n for pos2, r in placed_objects:\n dist = np.linalg.norm(pos[:2] - pos2[:2], np.inf)\n if dist <= r + horizontal_radius:\n location_valid = False\n break\n\n # place the object\n if location_valid:\n # add object to the position\n placed_objects.append((pos, horizontal_radius))\n self.objects[index].set(\"pos\", array_to_string(pos))\n # random z-rotation\n quat = self.sample_quat()\n self.objects[index].set(\"quat\", array_to_string(quat))\n success = True\n print('object {} in pick place task: pos:{}, quat:{}'.format(index, pos, quat))\n break\n\n # raise error if all objects cannot be placed after maximum retries\n if not success:\n raise RandomizationError(\"Cannot place all objects in the bins\")\n index += 1",
"def Shuffle(self):\r\n random.shuffle(self.cards_list)",
"def test_shuffle(self):\n random.shuffle(self.liste)\n self.liste.sort()\n self.assertEqual(self.liste, list(range(10)))",
"def test_shuffle(self):\n random.shuffle(self.liste)\n self.liste.sort()\n self.assertEqual(self.liste, list(range(10)))",
"def totem_random():\n random_head()\n random_head()\n random_head()",
"def detect_and_shuffle(self, *args):\n\n self._update_suspicion()\n self.remove_attackers()\n self.drop_buckets()\n buckets = self.get_buckets_to_sort()\n if len(buckets) > 0:\n self._reorder_buckets(buckets)\n self._sort_buckets(buckets)",
"def put_items(self,*maplist):\n self.position_x = random.randint(0, (len(maplist) - 1))\n self.position_y = random.randint(1, (len(maplist[0]) - 2))\n\n while maplist[self.position_y][self.position_x] == \"x\":\n self.position_x = random.randint(0, (len(maplist) - 1))\n self.position_y = random.randint(1, (len(maplist[0]) - 2))",
"def reindex_graphics(self):\n for obj in self.context.static_objects:\n self.canvas.children.remove(obj.widget.canvas)\n # fill _objects_z_index\n _objects_z_index = {}\n for obj in self.context.static_objects:\n y = obj.widget.pos[1]\n if not y in _objects_z_index:\n _objects_z_index[y] = []\n _objects_z_index[y].append(obj)\n _keys = _objects_z_index.keys()\n _keys.sort()\n _keys.reverse()\n for k in _keys:\n objs = _objects_z_index[k]\n for obj in objs:\n self.canvas.add(obj.widget.canvas)",
"def randomize(self):\n if self.randomize_players is True:\n random.shuffle(self.player_field)",
"def shuffle(self):\n self.logger.debug('Shuffling wallpaper queue')\n\n random.shuffle(self.wallpapers)\n self.index = 0",
"def shuffle(self):\n for i in range(10):\n random.shuffle(self.set)",
"def random_pos(self, ):\n self.pos_item['needle'] = self.shuffle_pos()\n self.pos_item['ether'] = self.shuffle_pos()\n self.pos_item['tube'] = self.shuffle_pos()",
"def shuffle(self):\n\n args = list(self)\n random.shuffle(args)\n\n self.clear()\n super(DogeDeque, self).__init__(args)",
"def generate_lists(self):\n scenelist = self.scenelist\n newbies = self.newbies\n claimlist = [ob for ob in self.claimlist if ob not in newbies]\n choices = self.valid_scene_choices\n num_scenes = self.NUM_SCENES - (len(claimlist) + len(scenelist))\n if num_scenes > 0:\n try:\n scenelist.extend(random.sample(choices, num_scenes))\n except ValueError:\n scenelist.extend(choices)\n scenelist = sorted(scenelist, key=lambda x: x.key.capitalize())\n self.caller.player_ob.db.random_scenelist = scenelist",
"def organizeAndUpdate(self): \r\n for point in self.points:\r\n point.organize()\r\n point.update()",
"def live(self):\n\t\t#random assignment of fittnes for now\n\t\tfor chrom in self.chromosomes:\n\t\t\tchrom.strength = random.random()\n\t\tself.chromosomes.sort(key=lambda chromosomes: chromosomes.strength, reverse = True)\n\n\t\tself.bestChromosomes = self.chromosomes[0:2]",
"def Generate_Random( self ):\n print( 'Generating Random coordinates' )\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n self.Data.Stand[s].Tree[t].X = random.uniform( 0, 208.71 )\n self.Data.Stand[s].Tree[t].Y = random.uniform( 0, 208.71 )",
"def update(entity_list):\n for entity in entity_list:\n # Only moveable entities should have the dest field\n if \"dest\" in entity:\n # If no destination, pick a new random one\n if entity[\"dest\"] == None:\n entity[\"dest\"] = random_pos(100, 100)\n \n # Move one step towards destination\n cpos = entity[\"position\"]\n dest = entity[\"dest\"]\n entity[\"position\"] = move(cpos, dest) \n\n # Clear destination if it has been reached\n if entity[\"dest\"] == entity[\"position\"]:\n entity[\"dest\"] = None",
"def randomize_in_place(list1, list2, init=0):\n np.random.seed(seed=init)\n np.random.shuffle(list1)\n np.random.seed(seed=init)\n np.random.shuffle(list2)",
"def main():\n\n import random\n print( \"*** Initializing new list ... done. \" )\n print( \"*** Filling in 20 random values ... done.\" )\n\n l = []\n\n for i in range( 20 ):\n l.append( random.randint( 0, 100 ))\n\n print( \" ### Unsorted list: \" )\n print( l )\n\n print( \"\\n*** Sorting the list with Bubble Sort ... done.\" )\n bubbleSort( l )\n\n print( \" ### Sorted list: \")\n print( l )",
"def sort(self):\r\n print(f'robot is holding nothing ({self._item})')\r\n # print(l)\r\n SortingRobot.swap_item(self)\r\n SortingRobot.move_right(self)\r\n print(f'SECOND FOR LOOP Robot picked up item {self._item} and is now moving right')\r\n SortingRobot.set_light_off(self)\r\n for i in range(101):\r\n while SortingRobot.can_move_right(self) == True:\r\n SortingRobot.set_light_on(self) # Holding None\r\n print(f'WHILE LOOP robot is holding {self._item}')\r\n if SortingRobot.compare_item(self) == None:\r\n SortingRobot.swap_item(self)\r\n # SortingRobot.set_light_off(self) # Holding None\r\n SortingRobot.move_right(self)\r\n print(f'WHILE LOOP TOP NONE IF STATEMENT robot is holding {self._item}')\r\n if (SortingRobot.compare_item(self) == None) and (SortingRobot.can_move_left(self) == False): #\r\n break\r\n # print(l)\r\n if (SortingRobot.compare_item(self) == -1) and (SortingRobot.can_move_right(self) == True): # held item is lower than table\r\n print(f'item held is lower so swapping (current item = {self._item}) and moving right')\r\n SortingRobot.swap_item(self)\r\n SortingRobot.move_right(self)\r\n print(f'new item held is {self._item}')\r\n print(f'1curent position = {self._position}')\r\n # SortingRobot.move_right(self)\r\n # print(f'curent position = {self._position}')\r\n # SortingRobot.move_right(self)\r\n # print(f'curent position = {self._position}')\r\n # print(l[self._position])\r\n # print(l)\r\n if (SortingRobot.compare_item(self) == 1) and (SortingRobot.can_move_right(self) == False):\r\n SortingRobot.swap_item(self)\r\n print(f'2curent position = {self._position}')\r\n if SortingRobot.compare_item(self) == 0: # held item is equal to table\r\n print(f'3curent position = {self._position}')\r\n SortingRobot.move_right(self)\r\n SortingRobot.swap_item(self)\r\n pass\r\n if SortingRobot.compare_item(self) == 1 and (SortingRobot.can_move_right(self) == True): # held item is higher than table\r\n SortingRobot.move_right(self)\r\n print(f'4curent position = {self._position}')\r\n # if (SortingRobot.compare_item(self) == None) and (SortingRobot.light_is_on(self) == True): # either held or table is None\r\n # SortingRobot.swap_item(self)\r\n # SortingRobot.set_light_off(self)\r\n # print(f'5curent position = {self._position}')\r\n # if SortingRobot.can_move_right(self) == True:\r\n # SortingRobot.move_right(self)\r\n # print(f'FINAL WHILE LOOP robot hit end and is moving left one by one and swapping items '\r\n # f'one by one (current item = {self._item})')\r\n # print(f'6curent position = {self._position}')\r\n # # print(l)\r\n if SortingRobot.can_move_right(self) == False:\r\n print(f'7curent position = {self._position}')\r\n while SortingRobot.can_move_left(self) == True:\r\n # print('item held is lower but hit end of list so ')\r\n SortingRobot.move_left(self)\r\n SortingRobot.swap_item(self)\r\n print(f'8curent position = {self._position}')\r\n # print(f'Ending List = {l}')\r\n if (SortingRobot.compare_item(self) == -1) and (\r\n SortingRobot.can_move_right(self) == False): # held item is lower than table\r\n print(f'9curent position = {self._position}')\r\n while SortingRobot.can_move_left(self) == True:\r\n print('item held is lower but hit end of list so moving left one by one')\r\n SortingRobot.move_left(self)\r\n SortingRobot.swap_item(self)\r\n print(f'Robot at end of list; continues to hold item {self._item}; moving back to start now')\r\n print(f'10curent position = {self._position}')\r\n if SortingRobot.compare_item(self) == 1 and (\r\n 
SortingRobot.can_move_right(self) == False): # held item is higher than table\r\n print(f'11curent position = {self._position}')\r\n SortingRobot.swap_item(self)\r\n while SortingRobot.can_move_left(self) == True:\r\n # print('item held is lower but hit end of list so ')\r\n SortingRobot.swap_item(self)\r\n SortingRobot.move_left(self)\r\n print(f'12curent position = {self._position}')",
"def shuffle(self):\n reorder(self.cards) #importing shuffle as reorder",
"def sort(self):\n self.model_list.sort()\n for model in self.model_list:\n model.sort()",
"def bogosort(to_sort):\n # Be sure to sort the list at each pass in the while loop to make it extra\n # inefficient!\n while sorted(to_sort) != to_sort:\n shuffle(to_sort)",
"def randomize(self):\n self.size = randint(1,5)\n self.resource = randint(1,3)\n self.temperature = randint(20, 1000)\n self.gravity = randint(0, 10)\n for key in self.get_atmosphere().keys():\n setattr(self, key, randint(0, 5))\n for attribute_count in range(randint(0, 3)):\n pa = PlanetaryAttribute.objects.order_by('?')[0]\n self.attributes.add(pa)"
] | [
"0.6626622",
"0.6585694",
"0.6483848",
"0.6469369",
"0.6035206",
"0.6019736",
"0.60169244",
"0.5968468",
"0.5968468",
"0.5951621",
"0.5919026",
"0.59153605",
"0.5901477",
"0.5880968",
"0.58774793",
"0.5859918",
"0.5817854",
"0.58043724",
"0.5801076",
"0.5794008",
"0.5786912",
"0.57615674",
"0.5758612",
"0.57475036",
"0.57418615",
"0.57228833",
"0.5717533",
"0.5711719",
"0.5704576",
"0.5701705"
] | 0.6854891 | 0 |
add this to the world | def add_to_world(self, thing):
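        # hand the object its unique id plus a back-reference to this world, then track it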
thing.set_world_info(self.current_id, self)
self.gameObjects.append(thing)
self.current_id += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_world(self):\n raise NotImplementedError()",
"def __init__(self, world):\n self.__init__(world, ArrayList())",
"def update_world(self):\n pass",
"def __init__(self, world, x, y, direction):\n self.ID = world.__register__(x, y, direction)",
"def world(self):\n return World(self)",
"def updateWorld(self):\n pass",
"def create_the_world(cls):\n from muddery.server.mappings.element_set import ELEMENT\n world = ELEMENT(\"WORLD\")()\n world.setup_element(\"\")\n cls._world_data = world",
"def world(self, value):\n self.worlds[self.world_index] = value",
"def run_world(self):\n self.world_alive = True\n self.world_setable = False",
"def __init__(self, wink, opp):\n super().__init__(wink, opp)\n opp.data[DOMAIN][\"entities\"][\"scene\"].append(self)",
"def apply_to_world(self, world):\n # add the current obstacles\n for obstacle in self.current_obstacles:\n world.add_obstacle(obstacle)\n\n # program the robot supervisors\n for robot in world.robots:\n robot.supervisor.goal = self.current_goal[:]",
"def world(self) -> World:\n return World(self)",
"def save_world(self):\n pass",
"def add(self):\n pass",
"async def async_added_to_opp(self):\n self.opp.data[DOMAIN][\"entities\"][\"scene\"].append(self)",
"def setup(self):\n build_world.start_level(self)",
"def this_word(self):\n self.append = self.add_to_current_word",
"def __init___0(self, world, list_):\n self.world = world\n self.list_ = list_",
"def getWorld(self):\n return self.world",
"def _setup_world(self, taskname):\n self.x0 = self._hyperparams[\"x0\"]\n self._world = [gym.make(taskname)\n for _ in range(self._hyperparams['conditions'])]",
"def world(cls):\n try:\n return cls._world_data\n except AttributeError:\n cls.create_the_world()\n return cls._world_data",
"def add(self):\n\n self.scene.projs.add(self)\n self.scene.all.add(self.scene.projs)\n self.rotate()",
"def __init__(self):\n this = _sunpos.new_cLocation()\n try: self.this.append(this)\n except: self.this = this",
"def __add__(self, this):\n return self.add(this)",
"def build_world(self, width, height, entrance, agent, objects):\n env = WumpusEnvironment(width, height, entrance)\n if self.trace:\n agent = wumpus_environment.TraceAgent(agent)\n agent.register_environment(env)\n env.add_thing(agent, env.entrance)\n for (obj, loc) in objects:\n env.add_thing(obj, loc)\n print env.to_string()\n print self.objects \n return env",
"def registeredBy(self, world):\n self.world = world\n self._determineSuffix()\n self.short = \"\"\n self.short = self.shortName(3)",
"def add(self):\n self.inp.inputs.add(self)\n self.out.outputs.add(self)",
"def append(self, this):\n return self.add(this)",
"def add_site_to_context(self):\n g.current_site = self",
"def introduce(self):\n print(f\"Hi, I am {self.name}!\")"
] | [
"0.6970694",
"0.6964329",
"0.6806117",
"0.6598959",
"0.64999914",
"0.64821887",
"0.64505976",
"0.6299828",
"0.6281762",
"0.6269024",
"0.62657404",
"0.62023485",
"0.61991674",
"0.6084373",
"0.5940955",
"0.5922447",
"0.58853203",
"0.5820223",
"0.5784347",
"0.57677424",
"0.57647425",
"0.5750902",
"0.57429063",
"0.57279694",
"0.5717122",
"0.56737316",
"0.564446",
"0.560288",
"0.5600272",
"0.5594945"
] | 0.74709004 | 0 |
Merge data from an apdex metric object. | def merge_apdex_metric(self, metric):
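        # self acts as a 5-slot record:
        # [satisfying, tolerating, frustrating, min apdex_t seen, max apdex_t seen]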
self[0] += metric.satisfying
self[1] += metric.tolerating
self[2] += metric.frustrating
self[3] = ((self[0] or self[1] or self[2]) and
min(self[3], metric.apdex_t) or metric.apdex_t)
self[4] = max(self[4], metric.apdex_t) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)",
"def mergeAggregatedCsvData(self, contexts, obj, aggData1, aggData2):\n return aggData1 + aggData2",
"def _aggregate_log_values(self, source, dest):\n remove = []\n for key, item in source.items():\n if \"data\" not in item:\n # Assume it's a sub-group\n dest[key] = {}\n self._aggregate_log_values(item, dest[key])\n else:\n aggregator = self._get_aggregator_for_key(key, item['agg'])\n value = aggregator(item['data'])\n if item['precision'] is not None:\n value = round(value, item['precision'])\n dest[key] = value\n if item['scope'] == 'get':\n remove.append(key)\n for key in remove:\n del source[key]",
"def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n for metric in self.metrics:\n metric.update(data)\n\n return self",
"def _merge(acc: Dict[str, str], cur: Any) -> Dict[str, str]:\n parsed = _parse_feature(cur)\n acc[\"timestamp\"] = parsed[\"timestamp\"]\n acc[\"lat\"] = parsed[\"lat\"]\n acc[\"lon\"] = parsed[\"lon\"]\n key = parsed[\"property\"]\n val = parsed[\"value\"]\n\n acc[key] = val\n\n return acc",
"def merge_metric_stats(self, snapshot):\n\n if not self.__settings:\n return\n\n for key, other in six.iteritems(snapshot.__stats_table):\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)",
"def get_metric(self, metric, existing_dict=None):\n metric_key, metric_type, metric_name, metric_help = metric\n metric_dict = {\n 'name': metric_name,\n 'type': metric_type,\n 'help': metric_help,\n 'values': OrderedDict()\n }\n values = self.r.hgetall(metric_key) # new values\n # print \"values: %r\" % values\n metric_dict['values'] = values\n\n if existing_dict:\n # we're updating a metric we've already seen\n # print \"existing dict: %r\" % existing_dict\n for value in values:\n # print \"checking value: %r\" % value\n # value = json.loads(value)\n if value in existing_dict['values']:\n if metric_type == 'counter' or metric_type == 'histogram':\n # Combine the values if it's a counter or histogram\n # TODO: sort histogram buckets\n # TODO: append _bucket to histogram bucket names\n existing_dict['values'][value] = float(\n values[value]) + float(\n existing_dict['values'][value])\n elif metric_type == 'gauge':\n # use the last value we see for a gauge - # TODO: is there a better way? we could average it\n existing_dict['values'][value] = float(values[value])\n else:\n existing_dict['values'][value] = float(values[value])\n metric_dict['values'] = existing_dict['values']\n\n if metric_type == 'histogram':\n # json decode all of the labels\n samples = [json.loads(x, object_pairs_hook=OrderedDict) for x in metric_dict['values']]\n # we need to sort the values by the bucket labeled \"le\"\n sorted_keys = sorted(samples, key=lambda b: b['le'])\n # and then we need to store the values again json encoded\n vals = metric_dict['values']\n metric_dict['values'] = OrderedDict()\n for k in sorted_keys:\n kn = json.dumps(k, sort_keys=True)\n metric_dict['values'][kn] = vals[kn]\n\n return metric_dict",
"def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics",
"def get_metric_data(config, metric_list, metric_grouping, start_time, end_time, collected_data_map):\n\n def format_data_entry(json_data_entry):\n metric_name = json_data_entry.get('metric')\n host_name = json_data_entry.get('tags', {}).get('host') or 'unknownHost'\n dps = json_data_entry.get('dps', {})\n metric_value = None\n header_field = normalize_key(metric_name) + \"[\" + host_name + \"]:\" + str(\n get_grouping_id(metric_name, metric_grouping))\n mtime = 0\n for stime, val in dps.items():\n if int(stime) > mtime:\n metric_value = val\n mtime = int(stime)\n\n epoch = mtime * 1000\n\n if epoch in collected_data_map:\n timestamp_value_map = collected_data_map[epoch]\n else:\n timestamp_value_map = {}\n\n timestamp_value_map[header_field] = str(metric_value)\n collected_data_map[epoch] = timestamp_value_map\n\n json_data = {\n \"token\": config['OPENTSDB_TOKEN'],\n \"start\": start_time,\n \"end\": end_time,\n \"queries\": map(lambda m: {\n \"aggregator\": \"avg\",\n \"downsample\": \"1m-avg\",\n \"metric\": m.encode('ascii')\n }, metric_list)\n }\n\n url = config[\"OPENTSDB_URL\"] + \"/api/query\"\n response = requests.post(url, data=json.dumps(json_data))\n if response.status_code == 200:\n rawdata_list = response.json()\n logger.debug(\"Get metric data from opentsdb: \" + str(len(rawdata_list)))\n\n # format metric and save to collected_data_map\n map(lambda d: format_data_entry(d), rawdata_list)",
"def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset",
"def summerize_adapter_metrics(parsed_metrics: Dict[int, dict]) -> Dict[Tuple[str, str], dict]:\n\n summarized_metrics = {}\n for lane in parsed_metrics:\n # Iterate over all samples in lane\n summarized_metrics[lane] = summarized_metrics.get(lane, {})\n for value in parsed_metrics[lane].values():\n sample_id = value.get(\"Sample_ID\")\n summarized_metrics[lane][sample_id] = summarized_metrics[lane].get(sample_id, value)\n summarized_metrics[lane][sample_id][\n \"R\" + value.get(\"ReadNumber\") + \"_SampleBases\"\n ] = value.get(\"SampleBases\")\n\n return summarized_metrics",
"def _aggregate_metrics(metrics, aggfunc, base):\n return base.Struct(**_UNCOMPRESSED_METRICS)(\n left_side_bearing=aggfunc(_m.left_side_bearing for _m in metrics),\n right_side_bearing=aggfunc(_m.right_side_bearing for _m in metrics),\n character_width=aggfunc(_m.character_width for _m in metrics),\n character_ascent=aggfunc(_m.character_ascent for _m in metrics),\n character_descent=aggfunc(_m.character_descent for _m in metrics),\n character_attributes=0,\n )",
"def merge_time_metric(self, metric):\n\n self.merge_raw_time_metric(metric.duration, metric.exclusive)",
"def to_metric(self):\r\n if self.units != 'metric':\r\n self.units = 'metric'\r\n for statement in self.statements:\r\n statement.to_metric()\r\n for tool in iter(self.tools.values()):\r\n tool.to_metric()\r\n for primitive in self.primitives:\r\n primitive.to_metric()\r\n for hit in self.hits:\r\n hit.to_metric()",
"def aggregate_data(mts, feature, target):\r\n set_dict = dict()\r\n set_dict['mt'] = mts\r\n set_dict['feature'] = feature\r\n set_dict['target'] = target\r\n \r\n return set_dict",
"def compute_metrics(self, x, extra=None):\n if self.__metrics is None and extra is None:\n return None\n\n ret = {}\n if self.__metrics is not None:\n for m in self.__metrics:\n ret[m.name] = self._mdmetric(x, m)\n\n if extra is not None and extra.name not in ret:\n ret[extra.name] = self._mdmetric(x, extra)\n\n return ret",
"def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)",
"def mergeMetadata(self, obj, dom): \n self.update_semantics = 'merge'\n # create a metadata dict that has all the values from obj, overridden\n # by the current dom values.\n metadata = self.getModuleMetadata(obj, {})\n metadata.update(self.getMetadata(dom, METADATA_MAPPING))\n for oerdc_name, cnx_name in METADATA_MAPPING.items():\n if cnx_name in ['keywords',]:\n old_value = getattr(obj, cnx_name)\n if old_value:\n current_value = list(metadata.get(cnx_name, []))\n current_value.extend(old_value)\n metadata[cnx_name] = current_value\n if metadata:\n self.validate_metadata(metadata)\n metadata = self.fixEntities(metadata, ATTRIBUTES_TO_FIX)\n if ICollection.providedBy(obj):\n obj.collection_metadata(**metadata)\n elif IModule.providedBy(obj):\n obj.update_metadata(**metadata)\n self.updateRoles(obj, dom)\n obj.reindexObject(idxs=metadata.keys())",
"def metric_data(self, normalizer=None):\n\n if not self.__settings:\n return []\n\n result = []\n normalized_stats = {}\n\n # Metric Renaming and Re-Aggregation. After applying the metric\n # renaming rules, the metrics are re-aggregated to collapse the\n # metrics with same names after the renaming.\n\n if self.__settings.debug.log_raw_metric_data:\n _logger.info('Raw metric data for harvest of %r is %r.',\n self.__settings.app_name,\n list(six.iteritems(self.__stats_table)))\n\n if normalizer is not None:\n for key, value in six.iteritems(self.__stats_table):\n key = (normalizer(key[0])[0], key[1])\n stats = normalized_stats.get(key)\n if stats is None:\n normalized_stats[key] = copy.copy(value)\n else:\n stats.merge_stats(value)\n else:\n normalized_stats = self.__stats_table\n\n if self.__settings.debug.log_normalized_metric_data:\n _logger.info('Normalized metric data for harvest of %r is %r.',\n self.__settings.app_name,\n list(six.iteritems(normalized_stats)))\n\n for key, value in six.iteritems(normalized_stats):\n key = dict(name=key[0], scope=key[1])\n result.append((key, value))\n\n return result",
"def add_metrics(self, metric_dict: dict):\n self.metric_dict.update(metric_dict)",
"def _create_metric_sum(a, b):\n metric_sum = GridSearchClassificationMetrics()\n metric_sum.accuracy = a.accuracy + b.accuracy\n metric_sum.precision = a.precision + b.precision\n metric_sum.f_measure = a.f_measure + b.f_measure\n metric_sum.recall = a.recall + b.recall\n metric_sum.confusion_matrix = a.confusion_matrix + b.confusion_matrix\n return metric_sum",
"def record_apdex_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Note that because we are using a scope here of an empty string\n # we can potentially clash with an unscoped metric. Using None,\n # although it may help to keep them separate in the agent will\n # not make a difference to the data collector which treats None\n # as an empty string anyway.\n\n key = (metric.name, '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = ApdexStats(apdex_t=metric.apdex_t)\n self.__stats_table[key] = stats\n stats.merge_apdex_metric(metric)\n\n return key",
"def merge(self, obj):\n pass",
"def consolidate_other(self):\n record = self.db[self.args['cstats_table']].find_one({'type': 'client'})\n if not record:\n self.log.critical('Could not get the \"client\" key in the \"cstats_table\"')\n return\n for k in record.keys():\n if k in ['_id', 'type', 'stats']:\n continue\n self.other[k] = record[k]\n self.stats.update(record.get('stats', {}))",
"def convert(self, data, *args, **kwargs):\n\n # all of this is still quite ugly and verrrry specific...\n json_data = {}\n for hit in data[\"hits\"][\"hits\"]:\n # pprint(hit)\n\n # get the PQ\n pq = hit.get(\"_source\", {}).get(\"metadata\", {}).get(\"PanDAQueue\", None)\n if not pq:\n continue\n\n # get the list of all benchmark results\n latest_list = (\n hit.get(\"inner_hits\", {})\n .get(\"most_recent\", {})\n .get(\"hits\", {})\n .get(\"hits\", [])\n )\n if len(latest_list) == 0:\n continue\n\n # get the average of the latest benchmark results.\n # Only results not older than 7d, and a maximum of 50 results (whichever value is hit first).\n # If we have no values more recent than 7d, simply use the last available one (that PQ is probably not online anymore anyway)\n values = []\n for d in latest_list:\n date = datetime.datetime.strptime(\n d.get(\"_source\", {}).get(\"timestamp\", \"\"), \"%Y-%m-%dT%H:%M:%SZ\"\n )\n two_days_ago = datetime.datetime.now() - datetime.timedelta(days=2)\n seven_days_ago = datetime.datetime.now() - datetime.timedelta(days=7)\n\n if date > two_days_ago:\n # we are within the last two days, so we take all the measurements we can get!\n values.append(d)\n elif (date < two_days_ago) and (date > seven_days_ago):\n # we are between 2 and 7 days ago, so take only values if we don't have 25 values already\n if len(values) < 30:\n values.append(d)\n elif date < seven_days_ago:\n # we are further away than 7 days, so take a maximum of 5 values from here if we don't have 5 yet\n if len(values) < 10:\n values.append(d)\n\n to_average = [\n i.get(\"_source\", {})\n .get(\"profiles\", {})\n .get(\"fastBmk\", {})\n .get(\"value\", 0.0)\n for i in values\n ]\n json_data[pq] = {\n \"avg_value\": float(sum(to_average)) / len(to_average),\n \"measurements\": len(to_average),\n }\n # print(len(to_average))\n\n return json_data",
"def _get_metrics_to_collect(self, instance_key, additional_metrics):\n if instance_key not in self.metrics_to_collect_by_instance:\n self.metrics_to_collect_by_instance[instance_key] = \\\n self._build_metric_list_to_collect(additional_metrics)\n return self.metrics_to_collect_by_instance[instance_key]",
"def add_datapoints(self, stats):\n # APCU Stats\n apcu_stats = stats.get('apcu_stats', dict())\n self.add_gauge_value('APCu Cache/Slots', 'slots',\n apcu_stats.get('nslots',\n apcu_stats.get('num_slots', 0)))\n self.add_gauge_value('APCu Cache/Entries', 'keys',\n apcu_stats.get('nentries',\n apcu_stats.get('num_entries', 0)))\n self.add_gauge_value('APCu Cache/Size', 'bytes',\n apcu_stats.get('mem_size', 0))\n self.add_gauge_value('APCu Cache/Expunges', 'keys',\n apcu_stats.get('nexpunges',\n apcu_stats.get('expunges', 0)))\n\n hits = apcu_stats.get('nhits', apcu_stats.get('num_hits', 0))\n misses = apcu_stats.get('nmisses', apcu_stats.get('num_misses', 0))\n total = hits + misses\n if total > 0:\n effectiveness = float(float(hits) / float(total)) * 100\n else:\n effectiveness = 0\n self.add_gauge_value('APCu Cache/Effectiveness', 'percent',\n effectiveness)\n\n self.add_derive_value('APCu Cache/Hits', 'keys', hits)\n self.add_derive_value('APCu Cache/Misses', 'keys', misses)\n self.add_derive_value('APCu Cache/Inserts', 'keys',\n apcu_stats.get('ninserts',\n apcu_stats.get('num_inserts',0)))",
"def _aggregate_perf_data(perf_all_ordinals: List[str]):\n aggregate = {}\n\n pd = PerfData()\n for data in perf_all_ordinals:\n worker_pd = PerfData(**json.loads(data))\n if len(perf_all_ordinals) > 1:\n aggregate.setdefault(\"ordinals\", [])\n aggregate[\"ordinals\"].append(worker_pd.throughput_dict())\n\n pd.merge(worker_pd)\n\n aggregate.update(dataclasses.asdict(pd))\n return aggregate"
] | [
"0.59747314",
"0.59122944",
"0.54553163",
"0.53186303",
"0.52896434",
"0.52513325",
"0.52353007",
"0.5187257",
"0.5186104",
"0.51808596",
"0.5174486",
"0.5171695",
"0.5140604",
"0.51390433",
"0.5108755",
"0.5096023",
"0.50914425",
"0.50677156",
"0.5052862",
"0.50481683",
"0.503186",
"0.5021795",
"0.50000453",
"0.49747816",
"0.495587",
"0.4948683",
"0.4938958",
"0.49351326",
"0.4929363",
"0.49192527"
] | 0.69163495 | 0 |
Merge data from a time metric object. | def merge_time_metric(self, metric):
self.merge_raw_time_metric(metric.duration, metric.exclusive) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def combine(cls, date_obj, time_obj):\n return cls(date_obj.year, date_obj.month, date_obj.day,\n time_obj.hour, time_obj.minute, time_obj.second,\n time_obj.nanosecond)",
"def add_time(data, t):\n data['year'] = t.year\n data['month'] = t.month\n data['day'] = t.day\n data['hour'] = t.hour\n data['minute'] = t.minute\n data['second'] = t.second",
"def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def _load_time(self):\n\n time_variables = ('time', 'Times', 'Itime', 'Itime2')\n got_time, missing_time = [], []\n for time in time_variables:\n # Since not all of the time_variables specified above are required, only try to load the data if they\n # exist. We'll raise an error if we don't find any of them though.\n if time in self.ds.variables:\n setattr(self.time, time, self.ds.variables[time][:])\n got_time.append(time)\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[time].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[time], attribute))\n setattr(self.atts, time, attributes)\n else:\n missing_time.append(time)\n\n if len(missing_time) == len(time_variables):\n warn('No time variables found in the netCDF.')\n else:\n if 'Times' in got_time:\n # Overwrite the existing Times array with a more sensibly shaped one.\n self.time.Times = np.asarray([''.join(t.astype(str)).strip() for t in self.time.Times])\n\n # Make whatever we got into datetime objects and use those to make everything else. Note: the `time' variable\n # is often the one with the lowest precision, so use the others preferentially over that.\n if 'Times' not in got_time:\n if 'time' in got_time:\n _dates = num2date(self.time.time, units=getattr(self.ds.variables['time'], 'units'))\n elif 'Itime' in got_time and 'Itime2' in got_time:\n _dates = num2date(self.time.Itime + self.time.Itime2 / 1000.0 / 60 / 60, units=getattr(self.ds.variables['Itime'], 'units'))\n try:\n self.time.Times = np.array([datetime.strftime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in _dates])\n except ValueError:\n self.time.Times = np.array([datetime.strftime(d, '%Y/%m/%d %H:%M:%S.%f') for d in _dates])\n # Add the relevant attribute for the Times variable.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Times', attributes)\n\n if 'time' not in got_time:\n if 'Times' in got_time:\n try:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y-%m-%dT%H:%M:%S.%f') for t in self.time.Times])\n except ValueError:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y/%m/%d %H:%M:%S.%f') for t in self.time.Times])\n elif 'Itime' in got_time and 'Itime2' in got_time:\n _dates = num2date(self.time.Itime + self.time.Itime2 / 1000.0 / 60 / 60, units=getattr(self.ds.variables['Itime'], 'units'))\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n self.time.time = date2num(_dates, units='days since 1858-11-17 00:00:00')\n # Add the relevant attributes for the time variable.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'days since 1858-11-17 00:00:00')\n setattr(attributes, 'long_name', 'time')\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'time', attributes)\n\n if 'Itime' not in got_time and 'Itime2' not in got_time:\n if 'Times' in got_time:\n try:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y-%m-%dT%H:%M:%S.%f') for t in self.time.Times])\n except ValueError:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y/%m/%d %H:%M:%S.%f') for t in self.time.Times])\n elif 'time' in got_time:\n _dates = num2date(self.time.time, units=getattr(self.ds.variables['time'], 'units'))\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n _datenum = date2num(_dates, units='days since 1858-11-17 
00:00:00')\n self.time.Itime = np.floor(_datenum)\n self.time.Itime2 = (_datenum - np.floor(_datenum)) * 1000 * 60 * 60 # microseconds since midnight\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'days since 1858-11-17 00:00:00')\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Itime', attributes)\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'msec since 00:00:00')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Itime2', attributes)\n\n # Additional nice-to-have time representations.\n if 'Times' in got_time:\n try:\n self.time.datetime = np.array([datetime.strptime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in self.time.Times])\n except ValueError:\n self.time.datetime = np.array([datetime.strptime(d, '%Y/%m/%d %H:%M:%S.%f') for d in self.time.Times])\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'long_name', 'Python datetime.datetime')\n setattr(self.atts, 'datetime', attributes)\n else:\n self.time.datetime = _dates\n self.time.matlabtime = self.time.time + 678942.0 # convert to MATLAB-indexed times from Modified Julian Date.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'long_name', 'MATLAB datenum')\n setattr(self.atts, 'matlabtime', attributes)\n\n # Clip everything to the time indices if we've been given them. Update the time dimension too.\n if 'time' in self._dims:\n if all([isinstance(i, (datetime, str)) for i in self._dims['time']]):\n # Convert datetime dimensions to indices in the currently loaded data.\n self._dims['time'][0] = self.time_to_index(self._dims['time'][0])\n self._dims['time'][1] = self.time_to_index(self._dims['time'][1]) + 1 # make the indexing inclusive\n for time in self.obj_iter(self.time):\n setattr(self.time, time, getattr(self.time, time)[self._dims['time'][0]:self._dims['time'][1]])\n self.dims.time = len(self.time.time)",
"def update_time(self):\n time_metrics = self._fetch_time_metrics_and_clear()\n self._logger.info('update_time. time_metrics = %s', build_metrics_times_data(time_metrics))",
"def _get_time(self) -> None:\n self.data[\"time\"] = np.zeros(len(self.data[\"yyyymmdd\"]), dtype=object)\n \n for idx, (yyyymmdd, hhmmss) in enumerate(zip(self.data[\"yyyymmdd\"], self.data[\"hhmmss\"])):\n year, month, day = yyyymmdd.split(\"/\")\n hour, minute, second = hhmmss.split(\":\")\n self.data[\"time\"][idx] = datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n \n del self.data[\"yyyymmdd\"]\n del self.data[\"hhmmss\"]",
"def _fetch_time_metrics_and_clear(self):\n with self._time_rlock:\n time_metrics = self._time_metrics\n self._time_metrics = defaultdict(LatencyTracker)\n\n return time_metrics",
"def build_metrics_times_data(time_metrics):\n return [{'name': name, 'latencies': latencies.get_latencies()}\n for name, latencies in iteritems(time_metrics)]",
"def combine(cls, date, time, tzinfo=True):\n if not isinstance(date, real_date):\n raise TypeError(\"date argument must be a date instance\")\n if not isinstance(time, real_time):\n raise TypeError(\"time argument must be a time instance\")\n if tzinfo is True:\n tzinfo = time.tzinfo\n return cls(\n date.year,\n date.month,\n date.day,\n time.hour,\n time.minute,\n time.second,\n time.microsecond,\n tzinfo,\n fold=time.fold,\n )",
"def with_time(self):\n key = list(self.keys())[0]\n length = len(self[key])\n time_slices = self[key].time_slices\n\n if time_slices is None:\n raise FeatureError(\"FeatureCollection has no time reference.\")\n\n for i in range(length):\n res = {}\n for key, feature in self.items():\n res[key] = feature.data[feature.name][i]\n yield (time_slices[i], res)",
"def merge(self, otr):\n self._duration = otr.get_start() - self.get_start()\n self._duration += otr.get_duration()\n self._line[3] = self._duration",
"def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]",
"def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)",
"def copy_and_append_time_dimension_to_netcdf_dataset(self,dataset_in,dataset_out):\n\n for dim_name,dim_obj in list(dataset_in.dimensions.items()):\n dataset_out.createDimension(dim_name,len(dim_obj)\n if not dim_obj.isunlimited() else None)\n dataset_out.createDimension('time',None)\n times = dataset_out.createVariable(\"time\",'f8',(\"time\",))\n times.units = \"years since 0001-01-01 00:00:00.0\"\n times.calendar = \"proleptic_gregorian\"\n times[0] = np.array([0.0])\n for var_name, var_obj in list(dataset_in.variables.items()):\n new_var = dataset_out.createVariable(var_name,var_obj.datatype,var_obj.dimensions\n if (len(var_obj.dimensions) <= 1\n or var_name == 'AREA') else\n [\"time\"] + list(var_obj.dimensions))\n if len(var_obj.dimensions) <= 1 or var_name == 'AREA':\n new_var[:] = var_obj[:]\n else:\n new_var[0,:] = var_obj[:]\n new_var.setncatts({attr_name: var_obj.getncattr(attr_name) for attr_name in var_obj.ncattrs()})",
"def merge_metric_stats(self, snapshot):\n\n if not self.__settings:\n return\n\n for key, other in six.iteritems(snapshot.__stats_table):\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)",
"def merge(self, other: PerfData):\n self.total_samples += other.total_samples\n if self.total_time == 0.0:\n self.total_time = other.total_time\n self.compile_time = max(self.compile_time, other.compile_time)\n self.programming_time = max(\n self.programming_time, other.programming_time\n )\n if self.est_samples_per_sec == 0.0:\n self.est_samples_per_sec = other.est_samples_per_sec\n else:\n assert (\n self.est_samples_per_sec == other.est_samples_per_sec\n ), \"Expected all fabric-based performance estimates to be identical\"\n\n if self.total_time > 0:\n self.samples_per_sec = float(self.total_samples) / self.total_time\n else:\n self.samples_per_sec = 0.0",
"def set_data(self, time, data):\n for diagnostic in self._diagnostics_list:\n out = diagnostic(time, data)",
"def copyDataFrom (self, other):\n\n self.localTimeString=other.localTimeString\n self._myHasLocalTimeString=other._myHasLocalTimeString\n \n self.utcTimeString=other.utcTimeString\n self._myHasUtcTimeString=other._myHasUtcTimeString\n \n self.daylightSavingTime=other.daylightSavingTime\n self._myHasDaylightSavingTime=other._myHasDaylightSavingTime\n \n self.epoch=other.epoch\n self._myHasEpoch=other._myHasEpoch\n \n self.utcOffsetMinutes=other.utcOffsetMinutes\n self._myHasUtcOffsetMinutes=other._myHasUtcOffsetMinutes",
"def with_time(self):\n if self.time_slices is None:\n raise FeatureError(\"Feature has no time reference.\")\n\n for i, datum in enumerate(self.data[self.name]):\n yield (self.time_slices[i], datum)",
"def addData(self, other, time, index):\n\n xoffset = index[0]*other.xdim\n yoffset = index[1]*other.ydim \n zoffset = index[2]*other.zdim\n \n self.data [ time-self.time_range[0], zoffset:zoffset+other.zdim, yoffset:yoffset+other.ydim, xoffset:xoffset+other.xdim] = other.data [:,:,:]",
"def add_time_point(self,time, mdv_instance):\n\n self.mdvtc[time] = mdv_instance",
"def record_time_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Scope is forced to be empty string if None as\n # scope of None is reserved for apdex metrics.\n\n key = (metric.name, metric.scope or '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = TimeStats(call_count=1,\n total_call_time=metric.duration,\n total_exclusive_call_time=metric.exclusive,\n min_call_time=metric.duration,\n max_call_time=metric.duration,\n sum_of_squares=metric.duration ** 2)\n self.__stats_table[key] = stats\n else:\n stats.merge_time_metric(metric)\n\n return key",
"def _fill_meas_result(self,meas,from_time,to_time,meas_data):\r\n input=self._pvsr.create_pvsr_object(\"GetMeasuredValuesInput\")\r\n input.ObjType = \"Measurement\"\r\n input.ObjId = meas.Id\r\n input.From = datetime.datetime.fromtimestamp(from_time)\r\n input.To = datetime.datetime.fromtimestamp(to_time)\r\n logging.info(\"Get values, eq: {0}, type: {1}, index: {2}, name: {3}, {4} -> {5}\".format(self._meas[\"equipment\"],meas.Type,meas.Index,meas.DescriptionToShow,input.From,input.To))\r\n meas_res=self._pvsr.getMeasuredValues(input)\r\n \r\n index2mplane_name={}\r\n multiply = None\r\n if \"first\" in self._meas[\"types\"][meas.Type]:\r\n index2mplane_name[0]=self._meas[\"types\"][meas.Type][\"first\"]\r\n if \"second\" in self._meas[\"types\"][meas.Type]:\r\n index2mplane_name[1]=self._meas[\"types\"][meas.Type][\"second\"]\r\n if \"multiply\" in self._meas[\"types\"][meas.Type]:\r\n multiply=int(self._meas[\"types\"][meas.Type][\"multiply\"])\r\n\r\n if hasattr(meas_res,\"D\"):\r\n for d in meas_res.D:\r\n if d.T not in meas_data:\r\n meas_data[d.T]={}\r\n for index,mplane_name in index2mplane_name.items():\r\n if index < len(d.V):\r\n if multiply is not None:\r\n d.V[index]*=multiply\r\n meas_data[d.T][mplane_name]=d.V[index]\r\n else:\r\n meas_data[d.T][mplane_name]=None",
"def __add__ ( self, other, resample_opts=None ):\n result = ObservationStorage (datadir=self.datadir, \\\n resample_opts=resample_opts )\n if self.date[0] > other.date[0]:\n start_date = other.date[0]\n else:\n start_date = self.date[0]\n if self.date[-1] > other.date[-1]:\n end_date = other.date[-1]\n else:\n end_date = self.date[-1]\n \n delta = datetime.timedelta ( days=1 )\n this_date = start_date.date()\n end_date = end_date.date() + delta\n \n this_obs_dates = [ x.date() for x in self.date ]\n other_obs_dates = [ x.date() for x in other.date ]\n \n date = [] ; vza = [] ; vaa = [] ; sza = [] ; saa = []\n emulator = [] ; mask = [] ; data_pntr = [] ; spectral = []\n sensor = []\n \n while this_date < end_date:\n if this_date in this_obs_dates:\n iloc = this_obs_dates.index ( this_date )\n date.append ( self.date[iloc] )\n emulator.append ( self.emulator[iloc] )\n vza.append ( self.vza[iloc] )\n sza.append ( self.sza[iloc] )\n vaa.append ( self.vaa[iloc] )\n saa.append ( self.saa[iloc] )\n spectral.append ( self.spectral )\n mask.append ( ( self.get_mask, [iloc] ) )\n sensor.append ( self.sensor )\n \n data_pntr.append ( self._data_pntr[iloc] )\n if this_date in other_obs_dates:\n iloc = other_obs_dates.index ( this_date )\n date.append ( other.date[iloc] )\n emulator.append ( other.emulator[iloc] )\n vza.append ( other.vza[iloc] )\n sza.append ( other.sza[iloc] )\n vaa.append ( other.vaa[iloc] )\n saa.append ( other.saa[iloc] )\n spectral.append ( other.spectral )\n mask.append ( ( other.get_mask, [iloc] ) )\n sensor.append ( other.sensor )\n data_pntr.append ( other._data_pntr[iloc] )\n this_date += delta\n result.vza = vza\n result.vaa = vaa\n result.sza = sza \n result.saa = saa \n result.date = date\n result.spectral = spectral\n result.masks = mask\n result.sensor = sensor\n result.emulator = emulator\n result._data_pntr = data_pntr\n return result",
"def time_stats(df):",
"def get_metric_data(config, metric_list, metric_grouping, start_time, end_time, collected_data_map):\n\n def format_data_entry(json_data_entry):\n metric_name = json_data_entry.get('metric')\n host_name = json_data_entry.get('tags', {}).get('host') or 'unknownHost'\n dps = json_data_entry.get('dps', {})\n metric_value = None\n header_field = normalize_key(metric_name) + \"[\" + host_name + \"]:\" + str(\n get_grouping_id(metric_name, metric_grouping))\n mtime = 0\n for stime, val in dps.items():\n if int(stime) > mtime:\n metric_value = val\n mtime = int(stime)\n\n epoch = mtime * 1000\n\n if epoch in collected_data_map:\n timestamp_value_map = collected_data_map[epoch]\n else:\n timestamp_value_map = {}\n\n timestamp_value_map[header_field] = str(metric_value)\n collected_data_map[epoch] = timestamp_value_map\n\n json_data = {\n \"token\": config['OPENTSDB_TOKEN'],\n \"start\": start_time,\n \"end\": end_time,\n \"queries\": map(lambda m: {\n \"aggregator\": \"avg\",\n \"downsample\": \"1m-avg\",\n \"metric\": m.encode('ascii')\n }, metric_list)\n }\n\n url = config[\"OPENTSDB_URL\"] + \"/api/query\"\n response = requests.post(url, data=json.dumps(json_data))\n if response.status_code == 200:\n rawdata_list = response.json()\n logger.debug(\"Get metric data from opentsdb: \" + str(len(rawdata_list)))\n\n # format metric and save to collected_data_map\n map(lambda d: format_data_entry(d), rawdata_list)",
"def merge(self, dataset):\n def merge_data(source, dest):\n for key, value in source.items():\n if isinstance(value, dict):\n merge_data(value, dest.setdefault(key, {}))\n else:\n dest[key] = value\n return dest\n\n merge_data(dataset.data, self._data)\n\n for h in dataset.task_history:\n if h not in self._task_history:\n self._task_history.append(h)",
"def metrics_times(self, times_data):\n url = _METRICS_URL_TEMPLATE.format(base_url=self._events_api_url_base, endpoint='times')\n return self._post(url, times_data)",
"def update(self, data: Mapping[str, np.ndarray]) -> Self:\n\n for metric in self.metrics:\n metric.update(data)\n\n return self"
] | [
"0.6252038",
"0.5991166",
"0.5987996",
"0.59735656",
"0.5910383",
"0.5889225",
"0.58206046",
"0.5819968",
"0.5817195",
"0.56706285",
"0.5599506",
"0.55690825",
"0.5536235",
"0.54553676",
"0.5344387",
"0.532456",
"0.5291916",
"0.52320737",
"0.52283955",
"0.5221072",
"0.5202319",
"0.51057315",
"0.51032513",
"0.51024926",
"0.5096357",
"0.50709116",
"0.5053796",
"0.5052287",
"0.5047473",
"0.5038343"
] | 0.69977385 | 0 |
Record a single value metric, merging the data with any data from prior value metrics with the same name. | def record_custom_metric(self, name, value):
if isinstance(value, dict):
if len(value) == 1 and 'count' in value:
new_stats = CountStats(call_count=value['count'])
else:
new_stats = TimeStats(*c2t(**value))
else:
new_stats = TimeStats(1, value, value, value, value, value**2)
stats = self.__stats_table.get(name)
if stats is None:
self.__stats_table[name] = new_stats
else:
stats.merge_stats(new_stats) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]",
"def log_metric(self, name: str, value):\n self.metrics[name] = value\n\n self._sync_log_event()",
"def write_metric(self, metric_name: str, metric_value: Union[float, int]):\n self._metrics.append(Metric(metric_name, metric_value))",
"def record_custom_metric(self, name, value):\n key = (name, '')\n\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(key)\n if stats is None:\n self.__stats_table[key] = new_stats\n else:\n stats.merge_stats(new_stats)\n\n return key",
"def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with self._lock:\n self._batch[identity] = value\n self._timestamps[identity] = int(time.time() * 1000.0)",
"def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})",
"def record_summary(self, name, value, tags=None):\n identity = self.create_identity(name, tags, \"summary\")\n with self._lock:\n if identity in self._batch:\n merged_value = self._batch[identity]\n merged_value[\"count\"] += 1\n merged_value[\"sum\"] += value\n merged_value[\"min\"] = min(value, merged_value[\"min\"])\n merged_value[\"max\"] = max(value, merged_value[\"max\"])\n else:\n value = {\"count\": 1, \"sum\": value, \"min\": value, \"max\": value}\n self._batch[identity] = value",
"def log_metric(self, name, val):\n raise NotImplementedError",
"def update_metric(self, metric, value):\n if self.is_number(value):\n self.logger.debug(\"Collected raw metric: %s = %s\" % (metric, value))\n self.raw_metrics[metric] = value",
"def __push_metric(self, metric_name, value, timestamp):\n sock = self.__get_carbon_socket()\n _data = \"%s %d %d\\n\" % (metric_name, value, timestamp)\n LOGGER.debug(\"SEND: %s\", _data.replace(\"\\n\", \"\"))\n sock.send(_data.encode('utf-8'))",
"def metric_recorded(self, record):\n if record.name in self.headers and self.current_row is not None:\n if record.name == \"learning_rate\" and not record.is_scalar:\n # record is a list of scalars\n value = \",\".join([f\"{lr:.4f}\" for lr in record.value])\n elif record.is_scalar and isinstance(record.value, int):\n value = str(record.value)\n else:\n assert record.is_scalar\n\n value = f\"{record.value:.4f}\"\n\n self.current_row[record.name] = value",
"def dispatch_value(metric, value, type):\n log_verbose('Sending metric: %s=%s as type %s' % (metric, value,type))\n\n val = collectd.Values(plugin='redis_metrics')\n val.type = type\n val.type_instance = metric\n val.values = [value]\n val.dispatch()",
"def log_metric(self, name, val, step):\n raise NotImplementedError",
"def sum(self, key, value):\n self._metrics[key] += value",
"def log_metric(key, value, step=None):\n mlflow.log_metric(key, value, step=step)",
"def add_metric(self, metric_name, metric_value, login=False):\n if login:\n self._gc.login()\n\n try: \n if metric_name not in self._metric_dict:\n metric_index = len(self._metric_dict) + 2\n self._wks.update_cell(1, metric_index, metric_name)\n self._metric_dict[metric_name] = metric_index\n self.save_config()\n\n self._wks.update_cell(self.row_index, self._metric_dict[metric_name], metric_value)\n except Exception as ins:\n if not login:\n self.add_metric(metric_name, metric_value, login=True)\n else:\n return '\\n'.join([str(type(ins)), str(ins.args), str(ins)])\n return None",
"def record_metrics(self, pid, metrics):\n\n for _metric, _metric_value in metrics.items():\n if not self.__metrics_history[pid].get(_metric):\n self.__metrics_history[pid][_metric] = []\n self.__metrics_history[pid][_metric].append(_metric_value)\n # only keep the last 2 running history for any metric\n self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][\n -2:\n ]",
"def record_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, value in metrics:\n self.record_custom_metric(name, value)",
"def _record_storage_event(metric, value=0):\n command_name = properties.VALUES.metrics.command_name.Get()\n metrics.CustomKeyValue(command_name, 'Storage-' + metric, value)",
"def _log_op_value(self, name: str, value: Any) -> None:\n summary_op, placeholder = self._get_log_op(name)\n sess = tf.get_default_session()\n result = sess.run(summary_op, {placeholder: value})\n self.summary_writer.add_summary(result, self.batches_seen)",
"def put(self, metric, values, timestamp=None):\n if timestamp is None:\n timestamp = time.time()\n now_date = datetime.datetime.fromtimestamp(timestamp)\n\n if self.last is None:\n self.last = timestamp\n return\n\n self.last = timestamp\n\n values = [str(d) for d in [now_date, timestamp]+values]\n\n with open(self.filename, \"at\") as df:\n df.write(\"{}\\n\".format(\",\".join(values)))",
"def publish_metric(name, value, type):\n t = time.time()\n m = json.dumps({'monitor':name, type:value, 'time':t})\n r = redis.StrictRedis(host='localhost', port=6379, db=0) \n r.lpush('sensor_readings',m)",
"def save_metric(key, value, timestamp=None):\n\n from analytics_client.settings import _ANALYTICS_ENABLED\n\n if not _ANALYTICS_ENABLED:\n return None\n\n from analytics_client.tasks import store_metric\n\n # Set a timestamp if it is undefined\n _timestamp = timestamp\n if _timestamp is None:\n _timestamp = datetime.now()\n\n store_metric.delay(Metric(key=key, value=value, timestamp=_timestamp))",
"def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)",
"def log_other(self, name: str, value):\n self._other_metadata[name] = value\n\n self._sync_log_event()",
"def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)",
"def _record(self, metric_point: MetricPoint,\n measurement_map: MeasurementMap):\n metric_name = metric_point.metric_name\n tags = metric_point.tags\n\n metric = self._registry.get(metric_name)\n # Metrics should be always registered dynamically.\n assert metric\n\n tag_map = tag_map_module.TagMap()\n for key, value in tags.items():\n tag_key = tag_key_module.TagKey(key)\n tag_value = tag_value_module.TagValue(value)\n tag_map.insert(tag_key, tag_value)\n\n metric_value = metric_point.value\n measurement_map.measure_float_put(metric.measure, metric_value)\n # NOTE: When we record this metric, timestamp will be renewed.\n measurement_map.record(tag_map)",
"def add_metric(self, metric):\n self.metrics.append(metric)\n self.estimate()",
"def log(self, metric, value, source, timestamp=None):\n if timestamp is None:\n timestamp = datetime.now()\n\n sql = \"insert into measurement(metric, value, source, timestamp) values('{0}', {1}, '{2}', '{3}');\".format(\n metric, value, source, timestamp)\n\n self._execute_sql(sql)",
"def get_metric(self, metric, existing_dict=None):\n metric_key, metric_type, metric_name, metric_help = metric\n metric_dict = {\n 'name': metric_name,\n 'type': metric_type,\n 'help': metric_help,\n 'values': OrderedDict()\n }\n values = self.r.hgetall(metric_key) # new values\n # print \"values: %r\" % values\n metric_dict['values'] = values\n\n if existing_dict:\n # we're updating a metric we've already seen\n # print \"existing dict: %r\" % existing_dict\n for value in values:\n # print \"checking value: %r\" % value\n # value = json.loads(value)\n if value in existing_dict['values']:\n if metric_type == 'counter' or metric_type == 'histogram':\n # Combine the values if it's a counter or histogram\n # TODO: sort histogram buckets\n # TODO: append _bucket to histogram bucket names\n existing_dict['values'][value] = float(\n values[value]) + float(\n existing_dict['values'][value])\n elif metric_type == 'gauge':\n # use the last value we see for a gauge - # TODO: is there a better way? we could average it\n existing_dict['values'][value] = float(values[value])\n else:\n existing_dict['values'][value] = float(values[value])\n metric_dict['values'] = existing_dict['values']\n\n if metric_type == 'histogram':\n # json decode all of the labels\n samples = [json.loads(x, object_pairs_hook=OrderedDict) for x in metric_dict['values']]\n # we need to sort the values by the bucket labeled \"le\"\n sorted_keys = sorted(samples, key=lambda b: b['le'])\n # and then we need to store the values again json encoded\n vals = metric_dict['values']\n metric_dict['values'] = OrderedDict()\n for k in sorted_keys:\n kn = json.dumps(k, sort_keys=True)\n metric_dict['values'][kn] = vals[kn]\n\n return metric_dict"
] | [
"0.72285455",
"0.6880584",
"0.6831178",
"0.67528003",
"0.64814395",
"0.6404036",
"0.6218233",
"0.6194534",
"0.6135706",
"0.60753894",
"0.6063926",
"0.60352796",
"0.59790224",
"0.59553665",
"0.59262705",
"0.589197",
"0.58506346",
"0.5779258",
"0.5770089",
"0.5764042",
"0.5720421",
"0.56907314",
"0.56858194",
"0.5682319",
"0.56533355",
"0.56444114",
"0.5638115",
"0.56074387",
"0.5605615",
"0.559948"
] | 0.69385666 | 1 |
Returns an iterator over the set of value metrics. The items returned are a tuple consisting of the metric name and accumulated stats for the metric. | def metrics(self):
return six.iteritems(self.__stats_table) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __iter__(self):\n prefix = len(META_NS) + 2\n for key, value in self.stats.items():\n yield (key[prefix:-6], int(value))",
"def get_val_iterator(self) -> Iterable[Batch]:\n if self._val_name not in self._datasets:\n raise ValueError(\"Val data not provided.\")\n return self.get_iterator(self._val_name)",
"def itervaluerefs(self):\r\n return self.data.itervalues()",
"def metrics(self) -> List[Metric]:\n return self._metrics",
"def metrics(self):\n return self._metrics",
"def metrics(self):\n return self._metrics",
"def itervaluerefs(self):\n for value in self.itervalues():\n yield ref(value)",
"def metrics(self):\n return self.__metrics",
"def stat_values(self):\n return self._stat_values",
"def metrics(self) -> list[dict[str, dict[str, float | int]]]:\n return self.performance[\"performances\"]",
"def get_valued_metrics(self):\n return self._valued_metrics",
"def get_metrics(self) -> Dict[str, base.Number]:\n return self._metrics",
"def metrics(self):\n raise NotImplementedError(\"metrics\")",
"def values(self):\n return iter(util.annotate(v) for _, v in self._pairs())",
"def metrics(self):\n if not self.df:\n return []\n\n column_metric_strings = [col.split(self.sep)[0] for col in self.df.columns]\n\n metrics = set()\n for colstring in column_metric_strings:\n try:\n metrics.add(Metric(colstring))\n except ValueError:\n continue\n\n return sorted(list(set(metrics)))",
"def iter_values(self):\n values = self.values\n if (values is not None):\n yield from values",
"def values(self):\n\t\treturn iter(self.data)",
"def metrics(self) -> typing.Optional[typing.List[\"BucketMetrics\"]]:\n return self._values.get('metrics')",
"def metrics(self):\r\n if not hasattr(self, '_observable_metrics'):\r\n self._observable_metrics = Metrics()\r\n return self._observable_metrics",
"def iterator(self):\n return self.ValueIterator()",
"def values(self):\n for ts in self:\n yield self[ts]",
"def metrics(self):\n\n data = requests.get(\n f\"http://{self.prometheus_host}:{self.prometheus_port}/metrics\"\n ).content.decode()\n lines = [line for line in data.split(\"\\n\") if not line.startswith(\"#\")]\n metrics = {}\n for line in lines:\n if not line:\n continue\n\n name, value = line.split(\" \")\n\n try:\n value = int(value) # type: ignore\n except ValueError:\n value = float(value) # type: ignore\n\n if \"{\" in name and \"}\" in name:\n base = name[: name.index(\"{\")]\n tags = name[name.index(\"{\") + 1 : -1]\n tags = [tag.split(\"=\") for tag in tags.split(\",\")]\n tags = [(key, val.replace('\"', \"\")) for key, val in tags]\n\n name = base + \"#\" + \",\".join(f\"{k}:{v}\" for k, v in sorted(tags))\n\n metrics[name] = value\n\n return metrics",
"def iter_values(self):\n if self.contributes:\n for value in self.values:\n if isinstance(value, GroupingComponent):\n for x in value.iter_values():\n yield x\n else:\n yield value",
"def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n self._logger.warning(\"Empty result from query : {0}\".format(query_spec))",
"def get_state(self, duration):\n metrics = []\n\n if duration:\n for count_key in self.kv_counts:\n metrics.append(\n MetricObject(\n count_key,\n self.kv_counts[count_key] / duration\n )\n )\n\n for time_key in self.kv_times:\n values = self.kv_times[time_key]['values']\n unit = self.kv_times[time_key]['unit']\n\n metrics.append(\n MetricObject(\n '.'.join([time_key, 'mean']),\n stats_helper.find_mean(values),\n unit\n )\n )\n\n metrics.append(\n MetricObject(\n '.'.join([time_key, 'median']),\n stats_helper.find_median(values),\n unit\n )\n )\n\n for pct in self.percentiles:\n metrics.append(\n MetricObject(\n '.'.join([time_key, \"%sth_percentile\" % pct]),\n stats_helper.find_percentile(values, int(pct)),\n unit\n )\n )\n\n return metrics",
"def iteritems(self):\n\t\tfor attribute_name in dir(self):\n\t\t\tif self._valuable(attribute_name):\n\t\t\t\tyield (attribute_name, getattr(self, attribute_name))",
"def collect(self): # pylint: disable=no-self-use\n start = time.time()\n\n if \"jobs\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"jobs\"]:\n for metric in metric_jobs():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGIN_SETTINGS[\"models\"]):\n yield metric\n\n # --------------------------------------------------------------\n # Extras Function defined in configuration.py or the Regristry\n # # --------------------------------------------------------------\n if \"extras\" in PLUGIN_SETTINGS:\n for metric in collect_extras_metric(PLUGIN_SETTINGS[\"extras\"]):\n yield metric\n\n for metric in collect_extras_metric(__REGISTRY__):\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_app_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge",
"def IterBuildStats(\n self) -> Generator[Tuple[str, str, BaseBuildStats], None, None]:\n return self.IterToValueType(BuildStats)",
"def itervalues(self):\r\n return self.data.itervalues()",
"def itervalues(self):\r\n return self.data.itervalues()"
] | [
"0.61560905",
"0.6048202",
"0.6047815",
"0.6044677",
"0.6018319",
"0.6018319",
"0.5974941",
"0.59604144",
"0.5956948",
"0.595421",
"0.59379584",
"0.58816415",
"0.58809036",
"0.58738184",
"0.5869844",
"0.5865912",
"0.58568335",
"0.5852111",
"0.5831421",
"0.58237755",
"0.5791648",
"0.5778051",
"0.5740947",
"0.57382125",
"0.5714218",
"0.5701945",
"0.5700453",
"0.5693304",
"0.56856513",
"0.56856513"
] | 0.66805625 | 0 |
Merge data from a slow sql node object. | def merge_slow_sql_node(self, node):
duration = node.duration
self[1] += duration
self[2] = self[0] and min(self[2], duration) or duration
self[3] = max(self[3], duration)
if self[3] == duration:
self[4] = node
# Must update the call count last as update of the
# minimum call time is dependent on initial value.
self[0] += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def record_slow_sql_node(self, node):\n\n if not self.__settings:\n return\n\n key = node.identifier\n stats = self.__sql_stats_table.get(key)\n if stats is None:\n # Only record slow SQL if not already over the limit on\n # how many can be collected in the harvest period.\n\n settings = self.__settings\n maximum = settings.agent_limits.slow_sql_data\n if len(self.__sql_stats_table) < maximum:\n stats = SlowSqlStats()\n self.__sql_stats_table[key] = stats\n\n if stats:\n stats.merge_slow_sql_node(node)\n\n return key",
"def concatenate_data():",
"def query(self, qid):\r\n connection = self.getConnection()\r\n cursor = connection.cursor()\r\n lst = []\r\n\r\n #========================================================================\r\n # Preparation\r\n #========================================================================\r\n whereStatement, ranges, morPrep, insert, Levels, rangeTab = self.prepareQuery(qid)\r\n lst.append(round(morPrep, 6)) # preparation\r\n lst.append(round(insert, 6)) # insert ranges into table\r\n lst.append(ranges) #number of ranges\r\n lst.append(Levels) #depth of the tree\r\n\r\n #========================================================================\r\n # First approximation of query region\r\n #========================================================================\r\n\r\n if whereStatement is not '':\r\n if rangeTab is not None:\r\n query = \"SELECT \" + ora.getHintStatement(['USE_NL (t r)', ora.getParallelStringQuery(self.numProcesses)]) + \\\r\n\" \" + ', '.join(['t.'+ i for i in self.columnNames]) + \"\"\"\r\nFROM \"\"\" + self.iotTableName + \" t, \" + rangeTab + \"\"\" r \r\n\"\"\" + whereStatement\r\n\r\n else:\r\n query = \"SELECT \"+ ora.getHintStatement([ora.getParallelStringQuery(self.numProcesses)]) + ', '.join(self.columnNames) + \"\"\" \r\nFROM \"\"\" + self.iotTableName + \"\"\" \r\n\"\"\" + whereStatement\r\n\r\n start1 = time.time()\r\n ora.mogrifyExecute(cursor, query)\r\n result = cursor.fetchall()\r\n\r\n lst.append(round(time.time() - start1, 10)) # fetching\r\n \r\n if (self.integration == 'loose' and self.qtype.lower() != 'time') or self.integration == 'deep':\r\n qTable = self.queryTable + '_temp_' + qid\r\n else: \r\n qTable = self.queryTable + '_' + qid\r\n \r\n start1 = time.time()\r\n decoded = self.decodeSpaceTime(result)\r\n lst.append(round(time.time() - start1, 6)) #decoding\r\n\r\n start1 = time.time()\r\n res = self.storeQuery(qTable, self.queryColumns, decoded, True)\r\n lst.append(round(time.time() - start1, 6)) #storing\r\n if res != []:\r\n ptsInTemp = res\r\n lst.append(res) #approximate points\r\n else:\r\n ptsInTemp = 0\r\n \r\n #==================================================================\r\n # Secondary filtering of query region\r\n #==================================================================\r\n\r\n if (self.qtype.lower() == 'time' and self.integration == 'loose') or res == []:\r\n # no data returned or it is a time query in the loose integration\r\n lst.append(ptsInTemp) #approximate points\r\n lst.append(0) # point in polygon time\r\n return lst\r\n else:\r\n \r\n if self.integration.lower() == 'deep' and self.qtype.lower() == 'time':\r\n queryTab = self.iotTableName + '_res_' + str(qid)\r\n timeWhere = whereClause.addTimeCondition(getTime(self.granularity, self.start_date, self.end_date), 'TIME', self.timeType)\r\n zWhere = whereClause.addZCondition([self.ozmin, self.ozmax], 'Z')\r\n whereValue = whereClause.getWhereStatement([timeWhere, zWhere])\r\n \r\n \r\n if self.granularity == 'day':\r\n query = \"CREATE TABLE \" + queryTab + \"\"\"\r\n\"\"\" + ora.getTableSpaceString(self.tableSpace) + \"\"\"\r\nAS SELECT * \r\nFROM (\r\n SELECT \"\"\" + ora.getHintStatement([ora.getParallelStringQuery(self.numProcesses)]) + \\\r\n \"\"\" X, Y, Z, TO_DATE(TIME, 'yyyy/mm/dd') as TIME \r\n FROM \"\"\" + qTable +\"\"\"\r\n ) \r\n\"\"\" + whereValue\r\n else:\r\n query = \"CREATE TABLE \" + queryTab + \"\"\"\r\n\"\"\" + ora.getTableSpaceString(self.tableSpace) + \"\"\" \r\n AS SELECT \"\"\" + 
ora.getHintStatement([ora.getParallelStringQuery(self.numProcesses)]) + \\\r\n \"\"\" X, Y, Z, TIME \r\n FROM \"\"\"+ qTable + \"\"\"\" \r\n \"\"\" + whereValue\r\n \r\n start1 = time.time()\r\n cursor.execute(query)\r\n end = round(time.time() - start1, 2)\r\n\r\n ora.dropTable(cursor, qTable, False)\r\n final = ora.getNumPoints(connection, cursor, queryTab)\r\n lst.append(final) #final points\r\n lst.append(end) #point in polygon time\r\n return lst\r\n \r\n else:\r\n final, end = self.pointInPolygon(qTable, qid, True)\r\n \r\n lst.append(final) #final points\r\n lst.append(end) #point in polygon time\r\n return lst\r\n else:\r\n print 'No data returned'\r\n return [lst[0], lst[1], '-', '-', '-','-','-','-']",
"def merge(self, obj, **kwargs):\r\n raise NotImplementedError\r\n # if type(obj) == StreamFork:\r\n # node = obj.node\r\n # else:\r\n # node = obj\r\n #\r\n # self.stream.append(node)\r\n #\r\n # merge = MergeNode(**kwargs)\r\n # self.stream.append(merge)\r\n # self.stream.connect()\r",
"def merge(self, other_btree):\n pass",
"def process_row_to_graph(s3_object_info, app_logger, app_config, start_timing):\n\n # Graph Server Connection Info\n graph_server_host = get_config_item(app_config, \"neo4j.host\")\n graph_server_user = get_config_item(app_config, \"neo4j.username\")\n graph_server_pwd = get_config_item(app_config, \"neo4j.password\")\n driver = GraphDatabase.driver(\"bolt://\" + graph_server_host, auth=basic_auth(graph_server_user, graph_server_pwd))\n\n object_key = get_config_item(app_config, 's3_info.object_base') + \\\n '/' + s3_object_info['camera_name'] + '/' + \\\n s3_object_info['date_string'] + '/' + \\\n s3_object_info['hour_string'] + '/' + \\\n s3_object_info['img_type'] + '/' + \\\n s3_object_info['just_file']\n\n date_info = parse_date_time_pacific(object_key)\n event_ts = s3_object_info['utc_ts']\n\n add_camera_node = 'MERGE(this_camera:Camera {camera_name: \"' + s3_object_info['camera_name'] + '\"})'\n if s3_object_info['img_type'] == 'snap':\n add_image_node = 'MERGE(this_image:Image {object_key: \"' + object_key + \\\n '\", timestamp: ' + str(event_ts) + '})'\n else:\n add_image_node = 'MERGE(this_image:Video {object_key: \"' + object_key + \\\n '\", timestamp: ' + str(event_ts) + '})'\n add_isodate_node = 'MERGE(this_isodate:ISODate {iso_date: \"' + date_info['isodate'] + '\"})'\n add_year_node = 'MERGE(this_year:Year {year_value: ' + date_info['year'] + '})'\n add_month_node = 'MERGE(this_month:Month {month_value: ' + date_info['month'] + '})'\n add_day_node = 'MERGE(this_day:Day {day_value: ' + date_info['day'] + '})'\n add_hour_node = 'MERGE(this_hour:Hour {hour_value: ' + date_info['hour'] + '})'\n add_size_node = 'MERGE(this_size:Size {size_in_bytes: ' + s3_object_info['size_in_bytes'] + '})'\n relate_image_to_camera = 'MERGE (this_camera)-[:HAS_IMAGE {timestamp: ' + str(event_ts) + '}]->(this_image)'\n relate_image_to_timestamp = 'MERGE (this_image)-[:HAS_TIMESTAMP]->(this_isodate)'\n relate_image_to_year = 'MERGE (this_image)-[:HAS_YEAR]->(this_year)'\n relate_image_to_month = 'MERGE (this_image)-[:HAS_MONTH]->(this_month)'\n relate_image_to_day = 'MERGE (this_image)-[:HAS_DAY]->(this_day)'\n relate_image_to_hour = 'MERGE (this_image)-[:HAS_HOUR]->(this_hour)'\n relate_image_to_size = 'MERGE (this_image)-[:HAS_SIZE]->(this_size)'\n\n full_query_list = add_camera_node + \" \" + \\\n add_image_node + \" \" + \\\n add_isodate_node + \" \" + \\\n add_year_node + \" \" + \\\n add_month_node + \" \" + \\\n add_day_node + \" \" + \\\n add_hour_node + \" \" + \\\n add_size_node + \" \" + \\\n relate_image_to_camera + \" \" + \\\n relate_image_to_timestamp + \" \" + \\\n relate_image_to_year + \" \" + \\\n relate_image_to_month + \" \" + \\\n relate_image_to_day + \" \" + \\\n relate_image_to_size + \" \" + \\\n relate_image_to_hour\n\n neo_session = driver.session()\n\n tx = neo_session.begin_transaction()\n\n tx.run(full_query_list)\n\n tx.commit()\n neo_session.close()\n total_time = time.time() - start_timing\n app_logger.info(\"S3 Object: {} information written to graph DB in {} seconds.\".format(object_key, total_time))\n return True",
"def archive_ost_data(self, lmtdb):\n\n dataset_names = [\n 'datatargets/readbytes',\n 'datatargets/writebytes',\n 'fullness/bytes',\n 'fullness/bytestotal',\n 'fullness/inodes',\n 'fullness/inodestotal'\n ]\n\n self.init_datasets(dataset_names, lmtdb.ost_names)\n\n # Now query the OST_DATA table to get byte counts over the query time range\n results, columns = lmtdb.get_ost_data(self.query_start, self.query_end_plusplus)\n\n # Index the columns to speed up insertion of data\n col_map = {}\n try:\n for db_col in ['TIMESTAMP', 'OST_ID', 'READ_BYTES',\n 'WRITE_BYTES', 'KBYTES_USED', 'KBYTES_FREE',\n 'INODES_USED', 'INODES_FREE']:\n col_map[db_col] = columns.index(db_col)\n except ValueError:\n raise ValueError(\"LMT database schema does not match expectation\")\n\n # Loop through all the results of the timeseries query\n for row in results:\n if isstr(row[col_map['TIMESTAMP']]):\n # SQLite stores timestamps as a unicode string\n timestamp = datetime.datetime.strptime(row[col_map['TIMESTAMP']],\n \"%Y-%m-%d %H:%M:%S\")\n else:\n # MySQL timestamps are automatically converted to datetime.datetime\n timestamp = row[col_map['TIMESTAMP']]\n target_name = lmtdb.ost_id_map[row[col_map['OST_ID']]]\n for dataset_name in dataset_names:\n target_dbcol = self.config[dataset_name].get('column')\n if target_dbcol is not None:\n self[dataset_name].insert_element(\n timestamp,\n target_name,\n row[col_map[target_dbcol]])\n elif dataset_name == 'fullness/bytestotal':\n self[dataset_name].insert_element(\n timestamp,\n target_name,\n row[col_map['KBYTES_USED']] + row[col_map['KBYTES_FREE']])\n elif dataset_name == 'fullness/inodestotal':\n self[dataset_name].insert_element(\n timestamp,\n target_name,\n row[col_map['INODES_USED']] + row[col_map['INODES_FREE']])\n else:\n errmsg = \"%s in self.config but missing 'column' setting\" % dataset_name\n raise KeyError(errmsg)",
"def _load_elastic(self, sqldata):\n from collections import defaultdict\n attributes = ResourceMetricsLoader.attr_fields.keys()\n records = defaultdict(lambda: defaultdict(int))\n for sd in sqldata:\n r = dict(sd.items())\n if r['ATTRIBUTE_NAME'] not in attributes:\n continue\n # Only store hostnames and not FQDN for resources\n r['RESOURCE_NAME'] = r['RESOURCE_NAME'].split('.')[0]\n\n (attr, val) = self._get_attr_val(r)\n records[r.get('RESOURCE_NAME'),r.get('TIME_STAMP')][attr] = val\n records[r.get('RESOURCE_NAME'),r.get('TIME_STAMP')]['INSERT_SEQ'] = r['INSERT_SEQ']\n\n # Construct docs from records\n inserts = [] \n for k, v in records.iteritems():\n body = { attr: val for attr, val in v.iteritems() } \n body['RESOURCE_NAME'], body['TIME_STAMP'] = k\n document = {\n \"_index\" : self._get_index_name(body['TIME_STAMP']),\n \"_type\" : 'default',\n \"_source\" : body\n }\n inserts.append(document)\n \n # Insert list of documents into elasticsearch\n self.logger.info(\"Loading chunk into elasticsearch\")\n status = helpers.bulk(self.es,\n inserts,\n self.chunk_size)\n self.logger.info(\"Finished loading chunk into elasticsearch\")\n\n # update sequence to last item in the results\n #self.seq = dict(results[-1].items())[self.id_field]\n self.seq = sqldata[-1][self.seq_field]\n \n return status",
"def _load_elastic(self, sqldata):\n inserts = []\n for r in sqldata:\n body = self._preprocess(dict(r.items()))\n if not body:\n continue # Skip if preprocessing returns False\n index_name = self._get_index_name(body['TIME_STAMP'])\n document = {\n \"_index\" : index_name,\n \"_type\" : 'default', # Hardcoded - we only have 1 doctype\n \"_id\" : body[self.seq_field],\n \"_source\" : body\n }\n inserts.append(document)\n\n # update sequence to last item in the results\n self.seq = sqldata[-1][self.seq_field]\n \n # Insert list of documents into elasticsearch\n status = helpers.bulk(self.es, inserts, self.chunk_size)\n self.logger.info(\"Inserted %d chunks into %s\" % (self.chunk_size,\n index_name))\n return status",
"def get_src_data(DbClass, date_from, date_to, columns_to_copy):\n session = get_db_session(\"src\")\n results = (\n session.query(DbClass)\n .filter(\n and_(\n DbClass.timestamp >= date_from,\n DbClass.timestamp <= date_to,\n )\n )\n .all()\n )\n all_results = []\n for r in results:\n result_dict = {}\n for col in columns_to_copy:\n result_dict[col] = getattr(r, col)\n all_results.append(result_dict)\n session_close(session)\n return all_results",
"def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def _merge(self):\n raise NotImplementedError",
"def _get_new_data(self):\n self.log.info(\"Get new query from db \")\n surveys = self.db.execute_pandas_query(self._get_query('surveys_query'))\n\n final_query = ''\n for index_s, survey_id in surveys.iterrows():\n questions = self.db.execute_pandas_query(self._get_query('questions_query').replace('@currentSurveyId', str(survey_id['SurveyId'])))\n query_in_progress = ''\n for index_q, question_id in questions.iterrows():\n if question_id['InSurvey'] == 0:\n query_in_progress = query_in_progress + self._get_query('query_template_for_null_column').replace('<QUESTION_ID>', str(question_id['QuestionId']))\n else:\n query_in_progress = query_in_progress + self._get_query('query_template_for_answer_column').replace('<QUESTION_ID>', str(question_id['QuestionId']))\n\n if index_q != questions.index[-1]:\n query_in_progress = query_in_progress + ' , '\n\n union_query_block = self._get_query('query_template_outer_union_query').replace('<DYNAMIC_QUESTION_ANSWERS>', query_in_progress)\n union_query_block = union_query_block.replace('<SURVEY_ID>', str(survey_id['SurveyId']))\n final_query = final_query + union_query_block\n if index_s != surveys.index[-1]:\n final_query = final_query + ' UNION '\n return final_query",
"def update_data(self, data):\n start_time = data.index[-1].strftime(\"%Y-%m-%dT%H:%M:%S.000000Z\")\n temp_data = self.gather_data(start=start_time)\n temp_data = self._list_to_df(temp_data)\n if (len(temp_data) > 1):\n # temp_data[0] is the same as data[-1]\n out_data = data.append(temp_data[1:])\n return out_data",
"def mergeNodes(new, t1, t2):\n \n if t1 and t2:\n new.val = t1.val + t2.val\n elif not t1:\n new.val = t2.val\n elif not t2:\n new.val = t1.val",
"def merge(self, obj):\n pass",
"def get_query_results(sql_query, raw_info, column_fields= []):\n answer= {}\n #try:\n if True:\n if not connect():\n answer['type']= \"message\"\n answer['data']= \"Connection Problem.\"\n return answer\n cursor.execute('set profiling = 1')\n cursor.execute(sql_query) #query execution\n \"\"\"\n if answer['type']== 'graph':\n data = {}\n for row in cursor.fetchall():\n key, value, date = row\n _data = data.get(key, {'date':[], 'values':[], 'all':[]})\n _data['date'].append(str(date))\n _data['values'].append(float(value))\n _data['all'].append([date, float(value)])\n data[key] = _data\n\n final_data = []\n for k, v in data.items():\n v['key'] = k\n sorted_data = sorted(v['all'], key=lambda x: x[0])\n date, values = [], []\n for _date, _value in sorted_data:\n date.append(str(_date))\n values.append(_value)\n v['date'] = date\n v['values'] = values\n v.pop('all')\n final_data.append(v)\n answer['data']= final_data\"\"\"\n #else: # answer['type']== 'text'\n if True:\n data= []\n for row in cursor.fetchall():\n temp= []\n for r in row:\n temp.append(str(r))\n data.append(temp)\n answer['type']= 'text'\n answer['data']= data\n answer['format']= []\n for col in column_fields:\n answer['format'].append({'field': col, 'type': ''})\n if not answer['data']:\n answer['type']= \"message\"\n answer['data']= \"No records found.\"\n if 'format' in answer.keys():\n del answer['format']\n\n cursor.execute('set profiling= 0')\n cursor.execute(\"SELECT query_id, SUM(duration) FROM information_schema.profiling GROUP BY query_id ORDER BY query_id DESC LIMIT 1\")\n raw_info['execution_time']= float(cursor.fetchone()[1])\n #except Exception as e:\n if False:\n print('problem executing query')\n print(e, 'line:', sys.exc_info()[-1].tb_lineno)\n answer['type']= \"message\"\n answer['data']= \"No records found.\"\n if 'format' in answer.keys():\n del answer['format']\n if request and request.session['query_stack']:\n request.session['query_stack']= {}\n #finally:\n if True:\n return answer",
"def _from_db_object(nodegroup, db_nodegroup):\n for field in nodegroup.fields:\n nodegroup[field] = db_nodegroup[field]\n\n nodegroup.obj_reset_changes()\n return nodegroup",
"def archive_oss_data(self, lmtdb):\n\n dataset_names = [\n 'dataservers/cpuload',\n 'dataservers/memused',\n ]\n\n self.init_datasets(dataset_names, lmtdb.oss_names)\n\n # Now query the OSS_DATA table to get byte counts over the query time range\n results, columns = lmtdb.get_oss_data(self.query_start, self.query_end_plusplus)\n\n # Index the columns to speed up insertion of data\n col_map = {}\n try:\n for db_col in ['TIMESTAMP', 'OSS_ID', 'PCT_CPU', 'PCT_MEMORY']:\n col_map[db_col] = columns.index(db_col)\n except ValueError:\n raise ValueError(\"LMT database schema does not match expectation\")\n\n # Loop through all the results of the timeseries query\n for row in results:\n if isstr(row[col_map['TIMESTAMP']]):\n # SQLite stores timestamps as a unicode string\n timestamp = datetime.datetime.strptime(row[col_map['TIMESTAMP']],\n \"%Y-%m-%d %H:%M:%S\")\n else:\n # MySQL timestamps are automatically converted to datetime.datetime\n timestamp = row[col_map['TIMESTAMP']]\n target_name = lmtdb.oss_id_map[row[col_map['OSS_ID']]]\n for dataset_name in dataset_names:\n target_dbcol = self.config[dataset_name].get('column')\n # target_dbcol=PCT_CPU, target_name=snx11025n022\n if target_dbcol is not None:\n self[dataset_name].insert_element(\n timestamp,\n target_name,\n row[col_map[target_dbcol]])\n else:\n errmsg = \"%s in self.config but missing 'column' setting\" % dataset_name\n raise KeyError(errmsg)",
"def merge(self, graph):\n # keep previous self.filename\n # copy data\n for x in graph.data:\n self.data.append(x)\n # copy headers, unless already exists (is so, info is lost)\n for key in graph.headers:\n if key not in self.headers:\n self.headers.update({key: graph.headers[key]})\n # copy graphInfo, unless already exists (is so, info is lost)\n for key in graph.graphInfo:\n if key not in self.graphInfo:\n self.graphInfo.update({key: graph.graphInfo[key]})\n # copy sampleInfo, unless already exists (is so, info is lost)\n for key in graph.sampleInfo:\n if key not in self.sampleInfo:\n self.sampleInfo.update({key: graph.sampleInfo[key]})",
"def _before_stockpyle_deserialize(self, obj):\n \n # only merge SA objects\n if _is_sqlalchemy_object(obj):\n self.__session.merge(obj, load=False)",
"def merge(self):\n collapsable = self.findSameSubtrees()\n\n dummy = GraphiusNode(-1, None)\n for i, node in self.nodes.items():\n dummy.addNeighbor(node)\n\n # Perform the merge\n self.mergeHelper(dummy, collapsable)\n\n # Regenerate trees\n newNodes = self.dfs(dummy)\n assert(-1 not in newNodes)\n self.nodes = newNodes",
"def merge(self, other):\n from .dataset import Dataset\n\n if other is None:\n return self.to_dataset()\n else:\n other_vars = getattr(other, 'variables', other)\n coords = merge_coords_without_align([self.variables, other_vars])\n return Dataset._from_vars_and_coord_names(coords, set(coords))",
"def _merge_by_query(self, obj_dict):\n _res = self.__session.query(obj_dict[\"class\"]).filter_by(**obj_dict[\"query_dict\"]).first()\n\n if _res is None:\n self._add(obj_dict[\"instance\"])\n else:\n if hasattr(obj_dict[\"instance\"], 'attributes') and \\\n hasattr(obj_dict[\"instance\"], 'p_key'):\n for attr in obj_dict[\"instance\"].attributes:\n if attr not in obj_dict[\"instance\"].p_key:\n setattr(_res, attr, getattr(obj_dict[\"instance\"], attr))\n # updating the instance\n obj_dict[\"instance\"] = _res\n else:\n raise AttributeError(\"Class variable (attributes / p_key) not set for %s\" %\n (obj_dict[\"instance\"],))",
"def _build_from_chunks(self, data_node):\n result = ''\n\n if not data_node:\n return ''\n\n master_data = data_node[0]\n result = \"{}{}\".format(result, self._decode(master_data['value']))\n # if data is not in chunks, then return the first node's value\n if 'tags' not in master_data or 'chunks' not in master_data['tags']:\n return result\n\n # join the values in chunks\n last_chunk = int(master_data['tags']['chunks'])\n for chunk_id in range(1, last_chunk):\n slave_data = data_node[chunk_id]\n result = \"{}{}\".format(result, self._decode(slave_data['value']))\n return result",
"def _gather_deep_data(self):\n\n cleaned_data_from_website = list()\n\n for i, search_result in self.data_from_website.iterrows():\n cleaned_data_from_website.append(self._deep_data(search_result.url))\n\n cleaned_data_from_website = pd.DataFrame(cleaned_data_from_website)\n if len(cleaned_data_from_website) == 0:\n cleaned_data_from_website['@id'] = '0'\n cleaned_data_from_website.set_index('@id', inplace=True)\n self.data_from_website = cleaned_data_from_website",
"def merge_logs(self):\n ourlog = LogData()\n for l in self.data_set:\n ourlog.entries = ourlog.entries + l.entries\n ourlog.sort_time()\n self.finalized_data = ourlog",
"def _to_redis(self):\n\n # OSM ways and nodes tables\n self._gdf_to_redis(self._bbid + \"_ways\", self._ways, geometry='geometry')\n self._df_to_redis(self._bbid + \"_nodes\", self._nodes)\n\n # graph to graph nodes and edges tables (storing only ids and edge lengths)\n gdf_nodes, gdf_edges = osmnx.utils_graph.graph_to_gdfs(self._graph, node_geometry=False,\n fill_edge_geometry=False)\n self._gdf_to_redis(self._bbid + \"_graph_nodes\", gdf_nodes[['id']]) # ['id', 'x', 'y'] to store coordinates\n self._gdf_to_redis(self._bbid + \"_graph_edges\", gdf_edges[['id', 'length', 'u', 'v', 'key']])",
"def mergeAggregatedCsvData(self, contexts, obj, aggData1, aggData2):\n return aggData1 + aggData2"
] | [
"0.5318473",
"0.52193975",
"0.5193449",
"0.515354",
"0.5137922",
"0.50185084",
"0.49987668",
"0.49883923",
"0.4975488",
"0.4966561",
"0.49638265",
"0.49379689",
"0.48661035",
"0.48568356",
"0.48503768",
"0.4833968",
"0.48199612",
"0.4812925",
"0.4802843",
"0.47878385",
"0.47712696",
"0.47580692",
"0.47430667",
"0.47422996",
"0.4735941",
"0.4728958",
"0.472683",
"0.47210637",
"0.47130597",
"0.47112504"
] | 0.6787032 | 0 |
Returns a count of the number of unique metrics currently recorded for apdex, time and value metrics. | def metrics_count(self):
return len(self.__stats_table) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def number_of_running_metrics(self):\n try:\n return len(self.get_classads(\"OSGRSV==\\\"metrics\\\"\"))\n except TypeError:\n self.rsv.log(\"ERROR\", \"Classad parsing failed, unable to count running metrics\")",
"def metric_data_count(self):\n\n if not self.__settings:\n return 0\n\n return len(self.__stats_table)",
"def get_counts(self):\n self._update_counts()\n return self.failures, self.warnings, self.infos",
"def get_number_of_measurement(self):\n num_of_meas = 0\n for time in self.mdvtc.keys():\n num_of_meas = num_of_meas + self.mdvtc[time].get_number_of_measurement()\n #\n return num_of_meas",
"def device_count():\n apipath = \"/targets/devices\"\n url = SERVER + apipath\n params = {\n 'q': '(deviceType:ASA)',\n 'agg': 'count'}\n headers = {\n 'Accept': \"application/json\",\n 'Content-Type': \"application/json\",\n 'Authorization': \"bearer {}\".format(token)}\n response = requests.get(url, verify=False, stream=True, headers=headers, params=params)\n getstatuscode = response.status_code\n getresponse = response.json()\n if getstatuscode == 200:\n return getresponse\n else:\n response.raise_for_status()",
"def _fetch_count_metrics_and_clear(self):\n with self._count_rlock:\n count_metrics = self._count_metrics\n self._count_metrics = defaultdict(int)\n\n return count_metrics",
"def count(self) -> Tuple[groupable, pdarray]:\n repMsg = generic_msg(\n cmd=\"countReduction\",\n args={\"segments\": cast(pdarray, self.segments), \"size\": self.length},\n )\n self.logger.debug(repMsg)\n return self.unique_keys, create_pdarray(repMsg)",
"def get_total_counts(self):\n ret = {}\n all_loggers_count = 0\n for logger, name_map in self.acc_map.items():\n cur_logger_count = 0\n ret[logger.name] = {}\n for name, status_map in name_map.items():\n cur_name_count = 0\n ret[logger.name][name] = {}\n for status, acc in status_map.items():\n cur_count = acc.total_count\n ret[logger.name][name][status] = cur_count\n cur_name_count += cur_count\n cur_logger_count += cur_count\n all_loggers_count += cur_count\n ret[logger.name][name]['__all__'] = cur_name_count\n ret[logger.name]['__all__'] = cur_logger_count\n ret['__all__'] = all_loggers_count\n return ret",
"def get_count():\n _check_init()\n return _pypm.CountDevices()",
"def analyze_count(data):\n\n dsct_vk = pd.unique(data['vk'])\n dsct_itemid = pd.unique(data['itemid'])\n\n print 'number of user:', dsct_vk.shape\n print 'number of items:', dsct_itemid.shape\n print 'the number of ratings:', data.shape\n\n print 'unique actions:', pd.unique(data['action'])\n print 'the number of action 0:', np.sum(data['action'] == 0)\n print 'the number of action 1:', np.sum(data['action'] == 1)\n print 'the number of action 2:', np.sum(data['action'] == 2)\n print 'the number of action 3:', np.sum(data['action'] == 3)\n print 'the number of action 4:', np.sum(data['action'] == 4)\n \n time_range_item = data.groupby('itemid')['real_time'].aggregate(sum_unique)\n print 'Max Range:', np.max(time_range_item)\n print 'Mean Range:', np.mean(time_range_item)\n print 'Median Range:', np.median(time_range_item)",
"def count(self, key):\n self._metrics[key] += 1",
"def observation_count(self):\n if not self.can_update():\n self._handle_error(910, [self.type])\n return self.tc_requests.observation_count(\n self.api_type, self.api_branch, self.unique_id, owner=self.owner\n )",
"def get_metrics(self):\n self.logger.debug(\"Fetching metrics.\")\n return self._api_query(\"metrics\")['metrics']",
"async def test_nr_of_metrics(self):\n response = await self.collect(get_request_json_side_effect=[self.data_model, self.reports])\n self.assert_measurement(\n response,\n value=str(len(self.entities)),\n total=self.expected_software_metrics,\n entities=self.entities,\n )",
"def counts(self):\n return sum(self.counter.values()), len(self.visited)",
"def counts(self):\n return sum(self.counter.values()), len(self.visited)",
"def count(time):\n \n return len(events(time))",
"def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total",
"def count(self):\n nreq, nres = 0, 0\n for entry in self.__history:\n if entry.oreq is not None:\n nreq += 1\n if entry.ores is not None:\n nres += 1\n return nreq, nres",
"def test_get_time_summary_stats_counter():\n # This is constructed to test the parsing logic for timestamps, so the number don't\n # add up.\n runtime_profile = \"- ExampleTimeStats: (Avg: 161.554ms ; \" \\\n \"Min: 101.411us ; \" \\\n \"Max: 1h2m3s4ms5us6ns ; \" \\\n \"Number of samples: 6)\"\n summary_stats = get_time_summary_stats_counter(\"ExampleTimeStats\", runtime_profile)\n assert len(summary_stats) == 1\n assert summary_stats[0].sum == 969324000\n assert summary_stats[0].min_value == 101411\n assert summary_stats[0].max_value == 3723004005006\n assert summary_stats[0].total_num_values == 6",
"def gives_stats():\n dict_count = {\n \"amenities\": storage.count(Amenity),\n \"cities\": storage.count(City),\n \"places\": storage.count(Place),\n \"reviews\": storage.count(Review),\n \"states\": storage.count(State),\n \"users\": storage.count(User)\n }\n return jsonify(dict_count)",
"def get_metrics(self) -> Dict[str, base.Number]:\n return self._metrics",
"def get_count_overview():\n from app.core.api_views import Api\n api = Api()\n return api.getOverviewCount(\n db_name=LoggingDetails,\n field='success',\n key='logs',\n )",
"def metrics(self):\n\n return six.iteritems(self.__stats_table)",
"def test_counts(self):\n c = array([5,0,1,1,5,5])\n obs = counts(c)\n exp = array([1,2,0,0,0,3])\n self.assertEqual(obs, exp)\n d = array([2,2,1,0])\n obs = counts(d, obs)\n exp = array([2,3,2,0,0,3])\n self.assertEqual(obs, exp)",
"def count(timeseries):\n try:\n return timeseries[0].points[0].value.int64_value\n except (IndexError, AttributeError) as exception:\n LOGGER.warning(\"Couldn't find any values in timeseries response\")\n LOGGER.debug(exception)\n return 0 # no events in timeseries",
"def get_attendance_counts(attendance):\n count_a = 0\n count_p = 0\n count_d = 0\n for a in attendance:\n if a.ATT_STATUS == 'A':\n count_a+=1\n elif a.ATT_STATUS == 'D':\n count_d+=1\n elif a.ATT_STATUS == 'P':\n count_p+=1\n return (count_p,count_a,count_d)",
"def count():",
"def count(self):\n return self._reduce_for_stat_function(F.count, only_numeric=False)",
"def hits(self) -> Mapping[str, int]:\n if len(self._clock_starts) > 0:\n warnings.warn(\n \"Retrieved hit counts while clocks are still going, \"\n \"incomplete times are not included: \"\n f\"{list(self._clock_starts.keys())}\",\n RuntimeWarning,\n )\n return self._hit_count.copy()"
] | [
"0.6411366",
"0.6283042",
"0.62296087",
"0.6220417",
"0.61111647",
"0.6047972",
"0.60304636",
"0.5969979",
"0.59513307",
"0.5878805",
"0.5827968",
"0.5753369",
"0.5703473",
"0.56953144",
"0.56933933",
"0.56933933",
"0.5690032",
"0.5668702",
"0.56455106",
"0.5636696",
"0.56353694",
"0.5620147",
"0.56077564",
"0.5602681",
"0.5592418",
"0.5571603",
"0.5568807",
"0.55369514",
"0.5507133",
"0.5506029"
] | 0.6419223 | 0 |
Record a single apdex metric, merging the data with any data from prior apdex metrics with the same name. | def record_apdex_metric(self, metric):
if not self.__settings:
return
# Note that because we are using a scope here of an empty string
# we can potentially clash with an unscoped metric. Using None,
# although it may help to keep them separate in the agent will
# not make a difference to the data collector which treats None
# as an empty string anyway.
key = (metric.name, '')
stats = self.__stats_table.get(key)
if stats is None:
stats = ApdexStats(apdex_t=metric.apdex_t)
self.__stats_table[key] = stats
stats.merge_apdex_metric(metric)
return key | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def record_apdex_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_apdex_metric(metric)",
"def merge_apdex_metric(self, metric):\n\n self[0] += metric.satisfying\n self[1] += metric.tolerating\n self[2] += metric.frustrating\n\n self[3] = ((self[0] or self[1] or self[2]) and\n min(self[3], metric.apdex_t) or metric.apdex_t)\n self[4] = max(self[4], metric.apdex_t)",
"def write_metric(self, metric_name: str, metric_value: Union[float, int]):\n self._metrics.append(Metric(metric_name, metric_value))",
"def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)",
"def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]",
"def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})",
"def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))",
"def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))",
"def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)",
"def add_metric(self, metric):\n self.metrics.append(metric)\n self.estimate()",
"def log_metric(self, name: str, value):\n self.metrics[name] = value\n\n self._sync_log_event()",
"def _record(self, metric_point: MetricPoint,\n measurement_map: MeasurementMap):\n metric_name = metric_point.metric_name\n tags = metric_point.tags\n\n metric = self._registry.get(metric_name)\n # Metrics should be always registered dynamically.\n assert metric\n\n tag_map = tag_map_module.TagMap()\n for key, value in tags.items():\n tag_key = tag_key_module.TagKey(key)\n tag_value = tag_value_module.TagValue(value)\n tag_map.insert(tag_key, tag_value)\n\n metric_value = metric_point.value\n measurement_map.measure_float_put(metric.measure, metric_value)\n # NOTE: When we record this metric, timestamp will be renewed.\n measurement_map.record(tag_map)",
"def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)",
"def record_custom_metric(self, name, value):\n key = (name, '')\n\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(key)\n if stats is None:\n self.__stats_table[key] = new_stats\n else:\n stats.merge_stats(new_stats)\n\n return key",
"def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with self._lock:\n self._batch[identity] = value\n self._timestamps[identity] = int(time.time() * 1000.0)",
"def record_metrics(self, pid, metrics):\n\n for _metric, _metric_value in metrics.items():\n if not self.__metrics_history[pid].get(_metric):\n self.__metrics_history[pid][_metric] = []\n self.__metrics_history[pid][_metric].append(_metric_value)\n # only keep the last 2 running history for any metric\n self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][\n -2:\n ]",
"def record_metrics(metrics, args):\n with open('interpretation_metrics/model_metrics_{}'.format(args.file_num), 'a') as f:\n f.write(\"META DATA\\n\")\n f.write(\"---------\\n\")\n f.write(\"Model Name: {}\\n\".format(args.model_name))\n f.write(\"Attack Target: {}\\n\".format(args.attack_target))\n f.write(\"Gradient Model File: {}\\n\".format(args.gradient_model_file))\n f.write(\"Predictive Model File: {}\\n\".format(args.predictive_model_file))\n f.write(\"Cuda: {}\\n\".format(args.cuda))\n\n f.write(\"\\nSIMPLE GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSIMPLE GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSMOOTH GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSMOOTH GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nINTEGRATED GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nINTEGRATED GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))",
"def add_metric_class(self, metric: NNSimpleMetric):\n if metric.name not in self.metrics:\n self.metrics[metric.name] = metric",
"def add_metric(self, metric_name, aggregate=None):\n\n clean_metric = metric_name.lower().strip()\n\n if clean_metric.lower() not in METRICS:\n raise Exception(\"Metric named: \" + metric_name + \" is not a valid benchmark metric.\")\n self.metrics.add(clean_metric)\n\n if not aggregate:\n self.raw_metrics.add(clean_metric)\n elif aggregate.lower().strip() in AGGREGATES:\n # Add aggregate to this metric\n clean_aggregate = aggregate.lower().strip()\n current_aggregates = self.aggregated_metrics.get(clean_metric, list())\n current_aggregates.append(clean_aggregate)\n self.aggregated_metrics[clean_metric] = current_aggregates\n else:\n raise Exception(\"Aggregate function \" + aggregate + \" is not a legal aggregate function name\");\n\n return self;",
"def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)",
"def add_metrics(self, metric_dict: dict):\n self.metric_dict.update(metric_dict)",
"def log_metric(data_category, key, value):\n # always, just print in logs\n log(logging.INFO, data_category, \"AML Metric({}={})\".format(key, value))\n if data_category == DataCategory.ONLY_PUBLIC_DATA:\n # if public, ask azureml to record (if azureml attached)\n run = AmlRunWrapper()\n run.setup(attach=True)\n run.log(key, value)\n run.flush()",
"def record_time_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Scope is forced to be empty string if None as\n # scope of None is reserved for apdex metrics.\n\n key = (metric.name, metric.scope or '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = TimeStats(call_count=1,\n total_call_time=metric.duration,\n total_exclusive_call_time=metric.exclusive,\n min_call_time=metric.duration,\n max_call_time=metric.duration,\n sum_of_squares=metric.duration ** 2)\n self.__stats_table[key] = stats\n else:\n stats.merge_time_metric(metric)\n\n return key",
"def process(self, metric):\n self.metrics.append(metric)\n if self.should_flush():\n self._send()",
"def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)",
"def add_metric(self, metric_fn):\n self._metrics.append(metric_fn)",
"def record_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, value in metrics:\n self.record_custom_metric(name, value)",
"def __push_metric(self, metric_name, value, timestamp):\n sock = self.__get_carbon_socket()\n _data = \"%s %d %d\\n\" % (metric_name, value, timestamp)\n LOGGER.debug(\"SEND: %s\", _data.replace(\"\\n\", \"\"))\n sock.send(_data.encode('utf-8'))",
"def increment(self) -> None:\n self._increment_called = True\n self.append(deepcopy(self._base_metric))",
"def add_metric(self, metric_name, metric_value, login=False):\n if login:\n self._gc.login()\n\n try: \n if metric_name not in self._metric_dict:\n metric_index = len(self._metric_dict) + 2\n self._wks.update_cell(1, metric_index, metric_name)\n self._metric_dict[metric_name] = metric_index\n self.save_config()\n\n self._wks.update_cell(self.row_index, self._metric_dict[metric_name], metric_value)\n except Exception as ins:\n if not login:\n self.add_metric(metric_name, metric_value, login=True)\n else:\n return '\\n'.join([str(type(ins)), str(ins.args), str(ins)])\n return None"
] | [
"0.7010535",
"0.6634994",
"0.5894368",
"0.579922",
"0.57742566",
"0.5767345",
"0.5742764",
"0.5742764",
"0.56948036",
"0.5694517",
"0.56521934",
"0.564671",
"0.5620353",
"0.56121796",
"0.552094",
"0.53992915",
"0.5386446",
"0.52903515",
"0.5282726",
"0.52693814",
"0.5267927",
"0.52584195",
"0.5209942",
"0.5201727",
"0.51946783",
"0.51549935",
"0.5154828",
"0.5152201",
"0.5135652",
"0.5111745"
] | 0.74796903 | 0 |
Record the apdex metrics supplied by the iterable for a single transaction, merging the data with any data from prior apdex metrics with the same name. | def record_apdex_metrics(self, metrics):
if not self.__settings:
return
for metric in metrics:
self.record_apdex_metric(metric) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def record_transaction(self, transaction):\n\n if not self.__settings:\n return\n\n settings = self.__settings\n\n # Record the apdex, value and time metrics generated from the\n # transaction. Whether time metrics are reported as distinct\n # metrics or into a rollup is in part controlled via settings\n # for minimum number of unique metrics to be reported and thence\n # whether over a time threshold calculated as percentage of\n # overall request time, up to a maximum number of unique\n # metrics. This is intended to limit how many metrics are\n # reported for each transaction and try and cut down on an\n # explosion of unique metric names. The limits and thresholds\n # are applied after the metrics are reverse sorted based on\n # exclusive times for each metric. This ensures that the metrics\n # with greatest exclusive time are retained over those with\n # lesser time. Such metrics get reported into the performance\n # breakdown tab for specific web transactions.\n\n self.record_apdex_metrics(transaction.apdex_metrics(self))\n\n self.merge_custom_metrics(transaction.custom_metrics.metrics())\n\n self.record_time_metrics(transaction.time_metrics(self))\n\n # Capture any errors if error collection is enabled.\n # Only retain maximum number allowed per harvest.\n\n error_collector = settings.error_collector\n\n if (error_collector.enabled and settings.collect_errors and\n len(self.__transaction_errors) <\n settings.agent_limits.errors_per_harvest):\n self.__transaction_errors.extend(transaction.error_details())\n\n self.__transaction_errors = self.__transaction_errors[:\n settings.agent_limits.errors_per_harvest]\n\n if (error_collector.capture_events and\n error_collector.enabled and\n settings.collect_error_events):\n events = transaction.error_events(self.__stats_table)\n for event in events:\n self._error_events.add(event, priority=transaction.priority)\n\n # Capture any sql traces if transaction tracer enabled.\n\n if settings.slow_sql.enabled and settings.collect_traces:\n for node in transaction.slow_sql_nodes(self):\n self.record_slow_sql_node(node)\n\n # Remember as slowest transaction if transaction tracer\n # is enabled, it is over the threshold and slower than\n # any existing transaction seen for this period and in\n # the historical snapshot of slow transactions, plus\n # recording of transaction trace for this transaction\n # has not been suppressed.\n\n transaction_tracer = settings.transaction_tracer\n\n if (not transaction.suppress_transaction_trace and\n transaction_tracer.enabled and settings.collect_traces):\n\n # Transactions saved for Synthetics transactions\n # do not depend on the transaction threshold.\n\n self._update_synthetics_transaction(transaction)\n\n threshold = transaction_tracer.transaction_threshold\n\n if threshold is None:\n threshold = transaction.apdex_t * 4\n\n if transaction.duration >= threshold:\n self._update_slow_transaction(transaction)\n\n # Create the transaction event and add it to the\n # appropriate \"bucket.\" Synthetic requests are saved in one,\n # while transactions from regular requests are saved in another.\n\n if transaction.synthetics_resource_id:\n event = transaction.transaction_event(self.__stats_table)\n self._synthetics_events.add(event)\n\n elif (settings.collect_analytics_events and\n settings.transaction_events.enabled):\n\n event = transaction.transaction_event(self.__stats_table)\n self._transaction_events.add(event, priority=transaction.priority)\n\n # Merge in custom events\n\n if (settings.collect_custom_events 
and\n settings.custom_insights_events.enabled):\n self.custom_events.merge(transaction.custom_events)\n\n # Merge in span events\n\n if (settings.distributed_tracing.enabled and\n settings.span_events.enabled and settings.collect_span_events):\n if settings.infinite_tracing.enabled:\n for event in transaction.span_protos(settings):\n self._span_stream.put(event)\n elif transaction.sampled:\n for event in transaction.span_events(self.__settings):\n self._span_events.add(event, priority=transaction.priority)",
"def flush(self):\n with self._lock:\n batch = self._batch\n timestamps = self._timestamps\n\n items = []\n for identity, value in batch.items():\n metric = {}\n typ, name, tags = identity\n metric[\"name\"] = name\n if typ:\n metric[\"type\"] = typ\n else:\n metric[\"timestamp\"] = timestamps[identity]\n\n if tags:\n metric[\"attributes\"] = dict(tags)\n\n metric[\"value\"] = value\n items.append(metric)\n\n items = tuple(items)\n\n batch.clear()\n timestamps.clear()\n\n common = self._common.copy()\n common[\"timestamp\"] = self._interval_start\n now = int(time.time() * 1000.0)\n interval = now - self._interval_start\n common[\"interval.ms\"] = interval\n\n self._interval_start = now\n\n return items, common",
"def record_apdex_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Note that because we are using a scope here of an empty string\n # we can potentially clash with an unscoped metric. Using None,\n # although it may help to keep them separate in the agent will\n # not make a difference to the data collector which treats None\n # as an empty string anyway.\n\n key = (metric.name, '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = ApdexStats(apdex_t=metric.apdex_t)\n self.__stats_table[key] = stats\n stats.merge_apdex_metric(metric)\n\n return key",
"def log_batch(self, measurements):\n for m in measurements:\n logger.info(m)\n self.log(metric=m.metric, value=m.value, source=m.source, timestamp=m.timestamp)",
"def merge_apdex_metric(self, metric):\n\n self[0] += metric.satisfying\n self[1] += metric.tolerating\n self[2] += metric.frustrating\n\n self[3] = ((self[0] or self[1] or self[2]) and\n min(self[3], metric.apdex_t) or metric.apdex_t)\n self[4] = max(self[4], metric.apdex_t)",
"def _dispatch_metrics(self, payload):\n for item in payload:\n try:\n self._ingest.send(gauges=item['gauges'], counters=item['counters'])\n except Exception as e:\n self._logger.error(\"Exception while sending payload to ingest : {0}\".format(e))",
"def increment(self) -> None:\n self._increment_called = True\n self.append(deepcopy(self._base_metric))",
"def calculate_batch_metrics(self):\n pass",
"def apply_all_accumulators(self):\n self._require_state(\"APPLYING\")\n for mi in self._accums.keys():\n self._apply_one_accum_set(mi)",
"def accumulate(self, batch_pred_map_cls, batch_gt_map_cls):\n bsize = len(batch_pred_map_cls)\n assert bsize == len(batch_gt_map_cls)\n for i in range(bsize):\n self.gt_map_cls[self.scan_cnt] = batch_gt_map_cls[i]\n self.pred_map_cls[self.scan_cnt] = batch_pred_map_cls[i]\n self.scan_cnt += 1",
"def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])",
"def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)",
"def calculate_agrigate(self):\n self.total = 0.0\n for rec in self.data:\n self.total = self.total + rec[\"value\"]\n\n self.agrigate_data = {\n \"site\": self.site,\n \"utc\": self.timestamp_utc,\n \"local\": self.timestamp_local,\n \"tag\": \"TOTAL\",\n \"value\": round(self.total, 3)}\n self.data.append(self.agrigate_data)",
"def __apply_accumulators():\n self.__xdata = np.array([])\n self.__ydata = np.array([])\n for acc in self.signal_accumulators:\n self.__xdata = __array_append(self.__xdata,acc.attempt)\n self.__ydata = __array_append(self.__ydata,acc.count)\n self.__applied = True",
"def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)",
"def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)",
"def __init__(self, metrics_to_record):\n self.tape = {}\n\n for metric_name in metrics_to_record:\n self.tape[metric_name] = []",
"def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge",
"def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)",
"def record_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, value in metrics:\n self.record_custom_metric(name, value)",
"def get_next_batch(self):\n\n metrics = {}\n for struct in self.metrics.values():\n metrics = {**metrics, **struct.get_next_batch()}\n\n return metrics",
"def _average_training_metrics(\n self, per_batch_metrics: List[Dict[str, Any]]\n ) -> List[Dict[str, Any]]:\n check.true(self.hvd_config.use, \"Can only average training metrics in multi-GPU training.\")\n metrics_timeseries = util._list_to_dict(per_batch_metrics)\n\n # combined_timeseries is: dict[metric_name] -> 2d-array.\n # A measurement is accessed via combined_timeseries[metric_name][process_idx][batch_idx].\n combined_timeseries, _ = self._combine_metrics_across_processes(\n metrics_timeseries, num_batches=len(per_batch_metrics)\n )\n\n # If the value for a metric is a single-element array, the averaging process will\n # change that into just the element. We record what metrics are single-element arrays\n # so we can wrap them in an array later (for perfect compatibility with non-averaging\n # codepath).\n array_metrics = []\n for metric_name in per_batch_metrics[0].keys():\n if isinstance(per_batch_metrics[0][metric_name], np.ndarray):\n array_metrics.append(metric_name)\n\n if self.is_chief:\n combined_timeseries_type = Dict[str, List[List[Any]]]\n combined_timeseries = cast(combined_timeseries_type, combined_timeseries)\n num_batches = len(per_batch_metrics)\n num_processes = hvd.size()\n averaged_metrics_timeseries = {} # type: Dict[str, List]\n\n for metric_name in combined_timeseries.keys():\n averaged_metrics_timeseries[metric_name] = []\n for batch_idx in range(num_batches):\n batch = [\n combined_timeseries[metric_name][process_idx][batch_idx]\n for process_idx in range(num_processes)\n ]\n\n np_batch = np.array(batch)\n batch_avg = np.mean(np_batch[np_batch != None]) # noqa: E711\n if metric_name in array_metrics:\n batch_avg = np.array(batch_avg)\n averaged_metrics_timeseries[metric_name].append(batch_avg)\n per_batch_metrics = util._dict_to_list(averaged_metrics_timeseries)\n return per_batch_metrics",
"def update(self, current_iter, *metrics, **named_metrics):\n\n # Same order as __init__() in python>=3.6\n if len(metrics) > 0:\n for key, metric in zip(self.metrics.keys(), metrics):\n self.metrics[key].append((current_iter, metric))\n \n # Random order with names\n elif len(named_metrics) > 0:\n for name, metric in named_metrics.item():\n self.metrics[name].append((metric))\n\n else:\n raise ValueError(\"No valid value to update losses\")",
"def record_time_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_time_metric(metric)",
"def prepareAccumulatedMetrics(self):\n displayDF = analyzeMetricsDF(self.resultList)\n displayDF.to_csv(\"data/results.csv\")",
"def log_metrics(self, metrics: dict):\n self.metrics.update(metrics)\n\n self._sync_log_event()",
"def _aggregate_perf_data(perf_all_ordinals: List[str]):\n aggregate = {}\n\n pd = PerfData()\n for data in perf_all_ordinals:\n worker_pd = PerfData(**json.loads(data))\n if len(perf_all_ordinals) > 1:\n aggregate.setdefault(\"ordinals\", [])\n aggregate[\"ordinals\"].append(worker_pd.throughput_dict())\n\n pd.merge(worker_pd)\n\n aggregate.update(dataclasses.asdict(pd))\n return aggregate",
"def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)",
"async def update_trade_stats(self):\n\n summary_keys = [base for base in config['min_base_volumes']] + ['global']\n summaries = {\n key: {\n 'open_count': 0,\n 'buys': 0,\n 'rebuys': 0,\n 'sells': 0,\n 'collect_sells': 0,\n 'soft_stop_sells': 0,\n 'total_profit': 0.0,\n 'total_loss': 0.0,\n 'total_fees': 0.0,\n 'balancer_refills': 0,\n 'balancer_remits': 0,\n 'balancer_stop_losses': 0,\n 'balancer_profit': 0.0,\n 'balancer_loss': 0.0,\n 'balancer_fees': 0.0,\n } for key in summary_keys\n }\n\n for pair in self.trades:\n if pair not in self.trade_stats[self.time_prefix]:\n continue\n\n base = pair.split('-', 1)[0]\n open_count = len(self.trades[pair]['open'])\n\n summaries[base]['open_count'] += open_count\n summaries[base]['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries[base]['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries[base]['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries[base]['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries[base]['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries[base]['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries[base]['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries[base]['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries[base]['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries[base]['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries[base]['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries[base]['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries[base]['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n summaries['global']['open_count'] += open_count\n summaries['global']['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries['global']['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries['global']['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries['global']['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries['global']['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries['global']['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries['global']['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries['global']['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries['global']['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries['global']['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries['global']['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries['global']['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries['global']['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n for key in summaries:\n self.trade_stats[self.time_prefix][key]['buys'] = summaries[key]['buys']\n self.trade_stats[self.time_prefix][key]['rebuys'] = summaries[key]['rebuys']\n self.trade_stats[self.time_prefix][key]['sells'] = summaries[key]['sells']\n self.trade_stats[self.time_prefix][key]['collect_sells'] = 
summaries[key]['collect_sells']\n self.trade_stats[self.time_prefix][key]['soft_stop_sells'] = summaries[key]['soft_stop_sells']\n self.trade_stats[self.time_prefix][key]['total_profit'] = summaries[key]['total_profit']\n self.trade_stats[self.time_prefix][key]['total_loss'] = summaries[key]['total_loss']\n self.trade_stats[self.time_prefix][key]['total_fees'] = summaries[key]['total_fees']\n self.trade_stats[self.time_prefix][key]['balancer_refills'] = summaries[key]['balancer_refills']\n self.trade_stats[self.time_prefix][key]['balancer_remits'] = summaries[key]['balancer_remits']\n self.trade_stats[self.time_prefix][key]['balancer_profit'] = summaries[key]['balancer_profit']\n self.trade_stats[self.time_prefix][key]['balancer_loss'] = summaries[key]['balancer_loss']\n self.trade_stats[self.time_prefix][key]['balancer_fees'] = summaries[key]['balancer_fees']\n\n if summaries[key]['open_count'] > self.trade_stats[self.time_prefix][key]['most_open']:\n self.trade_stats[self.time_prefix][key]['most_open'] = summaries[key]['open_count']\n\n filter_items = [pair for pair in self.trades] + [base for base in config['min_base_volumes']] + ['global']\n self.save_attr('trade_stats', max_depth=2, filter_items=filter_items, filter_keys=[self.time_prefix])",
"def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics"
] | [
"0.6160698",
"0.56689644",
"0.5492042",
"0.53796333",
"0.5373829",
"0.5285188",
"0.52515113",
"0.52226347",
"0.522238",
"0.52220243",
"0.5142877",
"0.51234",
"0.5087667",
"0.50747657",
"0.50630915",
"0.5057553",
"0.5049021",
"0.50429595",
"0.5004636",
"0.49941427",
"0.49351588",
"0.49190253",
"0.49097806",
"0.4884444",
"0.48710936",
"0.48627272",
"0.48547217",
"0.48513654",
"0.48434678",
"0.4842337"
] | 0.6819747 | 0 |
Record a single time metric, merging the data with any data from prior time metrics with the same name and scope. | def record_time_metric(self, metric):
if not self.__settings:
return
# Scope is forced to be empty string if None as
# scope of None is reserved for apdex metrics.
key = (metric.name, metric.scope or '')
stats = self.__stats_table.get(key)
if stats is None:
stats = TimeStats(call_count=1,
total_call_time=metric.duration,
total_exclusive_call_time=metric.exclusive,
min_call_time=metric.duration,
max_call_time=metric.duration,
sum_of_squares=metric.duration ** 2)
self.__stats_table[key] = stats
else:
stats.merge_time_metric(metric)
return key | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def record_time_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_time_metric(metric)",
"def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)",
"def merge_time_metric(self, metric):\n\n self.merge_raw_time_metric(metric.duration, metric.exclusive)",
"def record_custom_metric(self, name, value):\n key = (name, '')\n\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(key)\n if stats is None:\n self.__stats_table[key] = new_stats\n else:\n stats.merge_stats(new_stats)\n\n return key",
"def record(self, time, increment):\n raise NotImplementedError(\"Abstract method not implemented.\")",
"def record_data(self, time, x, tau):\n\n self.t_values.append(np.copy(time))\n self.x_values.append(np.copy(x))\n self.tau_values.append(np.copy(tau))",
"def update_time(self):\n time_metrics = self._fetch_time_metrics_and_clear()\n self._logger.info('update_time. time_metrics = %s', build_metrics_times_data(time_metrics))",
"def record(self, time, increment):\n\n if time < self._initialTime:\n return\n\n if self._lastObsValue > self._max:\n self._max = self._lastObsValue\n if time == self._initialTime:\n self._min = self._lastObsValue\n elif self._lastObsValue < self._min:\n self._min = self._lastObsValue\n\n self._n += 1\n self._area += self._lastObsValue * (time - self._lastObsTime)\n self._areaSquared += (self._lastObsValue ** 2) * (time - self._lastObsTime)\n self._lastObsTime = time\n self._lastObsValue += increment",
"def _AddMetric(self, metric):\n machine = metric.machine_id\n time = metric.timestamp\n payload = DotDict(json.loads(metric.payload)).flatten()\n\n self.machines.add(machine)\n self.timestamps.add(time)\n for k in payload:\n if k not in self.counter_data:\n continue\n val = payload.get(k, None)\n if val is not None:\n self.counter_data[k].AddSample(machine, time, val)",
"def observe_first(self, env: dm_env.Environment, timestep: dm_env.TimeStep\n ) -> None:\n self._metrics = {}\n self._accumulate_metrics(env)",
"def record_metrics(self, pid, metrics):\n\n for _metric, _metric_value in metrics.items():\n if not self.__metrics_history[pid].get(_metric):\n self.__metrics_history[pid][_metric] = []\n self.__metrics_history[pid][_metric].append(_metric_value)\n # only keep the last 2 running history for any metric\n self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][\n -2:\n ]",
"def time_metrics(self, stats, root, parent):\n\n product = self.product\n operation = self.operation or 'other'\n target = self.target\n\n # Determine the scoped metric\n\n statement_metric_name = 'Datastore/statement/%s/%s/%s' % (product,\n target, operation)\n\n operation_metric_name = 'Datastore/operation/%s/%s' % (product,\n operation)\n\n if target:\n scoped_metric_name = statement_metric_name\n else:\n scoped_metric_name = operation_metric_name\n\n yield TimeMetric(name=scoped_metric_name, scope=root.path,\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped rollup metrics\n\n yield TimeMetric(name='Datastore/all', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/all' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n if root.type == 'WebTransaction':\n yield TimeMetric(name='Datastore/allWeb', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allWeb' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n else:\n yield TimeMetric(name='Datastore/allOther', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allOther' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped operation metric\n\n yield TimeMetric(name=operation_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped statement metric\n\n if target:\n yield TimeMetric(name=statement_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped instance Metric\n\n if self.instance_hostname and self.port_path_or_id:\n\n instance_metric_name = 'Datastore/instance/%s/%s/%s' % (product,\n self.instance_hostname, self.port_path_or_id)\n\n yield TimeMetric(name=instance_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)",
"def _push(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()",
"def record_gauge(self, name, value, tags=None):\n identity = self.create_identity(name, tags)\n with self._lock:\n self._batch[identity] = value\n self._timestamps[identity] = int(time.time() * 1000.0)",
"def _record(self, metric_point: MetricPoint,\n measurement_map: MeasurementMap):\n metric_name = metric_point.metric_name\n tags = metric_point.tags\n\n metric = self._registry.get(metric_name)\n # Metrics should be always registered dynamically.\n assert metric\n\n tag_map = tag_map_module.TagMap()\n for key, value in tags.items():\n tag_key = tag_key_module.TagKey(key)\n tag_value = tag_value_module.TagValue(value)\n tag_map.insert(tag_key, tag_value)\n\n metric_value = metric_point.value\n measurement_map.measure_float_put(metric.measure, metric_value)\n # NOTE: When we record this metric, timestamp will be renewed.\n measurement_map.record(tag_map)",
"def _fetch_time_metrics_and_clear(self):\n with self._time_rlock:\n time_metrics = self._time_metrics\n self._time_metrics = defaultdict(LatencyTracker)\n\n return time_metrics",
"def record_apdex_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Note that because we are using a scope here of an empty string\n # we can potentially clash with an unscoped metric. Using None,\n # although it may help to keep them separate in the agent will\n # not make a difference to the data collector which treats None\n # as an empty string anyway.\n\n key = (metric.name, '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = ApdexStats(apdex_t=metric.apdex_t)\n self.__stats_table[key] = stats\n stats.merge_apdex_metric(metric)\n\n return key",
"def build_metrics_times_data(time_metrics):\n return [{'name': name, 'latencies': latencies.get_latencies()}\n for name, latencies in iteritems(time_metrics)]",
"def put(self, metric, values, timestamp=None):\n if timestamp is None:\n timestamp = time.time()\n now_date = datetime.datetime.fromtimestamp(timestamp)\n\n if self.last is None:\n self.last = timestamp\n return\n\n self.last = timestamp\n\n values = [str(d) for d in [now_date, timestamp]+values]\n\n with open(self.filename, \"at\") as df:\n df.write(\"{}\\n\".format(\",\".join(values)))",
"def log(self, label, times, overlapping=False):\r\n self._timings.append(Timing(label, times, overlapping))",
"def add_metric(self, metric: str):\n if metric not in self.metrics:\n self.metrics[metric] = self.creator.create_metric(metric)",
"def log_metric(self, name: str, value):\n self.metrics[name] = value\n\n self._sync_log_event()",
"def save_metric(key, value, timestamp=None):\n\n from analytics_client.settings import _ANALYTICS_ENABLED\n\n if not _ANALYTICS_ENABLED:\n return None\n\n from analytics_client.tasks import store_metric\n\n # Set a timestamp if it is undefined\n _timestamp = timestamp\n if _timestamp is None:\n _timestamp = datetime.now()\n\n store_metric.delay(Metric(key=key, value=value, timestamp=_timestamp))",
"def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]",
"def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def add_time(data, t):\n data['year'] = t.year\n data['month'] = t.month\n data['day'] = t.day\n data['hour'] = t.hour\n data['minute'] = t.minute\n data['second'] = t.second",
"def _record_current_time(self):\n now = time.time()\n delta = now - self._last_time\n self._last_time = now\n self._timing_recorder.append(delta)",
"def log(self, metric, value, source, timestamp=None):\n if timestamp is None:\n timestamp = datetime.now()\n\n sql = \"insert into measurement(metric, value, source, timestamp) values('{0}', {1}, '{2}', '{3}');\".format(\n metric, value, source, timestamp)\n\n self._execute_sql(sql)",
"def submit_metric():\n\n gson = json.loads(request.get_json())\n\n new_point = DataPoint(\n computer_name=gson[\"computer_name\"],\n cpu_percentage=gson[\"cpu_percentage\"],\n memory_percentage=gson[\"memory_percentage\"],\n timestamp=gson[\"timestamp\"]\n )\n\n with lock:\n if not instances.get(new_point.computer_name):\n instances[new_point.computer_name] = Timeline(\n maxsize=int(os.environ.get(\"COLLECTOR_BUFFER_SIZE\"))\n )\n instances[new_point.computer_name].append(new_point)\n\n return Response(status=200)"
] | [
"0.6278324",
"0.62258005",
"0.6140665",
"0.6003981",
"0.58658415",
"0.5658924",
"0.5652999",
"0.5612092",
"0.5606227",
"0.5472987",
"0.5470835",
"0.54608715",
"0.5427048",
"0.53969103",
"0.53553826",
"0.533382",
"0.52751833",
"0.5262168",
"0.52570385",
"0.52540565",
"0.5227859",
"0.52182734",
"0.5213903",
"0.51284367",
"0.5117981",
"0.5115526",
"0.51100886",
"0.51041216",
"0.50835633",
"0.5070457"
] | 0.7506857 | 0 |
Record the time metrics supplied by the iterable for a single transaction, merging the data with any data from prior time metrics with the same name and scope. | def record_time_metrics(self, metrics):
    if not self.__settings:
        return

    for metric in metrics:
        self.record_time_metric(metric) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def flush(self):\n with self._lock:\n batch = self._batch\n timestamps = self._timestamps\n\n items = []\n for identity, value in batch.items():\n metric = {}\n typ, name, tags = identity\n metric[\"name\"] = name\n if typ:\n metric[\"type\"] = typ\n else:\n metric[\"timestamp\"] = timestamps[identity]\n\n if tags:\n metric[\"attributes\"] = dict(tags)\n\n metric[\"value\"] = value\n items.append(metric)\n\n items = tuple(items)\n\n batch.clear()\n timestamps.clear()\n\n common = self._common.copy()\n common[\"timestamp\"] = self._interval_start\n now = int(time.time() * 1000.0)\n interval = now - self._interval_start\n common[\"interval.ms\"] = interval\n\n self._interval_start = now\n\n return items, common",
"def time_metrics(self, stats, root, parent):\n\n product = self.product\n operation = self.operation or 'other'\n target = self.target\n\n # Determine the scoped metric\n\n statement_metric_name = 'Datastore/statement/%s/%s/%s' % (product,\n target, operation)\n\n operation_metric_name = 'Datastore/operation/%s/%s' % (product,\n operation)\n\n if target:\n scoped_metric_name = statement_metric_name\n else:\n scoped_metric_name = operation_metric_name\n\n yield TimeMetric(name=scoped_metric_name, scope=root.path,\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped rollup metrics\n\n yield TimeMetric(name='Datastore/all', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/all' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n if root.type == 'WebTransaction':\n yield TimeMetric(name='Datastore/allWeb', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allWeb' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n else:\n yield TimeMetric(name='Datastore/allOther', scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n yield TimeMetric(name='Datastore/%s/allOther' % product, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped operation metric\n\n yield TimeMetric(name=operation_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped statement metric\n\n if target:\n yield TimeMetric(name=statement_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)\n\n # Unscoped instance Metric\n\n if self.instance_hostname and self.port_path_or_id:\n\n instance_metric_name = 'Datastore/instance/%s/%s/%s' % (product,\n self.instance_hostname, self.port_path_or_id)\n\n yield TimeMetric(name=instance_metric_name, scope='',\n duration=self.duration, exclusive=self.exclusive)",
"def record_time_metric(self, metric):\n\n if not self.__settings:\n return\n\n # Scope is forced to be empty string if None as\n # scope of None is reserved for apdex metrics.\n\n key = (metric.name, metric.scope or '')\n stats = self.__stats_table.get(key)\n if stats is None:\n stats = TimeStats(call_count=1,\n total_call_time=metric.duration,\n total_exclusive_call_time=metric.exclusive,\n min_call_time=metric.duration,\n max_call_time=metric.duration,\n sum_of_squares=metric.duration ** 2)\n self.__stats_table[key] = stats\n else:\n stats.merge_time_metric(metric)\n\n return key",
"def record_transaction(self, transaction):\n\n if not self.__settings:\n return\n\n settings = self.__settings\n\n # Record the apdex, value and time metrics generated from the\n # transaction. Whether time metrics are reported as distinct\n # metrics or into a rollup is in part controlled via settings\n # for minimum number of unique metrics to be reported and thence\n # whether over a time threshold calculated as percentage of\n # overall request time, up to a maximum number of unique\n # metrics. This is intended to limit how many metrics are\n # reported for each transaction and try and cut down on an\n # explosion of unique metric names. The limits and thresholds\n # are applied after the metrics are reverse sorted based on\n # exclusive times for each metric. This ensures that the metrics\n # with greatest exclusive time are retained over those with\n # lesser time. Such metrics get reported into the performance\n # breakdown tab for specific web transactions.\n\n self.record_apdex_metrics(transaction.apdex_metrics(self))\n\n self.merge_custom_metrics(transaction.custom_metrics.metrics())\n\n self.record_time_metrics(transaction.time_metrics(self))\n\n # Capture any errors if error collection is enabled.\n # Only retain maximum number allowed per harvest.\n\n error_collector = settings.error_collector\n\n if (error_collector.enabled and settings.collect_errors and\n len(self.__transaction_errors) <\n settings.agent_limits.errors_per_harvest):\n self.__transaction_errors.extend(transaction.error_details())\n\n self.__transaction_errors = self.__transaction_errors[:\n settings.agent_limits.errors_per_harvest]\n\n if (error_collector.capture_events and\n error_collector.enabled and\n settings.collect_error_events):\n events = transaction.error_events(self.__stats_table)\n for event in events:\n self._error_events.add(event, priority=transaction.priority)\n\n # Capture any sql traces if transaction tracer enabled.\n\n if settings.slow_sql.enabled and settings.collect_traces:\n for node in transaction.slow_sql_nodes(self):\n self.record_slow_sql_node(node)\n\n # Remember as slowest transaction if transaction tracer\n # is enabled, it is over the threshold and slower than\n # any existing transaction seen for this period and in\n # the historical snapshot of slow transactions, plus\n # recording of transaction trace for this transaction\n # has not been suppressed.\n\n transaction_tracer = settings.transaction_tracer\n\n if (not transaction.suppress_transaction_trace and\n transaction_tracer.enabled and settings.collect_traces):\n\n # Transactions saved for Synthetics transactions\n # do not depend on the transaction threshold.\n\n self._update_synthetics_transaction(transaction)\n\n threshold = transaction_tracer.transaction_threshold\n\n if threshold is None:\n threshold = transaction.apdex_t * 4\n\n if transaction.duration >= threshold:\n self._update_slow_transaction(transaction)\n\n # Create the transaction event and add it to the\n # appropriate \"bucket.\" Synthetic requests are saved in one,\n # while transactions from regular requests are saved in another.\n\n if transaction.synthetics_resource_id:\n event = transaction.transaction_event(self.__stats_table)\n self._synthetics_events.add(event)\n\n elif (settings.collect_analytics_events and\n settings.transaction_events.enabled):\n\n event = transaction.transaction_event(self.__stats_table)\n self._transaction_events.add(event, priority=transaction.priority)\n\n # Merge in custom events\n\n if (settings.collect_custom_events 
and\n settings.custom_insights_events.enabled):\n self.custom_events.merge(transaction.custom_events)\n\n # Merge in span events\n\n if (settings.distributed_tracing.enabled and\n settings.span_events.enabled and settings.collect_span_events):\n if settings.infinite_tracing.enabled:\n for event in transaction.span_protos(settings):\n self._span_stream.put(event)\n elif transaction.sampled:\n for event in transaction.span_events(self.__settings):\n self._span_events.add(event, priority=transaction.priority)",
"def build_metrics_times_data(time_metrics):\n return [{'name': name, 'latencies': latencies.get_latencies()}\n for name, latencies in iteritems(time_metrics)]",
"async def update_trade_stats(self):\n\n summary_keys = [base for base in config['min_base_volumes']] + ['global']\n summaries = {\n key: {\n 'open_count': 0,\n 'buys': 0,\n 'rebuys': 0,\n 'sells': 0,\n 'collect_sells': 0,\n 'soft_stop_sells': 0,\n 'total_profit': 0.0,\n 'total_loss': 0.0,\n 'total_fees': 0.0,\n 'balancer_refills': 0,\n 'balancer_remits': 0,\n 'balancer_stop_losses': 0,\n 'balancer_profit': 0.0,\n 'balancer_loss': 0.0,\n 'balancer_fees': 0.0,\n } for key in summary_keys\n }\n\n for pair in self.trades:\n if pair not in self.trade_stats[self.time_prefix]:\n continue\n\n base = pair.split('-', 1)[0]\n open_count = len(self.trades[pair]['open'])\n\n summaries[base]['open_count'] += open_count\n summaries[base]['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries[base]['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries[base]['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries[base]['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries[base]['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries[base]['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries[base]['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries[base]['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries[base]['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries[base]['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries[base]['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries[base]['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries[base]['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n summaries['global']['open_count'] += open_count\n summaries['global']['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries['global']['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries['global']['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries['global']['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries['global']['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries['global']['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries['global']['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries['global']['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries['global']['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries['global']['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries['global']['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries['global']['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries['global']['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n for key in summaries:\n self.trade_stats[self.time_prefix][key]['buys'] = summaries[key]['buys']\n self.trade_stats[self.time_prefix][key]['rebuys'] = summaries[key]['rebuys']\n self.trade_stats[self.time_prefix][key]['sells'] = summaries[key]['sells']\n self.trade_stats[self.time_prefix][key]['collect_sells'] = 
summaries[key]['collect_sells']\n self.trade_stats[self.time_prefix][key]['soft_stop_sells'] = summaries[key]['soft_stop_sells']\n self.trade_stats[self.time_prefix][key]['total_profit'] = summaries[key]['total_profit']\n self.trade_stats[self.time_prefix][key]['total_loss'] = summaries[key]['total_loss']\n self.trade_stats[self.time_prefix][key]['total_fees'] = summaries[key]['total_fees']\n self.trade_stats[self.time_prefix][key]['balancer_refills'] = summaries[key]['balancer_refills']\n self.trade_stats[self.time_prefix][key]['balancer_remits'] = summaries[key]['balancer_remits']\n self.trade_stats[self.time_prefix][key]['balancer_profit'] = summaries[key]['balancer_profit']\n self.trade_stats[self.time_prefix][key]['balancer_loss'] = summaries[key]['balancer_loss']\n self.trade_stats[self.time_prefix][key]['balancer_fees'] = summaries[key]['balancer_fees']\n\n if summaries[key]['open_count'] > self.trade_stats[self.time_prefix][key]['most_open']:\n self.trade_stats[self.time_prefix][key]['most_open'] = summaries[key]['open_count']\n\n filter_items = [pair for pair in self.trades] + [base for base in config['min_base_volumes']] + ['global']\n self.save_attr('trade_stats', max_depth=2, filter_items=filter_items, filter_keys=[self.time_prefix])",
"def with_time(self):\n if self.time_slices is None:\n raise FeatureError(\"Feature has no time reference.\")\n\n for i, datum in enumerate(self.data[self.name]):\n yield (self.time_slices[i], datum)",
"def _fetch_time_metrics_and_clear(self):\n with self._time_rlock:\n time_metrics = self._time_metrics\n self._time_metrics = defaultdict(LatencyTracker)\n\n return time_metrics",
"def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])",
"def record_custom_metric(self, name, value):\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(name)\n if stats is None:\n self.__stats_table[name] = new_stats\n else:\n stats.merge_stats(new_stats)",
"def update_time(self):\n time_metrics = self._fetch_time_metrics_and_clear()\n self._logger.info('update_time. time_metrics = %s', build_metrics_times_data(time_metrics))",
"def with_time(self):\n key = list(self.keys())[0]\n length = len(self[key])\n time_slices = self[key].time_slices\n\n if time_slices is None:\n raise FeatureError(\"FeatureCollection has no time reference.\")\n\n for i in range(length):\n res = {}\n for key, feature in self.items():\n res[key] = feature.data[feature.name][i]\n yield (time_slices[i], res)",
"def _push(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()",
"def merge_logs(self):\n ourlog = LogData()\n for l in self.data_set:\n ourlog.entries = ourlog.entries + l.entries\n ourlog.sort_time()\n self.finalized_data = ourlog",
"def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def collect(self, revisions):\n nr_revisions = len(revisions)\n estimate = TimeEstimator(nr_revisions)\n for index, revision_number in enumerate(revisions):\n last_measurement = self.__get_last_measurement(revision_number)\n self.__write_measurement(last_measurement)\n self.__last_revision.set(revision_number)\n logging.info('Revision: %s, %s/%s, measurement date: %s, time remaining: %s', revision_number, index + 1,\n nr_revisions, self.__get_date(last_measurement), estimate.time_remaining(index))",
"def record_all(self):\n for i in self.recorders:\n t = i[0]\n r = i[1]\n self.add_row(t, r())",
"def record_event_times(counter, event_times):\n # type: (event_counter.EventCounter, List[float]) -> None\n for event_time in event_times:\n with mock_time(event_time):\n counter.record_event()",
"def record_data(self, time, x, tau):\n\n self.t_values.append(np.copy(time))\n self.x_values.append(np.copy(x))\n self.tau_values.append(np.copy(tau))",
"def generate_record(self, data_dictionaries, group_by):\n result = {}\n\n for one_measurement in data_dictionaries:\n time = one_measurement['datetime']\n\n if isinstance(time, str):\n if self.timezone:\n time = arrow.get(time).shift(hours=6) # TODO: fix utc conversion\n else:\n time = arrow.get(time)\n\n record = Record(self.name, self.lat, self.lon, self.height, time)\n\n del one_measurement['datetime']\n\n one_measurement = {k: float(v) for k, v in one_measurement.items()}\n\n record.merge(one_measurement)\n\n key = group_by(time)\n \n if key == '2016-04-01_00':\n break\n\n record_string = record.little_r_report()\n\n try:\n result[key].append(record_string)\n except KeyError:\n result[key] = [record_string]\n\n return result",
"def log_batch(self, measurements):\n for m in measurements:\n logger.info(m)\n self.log(metric=m.metric, value=m.value, source=m.source, timestamp=m.timestamp)",
"def Analyze(self):\n \n self._analyzeLogs()\n for user in self._start_times:\n self._result[user] = self._zipTimes(user)",
"def add(self, timing_dict: Dict[str, float]):\n self._timings.append(timing_dict)\n if not self.steps:\n self.steps = list(timing_dict.keys())",
"def spending_over_time_test_data():\n for i in range(30):\n # Define some values that are calculated and used multiple times\n transaction_id = i\n award_id = i + 1000\n awarding_agency_id = i + 2000\n toptier_awarding_agency_id = i + 3000\n subtier_awarding_agency_id = i + 4000\n funding_agency_id = i + 5000\n toptier_funding_agency_id = i + 6000\n subtier_funding_agency_id = i + 7000\n federal_action_obligation = i + 8000\n total_obligation = i + 9000\n federal_account_id = i + 10000\n treasury_account_id = i + 11000\n\n action_date = f\"20{i % 10 + 10}-{i % 9 + 1}-{i % 28 + 1}\"\n action_date_obj = datetime.datetime.strptime(action_date, \"%Y-%m-%d\")\n fiscal_month = generate_fiscal_month(action_date_obj)\n fiscal_year = generate_fiscal_year(action_date_obj)\n fiscal_action_date = f\"{fiscal_year}-{fiscal_month}-{i % 28 + 1}\"\n contract_award_type = [\"A\", \"B\", \"C\", \"D\"][i % 4]\n grant_award_type = [\"02\", \"03\", \"04\", \"05\"][i % 4]\n is_fpds = i % 2 == 0\n\n # Award\n baker.make(\n \"search.AwardSearch\",\n award_id=award_id,\n fain=f\"fain_{transaction_id}\" if not is_fpds else None,\n is_fpds=is_fpds,\n latest_transaction_id=transaction_id,\n piid=f\"piid_{transaction_id}\" if is_fpds else None,\n total_obligation=total_obligation,\n type=contract_award_type if is_fpds else grant_award_type,\n action_date=\"2020-01-01\",\n )\n\n # Federal, Treasury, and Financial Accounts\n baker.make(\n \"accounts.FederalAccount\",\n id=federal_account_id,\n parent_toptier_agency_id=toptier_awarding_agency_id,\n account_title=f\"federal_account_title_{transaction_id}\",\n federal_account_code=f\"federal_account_code_{transaction_id}\",\n )\n baker.make(\n \"accounts.TreasuryAppropriationAccount\",\n agency_id=f\"taa_aid_{transaction_id}\",\n allocation_transfer_agency_id=f\"taa_ata_{transaction_id}\",\n availability_type_code=f\"taa_a_{transaction_id}\",\n beginning_period_of_availability=f\"taa_bpoa_{transaction_id}\",\n ending_period_of_availability=f\"taa_epoa_{transaction_id}\",\n federal_account_id=federal_account_id,\n main_account_code=f\"taa_main_{transaction_id}\",\n sub_account_code=f\"taa_sub_{transaction_id}\",\n treasury_account_identifier=treasury_account_id,\n )\n tas_components = [\n f\"aid=taa_aid_{transaction_id}\"\n f\"main=taa_main_{transaction_id}\"\n f\"ata=taa_ata_{transaction_id}\"\n f\"sub=taa_sub_{transaction_id}\"\n f\"bpoa=taa_bpoa_{transaction_id}\"\n f\"epoa=taa_epoa_{transaction_id}\"\n f\"a=taa_a_{transaction_id}\"\n ]\n baker.make(\"awards.FinancialAccountsByAwards\", award_id=award_id, treasury_account_id=treasury_account_id)\n\n # Awarding Agency\n baker.make(\n \"references.Agency\",\n id=awarding_agency_id,\n subtier_agency_id=subtier_awarding_agency_id,\n toptier_agency_id=toptier_awarding_agency_id,\n )\n baker.make(\n \"references.ToptierAgency\",\n abbreviation=f\"toptier_awarding_agency_abbreviation_{transaction_id}\",\n name=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n toptier_agency_id=toptier_awarding_agency_id,\n toptier_code=f\"toptier_awarding_agency_code_{transaction_id}\",\n )\n baker.make(\n \"references.SubtierAgency\",\n abbreviation=f\"subtier_awarding_agency_abbreviation_{transaction_id}\",\n name=f\"subtier_awarding_agency_agency_name_{transaction_id}\",\n subtier_agency_id=subtier_awarding_agency_id,\n subtier_code=f\"subtier_awarding_agency_code_{transaction_id}\",\n )\n\n # Funding Agency\n baker.make(\n \"references.Agency\",\n id=funding_agency_id,\n subtier_agency_id=subtier_funding_agency_id,\n 
toptier_agency_id=toptier_funding_agency_id,\n )\n baker.make(\n \"references.ToptierAgency\",\n abbreviation=f\"toptier_funding_agency_abbreviation_{transaction_id}\",\n name=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n toptier_agency_id=toptier_funding_agency_id,\n toptier_code=f\"toptier_funding_agency_code_{transaction_id}\",\n )\n baker.make(\n \"references.SubtierAgency\",\n abbreviation=f\"subtier_funding_agency_abbreviation_{transaction_id}\",\n name=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n subtier_agency_id=subtier_funding_agency_id,\n subtier_code=f\"subtier_funding_agency_code_{transaction_id}\",\n )\n\n # Ref Country Code\n baker.make(\"references.RefCountryCode\", country_code=\"USA\", country_name=\"UNITED STATES\")\n\n # FPDS / FABS\n if is_fpds:\n baker.make(\n \"search.TransactionSearch\",\n transaction_id=transaction_id,\n is_fpds=is_fpds,\n action_date=action_date,\n fiscal_year=fiscal_year,\n fiscal_action_date=fiscal_action_date,\n award_id=award_id,\n awarding_agency_id=awarding_agency_id,\n business_categories=[f\"business_category_1_{transaction_id}\", f\"business_category_2_{transaction_id}\"],\n transaction_description=f\"This is a test description {transaction_id}\"\n if transaction_id % 2 == 0\n else None,\n federal_action_obligation=federal_action_obligation,\n generated_pragmatic_obligation=federal_action_obligation,\n award_amount=total_obligation,\n funding_agency_id=funding_agency_id,\n type=contract_award_type if is_fpds else grant_award_type,\n awarding_agency_code=f\"toptier_awarding_agency_code_{transaction_id}\",\n awarding_toptier_agency_name=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n awarding_toptier_agency_abbreviation=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n funding_agency_code=f\"toptier_funding_agency_code_{transaction_id}\",\n funding_toptier_agency_name=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n funding_toptier_agency_abbreviation=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n awarding_sub_tier_agency_c=f\"subtier_awarding_agency_code_{transaction_id}\",\n awarding_subtier_agency_name=f\"subtier_awarding_agency_agency_name_{transaction_id}\",\n funding_sub_tier_agency_co=f\"subtier_funding_agency_code_{transaction_id}\",\n funding_subtier_agency_name=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n funding_subtier_agency_abbreviation=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n recipient_name=f\"recipient_name_{transaction_id}\",\n recipient_unique_id=f\"{transaction_id:09d}\",\n recipient_hash=\"c687823d-10af-701b-1bad-650c6e680190\" if transaction_id == 21 else None,\n recipient_levels=[\"R\"] if i == 21 else [],\n extent_competed=f\"extent_competed_{transaction_id}\",\n recipient_location_country_code=\"USA\",\n recipient_location_country_name=\"USA\",\n recipient_location_state_code=f\"LE_STATE_CODE_{transaction_id}\",\n recipient_location_county_code=f\"{transaction_id:03d}\",\n recipient_location_county_name=f\"LE_COUNTY_NAME_{transaction_id}\",\n recipient_location_congressional_code=f\"{transaction_id:02d}\",\n recipient_location_zip5=f\"LE_ZIP5_{transaction_id}\",\n recipient_location_city_name=f\"LE_CITY_NAME_{transaction_id}\",\n naics_code=f\"{transaction_id}{transaction_id}\",\n naics_description=f\"naics_description_{transaction_id}\",\n piid=f\"piid_{transaction_id}\",\n pop_country_code=\"USA\",\n pop_country_name=\"UNITED STATES\",\n pop_state_code=f\"POP_STATE_CODE_{transaction_id}\",\n 
pop_county_code=f\"{transaction_id:03d}\",\n pop_county_name=f\"POP_COUNTY_NAME_{transaction_id}\",\n pop_zip5=f\"POP_ZIP5_{transaction_id}\",\n pop_congressional_code=f\"{transaction_id:02d}\",\n pop_city_name=f\"POP_CITY_NAME_{transaction_id}\",\n product_or_service_code=str(transaction_id).zfill(4),\n product_or_service_description=f\"psc_description_{transaction_id}\",\n type_of_contract_pricing=f\"type_of_contract_pricing_{transaction_id}\",\n type_set_aside=f\"type_set_aside_{transaction_id}\",\n tas_components=tas_components,\n )\n baker.make(\n \"references.NAICS\",\n code=f\"{transaction_id}\",\n description=f\"naics_description_{transaction_id}\",\n )\n baker.make(\n \"references.PSC\", code=str(transaction_id).zfill(4), description=f\"psc_description_{transaction_id}\"\n )\n else:\n baker.make(\n \"search.TransactionSearch\",\n transaction_id=transaction_id,\n is_fpds=is_fpds,\n action_date=action_date,\n fiscal_year=fiscal_year,\n fiscal_action_date=fiscal_action_date,\n award_id=award_id,\n awarding_agency_id=awarding_agency_id,\n business_categories=[f\"business_category_1_{transaction_id}\", f\"business_category_2_{transaction_id}\"],\n transaction_description=f\"This is a test description {transaction_id}\"\n if transaction_id % 2 == 0\n else None,\n federal_action_obligation=federal_action_obligation,\n generated_pragmatic_obligation=federal_action_obligation,\n award_amount=total_obligation,\n funding_agency_id=funding_agency_id,\n type=contract_award_type if is_fpds else grant_award_type,\n awarding_agency_code=f\"toptier_awarding_agency_code_{transaction_id}\",\n awarding_toptier_agency_name=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n awarding_toptier_agency_abbreviation=f\"toptier_awarding_agency_agency_name_{transaction_id}\",\n funding_agency_code=f\"toptier_funding_agency_code_{transaction_id}\",\n funding_toptier_agency_name=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n funding_toptier_agency_abbreviation=f\"toptier_funding_agency_agency_name_{transaction_id}\",\n awarding_sub_tier_agency_c=f\"subtier_awarding_agency_code_{transaction_id}\",\n awarding_subtier_agency_name=f\"subtier_awarding_agency_agency_name_{transaction_id}\",\n funding_sub_tier_agency_co=f\"subtier_funding_agency_code_{transaction_id}\",\n funding_subtier_agency_name=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n funding_subtier_agency_abbreviation=f\"subtier_funding_agency_agency_name_{transaction_id}\",\n recipient_name=f\"recipient_name_{transaction_id}\",\n recipient_unique_id=f\"{transaction_id:09d}\",\n recipient_hash=\"c687823d-10af-701b-1bad-650c6e680190\" if transaction_id == 21 else None,\n recipient_levels=[\"R\"] if i == 21 else [],\n cfda_number=f\"cfda_number_{transaction_id}\",\n fain=f\"fain_{transaction_id}\",\n recipient_location_country_code=\"USA\",\n recipient_location_country_name=\"USA\",\n recipient_location_state_code=f\"LE_STATE_CODE_{transaction_id}\",\n recipient_location_county_code=f\"{transaction_id:03d}\",\n recipient_location_county_name=f\"LE_COUNTY_NAME_{transaction_id}\",\n recipient_location_congressional_code=f\"{transaction_id:02d}\",\n recipient_location_zip5=f\"LE_ZIP5_{transaction_id}\",\n recipient_location_city_name=f\"LE_CITY_NAME_{transaction_id}\",\n pop_country_code=\"USA\",\n pop_country_name=\"UNITED STATES\",\n pop_state_code=f\"POP_STATE_CODE_{transaction_id}\",\n pop_county_code=f\"{transaction_id:03d}\",\n pop_county_name=f\"POP_COUNTY_NAME_{transaction_id}\",\n 
pop_zip5=f\"POP_ZIP5_{transaction_id}\",\n pop_congressional_code=f\"{transaction_id:02d}\",\n pop_city_name=f\"POP_CITY_NAME{transaction_id}\",\n tas_components=tas_components,\n )",
"def aggregate_historical_trades(self, pair: list):\n raise NotImplementedError",
"def align_time_frames(self, dict, name, freq_unit, group_unit='mean'):\n\t\tagg_dict = {}\n\t\tindex_use = dict[name]\n\t\tif freq_unit == 'M':\n\t\t\tfreq_unit = 'MS'\n\t\tfor name, df in dict.items():\n\t\t\ttime_series = pd.date_range(index_use.index[0],index_use.index[-1], freq=freq_unit)\n\t\t\t#print('time_series',time_series)\n\t\t\tdf = df.reindex(time_series)\n\t\t\t#print('df', df)\n\t\t\tarray = list(df.columns)\n\t\t\tarray.remove('value')\n\t\t\tdf = df.drop(array,axis=1)\n\t\t\tdf[name + ' Value'] = df['value']\n\t\t\tagg_dict[name + ' Value'] = group_unit\n\t\t\t\"\"\"\n\t\t\tdf[name + ' Min'] = df['value']\n\t\t\tdf[name + ' Max'] = df['value']\n\t\t\tdf[name + ' Average'] = df['value']\n\t\t\tagg_dict[name + ' Min'] = 'min'\n\t\t\tagg_dict[name + ' Max'] = 'max'\n\t\t\tagg_dict[name + ' Average'] = 'mean'\n\t\t\t\"\"\"\n\t\t\tdf = df.drop('value',axis=1)\n\t\t\t#print(df)\n\t\t\tdict[name] = df\n\t\treturn dict, agg_dict",
"def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge",
"def record_custom_metric(self, name, value):\n key = (name, '')\n\n if isinstance(value, dict):\n if len(value) == 1 and 'count' in value:\n new_stats = CountStats(call_count=value['count'])\n else:\n new_stats = TimeStats(*c2t(**value))\n else:\n new_stats = TimeStats(1, value, value, value, value, value**2)\n\n stats = self.__stats_table.get(key)\n if stats is None:\n self.__stats_table[key] = new_stats\n else:\n stats.merge_stats(new_stats)\n\n return key",
"def do_timestamp_messages(self, messages):\n timestamp = self.env.now\n self.reception_records[timestamp] = messages\n log.debug(\"{} recorded {}\".format(self, self.reception_records))"
] | [
"0.6077945",
"0.5785074",
"0.5715007",
"0.5492405",
"0.5340035",
"0.5287281",
"0.5276565",
"0.5246753",
"0.524385",
"0.5236754",
"0.5186013",
"0.5171279",
"0.5168675",
"0.51627535",
"0.51169103",
"0.51088405",
"0.5038848",
"0.5023717",
"0.49753776",
"0.49482045",
"0.4947367",
"0.4939258",
"0.493599",
"0.4935371",
"0.49286577",
"0.49141905",
"0.4912324",
"0.49121654",
"0.49102762",
"0.49041864"
] | 0.6196177 | 0 |
Check if transaction is the slowest transaction and update accordingly. | def _update_slow_transaction(self, transaction):
    slowest = 0
    name = transaction.path

    if self.__slow_transaction:
        slowest = self.__slow_transaction.duration
    if name in self.__slow_transaction_map:
        slowest = max(self.__slow_transaction_map[name], slowest)

    if transaction.duration > slowest:
        # We are going to replace the prior slow transaction.
        # We need to be a bit tricky here. If we are overriding
        # an existing slow transaction for a different name,
        # then we need to restore in the transaction map what
        # the previous slowest duration was for that, or remove
        # it if there wasn't one. This is so we do not incorrectly
        # suppress it given that it was never actually reported
        # as the slowest transaction.

        if self.__slow_transaction:
            if self.__slow_transaction.path != name:
                if self.__slow_transaction_old_duration:
                    self.__slow_transaction_map[
                            self.__slow_transaction.path] = (
                            self.__slow_transaction_old_duration)
                else:
                    del self.__slow_transaction_map[
                            self.__slow_transaction.path]

        if name in self.__slow_transaction_map:
            self.__slow_transaction_old_duration = (
                    self.__slow_transaction_map[name])
        else:
            self.__slow_transaction_old_duration = None

        self.__slow_transaction = transaction
        self.__slow_transaction_map[name] = transaction.duration | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_database_with_block(self, block):\n\n txs = sorted(block['txs'], key=lambda x: x['count'] if 'count' in x else -1)\n\n for tx in txs:\n result = self.update_database_with_tx(tx, block['length'])\n if not result:\n return False\n\n return True",
"def merge_slow_sql_node(self, node):\n\n duration = node.duration\n\n self[1] += duration\n self[2] = self[0] and min(self[2], duration) or duration\n self[3] = max(self[3], duration)\n\n if self[3] == duration:\n self[4] = node\n\n # Must update the call count last as update of the\n # minimum call time is dependent on initial value.\n\n self[0] += 1",
"def slow_update_duration(self):\n for i in range(len(self.data_file.sorted_data)):\n if self.data_file.sorted_data[i]['type'] == 'slow':\n slow_upd = self.data_file.sorted_data[i]['timestamp']\n Config.ANALYSIS.write(f\"slow at: {slow_upd}\\n\")\n if i == 0:\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n elif i == len(self.data_file.sorted_data) - 1:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\\n\")\n else:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\")\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n Config.ANALYSIS.write(\"\\n\\n\")",
"def test_commit_optimize(self):\n # Same id, data and user_id\n id = data = user_id = get_rand_string()\n self.conn.add(id=id, user_id=user_id, data=data)\n\n # Make sure the changes weren't commited.\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 0,\n (\"Changes to index shouldn't be visible without commiting, \"\n \"results:%s\" % (repr(results))))\n\n # Optimizing commits the changes\n self.conn.commit(_optimize=True)\n\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 1,\n \"No documents returned, results:%s\" % (repr(results)))",
"def exit_transaction():\n _state.transactions = max(get_transactions() - 1, 0)",
"def search_UI_transaction_bigger(account):\n\t_amount = read_amount()\n\tfound = search_transaction_bigger(account, _amount, print_transaction)\n\tif (not found):\n\t\tprint(\"Nu exista nici o tranzactie cu suma mai mare de %f.\" % (_amount))",
"def test_optimize(self):\n # Same id, data and user_id\n id = data = user_id = get_rand_string()\n self.conn.add(id=id, user_id=user_id, data=data)\n\n # Make sure the changes weren't commited.\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 0,\n (\"Changes to index shouldn't be visible without call\"\n \"to optimize first, results:%s\" % (repr(results))))\n\n # Optimizing commits the changes\n self.conn.optimize()\n\n results = self.conn.query(\"id:\" + id).results\n self.assertEquals(len(results), 1,\n \"No documents returned, results:%s\" % (repr(results)))",
"def optimize_for_dagit(self, statement_timeout):",
"def slow(newETM): #Status: Done, not tested\r\n pass",
"def _should_try_reoptimize(self, last_statistics_refresh_time: timedelta, last_event: Event):\n if last_statistics_refresh_time is None:\n return True\n return last_event.max_timestamp - last_statistics_refresh_time > self.__statistics_update_time_window",
"def supports_transactions(self):\n return self._mysql_storage_engine != \"MyISAM\"",
"def optimize_for_dagit(self, statement_timeout: int):",
"def transaction_run():\n print('working...')\n # Get all transaction\n transactions = executor.submit(Transaction.query.filter_by(done=False).all)\n print(transactions.result())\n # Check if thier a transactions\n if transactions.result():\n # Go through each transaction\n for tran in transactions.result():\n print(\"Looping...\")\n # print(trans)\n # Get the currency account for the source user\n currency = executor.submit(Currency.query.filter_by(user_id=tran.user_id).first).result()\n print(currency)\n # target_user = executor.submit(User.query.filter_by(id=tran.target_user).first).result()\n # print(target_user)\n # Get the currency account for the target user\n target = executor.submit(Currency.query.filter_by(user_id=tran.target_user).first).result()\n # Get the transaction account for the target user\n trans_target = executor.submit(Transaction.query.filter_by(user_id=tran.target_user).first).result()\n ### # TODO:\n trans_source = executor.submit(Transaction.query.filter_by(user_id=tran.user_id).first).result()\n # update replace all tran with trans_source\n\n print(tran)\n # print(target_user)\n print(target)\n print(trans_target)\n # Check if the target user has account\n if target:\n # If the user send to himself fail the transaction\n if tran.user_id == tran.target_user:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n else:\n # If the currency type is bitcoin\n # Check if the user has a bitcoin ID\n if tran.currency_Type.lower() == \"bitcoin\":\n if not currency.bitcoin_id:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You don't have a bitcoin account!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If user has a bitcoin ID\n # Check if transfared money greater than his balance or not\n # Check if transfared money greater than the max amount per transaction or not\n else:\n if tran.currency_amount > currency.bitcoin_balance:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n elif tran.currency_amount > currency.max_amount:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # Everything ok, then subtract the transfared money from source user\n # Add transfare maney to target user\n else:\n balance = currency.bitcoin_balance - tran.currency_amount\n # updated_balance = str(balance)\n currency.bitcoin_balance = balance\n db.session.merge(currency)\n db.session.commit()\n db.session.remove()\n\n balance_target = target.bitcoin_balance + tran.currency_amount\n target.bitcoin_balance = balance_target\n db.session.merge(target)\n db.session.commit()\n db.session.remove()\n\n tran.state = \"Transaction success.\"\n tran.time_processed = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\")\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n\n # If the currency type is ethereum\n # Check if the user has a ethereum ID\n elif tran.currency_Type.lower() == \"ethereum\":\n if not currency.ethereum_id:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. 
You don't have a ethereum account!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If user has a ethereum ID\n # Check if transfared money greater than his balance or not\n # Check if transfared money greater than the max amount per transaction or not\n else:\n if tran.currency_amount > currency.ethereum_balance:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You don't have enough money!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n elif tran.currency_amount > currency.max_amount:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You exceed the max amount!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # Everything ok, then subtract the transfared money from source user\n # Add transfare maney to target\n else:\n balance = currency.ethereum_balance - tran.currency_amount\n currency.ethereum_balance = balance\n db.session.merge(currency)\n db.session.commit()\n db.session.remove()\n\n balance_target = target.ethereum_balance + tran.currency_amount\n target.ethereum_balance = balance_target\n db.session.merge(target)\n db.session.commit()\n db.session.remove()\n\n tran.state = \"Transaction success.\"\n tran.time_processed = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\")\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # if the currency type not bitcoin or ethereum\n else:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If the user has no currency account\n else:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n\n\n # Finish the transaction request\n print(tran)\n tran.done = True\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n print('Done!!!!')",
"def update_isolation(self, time: int):",
"def _should_try_reoptimize(self, last_statistics_refresh_time: timedelta, last_event: Event):\n if self.__is_simultaneous_state:\n return False\n return super()._should_try_reoptimize(last_statistics_refresh_time, last_event)",
"def update_highest_buy(self, limit):\n if limit.size == 0:\n #predecessor case\n limit = self.buy_tree.predecessor(limit)\n if limit is None:\n #no predecessor\n self.highest_buy = None\n else: # have a predecessor but dont know if it has order or not\n if limit.size == 0: #limit has no order but other limits in the tree might have orders\n if self.buy_tree.size == 0: #we know no other limits have an order\n self.highest_buy = None\n else: #other limits have an order\n while limit.size == 0:\n limit = self.buy_tree.predecessor(limit)\n #now our limit has a valid order\n self.highest_buy = limit.price\n else: #found valid pred\n self.highest_buy = limit.price",
"def is_transaction(self) -> bool:\n return False",
"def is_best(self, val) -> bool:\n if self.val is None or (val > self.val):\n self.val = val\n print(\"Updating Best\")\n return True\n else:\n return False",
"async def short_sync_backtrack(\n self, peer: WSChiaConnection, peak_height: uint32, target_height: uint32, target_unf_hash: bytes32\n ) -> bool:\n try:\n if peer.peer_node_id not in self.sync_store.backtrack_syncing:\n self.sync_store.backtrack_syncing[peer.peer_node_id] = 0\n self.sync_store.backtrack_syncing[peer.peer_node_id] += 1\n\n unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(target_unf_hash)\n curr_height: int = target_height\n found_fork_point = False\n blocks = []\n while curr_height > peak_height - 5:\n # If we already have the unfinished block, don't fetch the transactions. In the normal case, we will\n # already have the unfinished block, from when it was broadcast, so we just need to download the header,\n # but not the transactions\n fetch_tx: bool = unfinished_block is None or curr_height != target_height\n curr = await peer.call_api(\n FullNodeAPI.request_block, full_node_protocol.RequestBlock(uint32(curr_height), fetch_tx)\n )\n if curr is None:\n raise ValueError(f\"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, timed out\")\n if curr is None or not isinstance(curr, full_node_protocol.RespondBlock):\n raise ValueError(\n f\"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, wrong type {type(curr)}\"\n )\n blocks.append(curr.block)\n if self.blockchain.contains_block(curr.block.prev_header_hash) or curr_height == 0:\n found_fork_point = True\n break\n curr_height -= 1\n if found_fork_point:\n for block in reversed(blocks):\n await self.add_block(block, peer)\n except (asyncio.CancelledError, Exception):\n self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1\n raise\n\n self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1\n return found_fork_point",
"def update_trades(self, timestamp):\n if not self.trades:\n return\n\n trader = self.strategy.trader()\n\n #\n # for each trade check if the TP or SL is reached and trigger if necessary\n #\n\n self.lock()\n\n for trade in self.trades:\n\n #\n # managed operation\n #\n\n if trade.has_operations():\n mutated = False\n\n for operation in trade.operations:\n mutated |= operation.test_and_operate(trade, self.instrument, trader)\n\n if mutated:\n trade.cleanup_operations()\n\n #\n # active trade\n #\n\n if trade.is_active():\n # for statistics usage\n trade.update_stats(self.instrument.close_exec_price(trade.direction), timestamp)\n\n #\n # asset trade\n #\n\n if trade.trade_type == StrategyTrade.TRADE_BUY_SELL:\n if trade.is_closed():\n continue\n\n # process only on active trades\n if not trade.is_active():\n # @todo timeout if not filled before condition...\n continue\n\n if trade.is_closing():\n continue\n\n if not self.instrument.tradeable:\n continue\n\n if trade.is_dirty:\n # entry quantity changed need to update the exits orders\n trade.update_dirty(trader, self.instrument)\n\n # potential order exec close price (always close a long)\n close_exec_price = self.instrument.close_exec_price(Order.LONG)\n\n if (trade.tp > 0) and (close_exec_price >= trade.tp) and not trade.has_limit_order():\n # take profit trigger stop, close at market (taker fee)\n if trade.close(trader, self.instrument):\n # notify\n self.strategy.notify_order(trade.id, Order.SHORT, self.instrument.market_id,\n self.instrument.format_price(close_exec_price), timestamp, trade.timeframe,\n 'take-profit', trade.estimate_profit_loss(self.instrument))\n\n # streaming (but must be done with notify)\n if self._global_streamer:\n self._global_streamer.member('buy-exit').update(close_exec_price, timestamp)\n\n elif (trade.sl > 0) and (close_exec_price <= trade.sl) and not trade.has_stop_order():\n # stop loss trigger stop, close at market (taker fee)\n if trade.close(trader, self.instrument):\n # notify\n self.strategy.notify_order(trade.id, Order.SHORT, self.instrument.market_id,\n self.instrument.format_price(close_exec_price), timestamp, trade.timeframe,\n 'stop-loss', trade.estimate_profit_loss(self.instrument))\n\n # streaming (but must be done with notify)\n if self._global_streamer:\n self._global_streamer.member('buy-exit').update(close_exec_price, timestamp)\n\n #\n # margin trade\n #\n\n elif trade.trade_type in (StrategyTrade.TRADE_MARGIN, StrategyTrade.TRADE_POSITION, StrategyTrade.TRADE_IND_MARGIN):\n # process only on active trades\n if not trade.is_active():\n # @todo timeout if not filled before condition...\n continue\n\n if trade.is_closed():\n continue\n\n if trade.is_closing():\n continue\n\n if not self.instrument.tradeable:\n continue\n\n # potential order exec close price\n close_exec_price = self.instrument.close_exec_price(trade.direction)\n\n if (trade.tp > 0) and ((trade.direction > 0 and close_exec_price >= trade.tp) or (trade.direction < 0 and close_exec_price <= trade.tp)) and not trade.has_limit_order():\n # close in profit at market (taker fee)\n if trade.close(trader, self.instrument):\n # and notify\n self.strategy.notify_order(trade.id, trade.close_direction(), self.instrument.market_id,\n self.instrument.format_price(close_exec_price), timestamp, trade.timeframe,\n 'take-profit', trade.estimate_profit_loss(self.instrument))\n\n # and for streaming\n if self._global_streamer:\n self._global_streamer.member('sell-exit' if trade.direction < 0 else 'buy-exit').update(close_exec_price, 
timestamp)\n\n elif (trade.sl > 0) and ((trade.direction > 0 and close_exec_price <= trade.sl) or (trade.direction < 0 and close_exec_price >= trade.sl)) and not trade.has_stop_order():\n # close a long or a short position at stop-loss level at market (taker fee)\n if trade.close(trader, self.instrument):\n # and notify\n self.strategy.notify_order(trade.id, trade.close_direction(), self.instrument.market_id,\n self.instrument.format_price(close_exec_price), timestamp, trade.timeframe,\n 'stop-loss', trade.estimate_profit_loss(self.instrument))\n\n # and for streaming\n if self._global_streamer:\n self._global_streamer.member('sell-exit' if trade.direction < 0 else 'buy-exit').update(close_exec_price, timestamp)\n\n self.unlock()\n\n #\n # remove terminated, rejected, canceled and empty trades\n #\n\n mutated = False\n\n self.lock()\n\n for trade in self.trades:\n if trade.can_delete():\n mutated = True\n\n # cleanup if necessary before deleting the trade related refs\n trade.remove(trader)\n\n # record the trade for analysis and study\n if not trade.is_canceled():\n # last update of stats before logging\n trade.update_stats(self.instrument.close_exec_price(trade.direction), timestamp)\n\n # realized profit/loss\n profit_loss = trade.profit_loss - trade.entry_fees_rate() - trade.exit_fees_rate()\n\n # perf sommed here it means that its not done during partial closing\n if profit_loss != 0.0:\n self._stats['perf'] += profit_loss\n self._stats['best'] = max(self._stats['best'], profit_loss)\n self._stats['worst'] = min(self._stats['worst'], profit_loss)\n\n if profit_loss <= 0.0:\n self._stats['cont-loss'] += 1\n self._stats['cont-win'] = 1\n\n elif profit_loss > 0.0:\n self._stats['cont-loss'] = 0\n self._stats['cont-win'] += 1\n\n record = {\n 'id': trade.id,\n 'eot': trade.entry_open_time,\n 'xot': trade.exit_open_time,\n 'freot': trade.first_realized_entry_time,\n 'frxot': trade.first_realized_exit_time,\n 'lreot': trade.last_realized_entry_time,\n 'lrxot': trade.last_realized_exit_time,\n 'd': trade.direction_to_str(),\n 'l': self.instrument.format_quantity(trade.order_price),\n 'q': self.instrument.format_quantity(trade.order_quantity),\n 'e': self.instrument.format_quantity(trade.exec_entry_qty),\n 'x': self.instrument.format_quantity(trade.exec_exit_qty),\n 'tp': self.instrument.format_price(trade.take_profit),\n 'sl': self.instrument.format_price(trade.stop_loss),\n 'tf': timeframe_to_str(trade.timeframe),\n 'aep': self.instrument.format_price(trade.entry_price),\n 'axp': self.instrument.format_price(trade.exit_price),\n 's': trade.state_to_str(),\n 'b': self.instrument.format_price(trade.best_price()),\n 'w': self.instrument.format_price(trade.worst_price()),\n 'bt': trade.best_price_timestamp(),\n 'wt': trade.worst_price_timestamp(),\n 'pl': profit_loss,\n 'fees': trade.entry_fees_rate() + trade.exit_fees_rate(),\n 'c': trade.get_conditions(),\n 'com': trade.comment,\n 'rpnl': self.instrument.format_price(trade.unrealized_profit_loss), # once close its realized\n 'pnlcur': trade.profit_loss_currency\n }\n\n if profit_loss < 0:\n self._stats['failed'].append(record)\n elif profit_loss > 0:\n self._stats['success'].append(record)\n else:\n self._stats['roe'].append(record)\n\n if self._reporting == StrategyTrader.REPORTING_VERBOSE:\n self.report(trade, False)\n\n # recreate the list of trades\n if mutated:\n trades_list = []\n\n for trade in self.trades:\n if not trade.can_delete():\n # keep only active and pending trades\n trades_list.append(trade)\n\n self.trades = trades_list\n\n 
self.unlock()",
"def deciding(self):\n\n if not self.db.cacheEmpty():\n cacheMsgs = self.db.getCacheMsgs()\n prev = datetime.datetime.min\n prev_location = \"FOO LOCATION\"\n for msg in cacheMsgs:\n neutrinoTime = msg[\"neutrino_time\"]\n # go through messages to check if any two or more are within the time threshold\n if neutrinoTime - datetime.timedelta(seconds=self.coinc_threshold) <= prev:\n # verify the locations are different\n if msg[\"location\"] != prev_location:\n return True\n prev = neutrinoTime\n prev_location = msg[\"location\"]\n return False\n\n # return not self.db.cacheEmpty()",
"def cache_txn_manage(database, table, action, trans=None, **kw):\n trace = kw['trace']\n cache = server.data[database].tables['cache']\n transaction = request.get_json() if trans == None else trans\n if 'txn' in transaction:\n txn_id = transaction['txn']\n tx=None\n wait_time = 0.0 # total time waiting to commit txn \n wait_interval = txn_default_wait_in_sec # amount of time to wait between checks - if multiple txns exist \n # Get transaction from cache db\n if action == 'commit':\n while True:\n txns = cache.select('id','timestamp',\n where={'table_name': table}\n )\n if not txn_id in {tx['id'] for tx in txns}:\n return {\"message\": trace.error(f\"{txn_id} does not exist in cache\")}, 500\n if len(txns) == 1:\n if not txns[0]['id'] == txn_id:\n warning = f\"txn with id {txn_id} does not exist for {database} {table}\"\n return {'warning': trace.warning(warning)}, 500\n # txn_id is only value inside\n tx = txns[0]\n break\n # multiple pending txns - need to check timestamp to verify if this txn can be commited yet\n txns = sorted(txns, key=lambda txn: txn['timestamp'])\n for ind, txn in enumerate(txns):\n if txn['id'] == txn_id:\n if ind == 0:\n tx = txns[0]\n break\n if wait_time > txn_max_wait_time_in_sec:\n warning = f\"timeout of {wait_time} reached while waiting to commit {txn_id} for {database} {table}, waiting on {txns[:ind]}\"\n trace.warning(warning)\n trace.warning(f\"removing txn with id {txns[0]['id']} maxWaitTime of {txn_max_wait_time_in_sec} reached\")\n cache.delete(where={'id': txns[0]['id']})\n break\n break\n if tx == None:\n trace.warning(f\"txn_id {txn_id} is behind txns {txns[:ind]} - waiting {wait_time} to retry\")\n time.sleep(wait_interval)\n wait_time+=wait_interval \n # wait_interval scales up to txn_max_wait_interval_in_sec\n wait_interval+=wait_interval \n if wait_interval >= txn_max_wait_interval_in_sec:\n wait_interval = txn_max_wait_interval_in_sec\n continue\n break\n # Should not have broken out of loop here without a tx\n if tx == None:\n trace.error(\"tx is None, this should not hppen\")\n return {\"error\": \"tx was none\"}, 500\n tx = cache.select('type','txn',\n where={'id': txn_id})[0]\n try:\n r, rc = server.actions[tx['type']](database, table, tx['txn'])\n trace.warning(f\"##cache {action} response {r} rc {rc}\")\n except Exception as e:\n r, rc = trace.exception(f\"Exception when performing cache {action}\"), 500\n \n del_txn = cache.delete(\n where={'id': txn_id}\n )\n if rc == 200:\n # update last txn id\n set_params = {\n 'set': {\n 'last_txn_uuid': txn_id,\n 'last_mod_time': float(time.time())\n },\n 'where': {\n 'table_name': table\n }\n }\n server.data['cluster'].tables['pyql'].update(\n **set_params['set'],\n where=set_params['where']\n )\n return {\"message\": r, \"status\": rc}, rc\n if action == 'cancel':\n del_txn = cache.delete(\n where={'id': txn_id}\n )\n return {'deleted': txn_id}, 200",
"async def _finish_sync(self) -> None:\n self.log.info(\"long sync done\")\n self.sync_store.set_long_sync(False)\n self.sync_store.set_sync_mode(False)\n self._state_changed(\"sync_mode\")\n if self._server is None:\n return None\n\n async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):\n await self.sync_store.clear_sync_info()\n\n peak: Optional[BlockRecord] = self.blockchain.get_peak()\n peak_fb: Optional[FullBlock] = await self.blockchain.get_full_peak()\n if peak_fb is not None:\n assert peak is not None\n state_change_summary = StateChangeSummary(peak, uint32(max(peak.height - 1, 0)), [], [], [])\n ppp_result: PeakPostProcessingResult = await self.peak_post_processing(\n peak_fb, state_change_summary, None\n )\n await self.peak_post_processing_2(peak_fb, None, state_change_summary, ppp_result)\n\n if peak is not None and self.weight_proof_handler is not None:\n await self.weight_proof_handler.get_proof_of_weight(peak.header_hash)\n self._state_changed(\"block\")",
"def in_transaction(self):\n # We likely just changed data - give it a second to catch up\n time.sleep(0.1) # I think I keep reading journal watermark too soon without this\n \n # Get relevant data\n water_mark = pos.read_journal_watermark()\n self.log.info(f\"Watermark: [{water_mark}]\")\n balance = pos.read_balance()['Total']\n self.log.info(f\"Balance: [{balance}]\")\n \n # Decide if we need more checks based on watermark\n if water_mark == \"TRANSACTION IN PROGRESS\":\n self.log.info(\"In Transaction: In Transaction Watermark found\")\n return True\n elif water_mark == \"TRANSACTION COMPLETE\" or water_mark == \"TRANSACTION VOIDED\":\n self.log.info(\"Not in Transaction: Transaction Complete/Voided watermarks found\")\n return False\n else:\n # No watermark - decide based on balance\n if balance == \"$0.00\":\n self.log.info(\"Not in Transaction: $0 balance with no watermark\")\n return False\n else:\n self.log.info(\"In Transaction: Non-$0 balance with no watermark\")\n return True",
"def get_long_trx(self):\n if self.skip_long_trx_check:\n return False\n processes = self.query(sql.show_processlist)\n for proc in processes:\n if not proc[\"Info\"]:\n sql_statement = \"\"\n else:\n if isinstance(proc[\"Info\"], bytes):\n sql_statement = proc[\"Info\"].decode(\"utf-8\", \"replace\")\n else:\n sql_statement = proc[\"Info\"]\n\n proc[\"Info\"] = sql_statement\n # Time can be None if the connection is in \"Connect\" state\n if (\n (proc.get(\"Time\") or 0) > self.long_trx_time\n and proc.get(\"db\", \"\") == self._current_db\n and self.table_name in \"--\" + sql_statement\n and not proc.get(\"Command\", \"\") == \"Sleep\"\n ):\n return proc",
"def check_transaction(coins_inserted, cost_drink, machine_balance):\n if coins_inserted < cost_drink:\n return False\n else:\n if coins_inserted > cost_drink:\n change_given = coins_inserted - cost_drink\n print(f\"Here is ${change_given:0.2f} in change.\")\n return machine_balance + cost_drink",
"def _warn_and_lock_if_needed(self, transaction: Transaction) -> None:\n budget = self.budget_manager.get_budget(transaction.budget_category)\n exceeded_ratio = budget.exceeded_ratio\n if exceeded_ratio > 1:\n self._notify_exceeded_budget(budget)\n self.print_transactions_for_review(budget)\n elif exceeded_ratio > 0.9:\n self._warn_nearing_exceed_budget(budget, 90)\n self.print_transactions_for_review(budget)",
"def _warn_and_lock_if_needed(self, transaction: Transaction) -> None:\n pass",
"def check_best(self):\n # Get the most profitable network based on our current data\n new_best = max(self.profit_data.iteritems(),\n key=operator.itemgetter(1))[0]\n\n if self.current_network is None:\n self.logger.info(\n \"No active network, so switching to {} with profit of {:,.4f}\"\n .format(new_best, self.profit_data[new_best]))\n self.next_network = new_best\n self.switch_network()\n return\n\n # If the currently most profitable network is 120% the profitability\n # of what we're mining on, we should switch immediately\n margin_switch = self.config['margin_switch']\n if (margin_switch and\n self.profit_data[self.next_network] >\n (self.profit_data[self.current_network] * margin_switch)):\n self.logger.info(\n \"Network {} {:,.4f} now more profitable than current network \"\n \"{} {:,.4f} by a fair margin. Switching NOW.\"\n .format(new_best, self.profit_data[new_best], self.current_network,\n self.profit_data[self.current_network]))\n self.next_network = new_best\n self.switch_network()\n return\n\n if new_best != self.next_network:\n self.logger.info(\n \"Network {} {:,.4f} now more profitable than current best \"\n \"{} {:,.4f}. Switching on next block from current network {}.\"\n .format(new_best, self.profit_data[new_best], self.next_network,\n self.profit_data[self.next_network], self.current_network))\n self.next_network = new_best\n return\n\n self.logger.debug(\"Network {} {:,.4f} still most profitable\"\n .format(new_best, self.profit_data[new_best]))",
"def slow_upd_count(self):\n return self.upd_type_count(\"slow\", [0] * 24)"
] | [
"0.5796732",
"0.56013685",
"0.5567382",
"0.5438813",
"0.52951",
"0.527724",
"0.5276468",
"0.52692777",
"0.51808035",
"0.5144416",
"0.5136642",
"0.51107734",
"0.5102024",
"0.5087963",
"0.50874853",
"0.5072691",
"0.5062309",
"0.503474",
"0.49916357",
"0.49607188",
"0.4943801",
"0.4930255",
"0.4902352",
"0.49019596",
"0.48956656",
"0.48263434",
"0.48068967",
"0.4802907",
"0.4801067",
"0.47867498"
] | 0.78206486 | 0 |
Check if transaction is a synthetics trace and save it to __synthetics_transactions. | def _update_synthetics_transaction(self, transaction):
settings = self.__settings
if not transaction.synthetics_resource_id:
return
maximum = settings.agent_limits.synthetics_transactions
if len(self.__synthetics_transactions) < maximum:
self.__synthetics_transactions.append(transaction) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isSStx(tx):\n try:\n checkSStx(tx)\n\n except Exception as e:\n log.debug(\"isSStx: {}\".format(e))\n\n else:\n return True",
"def save(self, trade: Trade) -> Trade:\n\n pass # pragma: no cover",
"def is_transaction(self) -> bool:\n return False",
"def isTx(self):\n\t\treturn self.extension == '.tx'",
"def trackTrans(self):\n self.data_struct['_trackTrans'] = True",
"def _is_trace_on():\n return AceQLHttpApi.is_trace_on()",
"def is_transaction(self):\n return self._request.has_var(\"_transid\")",
"def isSTraced(self):\n try:\n return self.sTraced\n except AttributeError:\n self.sTraced = False\n return False",
"def report(self, trade, is_entry):\n pass",
"def transaction_trace_data(self, connections):\n\n _logger.debug('Generating transaction trace data.')\n\n if not self.__settings:\n return []\n\n # Create a set 'traces' that is a union of slow transaction,\n # and Synthetics transactions. This ensures we don't send\n # duplicates of a transaction.\n\n traces = set()\n if self.__slow_transaction:\n traces.add(self.__slow_transaction)\n traces.update(self.__synthetics_transactions)\n\n # Return an empty list if no transactions were captured.\n\n if not traces:\n return []\n\n # We want to limit the number of explain plans we do across\n # these. So work out what were the slowest and tag them.\n # Later the explain plan will only be run on those which are\n # tagged.\n\n agent_limits = self.__settings.agent_limits\n explain_plan_limit = agent_limits.sql_explain_plans_per_harvest\n maximum_nodes = agent_limits.transaction_traces_nodes\n\n database_nodes = []\n\n if explain_plan_limit != 0:\n for trace in traces:\n for node in trace.slow_sql:\n # Make sure we clear any flag for explain plans on\n # the nodes in case a transaction trace was merged\n # in from previous harvest period.\n\n node.generate_explain_plan = False\n\n # Node should be excluded if not for an operation\n # that we can't do an explain plan on. Also should\n # not be one which would not be included in the\n # transaction trace because limit was reached.\n\n if (node.node_count < maximum_nodes and\n node.connect_params and node.statement.operation in\n node.statement.database.explain_stmts):\n database_nodes.append(node)\n\n database_nodes = sorted(database_nodes,\n key=lambda x: x.duration)[-explain_plan_limit:]\n\n for node in database_nodes:\n node.generate_explain_plan = True\n\n else:\n for trace in traces:\n for node in trace.slow_sql:\n node.generate_explain_plan = True\n database_nodes.append(node)\n\n # Now generate the transaction traces. We need to cap the\n # number of nodes capture to the specified limit.\n\n trace_data = []\n\n for trace in traces:\n transaction_trace = trace.transaction_trace(\n self, maximum_nodes, connections)\n\n data = [transaction_trace,\n list(trace.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n if trace.record_tt:\n force_persist = True\n else:\n force_persist = False\n\n if trace.include_transaction_trace_request_uri:\n request_uri = trace.request_uri\n else:\n request_uri = None\n\n trace_data.append([transaction_trace.start_time,\n root.end_time - root.start_time,\n trace.path,\n request_uri,\n pack_data,\n trace.guid,\n None,\n force_persist,\n None,\n trace.synthetics_resource_id, ])\n\n return trace_data",
"def _apply_trx_trade_to_allocation(cls, allocation, block_trade):\n try:\n allocation.TrxTrade(block_trade)\n allocation.Commit()\n except Exception as e:\n error_message = 'Failed to stamp TrxTrade {0} on Allocation Trade: {1} , {2}'\n LOGGER.exception(error_message.format(block_trade.Oid(), allocation.Oid(), e))\n return False\n\n return True",
"def sign_transaction_essence(self, prepared_transaction_data):\n return self._call_account_method(\n 'signTransactionEssence', {\n 'preparedTransactionData': prepared_transaction_data\n }\n )",
"def is_sed(self) -> bool:\n return False",
"def signSign(self):\r\n if \"signature\" in self: # or \"signatures\" in self ?\r\n self.pop(\"id\", False)\r\n try:\r\n self[\"signSignature\"] = dposlib.core.crypto.getSignature(\r\n self, self._secondPrivateKey,\r\n exclude_second_sig=True,\r\n )\r\n except AttributeError:\r\n raise Exception(\"no second private Key available\")\r\n else:\r\n raise Exception(\"transaction not signed\")",
"async def check_trustline(\n cls, transaction: Transaction, server: Server, locks: Dict\n ):\n try:\n _, account = await get_account_obj_async(\n Keypair.from_public_key(transaction.to_address), server\n )\n except BaseRequestError:\n logger.exception(f\"Failed to load account {transaction.to_address}\")\n transaction.pending_execution_attempt = False\n await sync_to_async(transaction.save)()\n return\n trustline_found = False\n for balance in account[\"balances\"]:\n if balance.get(\"asset_type\") == \"native\":\n continue\n if (\n balance[\"asset_code\"] == transaction.asset.code\n and balance[\"asset_issuer\"] == transaction.asset.issuer\n ):\n trustline_found = True\n break\n if trustline_found:\n logger.debug(\n f\"detected transaction {transaction.id} is no longer pending trust\"\n )\n await cls.process_deposit(transaction, server, locks)\n else:\n transaction.pending_execution_attempt = False\n await sync_to_async(transaction.save)()",
"def _save_internal_transactions(self, blocks_traces):\n docs = [\n self._preprocess_internal_transaction(transaction)\n for transaction in blocks_traces\n if transaction[\"transactionHash\"]\n ]\n if docs:\n for chunk in bulk_chunks(docs, None, BYTES_PER_CHUNK):\n self.client.bulk_index(docs=chunk, index=self.indices[\"internal_transaction\"], doc_type=\"itx\",\n id_field=\"hash\", refresh=True)",
"def trace(self, trace=...):\n ...",
"def _save_miner_transactions(self, blocks_traces):\n docs = [self._preprocess_internal_transaction(transaction) for transaction in blocks_traces if\n not transaction[\"transactionHash\"]]\n self.client.bulk_index(docs=docs, index=self.indices[\"miner_transaction\"], doc_type=\"tx\", id_field=\"hash\",\n refresh=True)",
"def sign_trx(self, signture):\n self.trx_signature = signture",
"def record_transaction(self, transaction):\n\n if not self.__settings:\n return\n\n settings = self.__settings\n\n # Record the apdex, value and time metrics generated from the\n # transaction. Whether time metrics are reported as distinct\n # metrics or into a rollup is in part controlled via settings\n # for minimum number of unique metrics to be reported and thence\n # whether over a time threshold calculated as percentage of\n # overall request time, up to a maximum number of unique\n # metrics. This is intended to limit how many metrics are\n # reported for each transaction and try and cut down on an\n # explosion of unique metric names. The limits and thresholds\n # are applied after the metrics are reverse sorted based on\n # exclusive times for each metric. This ensures that the metrics\n # with greatest exclusive time are retained over those with\n # lesser time. Such metrics get reported into the performance\n # breakdown tab for specific web transactions.\n\n self.record_apdex_metrics(transaction.apdex_metrics(self))\n\n self.merge_custom_metrics(transaction.custom_metrics.metrics())\n\n self.record_time_metrics(transaction.time_metrics(self))\n\n # Capture any errors if error collection is enabled.\n # Only retain maximum number allowed per harvest.\n\n error_collector = settings.error_collector\n\n if (error_collector.enabled and settings.collect_errors and\n len(self.__transaction_errors) <\n settings.agent_limits.errors_per_harvest):\n self.__transaction_errors.extend(transaction.error_details())\n\n self.__transaction_errors = self.__transaction_errors[:\n settings.agent_limits.errors_per_harvest]\n\n if (error_collector.capture_events and\n error_collector.enabled and\n settings.collect_error_events):\n events = transaction.error_events(self.__stats_table)\n for event in events:\n self._error_events.add(event, priority=transaction.priority)\n\n # Capture any sql traces if transaction tracer enabled.\n\n if settings.slow_sql.enabled and settings.collect_traces:\n for node in transaction.slow_sql_nodes(self):\n self.record_slow_sql_node(node)\n\n # Remember as slowest transaction if transaction tracer\n # is enabled, it is over the threshold and slower than\n # any existing transaction seen for this period and in\n # the historical snapshot of slow transactions, plus\n # recording of transaction trace for this transaction\n # has not been suppressed.\n\n transaction_tracer = settings.transaction_tracer\n\n if (not transaction.suppress_transaction_trace and\n transaction_tracer.enabled and settings.collect_traces):\n\n # Transactions saved for Synthetics transactions\n # do not depend on the transaction threshold.\n\n self._update_synthetics_transaction(transaction)\n\n threshold = transaction_tracer.transaction_threshold\n\n if threshold is None:\n threshold = transaction.apdex_t * 4\n\n if transaction.duration >= threshold:\n self._update_slow_transaction(transaction)\n\n # Create the transaction event and add it to the\n # appropriate \"bucket.\" Synthetic requests are saved in one,\n # while transactions from regular requests are saved in another.\n\n if transaction.synthetics_resource_id:\n event = transaction.transaction_event(self.__stats_table)\n self._synthetics_events.add(event)\n\n elif (settings.collect_analytics_events and\n settings.transaction_events.enabled):\n\n event = transaction.transaction_event(self.__stats_table)\n self._transaction_events.add(event, priority=transaction.priority)\n\n # Merge in custom events\n\n if (settings.collect_custom_events 
and\n settings.custom_insights_events.enabled):\n self.custom_events.merge(transaction.custom_events)\n\n # Merge in span events\n\n if (settings.distributed_tracing.enabled and\n settings.span_events.enabled and settings.collect_span_events):\n if settings.infinite_tracing.enabled:\n for event in transaction.span_protos(settings):\n self._span_stream.put(event)\n elif transaction.sampled:\n for event in transaction.span_events(self.__settings):\n self._span_events.add(event, priority=transaction.priority)",
"def hasTx(self):\n\t\tif self.isTx:\n\t\t\treturn True\n\t\treturn textureFile( self.path.replace( self.extension, '.tx' ) ).exists",
"def _is_transaction_isolation_error(self, error):\n from psycopg2.extensions import TransactionRollbackError\n\n # Django can wrap errors, adding it to the `__cause__` attribute\n for e in (error, getattr(error, '__cause__', None)):\n if isinstance(e, TransactionRollbackError):\n return True\n return False",
"def save_transcription(trs_fname, trs):\n existed = os.path.exists(trs_fname)\n if not trs.endswith('\\n'):\n trs += '\\n'\n with codecs.open(trs_fname, 'w+', encoding='UTF-8') as trs_file:\n trs_file.write(trs)\n return existed",
"def record_trace(self):\n\n tfname = str(int(time.time())) + \".obd2_reader.trace\"\n self.tf_out = open(tfname, 'a')\n self.RecordTrace = 1\n print \"Recoding trace to:\", tfname",
"def isSSGen(tx):\n try:\n checkSSGen(tx)\n\n except Exception as e:\n log.debug(\"isSSGen: {}\".format(e))\n\n else:\n return True",
"def check(transaction):\n if not isinstance(transaction, Transaction):\n transaction = Transaction.objects.get(id=transaction)\n\n r = requests.post(\"https://www.blockonomics.co/api/searchhistory\",\n data=json.dumps({\"addr\": transaction.to_address}))\n try:\n history_data = json.loads(r.content.decode('utf-8'))['history'][0]\n except:\n return\n\n set_tx_details(history_data, transaction)",
"def is_sedes(obj):\n return hasattr(obj, 'serialize') and hasattr(obj, 'deserialize')",
"def save(self, *args, **kwargs):\n\n # Call the \"real\" save() method.\n super(Product, self).save(*args, **kwargs)\n\n # If sold and is a Widget\n if self.sale and self.kind == self.WIDGET:\n # But has not stamp\n try:\n self.stamp\n except:\n s = Stamp(owned_by=self.sale.customer, obtained_with=self)\n s.save()",
"def supports_transactions(self):\n return False",
"def save(self):\n self.lock()\n\n trader = self.strategy.trader()\n\n for trade in self.trades:\n t_data = trade.dumps()\n ops_data = [operation.dumps() for operation in trade.operations]\n\n # store per trade\n Database.inst().store_user_trade((trader.name, trader.account.name, self.instrument.market_id,\n self.strategy.identifier, trade.id, trade.trade_type, t_data, ops_data))\n\n # dumps of regions\n trader_data = {}\n regions_data = [region.dumps() for region in self.regions]\n\n Database.inst().store_user_trader((trader.name, trader.account.name, self.instrument.market_id,\n self.strategy.identifier, self.activity, trader_data, regions_data))\n\n self.unlock()"
] | [
"0.54398495",
"0.5149518",
"0.51403934",
"0.51361316",
"0.5022585",
"0.48563206",
"0.48534927",
"0.48511583",
"0.4807828",
"0.47867486",
"0.46448067",
"0.46145433",
"0.45834467",
"0.4583097",
"0.45755658",
"0.4567124",
"0.45389777",
"0.45228583",
"0.45225585",
"0.45150757",
"0.45129964",
"0.4472929",
"0.44627464",
"0.44578195",
"0.4440597",
"0.44364247",
"0.44289523",
"0.44144338",
"0.4412158",
"0.44016916"
] | 0.64152426 | 0 |
Record any apdex and time metrics for the transaction as well as any errors which occurred for the transaction. If the transaction qualifies to become the slow transaction remember it for later. | def record_transaction(self, transaction):
if not self.__settings:
return
settings = self.__settings
# Record the apdex, value and time metrics generated from the
# transaction. Whether time metrics are reported as distinct
# metrics or into a rollup is in part controlled via settings
# for minimum number of unique metrics to be reported and thence
# whether over a time threshold calculated as percentage of
# overall request time, up to a maximum number of unique
# metrics. This is intended to limit how many metrics are
# reported for each transaction and try and cut down on an
# explosion of unique metric names. The limits and thresholds
# are applied after the metrics are reverse sorted based on
# exclusive times for each metric. This ensures that the metrics
# with greatest exclusive time are retained over those with
# lesser time. Such metrics get reported into the performance
# breakdown tab for specific web transactions.
self.record_apdex_metrics(transaction.apdex_metrics(self))
self.merge_custom_metrics(transaction.custom_metrics.metrics())
self.record_time_metrics(transaction.time_metrics(self))
# Capture any errors if error collection is enabled.
# Only retain maximum number allowed per harvest.
error_collector = settings.error_collector
if (error_collector.enabled and settings.collect_errors and
len(self.__transaction_errors) <
settings.agent_limits.errors_per_harvest):
self.__transaction_errors.extend(transaction.error_details())
self.__transaction_errors = self.__transaction_errors[:
settings.agent_limits.errors_per_harvest]
if (error_collector.capture_events and
error_collector.enabled and
settings.collect_error_events):
events = transaction.error_events(self.__stats_table)
for event in events:
self._error_events.add(event, priority=transaction.priority)
# Capture any sql traces if transaction tracer enabled.
if settings.slow_sql.enabled and settings.collect_traces:
for node in transaction.slow_sql_nodes(self):
self.record_slow_sql_node(node)
# Remember as slowest transaction if transaction tracer
# is enabled, it is over the threshold and slower than
# any existing transaction seen for this period and in
# the historical snapshot of slow transactions, plus
# recording of transaction trace for this transaction
# has not been suppressed.
transaction_tracer = settings.transaction_tracer
if (not transaction.suppress_transaction_trace and
transaction_tracer.enabled and settings.collect_traces):
# Transactions saved for Synthetics transactions
# do not depend on the transaction threshold.
self._update_synthetics_transaction(transaction)
threshold = transaction_tracer.transaction_threshold
if threshold is None:
threshold = transaction.apdex_t * 4
if transaction.duration >= threshold:
self._update_slow_transaction(transaction)
# Create the transaction event and add it to the
# appropriate "bucket." Synthetic requests are saved in one,
# while transactions from regular requests are saved in another.
if transaction.synthetics_resource_id:
event = transaction.transaction_event(self.__stats_table)
self._synthetics_events.add(event)
elif (settings.collect_analytics_events and
settings.transaction_events.enabled):
event = transaction.transaction_event(self.__stats_table)
self._transaction_events.add(event, priority=transaction.priority)
# Merge in custom events
if (settings.collect_custom_events and
settings.custom_insights_events.enabled):
self.custom_events.merge(transaction.custom_events)
# Merge in span events
if (settings.distributed_tracing.enabled and
settings.span_events.enabled and settings.collect_span_events):
if settings.infinite_tracing.enabled:
for event in transaction.span_protos(settings):
self._span_stream.put(event)
elif transaction.sampled:
for event in transaction.span_events(self.__settings):
self._span_events.add(event, priority=transaction.priority) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_slow_transaction(self, transaction):\n\n slowest = 0\n name = transaction.path\n\n if self.__slow_transaction:\n slowest = self.__slow_transaction.duration\n if name in self.__slow_transaction_map:\n slowest = max(self.__slow_transaction_map[name], slowest)\n\n if transaction.duration > slowest:\n # We are going to replace the prior slow transaction.\n # We need to be a bit tricky here. If we are overriding\n # an existing slow transaction for a different name,\n # then we need to restore in the transaction map what\n # the previous slowest duration was for that, or remove\n # it if there wasn't one. This is so we do not incorrectly\n # suppress it given that it was never actually reported\n # as the slowest transaction.\n\n if self.__slow_transaction:\n if self.__slow_transaction.path != name:\n if self.__slow_transaction_old_duration:\n self.__slow_transaction_map[\n self.__slow_transaction.path] = (\n self.__slow_transaction_old_duration)\n else:\n del self.__slow_transaction_map[\n self.__slow_transaction.path]\n\n if name in self.__slow_transaction_map:\n self.__slow_transaction_old_duration = (\n self.__slow_transaction_map[name])\n else:\n self.__slow_transaction_old_duration = None\n\n self.__slow_transaction = transaction\n self.__slow_transaction_map[name] = transaction.duration",
"def transaction_trace_data(self, connections):\n\n _logger.debug('Generating transaction trace data.')\n\n if not self.__settings:\n return []\n\n # Create a set 'traces' that is a union of slow transaction,\n # and Synthetics transactions. This ensures we don't send\n # duplicates of a transaction.\n\n traces = set()\n if self.__slow_transaction:\n traces.add(self.__slow_transaction)\n traces.update(self.__synthetics_transactions)\n\n # Return an empty list if no transactions were captured.\n\n if not traces:\n return []\n\n # We want to limit the number of explain plans we do across\n # these. So work out what were the slowest and tag them.\n # Later the explain plan will only be run on those which are\n # tagged.\n\n agent_limits = self.__settings.agent_limits\n explain_plan_limit = agent_limits.sql_explain_plans_per_harvest\n maximum_nodes = agent_limits.transaction_traces_nodes\n\n database_nodes = []\n\n if explain_plan_limit != 0:\n for trace in traces:\n for node in trace.slow_sql:\n # Make sure we clear any flag for explain plans on\n # the nodes in case a transaction trace was merged\n # in from previous harvest period.\n\n node.generate_explain_plan = False\n\n # Node should be excluded if not for an operation\n # that we can't do an explain plan on. Also should\n # not be one which would not be included in the\n # transaction trace because limit was reached.\n\n if (node.node_count < maximum_nodes and\n node.connect_params and node.statement.operation in\n node.statement.database.explain_stmts):\n database_nodes.append(node)\n\n database_nodes = sorted(database_nodes,\n key=lambda x: x.duration)[-explain_plan_limit:]\n\n for node in database_nodes:\n node.generate_explain_plan = True\n\n else:\n for trace in traces:\n for node in trace.slow_sql:\n node.generate_explain_plan = True\n database_nodes.append(node)\n\n # Now generate the transaction traces. We need to cap the\n # number of nodes capture to the specified limit.\n\n trace_data = []\n\n for trace in traces:\n transaction_trace = trace.transaction_trace(\n self, maximum_nodes, connections)\n\n data = [transaction_trace,\n list(trace.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n if trace.record_tt:\n force_persist = True\n else:\n force_persist = False\n\n if trace.include_transaction_trace_request_uri:\n request_uri = trace.request_uri\n else:\n request_uri = None\n\n trace_data.append([transaction_trace.start_time,\n root.end_time - root.start_time,\n trace.path,\n request_uri,\n pack_data,\n trace.guid,\n None,\n force_persist,\n None,\n trace.synthetics_resource_id, ])\n\n return trace_data",
"def recordTransaction(self, loop, transaction):\n\n a = {}\n a['time'] = transaction.transactionTime\n a['atm'] = transaction.transactionATM.atmID\n a['transaction'] = transaction.transactionType\n a['cash'] = transaction.transactionATM.atmCash\n a['status'] = transaction.transactionStatus\n self._atmDict[loop] = a\n\n c = {}\n c['time'] = transaction.transactionTime\n c['client'] = transaction.transactionCard.cardAccount.accountClient.clientID\n c['account'] = transaction.transactionCard.cardAccount.accountNumber\n c['transaction'] = transaction.transactionType\n c['balance'] = transaction.transactionCard.cardAccount.accountBalance\n c['status'] = transaction.transactionStatus\n self._clientDict[loop] = c\n\n t = {}\n t['time'] = transaction.transactionTime\n t['transaction'] = transaction.transactionType\n t['amount'] = transaction.transactionAmount\n t['status'] = transaction.transactionStatus\n self._transactionDict[loop] = t",
"def trackTrans(self):\n self.data_struct['_trackTrans'] = True",
"def slow_transaction_data(self):\n\n # XXX This method no longer appears to be used. Being replaced\n # by the transaction_trace_data() method.\n\n if not self.__settings:\n return []\n\n if not self.__slow_transaction:\n return []\n\n maximum = self.__settings.agent_limits.transaction_traces_nodes\n\n transaction_trace = self.__slow_transaction.transaction_trace(\n self, maximum)\n\n data = [transaction_trace,\n list(self.__slow_transaction.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n trace_data = [[root.start_time,\n root.end_time - root.start_time,\n self.__slow_transaction.path,\n self.__slow_transaction.request_uri,\n pack_data]]\n\n return trace_data",
"def transaction_time(self, transaction_time):\n\n self._transaction_time = transaction_time",
"def _update_time_delivered(self, time_delivered):\n # Update db record's time_delivered field\n update = {'time_delivered': time_delivered}\n datebase.update_transaction_record(filter=self.filter, update=update)\n \n # Update db record's estimated_time field\n datebase.update_transaction_record(filter=self.filter, {estimated_time:'0'})\n \n # Update db record's transaction status to delivered\n self._update_transaction_status(transaction_status='delivered')\n \t\t self.transaction_info.update(delivery_status='delivered')\n \n # Update object\n \t\tself.transaction_info.update(time_delivered=time_delivered)\n self.transaction_info.update(estimated_time=0)\n self.transaction_info(transaction_status='delivered')\n\n \tdef _update_transaction_status(self, transaction_status, photo=None):\n \"\"\"\n Update record's transaction_status and send sms msg to update seeker\n \"\"\"\n # Send text message when status changes \n self.send_text(message_type=transaction_status)\n\n # Update db record's transaction status\n update = {'transaction_status': transaction_status}\n datebase.update_transaction_record(filter=self.filter, update=update)\n\n # Update object\n self.transaction_info.update('transaction_seeker': transaction_status)\n\n # If delivered ... TODO: do we actually want to remove from db? \n \t\t# if transaction_status == 'delivered':\n # datebase.delete_transaction_record()\n # return 1 \n # arguments against: we wont be able to access delivered photo if we want to do that",
"def record_apdex_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_apdex_metric(metric)",
"def transaction_failed_before_processing(self):",
"def _post_record(\n self, ret_record_args, error, cost, start_time, end_time, record\n ):\n\n ret_record_args['main_error'] = str(error)\n ret_record_args['calls'] = record\n ret_record_args['cost'] = cost\n ret_record_args['perf'] = Perf(start_time=start_time, end_time=end_time)\n ret_record_args['app_id'] = self.app_id\n ret_record_args['tags'] = self.tags\n\n ret_record = Record(**ret_record_args)\n\n if error is not None:\n if self.feedback_mode == FeedbackMode.WITH_APP:\n self._handle_error(record=ret_record, error=error)\n\n elif self.feedback_mode in [FeedbackMode.DEFERRED,\n FeedbackMode.WITH_APP_THREAD]:\n TP().runlater(\n self._handle_error, record=ret_record, error=error\n )\n\n raise error\n\n if self.feedback_mode == FeedbackMode.WITH_APP:\n self._handle_record(record=ret_record)\n\n elif self.feedback_mode in [FeedbackMode.DEFERRED,\n FeedbackMode.WITH_APP_THREAD]:\n TP().runlater(self._handle_record, record=ret_record)\n\n return ret_record",
"def record_slow_sql_node(self, node):\n\n if not self.__settings:\n return\n\n key = node.identifier\n stats = self.__sql_stats_table.get(key)\n if stats is None:\n # Only record slow SQL if not already over the limit on\n # how many can be collected in the harvest period.\n\n settings = self.__settings\n maximum = settings.agent_limits.slow_sql_data\n if len(self.__sql_stats_table) < maximum:\n stats = SlowSqlStats()\n self.__sql_stats_table[key] = stats\n\n if stats:\n stats.merge_slow_sql_node(node)\n\n return key",
"def transaction_failed(self):",
"async def test_all_transactions(self):\n response = await self.collect(get_request_text=self.GATLING_LOG)\n self.assert_measurement(response, value=\"2\")",
"def process_transaction(self, transaction):\n instrument = transaction.instrument\n if isinstance(instrument, Future):\n try:\n old_price = self._payout_last_sale_prices[instrument]\n except KeyError:\n self._payout_last_sale_prices[instrument] = transaction.price\n else:\n position = self.position_tracker.positions[instrument]\n amount = position.amount\n price = transaction.price\n\n self._cash_flow(\n self._calculate_payout(\n instrument.multiplier,\n amount,\n old_price,\n price,\n ),\n )\n\n if amount + transaction.amount == 0:\n del self._payout_last_sale_prices[instrument]\n else:\n self._payout_last_sale_prices[instrument] = price\n else:\n self._cash_flow(-(transaction.price * transaction.amount))\n\n self.position_tracker.execute_transaction(transaction)\n\n # we only ever want the dict form from now on\n transaction_dict = transaction.to_dict()\n try:\n self._processed_transactions[transaction.dt].append(\n transaction_dict,\n )\n except KeyError:\n self._processed_transactions[transaction.dt] = [transaction_dict]",
"def _log_update_time(self, *_):\n import time\n if not hasattr(self, '_time'):\n setattr(self, '_time', time.time())\n _time = time.time()\n debug('Time since last call: {:.6f}s'.format(_time - getattr(self, '_time')))\n setattr(self, '_time', _time)",
"def _record_current_time(self):\n now = time.time()\n delta = now - self._last_time\n self._last_time = now\n self._timing_recorder.append(delta)",
"def record_transaction(self, transaction: Transaction) -> bool:\n if self._locked:\n print('Failed to record transaction! Your account has been locked!'\n )\n return False\n\n if transaction.amount > self.bank_balance:\n print('Failed to record transaction! Not enough balance!')\n return False\n\n budget = self.budget_manager.get_budget(transaction.budget_category)\n if budget.locked:\n print('Failed to record transaction! This budget has been locked!')\n return False\n\n self.transactions.append(transaction)\n self.bank_balance -= transaction.amount\n budget.amount_spent += transaction.amount\n self._warn_and_lock_if_needed(transaction)\n return True",
"def update_isolation(self, time: int):",
"def audit(self):\n self.ping()",
"def Analyze(self):\n \n self._analyzeLogs()\n for user in self._start_times:\n self._result[user] = self._zipTimes(user)",
"def slow_update_duration(self):\n for i in range(len(self.data_file.sorted_data)):\n if self.data_file.sorted_data[i]['type'] == 'slow':\n slow_upd = self.data_file.sorted_data[i]['timestamp']\n Config.ANALYSIS.write(f\"slow at: {slow_upd}\\n\")\n if i == 0:\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n elif i == len(self.data_file.sorted_data) - 1:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\\n\")\n else:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\")\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n Config.ANALYSIS.write(\"\\n\\n\")",
"def output_stats(self):\n elapsed = self.timer.elapsed.total_seconds()\n count = self.copied + self.errored\n total = self.total\n # Time per key in milliseconds\n avg = round(elapsed / count * 1000, 3)\n # Time remaining in seconds\n remaining = 1.0 * elapsed / count * (total - count)\n # Time remaining in minutes\n remaining = round(remaining / 60.0, 1)\n # Time taken in minutes\n elapsed = round(elapsed / 60.0, 1)\n\n self.log.info(f\"{self.prefix}: {avg}ms avg, {elapsed}min passed, \"\n f\"{remaining}min remaining. ({count:,}/{total:,})\")",
"def test_avg_database_time(self):\n time = timeit(\n '''post(\"http://127.0.0.1:5000/database?format=json&merge=0\",\\\n data='{\"RUR\": 1.0, \"EUR\": 2.0, \"USD\": 3.0}')''',\n number=1000,\n globals=globals())\n print(time/1000, end=' ', flush=True)",
"def monitor_transactions(account):\n start_time = datetime.datetime.now()\n logger.info(\n format_log_message(\n 'Looking for new ripple transactions since last run'\n )\n )\n ledger_min_index = _get_min_ledger_index(account)\n marker = None\n has_results = True\n\n try:\n timeout = settings.RIPPLE_TIMEOUT\n except AttributeError:\n timeout = 5\n\n while has_results:\n try:\n response = account_tx(account,\n ledger_min_index,\n limit=PROCESS_TRANSACTIONS_LIMIT,\n marker=marker,\n timeout=timeout)\n except (RippleApiError, ConnectionError), e:\n logger.error(format_log_message(e))\n break\n\n transactions = response['transactions']\n marker = response.get('marker')\n has_results = bool(marker)\n\n for transaction in transactions:\n _store_transaction(account, transaction)\n\n transactions_timeout_reached = (\n datetime.datetime.now() - start_time >= datetime.timedelta(\n seconds=PROCESS_TRANSACTIONS_TIMEOUT\n )\n )\n\n if transactions_timeout_reached and has_results:\n has_results = False\n logger.error(\n 'Process_transactions command terminated because '\n '(%s seconds) timeout: %s',\n PROCESS_TRANSACTIONS_TIMEOUT, unicode(marker)\n )",
"def record_transaction(self) -> None:\n Menu.prompt_record_transaction()\n tx_data = Transaction.prompt_record_tx()\n new_tx = Transaction.generate_new_tx(tx_data)\n\n # Convert the user budget category int input to the enum\n budget_category_int = new_tx.budget_category\n budget_category = BudgetManager.category_mapping[budget_category_int]\n\n # Retrieve the budget object using the enum as the key\n budget = self.user.budget_manager.budget_dict[budget_category]\n\n # Validate the transaction before proceeding\n validated_tx, error_msg = self.validate_transaction_record(new_tx,\n budget)\n if not validated_tx:\n print(\"\\n[red]Warning:[/red] Unable to record transaction!\")\n print(error_msg)\n print(f\"{self.user.account}\\n\")\n print(budget)\n return\n\n # User has successfully recorded a transaction\n budget.add_amount_spent(new_tx.tx_amount)\n self.user.account.add_amount_spent(new_tx.tx_amount)\n self.user.tx_manager.add_transaction(new_tx)\n self.user.update_lock_status()\n print(\"\\nSuccessfully recorded the following transaction:\")\n print(new_tx)\n print(\"\\nTransaction has been recorded under the following budget \"\n \"category:\")\n print(budget)\n\n self.user.check_and_issue_user_warnings(budget)",
"def log_and_dispatch(self, state_manager, state_change):\n state_change_id = self.raiden.transaction_log.log(state_change)\n events = self.dispatch(state_manager, state_change)\n self.raiden.transaction_log.log_events(\n state_change_id,\n events,\n self.raiden.get_block_number()\n )",
"def log_results(self, filename=None):\n\n self.ad_log['train_auc'] = self.diag['train']['auc'][-1]\n self.ad_log['train_accuracy'] = self.diag['train']['acc'][-1]\n self.ad_log['train_time'] = self.train_time\n\n self.ad_log['test_auc'] = self.diag['test']['auc'][-1]\n self.ad_log['test_accuracy'] = self.diag['test']['acc'][-1]\n self.ad_log['test_time'] = self.test_time\n\n self.ad_log.save_to_file(filename=filename)",
"def record_time_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_time_metric(metric)",
"def record_trace(self):\n\n tfname = str(int(time.time())) + \".obd2_reader.trace\"\n self.tf_out = open(tfname, 'a')\n self.RecordTrace = 1\n print \"Recoding trace to:\", tfname",
"def _log_progress(self, t):\n\n # Run the update only 2 step before the actual logging happens in order to\n # make sure that the most recent possible values will be stored in\n # self.summary. This is a hacky workaround in order to support OffPolicyAgent\n # which runs 2 threads without coordination\n if (t+2) % self.log_freq == 0 and self.learn_started:\n episode_rewards = self.env_monitor.get_episode_rewards()\n self.episode_rewards = np.asarray(episode_rewards)\n if self.episode_rewards.size > 0:\n self.mean_ep_rew = np.mean(episode_rewards[-self.stats_n:])\n self.best_mean_ep_rew = max(self.best_mean_ep_rew, self.mean_ep_rew)\n\n if t % self.log_freq == 0 and self.learn_started:\n stats_logger.info(\"\")\n for s, lambda_v in self.log_info:\n stats_logger.info(s.format(lambda_v(t)))\n stats_logger.info(\"\")\n\n if self.summary:\n # Log with TensorBoard\n self.tb_writer.add_summary(self.summary, global_step=t)"
] | [
"0.57431966",
"0.56813395",
"0.5386816",
"0.5268158",
"0.52590114",
"0.5248265",
"0.51790476",
"0.51674783",
"0.51302767",
"0.5116144",
"0.5102775",
"0.5091443",
"0.50542235",
"0.5050009",
"0.5020564",
"0.50163",
"0.50032055",
"0.4916298",
"0.48824197",
"0.4858216",
"0.4846671",
"0.48407298",
"0.4829507",
"0.48275587",
"0.481562",
"0.4814692",
"0.4810644",
"0.48033282",
"0.47969192",
"0.47949752"
] | 0.7942365 | 0 |
Returns a list of slow transaction data collected during the reporting period. | def transaction_trace_data(self, connections):
_logger.debug('Generating transaction trace data.')
if not self.__settings:
return []
# Create a set 'traces' that is a union of slow transaction,
# and Synthetics transactions. This ensures we don't send
# duplicates of a transaction.
traces = set()
if self.__slow_transaction:
traces.add(self.__slow_transaction)
traces.update(self.__synthetics_transactions)
# Return an empty list if no transactions were captured.
if not traces:
return []
# We want to limit the number of explain plans we do across
# these. So work out what were the slowest and tag them.
# Later the explain plan will only be run on those which are
# tagged.
agent_limits = self.__settings.agent_limits
explain_plan_limit = agent_limits.sql_explain_plans_per_harvest
maximum_nodes = agent_limits.transaction_traces_nodes
database_nodes = []
if explain_plan_limit != 0:
for trace in traces:
for node in trace.slow_sql:
# Make sure we clear any flag for explain plans on
# the nodes in case a transaction trace was merged
# in from previous harvest period.
node.generate_explain_plan = False
# Node should be excluded if not for an operation
# that we can't do an explain plan on. Also should
# not be one which would not be included in the
# transaction trace because limit was reached.
if (node.node_count < maximum_nodes and
node.connect_params and node.statement.operation in
node.statement.database.explain_stmts):
database_nodes.append(node)
database_nodes = sorted(database_nodes,
key=lambda x: x.duration)[-explain_plan_limit:]
for node in database_nodes:
node.generate_explain_plan = True
else:
for trace in traces:
for node in trace.slow_sql:
node.generate_explain_plan = True
database_nodes.append(node)
# Now generate the transaction traces. We need to cap the
# number of nodes capture to the specified limit.
trace_data = []
for trace in traces:
transaction_trace = trace.transaction_trace(
self, maximum_nodes, connections)
data = [transaction_trace,
list(trace.string_table.values())]
if self.__settings.debug.log_transaction_trace_payload:
_logger.debug('Encoding slow transaction data where '
'payload=%r.', data)
json_data = json_encode(data)
level = self.__settings.agent_limits.data_compression_level
level = level or zlib.Z_DEFAULT_COMPRESSION
zlib_data = zlib.compress(six.b(json_data), level)
pack_data = base64.standard_b64encode(zlib_data)
if six.PY3:
pack_data = pack_data.decode('Latin-1')
root = transaction_trace.root
if trace.record_tt:
force_persist = True
else:
force_persist = False
if trace.include_transaction_trace_request_uri:
request_uri = trace.request_uri
else:
request_uri = None
trace_data.append([transaction_trace.start_time,
root.end_time - root.start_time,
trace.path,
request_uri,
pack_data,
trace.guid,
None,
force_persist,
None,
trace.synthetics_resource_id, ])
return trace_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def slow_transaction_data(self):\n\n # XXX This method no longer appears to be used. Being replaced\n # by the transaction_trace_data() method.\n\n if not self.__settings:\n return []\n\n if not self.__slow_transaction:\n return []\n\n maximum = self.__settings.agent_limits.transaction_traces_nodes\n\n transaction_trace = self.__slow_transaction.transaction_trace(\n self, maximum)\n\n data = [transaction_trace,\n list(self.__slow_transaction.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n trace_data = [[root.start_time,\n root.end_time - root.start_time,\n self.__slow_transaction.path,\n self.__slow_transaction.request_uri,\n pack_data]]\n\n return trace_data",
"def return_trade_history(\n self,\n start: Timestamp,\n end: Timestamp,\n ) -> list[dict[str, Any]]:\n limit = 100\n data: list[dict[str, Any]] = []\n start_ms = start * 1000\n end_ms = end * 1000\n while True:\n new_data = self.api_query_list('/trades', {\n 'startTime': start_ms,\n 'endTime': end_ms,\n 'limit': limit,\n })\n results_length = len(new_data)\n if data == [] and results_length < limit:\n return new_data # simple case - only one query needed\n\n latest_ts_ms = start_ms\n # add results to data and prepare for next query\n existing_ids = {x['id'] for x in data}\n for trade in new_data:\n try:\n timestamp_ms = trade['createTime']\n latest_ts_ms = max(latest_ts_ms, timestamp_ms)\n # since we query again from last ts seen make sure no duplicates make it in\n if trade['id'] not in existing_ids:\n data.append(trade)\n except (DeserializationError, KeyError) as e:\n msg = str(e)\n if isinstance(e, KeyError):\n msg = f'Missing key entry for {msg}.'\n self.msg_aggregator.add_warning(\n 'Error deserializing a poloniex trade. Check the logs for details',\n )\n log.error(\n 'Error deserializing poloniex trade',\n trade=trade,\n error=msg,\n )\n continue\n\n if results_length < limit:\n break # last query has less than limit. We are done.\n\n # otherwise we query again from the last ts seen in the last result\n start_ms = latest_ts_ms\n continue\n\n return data",
"def get_reports(self):\r\n return sorted(self._reports,\r\n key=lambda x: x['stats']['totalTimeMillis'],\r\n reverse=True)",
"def get_reports(self):\n return sorted(self._reports,\n key=lambda x: x['stats']['totalTimeMillis'],\n reverse=True)",
"def slow_queries(self):\n request = Request(method=\"get\", endpoint=\"/query/slow\")\n\n def response_handler(resp):\n if not resp.is_success:\n raise C8QLQueryListError(resp, request)\n return self._format_queries(resp.body)\n\n return self._execute(request, response_handler)",
"def showTransactions(self):\n self.scanTransactions()\n txns = []\n\n # Summarize the stats\n for x in range(len(self._trans)):\n stats = self._trans[x]\n trans_time = 0\n remote_calls = 0\n for name, stat in stats:\n trans_time += stat.total_tt\n remote_calls += 1\n txns.append((x, trans_time, remote_calls))\n\n results = [\"TX#\\tTime\\tCalls\",\n \"=\" * 22]\n\n for item in txns:\n results.append(\"%3d\\t%4f\\t%5d\" % item)\n \n return \"\\n\".join(results)",
"def fetch(self, daterange=(datetime.now() - timedelta(1), datetime.now())):\n cursor = self.conn.cursor()\n sql = 'SELECT measure_dt, ping, download, upload FROM speedlogs ' + \\\n ' WHERE measure_dt BETWEEN ? AND ?'\n cursor.execute(sql, daterange)\n return cursor.fetchall()",
"def gather_data(self, *args, **kwargs):\n instrument_arg = kwargs.get('instrument', 'EUR_USD')\n granularity_arg = kwargs.get('granularity', 'M1')\n candle_format = kwargs.get('candleFormat', 'bidask')\n start_time = kwargs.get('start', '2014-10-01T00:00:00.000000Z')\n count_arg = kwargs.get('count', 5000)\n out_data = []\n data_complete = False\n while(not data_complete):\n response = self.oanda.get_history(instrument=instrument_arg,\n granularity=granularity_arg,\n candleFormat=candle_format,\n start=start_time,\n count=count_arg)\n raw_data = response['candles']\n if (len(out_data) == 0):\n out_data = out_data + raw_data\n elif (len(out_data) > 1):\n # raw_data[0] is already in out_data as raw_data[-1] from last\n # iteration\n out_data = out_data + raw_data[1:]\n start_time = raw_data[-1]['time']\n if (len(raw_data) < 5000):\n data_complete = True\n\n out_data = self._list_to_df(out_data)\n return out_data",
"def get_gdax_historical_data():\n \n start = None\n while not start:\n start,end,tid = getStartAndEndHistoric()\n if not start:\n time.sleep(60)\n #Todo: change this to 1min\n firsttimestamp = start\n engine = sa.create_engine(sql_address)\n products = [\"LTC-USD\",\"LTC-BTC\",\"ETH-USD\",\"ETH-BTC\",\"BTC-USD\"]\n public_client = gdax.PublicClient()\n deltat = datetime.timedelta(seconds = 200)\n timewindows = []\n while end - start > datetime.timedelta(seconds=0):\n if start + deltat > end:\n endx = end\n else:\n endx = start + deltat\n timewindows.append([start,endx])\n start += deltat\n results = []\n total = len(timewindows)\n current_idx = 0\n timeold = time.time()\n numofqueries = 0\n engine = sa.create_engine(sql_address)\n Base.metadata.bind = engine\n DBSession = sa.orm.sessionmaker()\n DBSession.bind = engine\n session = DBSession()\n for startx,endx in timewindows:\n\n current_idx += 1\n for i in products:\n repeat = True\n while repeat:\n\n #delay if ratelimts are close\n if numofqueries < 3:\n while time.time() - timeold < 1:\n time.sleep(0.05)\n \n timeold = time.time()\n numofqueries = 0\n try:\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n except:\n time.sleep(30)\n public_client = gdax.PublicClient()\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n numofqueries += 1\n\n #rate limit exceeded has 'message' as dict.\n if not 'message' in alist:\n repeat = False\n for a in alist:\n a[0] = datetime.datetime.fromtimestamp(float(a[0]))\n tmp = i.split('-')\n d = dict(coin = tmp[0], currency = tmp[1], timestamp = a[0], low=a[1], high=a[2], open=a[3], close=a[4], volume=a[5])\n results.append(d)\n lasttimestamp = a[0]\n\n #upload with batch size of 10000\n if len(results) > 10000:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n if len(results) > 0:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n\n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n update.status='complete'\n order = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'order')).first()\n order.status='complete'\n session.commit()",
"def timings(self):\r\n return self._timings",
"def _get_meas_times_from_db(self):\n meas_times = []\n if self._data['report_save_historical_instances_ind'] != 'Y':\n # for non historical reports take measurement time from saved dataset\n dataset = self._jfile.get_current_stored_dataset()\n try:\n meas_time = datetime.datetime.strptime(dataset['meas_time'], '%Y-%m-%d %H:%M:%S')\n except ValueError:\n raise Exception(\"Cannot unformat string %s to datetime\" % dataset['meas_time'])\n meas_times.append(meas_time)\n\n else:\n # for historical reports take measurement times from db datasets\n where_sql = ''\n where_sql_list = list()\n params = [self._id, self._segment_value_id]\n\n if self._process_dataset_ids:\n for dataset_id in self._process_dataset_ids:\n if type(dataset_id) == list:\n where_sql_list.append(\"(report_data_set_instance_id >= %s AND report_data_set_instance_id <= %s)\")\n if dataset_id[0] < dataset_id[1]:\n params.append(dataset_id[0])\n params.append(dataset_id[1])\n else:\n params.append(dataset_id[1])\n params.append(dataset_id[0])\n else:\n where_sql_list.append(\"report_data_set_instance_id = %s\")\n params.append(dataset_id)\n where_sql = ' AND (%s)' % ' OR '.join(where_sql_list)\n\n self._db.Query(\"\"\"SELECT measurement_time\n FROM report_data_set_instance\n WHERE\n `element_id`= %%s\n AND segment_value_id = %%s\n %s\n ORDER BY measurement_time ASC\"\"\" % where_sql, tuple(params))\n meas_times = [item['measurement_time'] for item in self._db.record]\n\n return meas_times",
"def getTransferListSummary(self):\n p_ids_and_prices = {}\n players = self.getAllPlayerInfoTransferlist()\n\n # Get IDs of all players\n log_event(self.queue, \"Gathering player prices... \")\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n # removed Filter for unlisted / expired players\n if p_id not in p_ids_and_prices:\n p_sellprice = self.getPlayerSellPrice(p_id)\n # If sell price returns 0, need to fetch from Futbin\n if p_sellprice == 0:\n p_sellprice = self.getFutbinPrice_opentab(p_id)\n self.sleep_approx(5) # Delay iteration to not anger futbin\n # Add player ID and price to dict\n p_ids_and_prices[p_id] = p_sellprice\n\n for p_id in p_ids_and_prices:\n p_price = p_ids_and_prices[p_id]\n p_name = self.getPlayerCardName(p_id)\n log_event(self.queue, str(p_name) + \" - #\" +\n str(p_id) + \" Price \" + str(p_price))\n\n num_p_sold = 0\n num_p_expired = 0\n num_p_unlisted = 0\n num_p_listed = 0\n\n sold_p_value = 0\n expired_p_value = 0\n unlisted_p_value = 0\n listed_p_value = 0\n\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n p_soldprice = p[5] # is 0 if unlisted\n p_sellprice = int(p_ids_and_prices[p_id])\n\n if \"won\" in p_bidstatus:\n num_p_sold += 1\n sold_p_value += p_soldprice\n if \"expired\" in p_bidstatus:\n num_p_expired += 1\n expired_p_value += p_sellprice\n if (p_bidstatus == \"listFUTItem\"):\n num_p_unlisted += 1\n unlisted_p_value += p_sellprice\n if (p_bidstatus == \"listFUTItem has-auction-data\"):\n num_p_listed += 1\n listed_p_value += p_sellprice\n\n log_event(self.queue, \"Players sold: \" + str(num_p_sold))\n log_event(self.queue, \"Players expired: \" + str(num_p_expired))\n log_event(self.queue, \"Players listed: \" + str(num_p_listed))\n log_event(self.queue, \"Players unlisted: \" + str(num_p_unlisted))\n log_event(self.queue, \" - - - \")\n log_event(self.queue, \"Sold players value: \" + str(sold_p_value))\n log_event(self.queue, \"Expired players value: \" +\n str(expired_p_value))\n log_event(self.queue, \"Unlisted players value: \" +\n str(unlisted_p_value))\n log_event(self.queue, \"Listed players value: \" + str(listed_p_value))\n\n # TODO subtract bought price\n self.user_players_won += int(num_p_unlisted)\n self.p_ids_and_prices = p_ids_and_prices\n intel = [p_ids_and_prices, num_p_sold, num_p_expired, num_p_unlisted,\n num_p_listed, sold_p_value, expired_p_value, unlisted_p_value, listed_p_value]\n return intel",
"def describe_slow_log_records(\n self,\n request: dds_20151201_models.DescribeSlowLogRecordsRequest,\n ) -> dds_20151201_models.DescribeSlowLogRecordsResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_slow_log_records_with_options(request, runtime)",
"def time_list(self):\n return (self.N_T * (np.arange(self.N_itr) + 1) /\n self.N_itr * 1000 * self.DT)",
"def get_new_data(self):\n\n # record bar parse performance\n self.logger.debug(\"Started parsing new ticks.\")\n start_parse = time.time()\n for exchange in self.exchanges:\n exchange.parse_ticks()\n end_parse = time.time()\n duration = round(end_parse - start_parse, 5)\n\n self.logger.debug(\n \"Parsed \" + str(self.total_instruments) +\n \" instruments' ticks in \" + str(duration) + \" seconds.\")\n self.track_tick_processing_performance(duration)\n\n # wrap new 1 min bars in market events\n new_market_events = []\n for exchange in self.exchanges:\n bars = exchange.get_new_bars()\n for symbol in exchange.get_symbols():\n for bar in bars[symbol]:\n event = MarketEvent(exchange.get_name(), bar)\n new_market_events.append(event)\n # add bars to save-to-db-later queue\n # TODO: store new bars concurrently with a processpool\n self.bars_save_to_db.put(event)\n return new_market_events",
"def timeseries_report(self):\n report = pd.DataFrame(index=self.price.index)\n report.loc[:, \"FR Energy Throughput (kWh)\"] = self.ene_results['ene']\n report.loc[:, \"FR Energy Throughput Up (Charging) (kWh)\"] = self.variables['regu_c']*self.krd_avg*self.dt*self.storage.rte\n report.loc[:, \"FR Energy Throughput Up (Discharging) (kWh)\"] = self.variables['regu_d']*self.krd_avg*self.dt\n report.loc[:, \"FR Energy Throughput Down (Charging) (kWh)\"] = self.variables['regd_c']*self.krd_avg*self.dt*self.storage.rte\n report.loc[:, \"FR Energy Throughput Down (Discharging) (kWh)\"] = self.variables['regd_d']*self.krd_avg*self.dt\n report.loc[:, \"FR Energy Settlement Price Signal ($/kWh)\"] = self.price\n report.loc[:, 'Regulation Up (Charging) (kW)'] = self.variables['regu_c']\n report.loc[:, 'Regulation Up (Discharging) (kW)'] = self.variables['regu_d']\n report.loc[:, 'Regulation Down (Charging) (kW)'] = self.variables['regd_c']\n report.loc[:, 'Regulation Down (Discharging) (kW)'] = self.variables['regd_d']\n report.loc[:, \"Regulation Up Price Signal ($/kW)\"] = self.p_regu\n report.loc[:, \"Regulation Down Price Signal ($/kW)\"] = self.p_regd\n\n return report",
"async def get_trades(self) -> List[TradeRequest]:\n data = j.dumps({\n 'startindex': 0,\n 'statustype': 'inbound'\n })\n r = await self.request.request(url='https://www.roblox.com/my/money.aspx/getmyitemtrades', data=data, method='POST')\n data = json.loads(r.json()['d'])[\"Data\"]\n trades = []\n for trade in data:\n t = json.loads(trade)\n trades.append(TradeRequest(self.request, t['Date'], t['Expires'], t['TradePartner'], t['TradePartnerID'], t['Status'], t['TradeSessionID']))\n return trades",
"def tobs():\n temps = engine.execute(\"SELECT date, tobs FROM Measurement WHERE date BETWEEN '2016-08-23' AND '2017-08-23'\").fetchall()\n\n # Convert list of tuples into normal list\n temps_list = list(np.ravel(temps))\n\n return jsonify(temps_list)",
"def get_pending_transactions():\n\n return History.get_pending().get()",
"def get_all_coins_history(self, end_date=None, start_date=None, verbose=True):\n infos = []\n for coin in self.get_coins():\n if verbose:\n print(\"Collecting data for >> \" + coin)\n if start_date:\n start_date = start_date\n else:\n start_date = '20130428'\n if end_date:\n end_date = end_date\n else:\n now = str(datetime.now().date()).replace('-', '')\n end_date = now\n coin_url = self.coins[coin]\n coin_url = coin_url + '/historical-data/?start=' + start_date + '&end=' + end_date\n content = urlopen(coin_url).read()\n soup = BeautifulSoup(content, 'html.parser')\n results = soup.find_all(\"tr\", class_=\"text-right\")\n\n for result in results:\n date = result.find_all('td')[0].text\n\n open_val = result.find_all('td')[1].text\n if open_val == '-':\n open_val = None\n else:\n open_val = float(result.find_all('td')[1].text.replace(',', ''))\n\n high_val = result.find_all('td')[2].text\n if high_val == '-':\n high_val = None\n else:\n high_val = float(result.find_all('td')[2].text.replace(',', ''))\n\n low_val = result.find_all('td')[3].text\n if low_val == '-':\n low_val = None\n else:\n low_val = float(result.find_all('td')[3].text.replace(',', ''))\n\n close_val = result.find_all('td')[4].text\n if close_val == '-':\n close_val = None\n else:\n close_val = float(result.find_all('td')[4].text.replace(',', ''))\n\n volume = result.find_all('td')[5].text\n if volume == '-':\n volume = None\n else:\n volume = float(result.find_all('td')[5].text.replace(',', ''))\n\n market_cap = result.find_all('td')[6].text\n if market_cap == '-':\n market_cap = None\n else:\n market_cap = float(result.find_all('td')[6].text.replace(',', ''))\n temp = {\n \"coin\": coin, # soup.title.text.split()[0],\n \"date\": date,\n \"symbol\": soup.title.text.split()[1].replace('(', '').replace(')', ''),\n \"open_val\": open_val,\n \"high_val\": high_val,\n \"low_val\": low_val,\n \"close_val\": close_val,\n \"volume\": volume,\n \"market_cap\": market_cap\n }\n infos.append(temp)\n df_all = pd.DataFrame.from_dict(infos)\n df_all['middle_val'] = (df_all.high_val + df_all.low_val) / 2\n df_all['datetime'] = pd.to_datetime(df_all['date'])\n df_all = df_all.sort_values(by='datetime')\n self.coins_history = df_all",
"def get_report_list(jtl_file):\n df = None\n try:\n df = pd.read_csv(jtl_file,\n low_memory=False,\n error_bad_lines=False,\n quoting=csv.QUOTE_NONE,\n encoding='utf-8')\n except Exception as e:\n err_msg = 'read jtl file error. detail:{e}'.format(e=e)\n LOGGER.error(err_msg)\n if df is None:\n return\n threads = int(jtl_file.split(os.sep)[-1].split('_')[0])\n success, elapsed, latency, sent_bytes, receive_bytes = [df.get(x) for x in\n ['success', 'elapsed', 'Latency', 'sentBytes', 'bytes']]\n samples = df.shape[0]\n error_count = success.value_counts().get(False)\n if not error_count:\n error_count = 0\n error_rate = str(float(error_count / samples) * 100) + '%'\n label = df.loc[0, 'label']\n start_time = df.iat[0, 0]\n end_time = df.iloc[-1, 0]\n last_req_time = df.iat[-1, 1]\n\n # 如果最后一行数据无效,则取上一行\n i = 1\n while not len(str(end_time)) == 13 and not re.findall('[\\d]{13}', str(end_time)):\n i += 1\n end_time = df.iloc[-i, 0]\n last_req_time = df.iat[-i, 1]\n samples -= 1\n\n if isinstance(start_time, str):\n start_time = int(start_time)\n if isinstance(end_time, str):\n end_time = int(end_time)\n\n local_start_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time / 1000))\n local_end_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time / 1000))\n\n durations = (end_time + last_req_time - start_time) / 1000\n throughput = samples / durations\n\n report_list = [label, local_start_time, local_end_time, durations, threads, throughput, error_rate,\n elapsed.min(), elapsed.max(), elapsed.mean(), samples, sent_bytes.mean(), receive_bytes.mean(),\n latency.mean(), latency.min(), latency.max()]\n\n return report_list",
"def TOBS():\n session = Session(engine)\n # Query all passengers\n\n TOBS = session.query(Measurement.date,Measurement.tobs).filter(Measurement.date >= '2010-08-23').all()\n\n # Convert list of tuples into normal list\n all_TOBS = list(np.ravel(TOBS))\n\n return jsonify(all_TOBS)",
"def get_trades_history(self, symbol, start_time, end_time, limit=1000):\n payload = {'symbol': symbol, 'start': start_time, 'end': end_time, 'limit': limit}\n return self.public_request('GET', '/api/v1/trades', **payload)",
"def get_data(symbol_id='BTC', period_id='1DAY', request_limit=1000, tdelta=30):\n now = datetime.utcnow()\n month = timedelta(days=tdelta)\n past_month = (now - month).isoformat()\n\n parameters = {'symbol_id': symbol_id, 'period_id': period_id, 'time_start': past_month[:-3], 'limit':request_limit}\n response = requests.get(HISTORY_URL, params=parameters, headers=header)\n\n while response.status_code != 200:\n time.sleep(5)\n response = requests.get(HISTORY_URL, params=parameters, headers=header)\n \n data = response.json()\n \n # this is a commnet\n csv_headers = ['time_period_start', 'time_period_end', 'price_high', 'price_low', 'price_close', 'price_open', 'trades_count', \n 'volume_traded', 'time_open', 'time_close']\n\n\n with open(str(datafolder / f'{symbol_id}_{tdelta}_day.csv'), 'w', newline='') as f:\n writer = csv.DictWriter(f, csv_headers)\n writer.writeheader()\n for item in data:\n writer.writerow(item)",
"def returnTradeHistory(self, time=1 * 60 * 60, limit=100):\n assert limit <= 100, \"'limit' has to be smaller than 100\"\n return self.dpay.rpc.get_trade_history(\n transactions.formatTimeFromNow(-time),\n transactions.formatTimeFromNow(),\n limit,\n api=\"market_history\"\n )",
"def timeseries_report(self):\n try:\n n = self.n.value\n except AttributeError:\n n = self.n\n results = pd.DataFrame(index=self.variables.index)\n results['ICE Generation (kW)'] = self.variables['ice_gen']\n results['ICE On (y/n)'] = self.variables['on_ice']\n results['ICE P_min (kW)'] = self.p_min\n results['ICE Genset P_max (kW)'] = self.rated_power * n\n return results",
"def read_daily_messages_report(self):\n from itertools import repeat\n\n self.ID_TWEET_ORANGE_FLOW = kpi_from_db_config.ID_TWEET_ORANGE_FLOW\n self.ID_PROCESSING_MESSAGES = kpi_from_db_config.ID_PROCESSING_MESSAGES\n self.ID_CANDIDATES_PROCESSED = kpi_from_db_config.ID_CANDIDATES_PROCESSED\n\n list_id = [self.ID_TWEET_ORANGE_FLOW, \n self.ID_PROCESSING_MESSAGES, \n self.ID_CANDIDATES_PROCESSED]\n len_need_list = [7, 8, 2]\n list_result = [[] for i in repeat(None,len(list_id))]\n\n for i in range(len(list_id)):\n self.cursor.execute('''\n SELECT value\n FROM public.kpi_report\n WHERE id = %s\n ORDER BY created_at DESC\n LIMIT %s\n ''', [list_id[i], len_need_list[i]])\n rows_count = self.cursor.rowcount\n\n if (rows_count == len_need_list[i]): # If rows_count as expected \n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n elif (rows_count > 0 and rows_count < len_need_list[i]):\n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n list_result[i] = list_result[i] + [0] * (len_need_list[i] - rows_count) \n else:\n list_result[i] = [0] * len_need_list[i]\n\n return list_result",
"def get_tiingo_prices(tickers, start, end, api_key=None):\n\n all_results = []\n if api_key is None:\n api_key = os.getenv('TIINGO_API_KEY')\n # Sort tickers so that error logging can be used to identify progress\n tickers = sorted(tickers)\n\n for i, ticker in enumerate(tickers):\n try:\n df = web.DataReader(name=ticker,\n data_source='tiingo',\n start=start,\n end=end,\n api_key=api_key)\n df = df[['adjClose']]\n except KeyError as e:\n if e.args[0] == 'date':\n # Patch to handle issue in pandas_datareader\n # where empty results cause a KeyError\n print(f'Got empty df for i={i}, ticker={tickers[i]}')\n df = pd.DataFrame()\n except Exception as e:\n print('Received an unexpected error:', e)\n print(f'Only fetched up to {i-1} inclusive. Returning.')\n return pd.concat(all_results)\n\n if (i % 50 == 0) and i > 0:\n # Sleep to avoid timeouts. Empirically found 20s to be sufficient\n time.sleep(20)\n\n all_results.append(df)\n return pd.concat(all_results)",
"def temps(): \n \n # Create session and save reference to table\n session = Session(engine)\n Measurement = Base.classes.measurement\n\n # Query\n tobs_query = session.query(Measurement.date, func.avg(Measurement.tobs).label('tobs'))\\\n .filter(Measurement.date >= '2016-08-23').group_by(Measurement.date)\n \n tobs_list = []\n for row in tobs_query:\n tobs_list.append(row._asdict())\n \n return jsonify(tobs_list)\n\n session.close()",
"def tobs ():\n # Query the last 12 months \n # session.query(func.max (Measurement.date)).all()f \n # temperature observation data for this station \n\n last = session.query(func.max (Measurement.date)).all()\n prev = dt.date(last) - dt.timedelta(days=365)\n\n\n #make a query that goes back 12 months before that date\n results = session.query(Measurement.date, Measurement.tobs).\\\n filter(Measurement.date >= prev).all()\n\n all_Tobs = list(np.ravel(results))\n\n return jsonify(all_Tobs)"
] | [
"0.7311632",
"0.5812507",
"0.571646",
"0.57079303",
"0.5684339",
"0.55696833",
"0.55493486",
"0.553089",
"0.5528401",
"0.5502346",
"0.548358",
"0.544936",
"0.544525",
"0.54149175",
"0.5411108",
"0.54054636",
"0.5394847",
"0.5392003",
"0.53741133",
"0.5357601",
"0.5319892",
"0.5315574",
"0.5306587",
"0.5294445",
"0.5268564",
"0.5268471",
"0.52652085",
"0.5246509",
"0.5229982",
"0.5213194"
] | 0.6642906 | 1 |
Returns a list containing any slow transaction data collected during the reporting period. NOTE Currently only the slowest transaction for the reporting period is retained. | def slow_transaction_data(self):
# XXX This method no longer appears to be used. Being replaced
# by the transaction_trace_data() method.
if not self.__settings:
return []
if not self.__slow_transaction:
return []
maximum = self.__settings.agent_limits.transaction_traces_nodes
transaction_trace = self.__slow_transaction.transaction_trace(
self, maximum)
data = [transaction_trace,
list(self.__slow_transaction.string_table.values())]
if self.__settings.debug.log_transaction_trace_payload:
_logger.debug('Encoding slow transaction data where '
'payload=%r.', data)
json_data = json_encode(data)
level = self.__settings.agent_limits.data_compression_level
level = level or zlib.Z_DEFAULT_COMPRESSION
zlib_data = zlib.compress(six.b(json_data), level)
pack_data = base64.standard_b64encode(zlib_data)
if six.PY3:
pack_data = pack_data.decode('Latin-1')
root = transaction_trace.root
trace_data = [[root.start_time,
root.end_time - root.start_time,
self.__slow_transaction.path,
self.__slow_transaction.request_uri,
pack_data]]
return trace_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transaction_trace_data(self, connections):\n\n _logger.debug('Generating transaction trace data.')\n\n if not self.__settings:\n return []\n\n # Create a set 'traces' that is a union of slow transaction,\n # and Synthetics transactions. This ensures we don't send\n # duplicates of a transaction.\n\n traces = set()\n if self.__slow_transaction:\n traces.add(self.__slow_transaction)\n traces.update(self.__synthetics_transactions)\n\n # Return an empty list if no transactions were captured.\n\n if not traces:\n return []\n\n # We want to limit the number of explain plans we do across\n # these. So work out what were the slowest and tag them.\n # Later the explain plan will only be run on those which are\n # tagged.\n\n agent_limits = self.__settings.agent_limits\n explain_plan_limit = agent_limits.sql_explain_plans_per_harvest\n maximum_nodes = agent_limits.transaction_traces_nodes\n\n database_nodes = []\n\n if explain_plan_limit != 0:\n for trace in traces:\n for node in trace.slow_sql:\n # Make sure we clear any flag for explain plans on\n # the nodes in case a transaction trace was merged\n # in from previous harvest period.\n\n node.generate_explain_plan = False\n\n # Node should be excluded if not for an operation\n # that we can't do an explain plan on. Also should\n # not be one which would not be included in the\n # transaction trace because limit was reached.\n\n if (node.node_count < maximum_nodes and\n node.connect_params and node.statement.operation in\n node.statement.database.explain_stmts):\n database_nodes.append(node)\n\n database_nodes = sorted(database_nodes,\n key=lambda x: x.duration)[-explain_plan_limit:]\n\n for node in database_nodes:\n node.generate_explain_plan = True\n\n else:\n for trace in traces:\n for node in trace.slow_sql:\n node.generate_explain_plan = True\n database_nodes.append(node)\n\n # Now generate the transaction traces. We need to cap the\n # number of nodes capture to the specified limit.\n\n trace_data = []\n\n for trace in traces:\n transaction_trace = trace.transaction_trace(\n self, maximum_nodes, connections)\n\n data = [transaction_trace,\n list(trace.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n if trace.record_tt:\n force_persist = True\n else:\n force_persist = False\n\n if trace.include_transaction_trace_request_uri:\n request_uri = trace.request_uri\n else:\n request_uri = None\n\n trace_data.append([transaction_trace.start_time,\n root.end_time - root.start_time,\n trace.path,\n request_uri,\n pack_data,\n trace.guid,\n None,\n force_persist,\n None,\n trace.synthetics_resource_id, ])\n\n return trace_data",
"def get_reports(self):\r\n return sorted(self._reports,\r\n key=lambda x: x['stats']['totalTimeMillis'],\r\n reverse=True)",
"def get_reports(self):\n return sorted(self._reports,\n key=lambda x: x['stats']['totalTimeMillis'],\n reverse=True)",
"def return_trade_history(\n self,\n start: Timestamp,\n end: Timestamp,\n ) -> list[dict[str, Any]]:\n limit = 100\n data: list[dict[str, Any]] = []\n start_ms = start * 1000\n end_ms = end * 1000\n while True:\n new_data = self.api_query_list('/trades', {\n 'startTime': start_ms,\n 'endTime': end_ms,\n 'limit': limit,\n })\n results_length = len(new_data)\n if data == [] and results_length < limit:\n return new_data # simple case - only one query needed\n\n latest_ts_ms = start_ms\n # add results to data and prepare for next query\n existing_ids = {x['id'] for x in data}\n for trade in new_data:\n try:\n timestamp_ms = trade['createTime']\n latest_ts_ms = max(latest_ts_ms, timestamp_ms)\n # since we query again from last ts seen make sure no duplicates make it in\n if trade['id'] not in existing_ids:\n data.append(trade)\n except (DeserializationError, KeyError) as e:\n msg = str(e)\n if isinstance(e, KeyError):\n msg = f'Missing key entry for {msg}.'\n self.msg_aggregator.add_warning(\n 'Error deserializing a poloniex trade. Check the logs for details',\n )\n log.error(\n 'Error deserializing poloniex trade',\n trade=trade,\n error=msg,\n )\n continue\n\n if results_length < limit:\n break # last query has less than limit. We are done.\n\n # otherwise we query again from the last ts seen in the last result\n start_ms = latest_ts_ms\n continue\n\n return data",
"def get_latest_transactions(self):\n first_run = False\n if not self._transactions:\n first_run = True\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n self._logger.debug('Caching %s transactions', len(transactions))\n self._transactions.extend(transactions)\n if first_run:\n self._logger.info('First run detected, discarding transactions until now')\n return []\n return transactions",
"def slow_queries(self):\n request = Request(method=\"get\", endpoint=\"/query/slow\")\n\n def response_handler(resp):\n if not resp.is_success:\n raise C8QLQueryListError(resp, request)\n return self._format_queries(resp.body)\n\n return self._execute(request, response_handler)",
"def timings(self):\r\n return self._timings",
"def get_pending_transactions():\n\n return History.get_pending().get()",
"def returnTradeHistory(self, time=1 * 60 * 60, limit=100):\n assert limit <= 100, \"'limit' has to be smaller than 100\"\n return self.dpay.rpc.get_trade_history(\n transactions.formatTimeFromNow(-time),\n transactions.formatTimeFromNow(),\n limit,\n api=\"market_history\"\n )",
"def unbalanced(self):\n # TODO: Find a way to make a sql query to return all unbalanced transactions\n return []",
"def trades(self) -> List[ClosedTrade]:\n return store.completed_trades.trades",
"def get_all_latest_transactions(self):\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n return transactions",
"def showTransactions(self):\n self.scanTransactions()\n txns = []\n\n # Summarize the stats\n for x in range(len(self._trans)):\n stats = self._trans[x]\n trans_time = 0\n remote_calls = 0\n for name, stat in stats:\n trans_time += stat.total_tt\n remote_calls += 1\n txns.append((x, trans_time, remote_calls))\n\n results = [\"TX#\\tTime\\tCalls\",\n \"=\" * 22]\n\n for item in txns:\n results.append(\"%3d\\t%4f\\t%5d\" % item)\n \n return \"\\n\".join(results)",
"def getTransferListSummary(self):\n p_ids_and_prices = {}\n players = self.getAllPlayerInfoTransferlist()\n\n # Get IDs of all players\n log_event(self.queue, \"Gathering player prices... \")\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n # removed Filter for unlisted / expired players\n if p_id not in p_ids_and_prices:\n p_sellprice = self.getPlayerSellPrice(p_id)\n # If sell price returns 0, need to fetch from Futbin\n if p_sellprice == 0:\n p_sellprice = self.getFutbinPrice_opentab(p_id)\n self.sleep_approx(5) # Delay iteration to not anger futbin\n # Add player ID and price to dict\n p_ids_and_prices[p_id] = p_sellprice\n\n for p_id in p_ids_and_prices:\n p_price = p_ids_and_prices[p_id]\n p_name = self.getPlayerCardName(p_id)\n log_event(self.queue, str(p_name) + \" - #\" +\n str(p_id) + \" Price \" + str(p_price))\n\n num_p_sold = 0\n num_p_expired = 0\n num_p_unlisted = 0\n num_p_listed = 0\n\n sold_p_value = 0\n expired_p_value = 0\n unlisted_p_value = 0\n listed_p_value = 0\n\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n p_soldprice = p[5] # is 0 if unlisted\n p_sellprice = int(p_ids_and_prices[p_id])\n\n if \"won\" in p_bidstatus:\n num_p_sold += 1\n sold_p_value += p_soldprice\n if \"expired\" in p_bidstatus:\n num_p_expired += 1\n expired_p_value += p_sellprice\n if (p_bidstatus == \"listFUTItem\"):\n num_p_unlisted += 1\n unlisted_p_value += p_sellprice\n if (p_bidstatus == \"listFUTItem has-auction-data\"):\n num_p_listed += 1\n listed_p_value += p_sellprice\n\n log_event(self.queue, \"Players sold: \" + str(num_p_sold))\n log_event(self.queue, \"Players expired: \" + str(num_p_expired))\n log_event(self.queue, \"Players listed: \" + str(num_p_listed))\n log_event(self.queue, \"Players unlisted: \" + str(num_p_unlisted))\n log_event(self.queue, \" - - - \")\n log_event(self.queue, \"Sold players value: \" + str(sold_p_value))\n log_event(self.queue, \"Expired players value: \" +\n str(expired_p_value))\n log_event(self.queue, \"Unlisted players value: \" +\n str(unlisted_p_value))\n log_event(self.queue, \"Listed players value: \" + str(listed_p_value))\n\n # TODO subtract bought price\n self.user_players_won += int(num_p_unlisted)\n self.p_ids_and_prices = p_ids_and_prices\n intel = [p_ids_and_prices, num_p_sold, num_p_expired, num_p_unlisted,\n num_p_listed, sold_p_value, expired_p_value, unlisted_p_value, listed_p_value]\n return intel",
"def time_list(self):\n return (self.N_T * (np.arange(self.N_itr) + 1) /\n self.N_itr * 1000 * self.DT)",
"def _get_meas_times_from_db(self):\n meas_times = []\n if self._data['report_save_historical_instances_ind'] != 'Y':\n # for non historical reports take measurement time from saved dataset\n dataset = self._jfile.get_current_stored_dataset()\n try:\n meas_time = datetime.datetime.strptime(dataset['meas_time'], '%Y-%m-%d %H:%M:%S')\n except ValueError:\n raise Exception(\"Cannot unformat string %s to datetime\" % dataset['meas_time'])\n meas_times.append(meas_time)\n\n else:\n # for historical reports take measurement times from db datasets\n where_sql = ''\n where_sql_list = list()\n params = [self._id, self._segment_value_id]\n\n if self._process_dataset_ids:\n for dataset_id in self._process_dataset_ids:\n if type(dataset_id) == list:\n where_sql_list.append(\"(report_data_set_instance_id >= %s AND report_data_set_instance_id <= %s)\")\n if dataset_id[0] < dataset_id[1]:\n params.append(dataset_id[0])\n params.append(dataset_id[1])\n else:\n params.append(dataset_id[1])\n params.append(dataset_id[0])\n else:\n where_sql_list.append(\"report_data_set_instance_id = %s\")\n params.append(dataset_id)\n where_sql = ' AND (%s)' % ' OR '.join(where_sql_list)\n\n self._db.Query(\"\"\"SELECT measurement_time\n FROM report_data_set_instance\n WHERE\n `element_id`= %%s\n AND segment_value_id = %%s\n %s\n ORDER BY measurement_time ASC\"\"\" % where_sql, tuple(params))\n meas_times = [item['measurement_time'] for item in self._db.record]\n\n return meas_times",
"def fetch(self, daterange=(datetime.now() - timedelta(1), datetime.now())):\n cursor = self.conn.cursor()\n sql = 'SELECT measure_dt, ping, download, upload FROM speedlogs ' + \\\n ' WHERE measure_dt BETWEEN ? AND ?'\n cursor.execute(sql, daterange)\n return cursor.fetchall()",
"async def get_trades(self) -> List[TradeRequest]:\n data = j.dumps({\n 'startindex': 0,\n 'statustype': 'inbound'\n })\n r = await self.request.request(url='https://www.roblox.com/my/money.aspx/getmyitemtrades', data=data, method='POST')\n data = json.loads(r.json()['d'])[\"Data\"]\n trades = []\n for trade in data:\n t = json.loads(trade)\n trades.append(TradeRequest(self.request, t['Date'], t['Expires'], t['TradePartner'], t['TradePartnerID'], t['Status'], t['TradeSessionID']))\n return trades",
"def get_trades_for_symbol(self, symbol, time_range=5):\n return [trade for trade in self\n if trade.symbol == symbol and (trade.timestamp > (int(time.time())-time_range*60))]",
"def get_new_data(self):\n\n # record bar parse performance\n self.logger.debug(\"Started parsing new ticks.\")\n start_parse = time.time()\n for exchange in self.exchanges:\n exchange.parse_ticks()\n end_parse = time.time()\n duration = round(end_parse - start_parse, 5)\n\n self.logger.debug(\n \"Parsed \" + str(self.total_instruments) +\n \" instruments' ticks in \" + str(duration) + \" seconds.\")\n self.track_tick_processing_performance(duration)\n\n # wrap new 1 min bars in market events\n new_market_events = []\n for exchange in self.exchanges:\n bars = exchange.get_new_bars()\n for symbol in exchange.get_symbols():\n for bar in bars[symbol]:\n event = MarketEvent(exchange.get_name(), bar)\n new_market_events.append(event)\n # add bars to save-to-db-later queue\n # TODO: store new bars concurrently with a processpool\n self.bars_save_to_db.put(event)\n return new_market_events",
"def get_gdax_historical_data():\n \n start = None\n while not start:\n start,end,tid = getStartAndEndHistoric()\n if not start:\n time.sleep(60)\n #Todo: change this to 1min\n firsttimestamp = start\n engine = sa.create_engine(sql_address)\n products = [\"LTC-USD\",\"LTC-BTC\",\"ETH-USD\",\"ETH-BTC\",\"BTC-USD\"]\n public_client = gdax.PublicClient()\n deltat = datetime.timedelta(seconds = 200)\n timewindows = []\n while end - start > datetime.timedelta(seconds=0):\n if start + deltat > end:\n endx = end\n else:\n endx = start + deltat\n timewindows.append([start,endx])\n start += deltat\n results = []\n total = len(timewindows)\n current_idx = 0\n timeold = time.time()\n numofqueries = 0\n engine = sa.create_engine(sql_address)\n Base.metadata.bind = engine\n DBSession = sa.orm.sessionmaker()\n DBSession.bind = engine\n session = DBSession()\n for startx,endx in timewindows:\n\n current_idx += 1\n for i in products:\n repeat = True\n while repeat:\n\n #delay if ratelimts are close\n if numofqueries < 3:\n while time.time() - timeold < 1:\n time.sleep(0.05)\n \n timeold = time.time()\n numofqueries = 0\n try:\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n except:\n time.sleep(30)\n public_client = gdax.PublicClient()\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n alist = public_client.get_product_historic_rates(i, start = startx, end = endx, granularity = 1)\n\n numofqueries += 1\n\n #rate limit exceeded has 'message' as dict.\n if not 'message' in alist:\n repeat = False\n for a in alist:\n a[0] = datetime.datetime.fromtimestamp(float(a[0]))\n tmp = i.split('-')\n d = dict(coin = tmp[0], currency = tmp[1], timestamp = a[0], low=a[1], high=a[2], open=a[3], close=a[4], volume=a[5])\n results.append(d)\n lasttimestamp = a[0]\n\n #upload with batch size of 10000\n if len(results) > 10000:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n if len(results) > 0:\n engine.execute(\n GADXHistoricalDataOneSecondOHLC.__table__.insert(),\n results\n )\n results = []\n \n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n if update:\n update.end = lasttimestamp\n session.commit()\n else:\n new_update = historicalDataProgramState(entry_type = 'update',transaction_id = tid,start=firsttimestamp,end=lasttimestamp,platform='GDAX',status='incomplete')\n session.add(new_update)\n session.commit()\n\n update = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'update')).first()\n update.status='complete'\n order = session.query(historicalDataProgramState).filter(sa.and_(historicalDataProgramState.transaction_id == tid,historicalDataProgramState.entry_type == 'order')).first()\n order.status='complete'\n session.commit()",
"def pending_transactions(self):\n return self._call_account_method(\n 'pendingTransactions'\n )",
"def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]",
"def get_trades_history(self, symbol, start_time, end_time, limit=1000):\n payload = {'symbol': symbol, 'start': start_time, 'end': end_time, 'limit': limit}\n return self.public_request('GET', '/api/v1/trades', **payload)",
"def get_transactions(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get transactions from database\n cur.execute(\"SELECT * FROM transactions\")\n transactions_data = cur.fetchall()\n\n # convert into a dict of values.\n transactions_list = []\n [transactions_list.append({'transaction_id': transaction[0],\n 'date': transaction[1],\n 'payee_id': transaction[2],\n 'description': transaction[3],\n 'amount': transaction[4]})\n for transaction in transactions_data]\n\n # close the cursor\n self.close_cursor()\n\n return transactions_list",
"def describe_slow_log_records(\n self,\n request: dds_20151201_models.DescribeSlowLogRecordsRequest,\n ) -> dds_20151201_models.DescribeSlowLogRecordsResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_slow_log_records_with_options(request, runtime)",
"async def get_blacklist_hist(self, search_time, limit=1000):\n\n start = search_time[0][0]\n end = search_time[0][1]\n\n url = f'https://{self.__api}/v3/blacklist/history'\n continuation = None\n full_resp = {}\n flag = True\n body = {\"filter[clientid]\": self.clientid, \"filter[start_time]\": start, \"filter[end_time]\": end,\n \"limit\": limit, \"continuation\": continuation}\n while True:\n with requests.get(url, params=body,\n headers={'X-WallarmAPI-UUID': self.__uuid,\n 'X-WallarmAPI-Secret': self.__secret}) as response:\n if response.status not in [200, 201, 202, 204, 304]:\n raise NonSuccessResponse(response.status, await response.text)\n continuation = response.json().get('body').get('continuation')\n\n if flag:\n full_resp = response.json()\n\n if continuation is not None:\n body['continuation'] = continuation\n if not flag:\n full_resp['body']['objects'].extend(response.json().get('body').get('objects'))\n else:\n break\n flag = False\n return full_resp",
"def _get_meas_times(self, last_meas_time):\n meas_times = list()\n data = None\n \n if self._process_type == 'soft_gen':\n meas_times = self._get_meas_times_from_db()\n else:\n if self._data['data_fetch_method'] == 'sql':\n # get from outer sql db\n data = self._get_meas_times_sql(last_meas_time)\n elif self._data['data_fetch_method'] == 'web service':\n # get from web service\n data = self._get_meas_times_web_service(last_meas_time)\n\n\n if data:\n clear_data = [row[0] for row in data['data']]\n # check if we have values in list of datetime type\n if clear_data:\n if type(clear_data[0]) == datetime.datetime:\n meas_times = clear_data\n else:\n # it's a date type\n meas_times = [datetime.datetime.combine(d, datetime.time.min) for d in clear_data]\n\n \n\n\n # sort measurement times if they weren't sorted before\n meas_times.sort()\n # if do not save history, take only last element\n if self._data['report_save_historical_instances_ind'] != 'Y':\n if len(meas_times) > 1:\n del meas_times[:-1]\n \n return meas_times",
"def transactions(self, dt=None):\n if dt is None:\n # flatten the by-day transactions\n return [\n txn\n for by_day in itervalues(self._processed_transactions)\n for txn in by_day\n ]\n\n return self._processed_transactions.get(dt, [])",
"def fetch_all_tx(self):\n transactions = []\n for block in self.chain:\n transactions.append(block.data)\n return transactions"
] | [
"0.6520807",
"0.5953189",
"0.5941957",
"0.5698872",
"0.56704205",
"0.55845326",
"0.55000305",
"0.54975384",
"0.53581554",
"0.5356301",
"0.5327412",
"0.53010184",
"0.5276863",
"0.52720934",
"0.5257284",
"0.52468723",
"0.5164559",
"0.5162055",
"0.51554185",
"0.5149581",
"0.5126841",
"0.51238465",
"0.512099",
"0.5117233",
"0.5102619",
"0.5096786",
"0.5094895",
"0.50867134",
"0.50841445",
"0.5080421"
] | 0.72200763 | 0 |
Resets the accumulated statistics back to initial state for metric data. | def reset_metric_stats(self):
self.__stats_table = {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()",
"def reset_metric_stats(self):\n self.__stats_table = {}",
"def stats_reset(self):\n self.stats.reset()",
"def stats_reset(self):\n self.stats.reset()",
"def reset(self) -> None:\n self.statistics = defaultdict(float)",
"def reset(self) -> None:\n self.statistics = defaultdict(int)",
"def reset(self):\n self.stats = {}",
"def reset(self):\n self.num_inst = 0\n self.sum_metric = 0.0",
"def reset_state(self):\n for name in self.metrics:\n self.metrics[name].reset_state()",
"def reset_stats() -> None:\n STATS[\"cleaned\"] = 0\n STATS[\"null\"] = 0\n STATS[\"unknown\"] = 0",
"def reset(self):\n self.start_times = {}\n self.stats = defaultdict(OnlineMeter) # float defaults to 0",
"def reset(self):\n self.avg = 0\n self.sum = 0\n self.cnt = 0",
"def reset_metric_variables(self) -> None:\n with self._lock:\n self._reset_metric_variables()",
"def clear_stats(self):\n self._stats = None",
"def reset(self):\n self.accumulation = None",
"def reset(self):\n self.accumulation = None",
"def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0",
"def reset_metrics(self):\n self.metrics['loss'] = 0.0\n self.metrics['num_tokens'] = 0\n self.metrics['correct_tokens'] = 0\n self.metrics['correct_pred'] = 0\n self.metrics['pred_count'] = 0",
"def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()",
"def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()",
"def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._ddp_backend = get_backend()",
"def reset(self):\n self.sum = [0.] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.] * len(self.topk)",
"def reset(self) -> None:\n self.statistics = defaultdict(self._mp_hack)\n self._is_ddp = get_rank() > -1",
"def reset(self):\n self._total_value = 0.0\n self._count = 0",
"def reset_all(self) -> None:\n for metric in self:\n metric.reset()",
"def reset(self):\n self.ref_value = 0.0\n self._average = 0.0\n self.num_samples = 0",
"def reset(self):\n for i in range(0, len(self.__counts)):\n self.__counts[i] = 0\n self.__overflow = 0\n self.__total_count = 0\n self.__total_values = 0\n self.__min = None\n self.__max = None",
"def reset(self):\n self._value_estimates[:] = self.prior\n self.action_attempts[:] = 0\n self.last_action = None\n self.t = 0",
"def reset(self):\n self._accumulated_time.clear()\n self._hit_count.clear()",
"def reset(self):\n super().reset()\n self.m_n = 1\n self.m_num_errors = 0\n self.m_d = 0\n self.m_lastd = 0\n self.m_mean = 0.0\n self.m_std_temp = 0.0\n self.m_m2s_max = 0.0\n self.estimation = 0.0"
] | [
"0.8448358",
"0.83412653",
"0.83299094",
"0.83299094",
"0.8270503",
"0.8153123",
"0.79735655",
"0.7772603",
"0.77270293",
"0.77003264",
"0.76665866",
"0.7614711",
"0.75709176",
"0.7540221",
"0.75036174",
"0.75036174",
"0.7490715",
"0.7471787",
"0.74488425",
"0.74488425",
"0.74488425",
"0.7399214",
"0.7371752",
"0.7350859",
"0.73405546",
"0.73228246",
"0.7312936",
"0.72774065",
"0.7229525",
"0.71553284"
] | 0.84143066 | 1 |
Merges data from a single transaction. Snapshot is an instance of StatsEngine that contains stats for the single transaction. | def merge(self, snapshot):
if not self.__settings:
return
self.merge_metric_stats(snapshot)
self._merge_transaction_events(snapshot)
self._merge_synthetics_events(snapshot)
self._merge_error_events(snapshot)
self._merge_error_traces(snapshot)
self._merge_custom_events(snapshot)
self._merge_span_events(snapshot)
self._merge_sql(snapshot)
self._merge_traces(snapshot) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_metric_stats(self, snapshot):\n\n if not self.__settings:\n return\n\n for key, other in six.iteritems(snapshot.__stats_table):\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)",
"def record_transaction(self, transaction):\n\n if not self.__settings:\n return\n\n settings = self.__settings\n\n # Record the apdex, value and time metrics generated from the\n # transaction. Whether time metrics are reported as distinct\n # metrics or into a rollup is in part controlled via settings\n # for minimum number of unique metrics to be reported and thence\n # whether over a time threshold calculated as percentage of\n # overall request time, up to a maximum number of unique\n # metrics. This is intended to limit how many metrics are\n # reported for each transaction and try and cut down on an\n # explosion of unique metric names. The limits and thresholds\n # are applied after the metrics are reverse sorted based on\n # exclusive times for each metric. This ensures that the metrics\n # with greatest exclusive time are retained over those with\n # lesser time. Such metrics get reported into the performance\n # breakdown tab for specific web transactions.\n\n self.record_apdex_metrics(transaction.apdex_metrics(self))\n\n self.merge_custom_metrics(transaction.custom_metrics.metrics())\n\n self.record_time_metrics(transaction.time_metrics(self))\n\n # Capture any errors if error collection is enabled.\n # Only retain maximum number allowed per harvest.\n\n error_collector = settings.error_collector\n\n if (error_collector.enabled and settings.collect_errors and\n len(self.__transaction_errors) <\n settings.agent_limits.errors_per_harvest):\n self.__transaction_errors.extend(transaction.error_details())\n\n self.__transaction_errors = self.__transaction_errors[:\n settings.agent_limits.errors_per_harvest]\n\n if (error_collector.capture_events and\n error_collector.enabled and\n settings.collect_error_events):\n events = transaction.error_events(self.__stats_table)\n for event in events:\n self._error_events.add(event, priority=transaction.priority)\n\n # Capture any sql traces if transaction tracer enabled.\n\n if settings.slow_sql.enabled and settings.collect_traces:\n for node in transaction.slow_sql_nodes(self):\n self.record_slow_sql_node(node)\n\n # Remember as slowest transaction if transaction tracer\n # is enabled, it is over the threshold and slower than\n # any existing transaction seen for this period and in\n # the historical snapshot of slow transactions, plus\n # recording of transaction trace for this transaction\n # has not been suppressed.\n\n transaction_tracer = settings.transaction_tracer\n\n if (not transaction.suppress_transaction_trace and\n transaction_tracer.enabled and settings.collect_traces):\n\n # Transactions saved for Synthetics transactions\n # do not depend on the transaction threshold.\n\n self._update_synthetics_transaction(transaction)\n\n threshold = transaction_tracer.transaction_threshold\n\n if threshold is None:\n threshold = transaction.apdex_t * 4\n\n if transaction.duration >= threshold:\n self._update_slow_transaction(transaction)\n\n # Create the transaction event and add it to the\n # appropriate \"bucket.\" Synthetic requests are saved in one,\n # while transactions from regular requests are saved in another.\n\n if transaction.synthetics_resource_id:\n event = transaction.transaction_event(self.__stats_table)\n self._synthetics_events.add(event)\n\n elif (settings.collect_analytics_events and\n settings.transaction_events.enabled):\n\n event = transaction.transaction_event(self.__stats_table)\n self._transaction_events.add(event, priority=transaction.priority)\n\n # Merge in custom events\n\n if (settings.collect_custom_events 
and\n settings.custom_insights_events.enabled):\n self.custom_events.merge(transaction.custom_events)\n\n # Merge in span events\n\n if (settings.distributed_tracing.enabled and\n settings.span_events.enabled and settings.collect_span_events):\n if settings.infinite_tracing.enabled:\n for event in transaction.span_protos(settings):\n self._span_stream.put(event)\n elif transaction.sampled:\n for event in transaction.span_events(self.__settings):\n self._span_events.add(event, priority=transaction.priority)",
"def snapshot(self) -> Snapshot:\n snapshot = self.open(Snapshot.type).signed\n if not isinstance(snapshot, Snapshot):\n raise RuntimeError(\"Unexpected snapshot type\")\n return snapshot",
"def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)",
"def load_snapshot(self):\r\n assert self.snapshot is not None\r\n self.gain = self.snapshot[0]\r\n self.block = self.snapshot[1]\r\n self.locked = self.snapshot[2]\r\n self.bucket_num = self.snapshot[3]",
"def apply_transaction(self,\n header: BlockHeader,\n transaction: BaseTransaction\n ) -> Tuple[BlockHeader, Receipt, BaseComputation]:\n processed_tx = self.process_transaction(header.shard_id, transaction)\n return super().apply_transaction(header, processed_tx)",
"def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def rollback(self, snapshot):\n\n if not self.__settings:\n return\n\n _logger.debug('Performing rollback of data into '\n 'subsequent harvest period. Metric data and transaction events'\n 'will be preserved and rolled into next harvest')\n\n self.merge_metric_stats(snapshot)\n self._merge_transaction_events(snapshot, rollback=True)\n self._merge_synthetics_events(snapshot, rollback=True)\n self._merge_error_events(snapshot)\n self._merge_custom_events(snapshot, rollback=True)\n self._merge_span_events(snapshot, rollback=True)",
"def take_snapshot(self):\r\n self.snapshot = self.gain, self.block, self.locked, self.bucket_num",
"def snapshot(self, snapshot):\n self._context[\"snapshot\"] = snapshot",
"def merge(self, dataset):\n def merge_data(source, dest):\n for key, value in source.items():\n if isinstance(value, dict):\n merge_data(value, dest.setdefault(key, {}))\n else:\n dest[key] = value\n return dest\n\n merge_data(dataset.data, self._data)\n\n for h in dataset.task_history:\n if h not in self._task_history:\n self._task_history.append(h)",
"def restore(self, snapshot):\n self.unit_name = snapshot[\"unit_name\"]",
"def get_snapshot(self):\n data = {\n \"t\": self.sim.t,\n \"time\": self.time,\n \"vehicles\": self.sim.vehicles,\n \"stations\": self.sim.stations,\n \"state\": self.state,\n \"done\": self.is_done}\n return copy.deepcopy(data)",
"def populate_from_transaction(cls, transaction):\n # type: (Transaction) -> Baggage\n hub = transaction.hub or sentry_sdk.Hub.current\n client = hub.client\n sentry_items = {} # type: Dict[str, str]\n\n if not client:\n return Baggage(sentry_items)\n\n options = client.options or {}\n user = (hub.scope and hub.scope._user) or {}\n\n sentry_items[\"trace_id\"] = transaction.trace_id\n\n if options.get(\"environment\"):\n sentry_items[\"environment\"] = options[\"environment\"]\n\n if options.get(\"release\"):\n sentry_items[\"release\"] = options[\"release\"]\n\n if options.get(\"dsn\"):\n sentry_items[\"public_key\"] = Dsn(options[\"dsn\"]).public_key\n\n if (\n transaction.name\n and transaction.source not in LOW_QUALITY_TRANSACTION_SOURCES\n ):\n sentry_items[\"transaction\"] = transaction.name\n\n if user.get(\"segment\"):\n sentry_items[\"user_segment\"] = user[\"segment\"]\n\n if transaction.sample_rate is not None:\n sentry_items[\"sample_rate\"] = str(transaction.sample_rate)\n\n # there's an existing baggage but it was mutable,\n # which is why we are creating this new baggage.\n # However, if by chance the user put some sentry items in there, give them precedence.\n if transaction._baggage and transaction._baggage.sentry_items:\n sentry_items.update(transaction._baggage.sentry_items)\n\n return Baggage(sentry_items, mutable=False)",
"def load_snapshot(self):\r\n assert self.snapshot is not None\r\n self.max_gain = self.snapshot[0]\r\n self.array = self.snapshot[1]\r\n self.free_cell_list = self.snapshot[2]",
"def Merge(self, other):\n\n # Logging just in case\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: before\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: deleted\", %s);'\n %(other.persistant['id'], \n sql.FormatSqlValue('details',\n repr(other.persistant))))\n\n # Fields which can be summed\n for f in ['plays', 'skips']:\n self.persistant[f] = (self.persistant.get(f, 0) +\n other.persistant.get(f, 0))\n\n # Date fields where we take the newest\n for f in ['last_played', 'last_skipped', 'last_action']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a > b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Date fields where we take the oldest\n for f in ['creation_time']:\n a = self.persistant.get(f, datetime.datetime(1970, 1, 1))\n b = other.persistant.get(f, datetime.datetime(1970, 1, 1))\n if a < b:\n v = a\n else:\n v = b\n if v != datetime.datetime(1970, 1, 1):\n self.persistant[f] = v\n\n # Fields where we only clobber ours if we don't have a value\n for f in ['artist', 'album', 'song']:\n if not self.persistant.has_key(f) or not self.persistant[f]:\n self.persistant[f] = other.persistant.get(f, None)\n\n # Sometimes the number is a placeholder\n if self.persistant.has_key('number') and self.persistant['number'] == -1:\n self.persistant['number'] = other.persistant.get('number', -1)\n if not self.persistant.has_key('number'):\n self.persistant['number'] = other.persistant.get('number', -1)\n\n # Update the id in the tags table\n tags = self.db.GetRows('select tag from tags where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: tags: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(tags))))\n\n try:\n self.db.ExecuteSql('update tags set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n except:\n # This can happen if the is already a matching tag for the first track\n pass\n\n # Update the id in the paths table\n paths = self.db.GetRows('select path from paths where track_id=%d;'\n % other.persistant['id'])\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: paths: %d\", %s);'\n %(self.persistant['id'], other.persistant['id'],\n sql.FormatSqlValue('details', repr(paths))))\n \n self.db.ExecuteSql('update paths set track_id=%d where track_id=%d;'\n %(self.persistant['id'], other.persistant['id']))\n self.db.ExecuteSql('commit;')\n\n self.db.ExecuteSql('insert into events(timestamp, track_id, event, '\n 'details) values (now(), %d, \"merge: after\", %s);'\n %(self.persistant['id'],\n sql.FormatSqlValue('details',\n repr(self.persistant))))\n self.db.ExecuteSql('commit;')",
"def test_merge_sum(self):\n ars = self.ar[2009][11]['general']\n ars2 = awstats_reader.AwstatsReader(test_file_dir,\n 'joshuakugler.com')[2009][11]['general']\n self.assertEqual(ars.merge(ars2, 'LastUpdate', 'parsed'), 1262637)",
"def copyTransactionsFrom(self, other, verbose=0):\n ZODB.BaseStorage.copy(other, self, verbose)",
"def serialize_snapshot(self, snapshot, fields=None, version=None):\n fields = fields or self.snapshot_fields\n version = version or self.snapshot_version\n serialized_snapshot = serializers.serialize(\n 'python', [snapshot], fields=fields\n )[0]\n serialized_snapshot['version'] = version\n serialized_snapshot['extra_fields'] = {}\n return serialized_snapshot",
"def merge(*args):\n return _libsbml.Unit_merge(*args)",
"def add(self, transaction):\n if isinstance(transaction, Transaction):\n # If the transaction already exists\n if(transaction.hash in self.transaction_index):\n print(\"Debug: The transaction already exists in the list\")\n return None\n\n self.transaction_list.append(transaction)\n size = len(self.transaction_list)-1\n self.transaction_index[transaction.hash] = size\n else:\n raise Exception(\"Error: not a transaction\")",
"def get_transaction_data():\n data = parse_json()\n income_instances = create_transactions(data['incomes'])\n expense_instances = create_transactions(data['expenses'])\n for expense in expense_instances:\n expense.amount = -(expense.amount)\n transactions = income_instances + expense_instances\n return transactions",
"def load_snapshot(self):\r\n assert self.snapshot is not None\r\n self.name = self.snapshot[0]\r\n self.size = self.snapshot[1]\r\n self.cells = self.snapshot[2]\r\n self.bucket_array.load_snapshot()",
"def get_merged_data(self):\n return self._combinedata",
"def snapshot(self, snapshot_id):\r\n return self.connection.create_dbsnapshot(snapshot_id, self.id)",
"def snapshot(self):\n return {\"unit_name\": self.unit_name}",
"def _merge(self, other: dict):\n self._storage = dict_merge(self._storage, other)",
"def take_snapshot():\n df = scrape()\n for i in df.index:\n single = df.loc[i]\n # create or get locations\n loc, created = Location.objects.get_or_create(\n name=single['Location'],\n all_stands=single['Stands'],\n coordinates=single['Coords']\n )\n # add a new snapshot\n obj = Snapshot(\n location=loc,\n avail_bikes=single['Bikes'],\n free_stands=single['Free stands'],\n timestamp=datetime.now(tz=timezone('Europe/Warsaw'))\n )\n obj.save()",
"def snapshot(snapshot_type, result_q, time_delta):"
] | [
"0.5806089",
"0.50894815",
"0.5036111",
"0.50232536",
"0.49642876",
"0.4911518",
"0.487619",
"0.48526537",
"0.48363346",
"0.48321915",
"0.47403112",
"0.47268453",
"0.46747193",
"0.4672916",
"0.46666792",
"0.4641363",
"0.4636667",
"0.46207553",
"0.46078375",
"0.46022642",
"0.4585646",
"0.45775586",
"0.45727476",
"0.456886",
"0.45646688",
"0.4561287",
"0.45502204",
"0.4544669",
"0.4539312",
"0.4538765"
] | 0.6420531 | 0 |
Performs a "rollback" merge after a failed harvest. Snapshot is a copy of the main StatsEngine data that we attempted to harvest, but failed. Not all types of data get merged during a rollback. | def rollback(self, snapshot):
if not self.__settings:
return
_logger.debug('Performing rollback of data into '
'subsequent harvest period. Metric data and transaction events'
'will be preserved and rolled into next harvest')
self.merge_metric_stats(snapshot)
self._merge_transaction_events(snapshot, rollback=True)
self._merge_synthetics_events(snapshot, rollback=True)
self._merge_error_events(snapshot)
self._merge_custom_events(snapshot, rollback=True)
self._merge_span_events(snapshot, rollback=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rollback(self, stage, enodes, exception):",
"def test_backup_merge_with_restore(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self._take_n_backups(n=2)\n self.backupset.start = 1\n self.backupset.end = 2\n output, error = self.backup_restore()\n if error:\n self.fail(\"Restoring backup failed: {0}\".format(error))\n self.log.info(\"Finished restoring backup before merging\")\n status, output, message = self.backup_merge()\n if not status:\n self.fail(message)\n self.backupset.start = 1\n self.backupset.end = 1\n rest = RestConnection(self.backupset.restore_cluster_host)\n rest.flush_bucket()\n output, error = self.backup_restore()\n if error:\n self.fail(\"Restoring backup failed\")\n self.log.info(\"Finished restoring backup after merging\")",
"def rollback(self):\n pass",
"def rollback(self):\n raise NotImplementedError",
"def test_backup_merge_with_unmerged(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self._take_n_backups(n=2)\n self.backupset.start = 1\n self.backupset.end = 2\n self.log.info(\"Merging existing incremental backups\")\n status, output, message = self.backup_merge()\n if not status:\n self.fail(message)\n self.log.info(\"Taking more backups\")\n self._take_n_backups(n=2)\n self.backupset.start = 1\n self.backupset.end = 3\n self.log.info(\"Merging new backups into already merged backup\")\n status, output, message = self.backup_merge()\n if not status:\n self.fail(message)\n self.log.info(\"Successfully merged new backups with already merged backup\")",
"def test_fail_transaction(self):\n source_wallet = self.source_user.wallets.last()\n target_wallet = self.target_user.wallets.last()\n\n source_balance_init = source_wallet.balance\n target_balance_init = target_wallet.balance\n\n data = {\n 'initial_amount': 1100,\n 'source_wallet': source_wallet,\n 'target_wallet': target_wallet,\n }\n execute_wallet_transaction(data)\n\n source_wallet.refresh_from_db()\n target_wallet.refresh_from_db()\n\n self.assertTrue(source_balance_init == source_wallet.balance)\n self.assertTrue(target_balance_init == target_wallet.balance)\n\n self.assertEqual(source_wallet.outcome_transactions.last().status, TRANSACTION_FAIL_STATUS)",
"def rollback(self, context: 'IconScoreContext', _block_height: int, _block_hash: bytes):\n Logger.info(tag=ROLLBACK_LOG_TAG, msg=\"rollback() start\")\n\n self.prep_address_converter: 'PRepAddressConverter' = context.storage.meta.get_prep_address_converter(context)\n\n self.preps = self._load_preps(context)\n self.term = self._load_term(context)\n\n Logger.info(tag=ROLLBACK_LOG_TAG, msg=f\"rollback() end: {self.term}\")",
"def roll_back_demo():\n # return harvey rupp to belmont hill team\n bh = Team.query.get(161)\n print(f'retrieved {bh}')\n hr = Runner.query.get(1700)\n print(f'retrieved {hr}')\n if bh not in hr.teams:\n bh.runners.append(hr)\n db.session.commit()\n\n # set primary_key values below which will be untouched\n first_deleted_race = 19\n first_deleted_runner = 3712\n first_deleted_result = 4750\n first_deleted_school = 68\n first_deleted_team = 315\n first_deleted_location = 8\n first_deleted_course = 9\n first_deleted_league = 4\n\n # do not allow unless user is administrator\n if not current_user.is_administrator():\n return redirect(url_for('races.results', race_id=race.id))\n\n # delete races and associated results for races in delete range\n races = Race.query.all()\n for race in races:\n if race.id >= first_deleted_race:\n delete_race_by_id(race.id)\n\n # disassociate runners from teams and delete\n teams = Team.query.all()\n for team in teams:\n if team.id >= first_deleted_team:\n team.runners.clear()\n db.session.commit()\n\n runners = Runner.query.all()\n for runner in runners:\n if runner.id >= first_deleted_runner:\n db.session.delete(runner)\n db.session.commit()\n\n # delete teams\n for team in teams:\n if team.id >= first_deleted_team:\n db.session.delete(team)\n db.session.commit()\n\n # delete courses\n courses = Course.query.all()\n for course in courses:\n if course.id >= first_deleted_course:\n db.session.delete(course)\n db.session.commit()\n\n # disassociate locaions from schools and delete\n schools = School.query.all()\n for school in schools:\n if school.id >= first_deleted_school:\n school.locations.clear()\n db.session.commit()\n\n locations = Location.query.all()\n for location in locations:\n if location.id >= first_deleted_location:\n db.session.delete(location)\n db.session.commit()\n\n # disassociate schools from leagues and delete\n leagues = League.query.all()\n for league in leagues:\n if league.id >= first_deleted_league:\n league.schools.clear()\n db.session.commit()\n\n for school in schools:\n if school.id >= first_deleted_school:\n db.session.delete(school)\n db.session.commit()\n\n # delete leagues\n for league in leagues:\n if league.id >= first_deleted_league:\n db.session.delete(league)\n db.session.commit()\n\n # recalculate all runners seed times\n async_update_all_seed_times.delay()\n\n # update league standings via background task\n for league_id in [1, 2]:\n async_update_league_standings.delay(league_id=league_id)\n return redirect(url_for('core.index'))",
"def rollback(self):\n # PEP 249\n raise impala.error.NotSupportedError()",
"def rollback(self):\n self._rollback = True",
"def rollback(self):\n raise TransactionRollback('rollback called outside of transaction')",
"def test_merge_backup_with_failover_logs(self):\n self.log.info(\"Load 1st batch docs\")\n create_gen1 = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen1, \"create\", 0)\n failed_persisted_bucket = []\n rest = RestConnection(self.master)\n cluster_nodes = rest.get_nodes()\n for bucket in self.buckets:\n ready = RebalanceHelper.wait_for_stats_on_all(self.backupset.cluster_host,\n bucket.name, 'ep_queue_size',\n 0, timeout_in_seconds=120)\n if not ready:\n failed_persisted_bucket.append(bucket.name)\n if failed_persisted_bucket:\n self.fail(\"Buckets %s did not persisted.\" % failed_persisted_bucket)\n self.log.info(\"Stop persistence at each node\")\n clusters = copy.deepcopy(cluster_nodes)\n shell = RemoteMachineShellConnection(self.backupset.backup_host)\n for bucket in self.buckets:\n for node in clusters:\n shell.execute_command(\"%scbepctl%s %s:11210 -b %s stop\" % \\\n (self.cli_command_location,\n self.cmd_ext,\n node.ip,\n bucket.name))\n shell.disconnect()\n self.log.info(\"Load 2nd batch docs\")\n create_gen2 = BlobGenerator(\"ent-backup2\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen2, \"create\", 0)\n self.sleep(5)\n self.log.info(\"Crash cluster via kill memcached\")\n for node in clusters:\n for server in self.servers:\n if node.ip == server.ip:\n num_entries = 4\n reach_num_entries = False\n while not reach_num_entries:\n shell = RemoteMachineShellConnection(server)\n shell.kill_memcached()\n ready = False\n while not ready:\n if not RestHelper(RestConnection(server)).is_ns_server_running():\n self.sleep(10)\n else:\n ready = True\n cmd = \"%scbstats%s %s:11210 failovers -u %s -p %s | grep num_entries \" \\\n \"| gawk%s '{printf $2}' | grep -m 5 '4\\|5\\|6\\|7'\" \\\n % (self.cli_command_location, self.cmd_ext, server.ip,\n \"cbadminbucket\", \"password\", self.cmd_ext)\n output, error = shell.execute_command(cmd)\n shell.disconnect()\n if output:\n self.log.info(\"number failover logs entries reached. %s \" % output)\n reach_num_entries = True\n self.backup_create()\n self.log.info(\"Start backup data\")\n self.backup_cluster()\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n self.log.info(\"Load 3rd batch docs\")\n create_gen3 = BlobGenerator(\"ent-backup3\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n self._load_all_buckets(self.master, create_gen3, \"create\", 0)\n self.backup_cluster()\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)",
"def rollback(self):\n try:\n if self._cur_batch:\n self._cur_batch.rollback()\n except ValueError:\n # ignore \"Batch must be in progress to rollback\" error\n pass\n self._cur_batch = None\n self._num_mutations = 0",
"def rollback(self) -> None:\n with self.lock:\n self.wait(self._rollback_gen())",
"def rollback(commit_id):\n _confirm_branch()\n \n require('settings', provided_by=[production, staging])\n require('branch', provided_by=[stable, master, branch])\n \n maintenance_up()\n checkout_latest()\n git_reset(commit_id)\n gzip_assets()\n deploy_to_s3()\n maintenance_down()",
"def _rollback_context(self, persister):\n try:\n # Rollback the job transactional context.\n persister.rollback()\n\n except _errors.DatabaseError as error:\n _LOGGER.error(\n \"Error in %s rolling back job's context.\",\n self.__action.__name__, exc_info=error\n )\n\n # Update the job status.\n self.__result = False\n message = \"Tried to execute action ({0}).\".format(\n self.__action.__name__)\n self._add_status(Job.ERROR, Job.COMPLETE, message, True)\n\n # Finish context which means mark the job as finished\n # and update procedure's information.\n self._finish_context(False)",
"def _do_rollback(self):\n self.backend.rollback()",
"def rollback(self, schema: ArchiveSchema, writer: ArchiveFileWriter, version: int):\n # Get an updated shapshot listing.\n snapshots = self.snapshots.rollback(version=version)\n # Materialize the modified archive.\n self._write(schema=schema, writer=writer, snapshots=snapshots)\n # Update the cached objects\n self.schema = schema\n self.snapshots = snapshots",
"def rollback(self, exc):\n USER.info('%s: Rolling Back Failed Build', self.recipe.name)\n cascade = False\n if isinstance(exc, AssertionError):\n logging.error('Error during verify() of %s', self.recipe.name)\n cascade = True\n if cascade or isinstance(exc, PakitLinkError):\n if not cascade:\n logging.error('Error during linking of %s', self.recipe.name)\n walk_and_unlink(self.recipe.install_dir, self.recipe.link_dir)\n cascade = True\n if cascade or (not isinstance(exc, PakitLinkError) and\n not isinstance(exc, AssertionError)):\n if not cascade:\n logging.error('Error during build() of %s', self.recipe.name)\n try:\n Command('rm -rf ' + self.recipe.install_dir).wait()\n except PakitCmdError: # pragma: no cover\n pass",
"def rollbackSnapshotLXCContainer(self,node,vmid,snapname):\n post_data = {}\n data = self.connect('post','nodes/%s/lxc/%s/snapshot/%s/rollback' % (node,vmid,snapname), post_data) \n return data",
"def revert(self, snapshot):\n state_root, checkpoint_id = snapshot\n\n with self.state_db() as state_db:\n # first revert the database state root.\n state_db.root_hash = state_root\n # now roll the underlying database back\n\n self.chaindb.revert(checkpoint_id)",
"def rollback(self):\n self._connection.execute_nonquery(\"sql\", \"ROLLBACK\", True)",
"def rollbackVirtualMachine(self,node,vmid,snapname):\n post_data = None\n data = self.connect('post',\"nodes/%s/qemu/%s/snapshot/%s/rollback\" % (node,vmid,snapname), post_data)\n return data",
"def rollback(self):\r\n self.db.rollback()",
"def test_merge_backup_with_merge_kill_and_re_merge(self):\n gen = BlobGenerator(\"ent-backup1\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self._take_n_backups(n=self.backupset.number_of_backups)\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n self.log.info(\"Start to merge backup\")\n self.backupset.start = randrange(1, self.backupset.number_of_backups)\n self.backupset.end = 2\n\n self.merged = True\n merge_threads = []\n merge_thread = Thread(target=self.backup_merge)\n merge_threads.append(merge_thread)\n merge_thread.start()\n merge_kill_thread = Thread(target=self._kill_cbbackupmgr)\n merge_threads.append(merge_kill_thread)\n merge_kill_thread.start()\n for merge_thread in merge_threads:\n merge_thread.join()\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n result, output, _ = self.backup_merge()\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)",
"def rollback(self):\n self.success = False\n self.close()",
"def test_revision_diff_delete_then_rollback(self):\n payload = base.DocumentFixture.get_minimal_fixture()\n bucket_name = test_utils.rand_name('bucket')\n created_documents = self.create_documents(bucket_name, payload)\n revision_id = created_documents[0]['revision_id']\n\n # Delete all previously created documents.\n deleted_documents = self.create_documents(bucket_name, [])\n comparison_revision_id = deleted_documents[0]['revision_id']\n\n # Validate that the empty bucket is deleted.\n self._verify_buckets_status(\n revision_id, comparison_revision_id, {bucket_name: 'deleted'})\n\n # Rollback to first non-empty revision.\n rollback_revision_id = self.rollback_revision(revision_id)['id']\n # Validate that diffing rolled-back revision against 1 is unmodified.\n self._verify_buckets_status(\n revision_id, rollback_revision_id, {bucket_name: 'unmodified'})\n\n # Validate that diffing rolled-back revision against 2 is created\n # (because the rolled-back revision is newer than revision 2).\n self._verify_buckets_status(\n comparison_revision_id, rollback_revision_id,\n {bucket_name: 'created'})",
"def _rollback_to_last_consistent_state(self):\n\n with recording_failure_handler():\n need_unfinished_action_rollback = not self._action_recorder.is_empty() and not self._action_recorder.last_action_is_finished()\n\n if need_unfinished_action_rollback:\n\n with recording_failure_handler():\n (name, args, kwargs) = self._action_recorder.get_unfinished_action()\n action = self._action_registry.get_action(name)\n\n # we try to rollback the unfinished action\n action.rollback_action(args=args, kwargs=kwargs, was_interrupted=True)\n\n with recording_failure_handler():\n self._action_recorder.rollback_unfinished_action()\n\n return True\n\n return False",
"def rollback(self):\n self.db.rollback()",
"def rollback(self, target_revision_id):\n url = DeckhandClient.get_path(\n DeckhandPaths.ROLLBACK\n ).format(target_revision_id)\n\n response = self._post_request(url)\n self._handle_bad_response(response)"
] | [
"0.6461394",
"0.60570455",
"0.5918345",
"0.5801982",
"0.57985705",
"0.56479573",
"0.5587846",
"0.5585826",
"0.55499566",
"0.55391157",
"0.55083054",
"0.54737455",
"0.54621094",
"0.5453677",
"0.54407775",
"0.5388638",
"0.5380963",
"0.53626865",
"0.53533244",
"0.53509045",
"0.5342233",
"0.5339826",
"0.5336493",
"0.53087234",
"0.53017044",
"0.52920604",
"0.5272769",
"0.5270568",
"0.5263331",
"0.52626574"
] | 0.77395844 | 0 |
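Illustrative aside (not part of the dataset row above): the rollback-themed snippets listed as negatives all follow the same commit-or-undo pattern — attempt a unit of work, and on failure undo it so stored state is left unchanged. A minimal sketch of that pattern using only the standard-library sqlite3 module; the wallets table and transfer() function are hypothetical, not taken from any snippet above.

import sqlite3

def transfer(conn, source_id, target_id, amount):
    # Apply both balance updates inside one transaction: either both
    # persist (commit) or neither does (rollback).
    cur = conn.cursor()
    try:
        cur.execute("UPDATE wallets SET balance = balance - ? WHERE id = ?",
                    (amount, source_id))
        cur.execute("UPDATE wallets SET balance = balance + ? WHERE id = ?",
                    (amount, target_id))
        conn.commit()
    except sqlite3.Error:
        conn.rollback()   # leave both balances exactly as they were
        raise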
Merges metric data from a snapshot. This is used both when merging data from a single transaction into the main stats engine, and for performing a rollback merge. In either case, the merge is done the exact same way. | def merge_metric_stats(self, snapshot):
if not self.__settings:
return
for key, other in six.iteritems(snapshot.__stats_table):
stats = self.__stats_table.get(key)
if not stats:
self.__stats_table[key] = other
else:
stats.merge_stats(other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge(self, snapshot):\n\n if not self.__settings:\n return\n\n self.merge_metric_stats(snapshot)\n self._merge_transaction_events(snapshot)\n self._merge_synthetics_events(snapshot)\n self._merge_error_events(snapshot)\n self._merge_error_traces(snapshot)\n self._merge_custom_events(snapshot)\n self._merge_span_events(snapshot)\n self._merge_sql(snapshot)\n self._merge_traces(snapshot)",
"def rollback(self, snapshot):\n\n if not self.__settings:\n return\n\n _logger.debug('Performing rollback of data into '\n 'subsequent harvest period. Metric data and transaction events'\n 'will be preserved and rolled into next harvest')\n\n self.merge_metric_stats(snapshot)\n self._merge_transaction_events(snapshot, rollback=True)\n self._merge_synthetics_events(snapshot, rollback=True)\n self._merge_error_events(snapshot)\n self._merge_custom_events(snapshot, rollback=True)\n self._merge_span_events(snapshot, rollback=True)",
"def merge_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, other in metrics:\n key = (name, '')\n stats = self.__stats_table.get(key)\n if not stats:\n self.__stats_table[key] = other\n else:\n stats.merge_stats(other)",
"def _merge_report(self, target, new):\n time = None\n if 'ts' in new['parsed']:\n time = new['parsed']['ts']\n\n if (target.get('lastSeenDate', None) and\n time and\n target['lastSeenDate'] < time):\n target['lastSeenDate'] = time\n\n query_millis = int(new['parsed']['stats']['millis'])\n target['stats']['totalTimeMillis'] += query_millis\n target['stats']['count'] += 1\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def _resample_and_merge(ts, agg_dict):\n grouped = ts.group_serie(agg_dict['sampling'])\n existing = agg_dict.get('return')\n name = agg_dict.get(\"name\")\n resource = None if name is None else mock.Mock(id=str(uuid.uuid4()))\n metric = mock.Mock(id=str(uuid.uuid4()), name=name)\n agg_dict['return'] = (\n processor.MetricReference(metric, \"mean\", resource),\n carbonara.AggregatedTimeSerie.from_grouped_serie(\n grouped,\n carbonara.Aggregation(agg_dict['agg'],\n agg_dict['sampling'],\n None)))\n if existing:\n existing[2].merge(agg_dict['return'][2])\n agg_dict['return'] = existing",
"def _merge_report(self, target, new):\r\n time = None\r\n if 'ts' in new['parsed']:\r\n time = new['parsed']['ts']\r\n\r\n if (target.get('lastSeenDate', None) and\r\n time and\r\n target['lastSeenDate'] < time):\r\n target['lastSeenDate'] = time\r\n\r\n query_millis = int(new['parsed']['stats']['millis'])\r\n target['stats']['totalTimeMillis'] += query_millis\r\n target['stats']['count'] += 1\r\n target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']",
"def merge(self, dataset):\n def merge_data(source, dest):\n for key, value in source.items():\n if isinstance(value, dict):\n merge_data(value, dest.setdefault(key, {}))\n else:\n dest[key] = value\n return dest\n\n merge_data(dataset.data, self._data)\n\n for h in dataset.task_history:\n if h not in self._task_history:\n self._task_history.append(h)",
"def put_snapshot_object(session, key, data, snapshot=None):\n # type: (Session, Text, Any, Optional[Text]) -> None\n url_tail = \"/{}/{}/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS,\n session.network,\n CoordConstsV2.RSC_SNAPSHOTS,\n session.get_snapshot(snapshot),\n CoordConstsV2.RSC_OBJECTS,\n )\n _put_stream(session, url_tail, data, {CoordConstsV2.QP_KEY: key})",
"def merge_snapshot(self):\n disks = self.get_disks()\n disk_files_tree = []\n for disk in disks:\n disk_files_tree += (DiskImageHelper.get_backing_files_tree(disk.file))\n merge_snapshot_cmd = \"virsh blockpull --domain {domain_name} {disk_path} --wait\".format(\n domain_name=self.name, disk_path=disk.file)\n\n logging.debug(\"Executing: '%s'\" % merge_snapshot_cmd)\n logging.info(\"Merging base to new snapshot for '%s' device\" % disk.device)\n\n # launch command\n merge_snapshot_cmds = shlex.split(merge_snapshot_cmd)\n merge_snapshot = subprocess.Popen(merge_snapshot_cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=False)\n\n # wait to terminate\n status = merge_snapshot.wait()\n\n if status != 0:\n logging.error(\"Error for '%s': %s\" % (merge_snapshot_cmd, merge_snapshot.stderr.read()))\n logging.critical(\"{exe} returned {status} state\".format(exe=merge_snapshot_cmds[0], status=status))\n raise Exception(\"blockpull didn't work properly\")\n\n current_disk_files = [disk.file for disk in self.get_disks()]\n\n # remove old disk device files without current ones\n for file in [disk_file_tree for disk_file_tree in disk_files_tree if disk_file_tree not in current_disk_files]:\n logging.info(\"Removing old disk file: '%s'\" % file)\n os.remove(file)",
"def load_snapshot(base_path, snap_num, subvolumes, group, fields, matches):\n n_init = []\n\n snap_key = 'N{}_ThisFile_Redshift'.format('groups' if group == 'Haloprop' else 'subgroups')\n for subvolume in subvolumes: \n n_init.append(load_header(base_path, subvolume)[snap_key][snap_num])\n \n # initialize objects structure\n result = {}\n \n with h5py.File(file_path(base_path, subvolumes[0], 'subvolume'), 'r') as f:\n # galprop and haloprop both have a redshift quantity so we can use that to query for the snapshot we want\n filter_field = '{}Redshift'.format(group)\n \n if not fields:\n fields = list(f[group].keys())\n\n # make sure the redshift field is included in fields\n if filter_field not in fields:\n fields.append(filter_field) \n \n for field in fields:\n if field not in f[group].keys():\n raise Exception(\"Catalog does not have requested field [{}]!\".format(field))\n\n shape = list(f[group][field].shape)\n shape[0] = np.sum(n_init)\n\n # allocate within return dict\n result[field] = np.zeros(shape, dtype=f[group][field].dtype)\n\n if matches:\n with h5py.File(file_path(base_path, subvolumes[0], 'matches'), 'r') as f:\n for field in f[group].keys():\n result[field] = np.zeros(shape, dtype=f[group][field].dtype)\n\n header = load_header(base_path, subvolumes[0])\n filter_condition = header['Redshifts'][snap_num]\n\n offset = 0\n\n for subvolume in subvolumes:\n subvol_result = load_subvolume(base_path, subvolume, group, fields, matches, False)\n\n idx = subvol_result[filter_field][:] == filter_condition\n\n for field in subvol_result.keys():\n if len(subvol_result[field].shape) != 1:\n result[field][offset:offset+n_init[0], :] = subvol_result[field][idx]\n else:\n result[field][offset:offset+n_init[0]] = subvol_result[field][idx]\n\n offset += n_init[0]\n del n_init[0]\n \n return result",
"def mergeAggregatedCsvData(self, contexts, obj, aggData1, aggData2):\n return aggData1 + aggData2",
"def snapshot(snapshot_type, result_q, time_delta):",
"def load_snapshot(self):\r\n assert self.snapshot is not None\r\n self.name = self.snapshot[0]\r\n self.size = self.snapshot[1]\r\n self.cells = self.snapshot[2]\r\n self.bucket_array.load_snapshot()",
"def reduce_data():\n snapshots = Snapshot.objects.all()\n locations = Location.objects.all()\n lst = []\n for snapshot in snapshots:\n lst.append([snapshot.location.name, snapshot.avail_bikes,\n snapshot.free_stands, snapshot.timestamp])\n cols = ['location', 'avail_bikes', 'free_stands', 'timestamp']\n df = pd.DataFrame(lst, columns=cols)\n df['time'] = df['timestamp'].dt.round('30min').dt.strftime('%H:%M')\n\n group = df.groupby(['location', 'time'])\n means = group.mean()\n sd = group.std()\n today = date.today()\n first = today.replace(day=1)\n last_month = first - timedelta(days=1)\n\n for name, time in means.index:\n subset_mean = means.xs((name, time), level=(0, 1), axis=0)\n subset_sd = sd.xs((name, time), level=(0, 1), axis=0)\n m = Stat.objects.get_or_create(\n location=locations.get(name=name),\n avail_bikes_mean=subset_mean['avail_bikes'],\n free_stands_mean=subset_mean['free_stands'],\n avail_bikes_sd=subset_sd['avail_bikes'],\n free_stands_sd=subset_sd['free_stands'],\n time=time,\n month=last_month\n )\n\n # snaps = Snapshot.objects.all()\n # i = 0\n # length = len(snaps)\n # for s in snaps:\n # i += 1\n # print(i)\n # if i > 35000:\n # s.save()\n # reduce_data()",
"def _aggregate_log_values(self, source, dest):\n remove = []\n for key, item in source.items():\n if \"data\" not in item:\n # Assume it's a sub-group\n dest[key] = {}\n self._aggregate_log_values(item, dest[key])\n else:\n aggregator = self._get_aggregator_for_key(key, item['agg'])\n value = aggregator(item['data'])\n if item['precision'] is not None:\n value = round(value, item['precision'])\n dest[key] = value\n if item['scope'] == 'get':\n remove.append(key)\n for key in remove:\n del source[key]",
"def test_backup_merge_with_restore(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self._take_n_backups(n=2)\n self.backupset.start = 1\n self.backupset.end = 2\n output, error = self.backup_restore()\n if error:\n self.fail(\"Restoring backup failed: {0}\".format(error))\n self.log.info(\"Finished restoring backup before merging\")\n status, output, message = self.backup_merge()\n if not status:\n self.fail(message)\n self.backupset.start = 1\n self.backupset.end = 1\n rest = RestConnection(self.backupset.restore_cluster_host)\n rest.flush_bucket()\n output, error = self.backup_restore()\n if error:\n self.fail(\"Restoring backup failed\")\n self.log.info(\"Finished restoring backup after merging\")",
"def _AddSnapshot(self, snapshot):\n if self._history.count(snapshot) == 0:\n self._history.append(snapshot)",
"def test_merge_sum(self):\n ars = self.ar[2009][11]['general']\n ars2 = awstats_reader.AwstatsReader(test_file_dir,\n 'joshuakugler.com')[2009][11]['general']\n self.assertEqual(ars.merge(ars2, 'LastUpdate', 'parsed'), 1262637)",
"def aggregate_statistics(self, new_stats):\n \n if isinstance(new_stats,RunStatistics):\n new_stats = [new_stats, ]\n elif isinstance(new_stats,list):\n if any(not isinstance(_,RunStatistics) for _ in new_stats):\n raise MadGraph5Error, \"The 'new_stats' argument of the function \"+\\\n \"'updtate_statistics' must be a (possibly list of) \"+\\\n \"RunStatistics instance.\"\n \n keys = set([])\n for stat in [self,]+new_stats:\n keys |= set(stat.keys())\n\n new_stats = new_stats+[self,]\n for key in keys:\n # Define special rules\n if key=='max_precision':\n # The minimal precision corresponds to the maximal value for PREC\n self[key] = min( _[key] for _ in new_stats if key in _)\n elif key=='min_precision':\n # The maximal precision corresponds to the minimal value for PREC\n self[key] = max( _[key] for _ in new_stats if key in _)\n elif key=='averaged_timing':\n n_madloop_calls = sum(_['n_madloop_calls'] for _ in new_stats if\n 'n_madloop_calls' in _)\n if n_madloop_calls > 0 :\n self[key] = sum(_[key]*_['n_madloop_calls'] for _ in \n new_stats if (key in _ and 'n_madloop_calls' in _) )/n_madloop_calls\n else:\n # Now assume all other quantities are cumulative\n self[key] = sum(_[key] for _ in new_stats if key in _)",
"def add_snapshot(self):\n\n\t\tself.mu_values = self.cvt_handler.mu_values\n\t\tdim_mu = self.mu_values.shape[1]\n\t\taux_snapshot = self.file_handler.parse(self.namefile_prefix + str(dim_mu-1) + self.file_format, self.output_name)\n\t\tsnapshot = aux_snapshot.reshape(aux_snapshot.shape[0],1)\n\t\tself.snapshots = np.append(self.snapshots, snapshot, 1)\n\t\t\n\t\tself.print_info()",
"def snapshot(self, snapshot):\n self._context[\"snapshot\"] = snapshot",
"def snapshot(self, agent_memory):\n\n read_cmd = \"SELECT \"\n for r in self.TABLE_COLUMNS:\n read_cmd += r + \", \"\n read_cmd = read_cmd.strip(\", \")\n read_cmd += \" FROM \" + self.TABLE + \" WHERE uuid=?\"\n data = agent_memory._db_read_one(read_cmd, self.memid)\n if not data:\n raise (\"tried to snapshot nonexistent memory\")\n\n archive_memid = self.new(agent_memory, snapshot=True)\n new_data = list(data)\n new_data[0] = archive_memid\n\n if hasattr(self, \"ARCHIVE_TABLE\"):\n archive_table = self.ARCHIVE_TABLE\n else:\n archive_table = self.TABLE\n write_cmd = \"INSERT INTO \" + archive_table + \"(\"\n qs = \"\"\n for r in self.TABLE_COLUMNS:\n write_cmd += r + \", \"\n qs += \"?, \"\n write_cmd = write_cmd.strip(\", \")\n write_cmd += \") VALUES (\" + qs.strip(\", \") + \")\"\n agent_memory._db_write(write_cmd, *new_data)\n link_archive_to_mem(agent_memory, self.memid, archive_memid)",
"def add_snapshot(self, dest, source=None):\n raise NotImplementedYet()",
"def RestoreSnapshot(\r\n self,\r\n snapshot_data: Any,\r\n ):\r\n\r\n (\r\n self_id,\r\n iter,\r\n num_results,\r\n ignore_whitespace_ctr,\r\n ) = snapshot_data\r\n\r\n assert self_id == id(self)\r\n assert iter.Offset <= self.normalized_iter.Offset\r\n assert num_results <= len(self.results)\r\n assert ignore_whitespace_ctr <= self._ignore_whitespace_ctr\r\n\r\n self.normalized_iter = iter\r\n self._ignore_whitespace_ctr = ignore_whitespace_ctr\r\n\r\n if len(self.results) != num_results:\r\n del self.results[num_results - len(self.results):]",
"def test_merge_aggregate_traditional(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"A\"] = \"b\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"A\": \"b\"})\n self.assertEqual(mdict, ret)",
"def restore(self, snapshot):\n self.unit_name = snapshot[\"unit_name\"]",
"def take_snapshot(self):\r\n self.snapshot = self.name, self.size, copy.copy(self.cells)\r\n self.bucket_array.take_snapshot()",
"def perform_snapshot(context, region, installed_region='us-east-1'):\n LOG.info('Reviewing snapshots in region %s', region)\n\n # fetch these, in case we need to figure out what applies to an instance\n configurations = dynamo.list_configurations(context, installed_region)\n LOG.debug('Fetched all possible configuration rules from DynamoDB')\n\n # build a list of any IDs (anywhere) that we should ignore\n ignore_ids = utils.build_ignore_list(configurations)\n\n # setup some lookup tables\n cache_data = utils.build_cache_maps(context, configurations, region, installed_region)\n all_instances = cache_data['instance_id_to_data']\n instance_configs = cache_data['instance_id_to_config']\n volume_snap_recent = cache_data['volume_id_to_most_recent_snapshot_date']\n\n for instance_id in set(all_instances.keys()):\n # before we go do some work\n if timeout_check(context, 'perform_snapshot'):\n break\n\n if instance_id in ignore_ids:\n continue\n\n snapshot_settings = instance_configs[instance_id]\n\n # parse out snapshot settings\n retention, frequency = utils.parse_snapshot_settings(snapshot_settings)\n\n # grab the data about this instance id, if we don't already have it\n instance_data = all_instances[instance_id]\n\n ami_id = instance_data['ImageId']\n LOG.info('Reviewing snapshots in region %s on instance %s', region, instance_id)\n\n for dev in instance_data.get('BlockDeviceMappings', []):\n # before we go make a bunch more API calls\n if timeout_check(context, 'perform_snapshot'):\n break\n\n # we probably should have been using volume keys from one of the\n # caches here, but since we're not, we're going to have to check here too\n LOG.debug('Considering device %s', dev)\n volume_id = dev['Ebs']['VolumeId']\n\n if volume_id in ignore_ids:\n continue\n\n # find snapshots\n recent = volume_snap_recent.get(volume_id)\n now = datetime.datetime.now(dateutil.tz.tzutc())\n\n # snapshot due?\n if should_perform_snapshot(frequency, now, volume_id, recent):\n LOG.debug('Performing snapshot for %s, calculating tags', volume_id)\n else:\n LOG.debug('NOT Performing snapshot for %s', volume_id)\n continue\n\n # perform actual snapshot and create tag: retention + now() as a Y-M-D\n delete_on_dt = now + retention\n delete_on = delete_on_dt.strftime('%Y-%m-%d')\n\n volume_data = utils.get_volume(volume_id, region=region)\n expected_tags = utils.calculate_relevant_tags(\n instance_data.get('Tags', None),\n volume_data.get('Tags', None))\n\n utils.snapshot_and_tag(\n instance_id,\n ami_id,\n volume_id,\n delete_on,\n region,\n additional_tags=expected_tags)",
"def merge(coverage_frame, ck_frame='metrics/ck.csv', halstead_frame='metrics/halstead.csv',\n jdepend_frame='metrics/jdepend.csv',\n keywords_frame='metrics/keywords.csv'):\n ck = pd.read_csv(ck_frame)\n halstead = pd.read_csv(halstead_frame)\n jdepend = pd.read_csv(jdepend_frame)\n keywords = pd.read_csv(keywords_frame)\n\n coverage_col = 'TARGET_CLASS' if 'TARGET_CLASS' in coverage_frame.columns else 'class'\n\n merge_frame = coverage_frame.merge(ck, left_on=coverage_col, right_on='class')\n merge_frame = merge_frame.merge(halstead, on='class')\n merge_frame = merge_frame.merge(jdepend, on='class')\n merge_frame = merge_frame.merge(keywords, left_on='class', right_on='class-name')\n return merge_frame",
"def merge(*args):\n return _libsbml.Unit_merge(*args)"
] | [
"0.72327065",
"0.5681518",
"0.5414538",
"0.509253",
"0.5089406",
"0.50668776",
"0.50477487",
"0.50349045",
"0.5023128",
"0.50017947",
"0.49228266",
"0.48627433",
"0.47599393",
"0.4753302",
"0.47469756",
"0.47447816",
"0.47401235",
"0.47382542",
"0.47220156",
"0.4715443",
"0.47067896",
"0.46773222",
"0.46512926",
"0.46499857",
"0.46487054",
"0.4639972",
"0.46301946",
"0.46147826",
"0.4611982",
"0.45998287"
] | 0.7712152 | 0 |
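Illustrative aside (not part of the dataset row above): merge_metric_stats in this row merges two keyed stats tables by adopting keys the main table has not seen and merging existing keys in place. A stripped-down, runnable sketch of that pattern; TimeStats is a stand-in for the real stats objects, which are not shown in this row.

class TimeStats:
    # Minimal stand-in for a metric accumulator.
    def __init__(self, call_count=0, total_time=0.0):
        self.call_count = call_count
        self.total_time = total_time

    def merge_stats(self, other):
        # Fold another accumulator's samples into this one.
        self.call_count += other.call_count
        self.total_time += other.total_time


def merge_metric_tables(main_table, snapshot_table):
    # Keys missing from the main table are adopted wholesale; keys that
    # already exist are merged in place, mirroring merge_metric_stats.
    for key, other in snapshot_table.items():
        stats = main_table.get(key)
        if stats is None:
            main_table[key] = other
        else:
            stats.merge_stats(other)


main = {("WebTransaction/Uri/index", ""): TimeStats(1, 0.20)}
snapshot = {("WebTransaction/Uri/index", ""): TimeStats(2, 0.35),
            ("Custom/cache_miss", ""): TimeStats(1, 0.01)}
merge_metric_tables(main, snapshot)
assert main[("WebTransaction/Uri/index", "")].call_count == 3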
Merges in a set of custom metrics. The metrics should be provided as an iterable where each item is a tuple of the metric name and the accumulated stats for the metric. | def merge_custom_metrics(self, metrics):
if not self.__settings:
return
for name, other in metrics:
key = (name, '')
stats = self.__stats_table.get(key)
if not stats:
self.__stats_table[key] = other
else:
stats.merge_stats(other) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])",
"def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics",
"def record_custom_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for name, value in metrics:\n self.record_custom_metric(name, value)",
"def _build_metric_list_to_collect(self, additional_metrics):\n metrics_to_collect = {}\n\n # Defaut metrics\n for default_metrics in self.DEFAULT_METRICS.itervalues():\n metrics_to_collect.update(default_metrics)\n\n # Additional metrics metrics\n for option in additional_metrics:\n additional_metrics = self.AVAILABLE_METRICS.get(option)\n if not additional_metrics:\n if option in self.DEFAULT_METRICS:\n self.log.warning(\n u\"`%s` option is deprecated.\"\n u\" The corresponding metrics are collected by default.\", option\n )\n else:\n self.log.warning(\n u\"Failed to extend the list of metrics to collect:\"\n u\" unrecognized `%s` option\", option\n )\n continue\n\n self.log.debug(\n u\"Adding `%s` corresponding metrics to the list\"\n u\" of metrics to collect.\", option\n )\n metrics_to_collect.update(additional_metrics)\n\n return metrics_to_collect",
"def _aggregate_metrics(metrics, aggfunc, base):\n return base.Struct(**_UNCOMPRESSED_METRICS)(\n left_side_bearing=aggfunc(_m.left_side_bearing for _m in metrics),\n right_side_bearing=aggfunc(_m.right_side_bearing for _m in metrics),\n character_width=aggfunc(_m.character_width for _m in metrics),\n character_ascent=aggfunc(_m.character_ascent for _m in metrics),\n character_descent=aggfunc(_m.character_descent for _m in metrics),\n character_attributes=0,\n )",
"def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])",
"def aggregate_metrics(metrics):\n if len(metrics) == 1:\n return metrics[0]\n else:\n agg_metrics = metrics[0]\n for metric in agg_metrics.keys():\n vals = [x[metric] for x in metrics]\n agg_metrics[metric] = [np.mean(vals), np.std(vals)]\n return agg_metrics",
"def merge_measurements(measurements_list: List[Measurements]) -> \\\n Tuple[Measurements, List[MetricName]]:\n summed_metrics: Measurements = {}\n\n all_metrics_names = set() # Sum of set of names.\n for measurements in measurements_list:\n all_metrics_names.update(measurements.keys())\n\n for metric_name in all_metrics_names:\n if metric_name in METRICS_METADATA:\n\n if METRICS_METADATA[metric_name].type == MetricType.GAUGE:\n operation = lambda values: sum(values) / len(values) # noqa\n else:\n assert METRICS_METADATA[metric_name].type == MetricType.COUNTER\n operation = sum\n\n else:\n log.debug('By default, unknown metric %r uses \"sum\" as merge operation.', metric_name)\n operation = sum\n\n summed_metrics[metric_name] = operation(\n [measurements[metric_name] for measurements in measurements_list\n if metric_name in measurements])\n\n return summed_metrics",
"def optimize_metrics(self,\n metrics: list = None,\n verbose: bool = True):\n\n if metrics is None:\n metrics = self._supported_metrics\n else:\n metrics = [metric.lower() for metric in metrics]\n assert all(metric in self._supported_metrics for metric in metrics)\n for i in metrics:\n super(ThresholdOptimizer, self).__getattribute__(f'get_best_{i}_metrics')(verbose=verbose)",
"def update(self, current_iter, *metrics, **named_metrics):\n\n # Same order as __init__() in python>=3.6\n if len(metrics) > 0:\n for key, metric in zip(self.metrics.keys(), metrics):\n self.metrics[key].append((current_iter, metric))\n \n # Random order with names\n elif len(named_metrics) > 0:\n for name, metric in named_metrics.item():\n self.metrics[name].append((metric))\n\n else:\n raise ValueError(\"No valid value to update losses\")",
"def add_metrics(self,\n metrics_: Optional[Dict[str, Any]] = None,\n add_to_child_: bool = True,\n **kwargs: Any) -> None:\n if self._child_stack and add_to_child_:\n self._child_stack[-1].add_metrics(metrics_, **kwargs)\n else:\n def collect(target: Dict[str, Any]):\n if metrics_:\n for key, val in metrics_.items():\n key = stage_type.add_metric_prefix(key)\n target[key] = to_number_or_numpy(val)\n if kwargs:\n for key, val in kwargs.items():\n key = stage_type.add_metric_prefix(key)\n target[key] = to_number_or_numpy(val)\n\n stage_type = self._stage.type\n if self._stage.batch.is_active:\n collect(self._batch_metrics)\n elif self._stage.epoch is not None and self._stage.epoch.is_active:\n collect(self._epoch_metrics)\n else:\n collect(self._stage_metrics)\n self._stage.push_metrics(self._stage_metrics)",
"def _add_metrics_to_metrics_provider(cls, mp, metrics):\n providers_info = cls._METRICS_PROVIDER_INFO[mp.type][mp.namespace][\"providers\"]\n provided_metrics = next(\n provider_info[\"provided_metrics\"]\n for provider_info in providers_info\n if provider_info[\"name\"] == mp.name\n )\n\n # Check if the provided metrics are equal to the metrics\n num_metrics = len(metrics)\n if len(provided_metrics) != num_metrics:\n raise ValueError(\n f\"Found {len(provided_metrics)} metrics for metrics provider \"\n f\"{mp.name}. Expected {num_metrics}.\"\n )\n\n # Check what type of provider is used at the moment\n if mp.type == MetricsProviderType.STATIC:\n valued_metric_class = StaticMetric\n elif mp.type == MetricsProviderType.PROMETHEUS:\n valued_metric_class = PrometheusMetric\n else:\n raise NotImplementedError()\n # Iterate through the provided metrics\n valued_metrics = []\n for i, (metric_name, metric_value) in enumerate(provided_metrics):\n metric = metrics[i]\n if metric.mp_metric_name != metric_name:\n msg = (\n f\"Unexpected name {metric.mp_metric_name}. Expected: {metric_name}.\"\n )\n raise ValueError(msg)\n valued_metric = valued_metric_class(metric, metric_value)\n valued_metrics.append(valued_metric)\n mp.set_valued_metrics(valued_metrics)",
"def calc_metric(output, metrics):\n score = []\n for metric in metrics:\n metric_mod = __import__(\"sklearn.metrics\", fromlist=[metric])\n metric_func = getattr(metric_mod, metric)\n score.append(metric_func(output[0], output[1]))\n return score, output",
"def register_additional_metric_ops(\n self, metric_ops: Dict[str, Tuple[tf.Tensor, tf.Tensor]]) -> None:\n for metric_name, (value_op, update_op) in metric_ops.items():\n if metric_name in self._metric_names:\n raise ValueError('tried to register new metric with name %s, but a '\n 'metric with that name already exists.' % metric_name)\n self._metric_names.append(metric_name)\n self._metric_value_ops.append(value_op)\n self._metric_update_ops.append(update_op)\n\n # Update metric variables incrementally with only the new elements in the\n # metric_variables collection.\n collection = self._graph.get_collection(\n tf.compat.v1.GraphKeys.METRIC_VARIABLES)\n collection = collection[len(self._metric_variable_nodes):]\n\n # Note that this is a node_list - it's not something that TFMA\n # configures, but something that TF.Learn configures.\n #\n # As such, we also use graph.get_tensor_by_name directly, instead of\n # TFMA's version which expects names encoded by TFMA.\n for node in collection:\n self._metric_variable_nodes.append(node)\n with self._graph.as_default():\n placeholder = tf.compat.v1.placeholder(\n dtype=node.dtype, shape=node.get_shape())\n self._metric_variable_placeholders.append(placeholder)\n self._metric_variable_assign_ops.append(\n tf.compat.v1.assign(node, placeholder))\n\n with self._graph.as_default():\n self._all_metric_variable_assign_ops = tf.group(\n *self._metric_variable_assign_ops)\n self._all_metric_update_ops = tf.group(*self._metric_update_ops)\n self._reset_variables_op = tf.compat.v1.local_variables_initializer()\n self._session.run(self._reset_variables_op)\n\n self._perform_metrics_update_fn = self._session.make_callable(\n fetches=self._all_metric_update_ops,\n feed_list=self._perform_metrics_update_fn_feed_list)",
"def generateDerivedMetrics(kernelMetrics, statistics, throughputMetrics = {}, countMetrics = {}, combinedMetrics = {}):\n\n # combine single metrics \n for combinedMetric in combinedMetrics:\n for kernel in kernelMetrics:\n logging.debug(\"Combining metrics for kernel {}\".format(kernel))\n # iterate over each run, take the number of runs to be\n # the length of the first source metric\n if combinedMetrics[combinedMetric][0] in kernelMetrics[kernel]:\n combinedMetricCounts = []\n sourceMetricMissing = False\n # go through each run\n for run in range(0, len(kernelMetrics[kernel][ combinedMetrics[combinedMetric][0] ])):\n\n combinedMetricRunCount = 0\n # take all the source metrics and add them into the\n # combined metric\n for sourceMetric in combinedMetrics[combinedMetric]:\n if sourceMetric in kernelMetrics[kernel]:\n # TODO delete once debugged print(\"runs of {} {}\".format(sourceMetric, kernelMetrics[kernel][sourceMetric]))\n combinedMetricRunCount = combinedMetricRunCount + kernelMetrics[kernel][sourceMetric][run]\n else:\n sourceMetricMissing = True\n logging.info(\"Source metric {} missing for combined metric {}, combined metric will not be\"\n \"added\".format(sourceMetric, combinedMetric))\n # append this run ot the end of the list\n combinedMetricCounts.append(combinedMetricRunCount)\n if not sourceMetricMissing:\n kernelMetrics[kernel][combinedMetric] = combinedMetricCounts\n\n # take throughputs and convert them to counts\n # doesn't use averages since that can skew results\n for throughputMetricName, countMetricName in zip(throughputMetrics, countMetrics):\n for kernel in kernelMetrics:\n logging.debug(\"Generating count metrics for {} in kernel {}\".format(throughputMetricName, kernel))\n if throughputMetricName in kernelMetrics[kernel]:\n counts = []\n for run in range(0, len(kernelMetrics[kernel][throughputMetricName])):\n count = kernelMetrics[kernel][throughputMetricName][run] * kernelMetrics[kernel][\"Duration\"][run]\n counts.append(count)\n kernelMetrics[kernel][countMetricName] = counts",
"def summarize_metrics(metrics):\n summarized = {}\n for k in metrics:\n if k.endswith('mse'):\n summarized[k[:-3] + 'rmse'] = np.sqrt(np.mean(metrics[k]))\n elif k.startswith('err'):\n summarized[k + '_mean'] = np.mean(metrics[k])\n summarized[k + '_rmse'] = np.sqrt(np.mean(metrics[k]**2))\n elif k.endswith('nomean'):\n summarized[k] = metrics[k]\n else:\n summarized[k] = np.mean(metrics[k])\n\n return summarized",
"def get_metrics(self, add_metrics={}):\n tot_px_cnt = self.res * int(self.tensors['samples_evaluated'][0])\n\n if self.debug:\n sum_per_class = self.tensors['TP'] + self.tensors['TN'] + self.tensors['FP'] + self.tensors['FN']\n unique = sum_per_class.unique()\n assert len(unique) == 1, 'Expect to observe the exact same number for all classes.'\n assert unique[0] == self.tensors['PX_CNT'].sum() == tot_px_cnt, 'Expect exactly one type of prediction per pixel.'\n\n mask_non_observed = (self.tensors['PX_CNT']).bool()\n mask_bg = self.tensors['M']\n mask_combined = (self.tensors['M'] * mask_non_observed).bool() # in PyTorch 1.4 no logical AND\n\n if self.debug:\n assert mask_combined.sum() <= mask_bg.sum()\n assert mask_combined.sum() <= mask_non_observed.sum()\n \n accuracies = (self.tensors['TP'] + self.tensors['TN']) / tot_px_cnt\n acc = torch.mean(accuracies[mask_combined])\n acc_bg_included = torch.mean(accuracies[mask_non_observed])\n\n IoUs = self.tensors['TP'] / (tot_px_cnt - self.tensors['TN']) # per class: I/U, U = sum(TP,FP,FN) = all - TN\n mIoU = torch.mean(IoUs[mask_combined])\n mIoU_bg_included = torch.mean(IoUs[mask_non_observed])\n\n if self.debug:\n if torch.cuda.is_available():\n for i in [accuracies, acc, acc_bg_included, IoUs, mIoU, mIoU_bg_included]:\n assert i.is_cuda\n\n results = OrderedDict()\n\n for i in ['acc','mIoU']:\n for j in ['','_bg_included']:\n results[ i + j + '_' + self.fold ] = float(eval(i+j+'.cpu()'))\n\n for i in range(self.tensors['TP'].shape[0]):\n results['IoU_class_' + str(i) + '_' + self.fold] = float(IoUs[i].cpu())\n results['acc_class_' + str(i) + '_' + self.fold] = float(accuracies[i].cpu())\n\n if self.debug:\n for k in results:\n if isinstance(results[k], float) and not math.isnan(results[k]):\n # don't apply check to nans and str; we don't use exactly 1 due to smaller rounding error\n assert results[k] <= 1.0001, f'Failure for {k,results[k],type(results[k])}: any metric derived from the confusion matrix should be <= 1.'\n\n #for t in self.tensors:\n # results[t + '_' + self.fold] = self.tensors[t].cpu()\n\n if add_metrics:\n for k in add_metrics:\n results[k + '_' + self.fold] = float(add_metrics[k])\n\n return results",
"def metrics_group():",
"def merge_accumulators(self, accumulators):\n raise NotImplementedError",
"def compute_metrics(self, x, extra=None):\n if self.__metrics is None and extra is None:\n return None\n\n ret = {}\n if self.__metrics is not None:\n for m in self.__metrics:\n ret[m.name] = self._mdmetric(x, m)\n\n if extra is not None and extra.name not in ret:\n ret[extra.name] = self._mdmetric(x, extra)\n\n return ret",
"def _update_metric(\n metrics: List[mlflow.entities.Metric], dataset: MetricsDict = {}\n ) -> MetricsDict:\n for metric in metrics:\n metric_dict = {\"step\": metric.step, \"value\": metric.value}\n if metric.key in dataset:\n if isinstance(dataset[metric.key], list):\n dataset[metric.key].append(metric_dict)\n else:\n dataset[metric.key] = [dataset[metric.key], metric_dict]\n else:\n dataset[metric.key] = metric_dict\n return dataset",
"def calculate_metrics(jobs, metrics_names):\n metrics_def_dict = {mn: {'metric': mn.split('_')[0], 'agg': mn.split('_')[1], 'data': [], 'value': -1} for mn in metrics_names}\n\n for job in jobs:\n if job['category'] == 'run' and job['jobstatus'] == 'finished':\n for mn, mdata in metrics_def_dict.items():\n if 'per' in mdata['metric']:\n if mdata['metric'].split('per')[0] in job and mdata['metric'].split('per')[1] in job and job[mdata['metric'].split('per')[1]] > 0:\n mdata['data'].append(job[mdata['metric'].split('per')[0]]/(1.0*job[mdata['metric'].split('per')[1]]))\n elif mdata['metric'] in job and job[mdata['metric']]:\n mdata['data'].append(job[mdata['metric']])\n\n for mn, mdata in metrics_def_dict.items():\n if 'avg' in mdata['agg']:\n mdata['value'] = sum(mdata['data'])/(1.0*len(mdata['data'])) if len(mdata['data']) > 0 else -1\n if 'sum' in mdata['agg']:\n mdata['value'] = sum(mdata['data'])\n\n metrics = {}\n for mn, mdata in metrics_def_dict.items():\n if mdata['value'] > 0:\n if 'percent' in mdata['agg']:\n metrics[mn] = round(mdata['value'] * 100.0, 2)\n else:\n metrics[mn] = round(mdata['value'], 2)\n\n return metrics",
"def collect_stats(self, cursor):\n metrics = self.config.get('metrics', DEFAULT_METRICS)\n if isinstance(metrics, str):\n if metrics == \"all\":\n # puffer_pool_status is only for 5.5, so we ignore that by default\n metrics = CATEGORIES.keys()\n metrics.remove('buffer_pool_stats')\n else:\n # support comma-separated list\n metrics = re.split(\"\\s*,\\s*\", metrics)\n\n self.logger.debug(\"metrics to collect: %s\" % \", \".join(metrics))\n for cat in metrics:\n if cat in CATEGORIES:\n self.add_category_stats(cat, cursor)\n else:\n self.logger.warning(\"%s is not a valid metric category\" % cat)\n\n if 'newrelic' in metrics:\n self.derive_newrelic_stats()",
"def compute_additional_metrics(metric_summary: pd.DataFrame, metrics: str_t, pos_label: str):\n metric_slice = metric_summary[[\"true_labels_folds\", \"predicted_labels_folds\"]].copy()\n metric_out = {}\n\n # ensure list\n if isinstance(metrics, str):\n metrics = [metrics]\n\n for metric in metrics:\n score_funcs = dict(getmembers(sklearn.metrics))\n if metric in score_funcs:\n score_func = score_funcs[f\"{metric}\"]\n elif f\"{metric}_score\" in score_funcs:\n score_func = score_funcs[f\"{metric}_score\"]\n metric = f\"{metric}_score\" # noqa: PLW2901\n else:\n raise ValueError(f\"Metric '{metric}' not found.\")\n metric_out[metric] = metric_slice.apply(_apply_score, args=(score_func, pos_label), axis=1)\n metric_out = pd.concat(metric_out, names=[\"score\", \"folds\"], axis=1)\n\n metric_out = metric_out.stack([\"score\", \"folds\"])\n metric_out = metric_out.groupby(metric_out.index.names[:-1]).agg(\n [(\"mean\", lambda x: np.mean), (\"std\", lambda x: np.std(x))] # noqa: ARG005\n )\n\n metric_out = metric_out.unstack(\"score\").sort_index(axis=1, level=\"score\")\n metric_out.columns = metric_out.columns.map(\"_test_\".join)\n metric_summary = metric_summary.join(metric_out)\n\n return metric_summary",
"def evaluate_with_metrics(self, dataset, metrics, *args, **kwargs):\n\n utils.assert_raise(isinstance(metrics, dict), ValueError,\n '\"metrics\" must be a dict with metric_name -> metric_function')\n result = dict()\n\n for sample in dataset:\n output = self.predict(sample)\n\n for key, call in metrics.items():\n holder = result.get(key, list())\n holder.append(call(output, sample))\n\n result[key] = holder\n\n return result",
"def add_metrics(self, metric_dict: dict):\n self.metric_dict.update(metric_dict)",
"def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)",
"def metrics(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupMultipleMetricsMetricArgs']]]]:\n return pulumi.get(self, \"metrics\")",
"def metrics(self, metrics):\n\n self._metrics = metrics",
"def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n\n metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)"
] | [
"0.67883134",
"0.6653943",
"0.66358435",
"0.65713274",
"0.65646595",
"0.64966273",
"0.64369106",
"0.6254954",
"0.6035345",
"0.6030221",
"0.5992154",
"0.5904505",
"0.58831435",
"0.5876968",
"0.58738685",
"0.586156",
"0.58568037",
"0.58403516",
"0.58366096",
"0.5835867",
"0.58253866",
"0.5809571",
"0.57690775",
"0.57638687",
"0.5761877",
"0.57279545",
"0.5714877",
"0.56369394",
"0.56266624",
"0.5622822"
] | 0.8258332 | 0 |
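Illustrative aside (usage sketch, not part of the dataset row above): merge_custom_metrics in this row expects an iterable of (name, stats) tuples and keys each entry as (name, '') — an unscoped metric — so repeated names accumulate into a single table slot. A tiny self-contained example; the Stats class is hypothetical.

class Stats:
    def __init__(self, count):
        self.count = count

    def merge_stats(self, other):
        self.count += other.count


table = {}
pending = [("Custom/jobs/processed", Stats(1)),
           ("Custom/jobs/processed", Stats(4))]   # same name appears twice

for name, other in pending:
    key = (name, '')          # empty string marks the metric as unscoped
    stats = table.get(key)
    if stats is None:
        table[key] = other
    else:
        stats.merge_stats(other)

assert table[("Custom/jobs/processed", '')].count == 5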
Checks if the player is ready to be rendered on the character sheet | def is_player_ready(self):
player = self.base.game_instance['player_ref']
if (player
and base.player_states["is_alive"]
and base.player_states["is_idle"]
and not base.player_states["is_moving"]
and not base.player_states["is_running"]
and not base.player_states["is_crouch_moving"]
and not base.player_states["is_crouching"]
and not base.player_states["is_standing"]
and not base.player_states["is_jumping"]
and not base.player_states["is_h_kicking"]
and not base.player_states["is_f_kicking"]
and not base.player_states["is_using"]
and not base.player_states["is_attacked"]
and not base.player_states["is_busy"]
and not base.player_states["is_turning"]
and not base.player_states["is_mounted"]
and not base.player_states["horse_riding"]
and not self.base.game_instance["is_player_sitting"]
and not player.get_python_tag("is_on_horse")
):
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_ready(self):\n if self.game.has_started():\n return True\n return self.status == self.PLAYER_READY",
"def ready(self):\n return self.shader is not None and self.texturesReady()",
"def check_ready(self):\r\n print \"Checking ready\"\r\n\t\tif self.game.trough.is_full():\r\n print \"Ready\"\r\n\t\t\tself.ready()\r\n\t\t\treturn True\r\n\t\tprint \"Not Ready\"\r\n\t\treturn False",
"def set_ready(self):\n if self.game.has_started() or self.status == self.PLAYER_READY:\n return\n self.status = self.PLAYER_READY\n self.game.player_is_ready()",
"def start_game_check(self):\n if len(self.pending_players) > 0:\n return False\n else:\n return True",
"def enough_players():\n return True",
"def requestReady(self):\n if self.team[self.team_num][self.map_pos].avatarLabel['text'] == \"\":\n return;\n \n if self.isHost:\n obj = {\"worldName\":self.worldInfo.worldName}\n main.cManager.sendRequest(Constants.CMSG_START_TO_READY_GAME, obj)\n \n else:\n obj ={\"worldName\": self.worldInfo.worldName}\n main.cManager.sendRequest(Constants.CMSG_READY, obj)\n self.isReady = 1",
"def is_ready() -> bool:\n return True",
"def check_for_tie(self):\n\n\t\tif len(self.player_model.available_cells) > 0:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True",
"def is_ready(self) -> bool:\n pass",
"def wait_to_play(self):\n\n\t\tself.player_model.current_player = self.player_model.rival_player\n\t\tself.player_frame.prepare_to_wait_turn(self.player_model.rival_player.name, self.player_model.available_cells)",
"def can_play(self) -> bool:\n purple_card = self.game.board.purple\n return (\n self.game.current_player != self\n and purple_card is not None\n and purple_card.space > len(self.game.board.yellow[self])\n )",
"def set_not_ready(self):\n if self.game.has_started() or self.status == self.PLAYER_NOT_READY:\n return\n self.status = self.PLAYER_NOT_READY",
"def won_game(self):\n for player in self.players:\n if len(player.cards) == 0:\n\n return True\n return False",
"def texturesReady(self):\n return (self.modulateTexture is not None and\n self.clipTexture is not None and\n self.colourTexture is not None and\n self.modulateTexture.ready() and\n self.clipTexture .ready() and\n self.colourTexture .ready())",
"def is_ready(cls):\n\n return False",
"def is_ready(self):\n return (self.is_calibrated() and not self.has_error()\n and not self.is_moving())",
"def still_valid(self) -> bool:\n return self._data.player_alive(self._data.player_turn)",
"def check_win(self):\n return UNEXPOSED not in self.get_game() and self.get_game().count(FLAG) == len(self.get_pokemon_location)",
"def ready(self):\n return len(self.player1.ships) == len(\n self.player2.ships) == len(self.SHIP_INFO)",
"def won(self):\n if self.current_room.name == \"Victory\":\n return True\n else:\n return False",
"def ready(self):\n return self.time >= self.cooldown",
"def _check_for_win(self):\n slots_available = any(\n [slot.available for slot in self.board.iter_slots() if not slot.mine]\n )\n if not slots_available:\n self.status = GameStatusEnum.won\n self.end_time = datetime.utcnow()",
"def is_game_won(self):\n return True",
"def is_ready_update(self):\n raise UnityTrainerException(\"The is_ready_update method was not implemented.\")",
"def ready(self):\n return self.counter > 0",
"def check_if_won(self):\n if self.player_points > self.enemy_points:\n self.bHasWon = True\n else:\n self.bHasWon = False",
"def CheckVictoryCondition(self):\n opponentVictory = True\n for char in self.screen.characters:\n if char.team == 1 and char.leader and not char.dead:\n opponentVictory = False\n if opponentVictory:\n self.screen.refresh()\n self.music.stop()\n sys.exit()\n\n for victory in self.victories:\n playerVictory = True\n nextLevel = victory['next_level']\n if victory['condition'] == 'destroy':\n for char in self.screen.characters:\n if not char.dead and char.team == 2:\n playerVictory = False\n elif victory['condition'] == 'kill leaders':\n for char in self.screen.characters:\n if not char.dead and char.team == 2 and char.leader:\n playerVictory = False\n if playerVictory:\n print('You win')\n if self.music:\n self.music.stop()\n self.screen.objects = []\n self.screen.tileEffects = []\n self = Level(self.screen, nextLevel)",
"def isReady(self):\n return self._lowLevelIsReady()",
"def maybe_start(self):\r\n\t\tif not [p for p in self.players if not p.ready]\\\r\n\t\t and len(self.players) == self.max_players \\\r\n\t\t and not self.started:\r\n\t\t\tself.start()"
] | [
"0.69993716",
"0.6624655",
"0.6611897",
"0.6352339",
"0.6298705",
"0.62477887",
"0.6227472",
"0.6222125",
"0.6217452",
"0.6090686",
"0.607338",
"0.6063791",
"0.6063283",
"0.605453",
"0.60500675",
"0.6024416",
"0.6018847",
"0.5996054",
"0.5977044",
"0.5920064",
"0.5918121",
"0.591741",
"0.5909322",
"0.5895338",
"0.5852375",
"0.5848146",
"0.5845665",
"0.58281004",
"0.5825527",
"0.58065075"
] | 0.6954562 | 1 |
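Illustrative aside (hypothetical refactor sketch, not part of the dataset row above): is_player_ready in this row gates on one long chain of boolean flags. The same check can be written with named flag groups and all()/any(); player is assumed to be a Panda3D node exposing get_python_tag, as in the original.

REQUIRED_FLAGS = ("is_alive", "is_idle")
BLOCKING_FLAGS = (
    "is_moving", "is_running", "is_crouch_moving", "is_crouching",
    "is_standing", "is_jumping", "is_h_kicking", "is_f_kicking",
    "is_using", "is_attacked", "is_busy", "is_turning",
    "is_mounted", "horse_riding",
)


def is_player_ready(player, player_states, game_instance):
    # Ready only when the required flags are set, no blocking flag is set,
    # and the player is neither sitting nor mounted on a horse.
    return (
        player is not None
        and all(player_states[flag] for flag in REQUIRED_FLAGS)
        and not any(player_states[flag] for flag in BLOCKING_FLAGS)
        and not game_instance["is_player_sitting"]
        and not player.get_python_tag("is_on_horse")
    )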
Run the script at the given path, catching exceptions. This function should only be used internally by Pyto. | def runScriptAtPath(path):
sys.argv = [path]
for arg in PytoClasses.Python.shared.args:
sys.argv.append(str(arg))
def run() -> None:
os.system = PytoClasses.Python.shared.system
directory = os.path.expanduser(os.path.dirname(path))
sys.path.insert(0, directory)
try:
global __script__
spec = importlib.util.spec_from_file_location("__main__", path)
__script__ = importlib.util.module_from_spec(spec)
spec.loader.exec_module(__script__)
PytoClasses.Python.shared.values = [item for item in dir(__script__) if not item.startswith("__")]
except SystemExit:
print("SystemExit")
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
extracts = traceback.extract_tb(sys.exc_info()[2])
count = len(extracts)
lineNumber = -1
fileName = path
for i, extract in enumerate(extracts):
if extract[0] == fileName:
lineNumber = extract[1]
break
count -= 1
if (type(e) == SyntaxError): # The last word in a `SyntaxError` exception is the line number
lineNumber = [int(s) for s in (str(e)[:-1]).split() if s.isdigit()][-1]
PytoClasses.Python.shared.errorType = exc_type.__name__
PytoClasses.Python.shared.errorReason = str(e)
PytoClasses.EditorViewController.visible.showErrorAtLine(lineNumber)
print(traceback.format_exc(limit=-count))
sys.path.remove(directory)
PytoClasses.ReviewHelper.shared.launches = PytoClasses.ReviewHelper.shared.launches+1
PytoClasses.ReviewHelper.shared.requestReview()
PytoClasses.Python.shared.isScriptRunning = False
thread = threading.Thread(target=run, args=())
def loop():
while PytoClasses.Python.shared.isScriptRunning:
time.sleep(1)
ignoredThreads.append(thread)
raise Exception("Stopped script!")
def runLoop():
try:
loop()
except:
pass
thread.start()
runLoop()
return __script__ | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def runScript(path=None):\n if path:\n exec(compile(open(path, \"rb\").read(), path, 'exec'))",
"def do_exec(self, arg):\n self.run_file(arg['path'])",
"def _run_file(file_path, globals_):\n script_name = os.path.basename(file_path)\n\n sys.path = (_PATHS.script_paths(script_name) +\n _PATHS.scrub_path(script_name, sys.path))\n\n fix_google_path()\n\n execfile(_PATHS.script_file(script_name), globals_)",
"async def _run_script(self, path: Path) -> None:\n with open(path, 'r') as f:\n self.conn.executemany(f.read())",
"def run_file(file_path, globals_, script_dir=SCRIPT_DIR):\n fix_sys_path()\n script_name = os.path.basename(file_path)\n script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)\n script_path = os.path.join(script_dir, script_name)\n print script_path\n execfile(script_path, globals_)",
"def do_run_script(self, arg):\n try:\n with open(os.path.join(os.getcwd(), arg), 'r') as fin:\n script = fin.readlines()\n for line in script:\n self.onecmd(line)\n except (FileNotFoundError) as exc:\n print(exc)",
"def run_call(path: Path) -> None:\n if not (path / \"__main__.py\").exists():\n return\n try:\n subprocess.check_call([sys.executable, path.as_posix()], stdout=subprocess.DEVNULL)\n except subprocess.CalledProcessError as e:\n raise SnapshotMismatchError(f\"Path {path} cannot be imported: {e}\") from None",
"def script_test(path):\n log.info(\" ... EXECUTING {}\".format(str(path)))\n\n cmd = [sys.executable, str(path)]\n cp = subprocess.run(cmd, stderr=subprocess.PIPE)\n if cp.returncode:\n log.info(\" ... FAILED\")\n log.info(\" ___ TRACEBACK\")\n log.info(cp.stderr.decode(\"utf-8\") + \"\\n\\n\")\n return False\n else:\n log.info(\" ... PASSED\")\n return True",
"def run(path):\n # https://github.com/friendlycode/grparks/issues/20\n print(\"TODO: modify file here\")\n print(path)",
"def run_script (script, *l) :\n if not os.path.exists (script) :\n raise PQHException (\"file %s not found\" % script)\n py = get_interpreter_path ()\n cmd = \"%s %s\" % (py, script)\n if len (l) > 0 :\n cmd += \" \" + \" \".join ( [str (x) for x in l])\n out,err = run_cmd (cmd)\n return out,err",
"def open_script(script_path):\n pass",
"def execute(file_path):\n os.startfile(file_path)",
"def runScript(self, script):\n data = FilePath(__file__).parent().child('data')\n sample_file = data.child('1.input.ofx')\n\n args = (script, [sample_file.path])\n log.msg('executing %r' % (args,))\n out, err, rc = yield utils.getProcessOutputAndValue(*args, env=None)\n log.msg('rc: %r' % (rc,))\n log.msg('out: %r' % (out,))\n log.msg('err: %r' % (err,))\n if rc != 0:\n self.fail(\"Failed: %s\\n\\n%s\" % (out, err))",
"def run_script(self):\n pass",
"def PyHiew_ExecuteScript(script, g, strip_path = False):\r\n PY_COMPILE_ERR = None\r\n try:\r\n execfile(script, g)\r\n except Exception, e:\r\n PY_COMPILE_ERR = str(e) + \"\\n\" + traceback.format_exc()\r\n PY_COMPILE_ERR = PY_COMPILE_ERR.replace(\r\n script[:-len(os.path.basename(script))],\r\n '')\r\n if PYHIEW_SHOW_EXEC_ERRORS:\r\n MessageBox(PY_COMPILE_ERR)\r\n\r\n return PY_COMPILE_ERR",
"def run_script(self, params, config_no):\n raise NotImplementedError()",
"def run(self, script, *args, **kwargs):\n return self._run('run', script, *args, **kwargs)",
"def exec_script(self, script):\n filename = os.path.join(self.script_dir, script + \".sh\")\n # http://docs.python.org/library/os.html#os.X_OK\n if os.access(filename, os.X_OK):\n with open(filename):\n subprocess.call(filename)\n self.vibrate(0.1)",
"def RunCmdFile(self, path):\n if not self.sim42interp == self.shell.interp:\n self.UseCommandInterface(True)\n self.shell.run(\"read \" + path, prompt=0, verbose=0)",
"def run_setup_script(self, script_path):\n try:\n f = open(script_path, 'r')\n setup_script = f.read()\n # print(setup_script)\n c = self.conn.cursor()\n c.executescript(setup_script)\n except (Error, IOError) as e:\n print('[Datanase] Error:')\n print(e)",
"def run_script(self, pathname, caller=None):\n self.msg(2, \"run_script\", pathname)\n\n pathname = os.path.realpath(pathname)\n m = self.findNode(pathname)\n if m is not None:\n return m\n\n if sys.version_info[0] != 2:\n with open(pathname, 'rb') as fp:\n encoding = util.guess_encoding(fp)\n\n with open(pathname, _READ_MODE, encoding=encoding) as fp:\n contents = fp.read() + '\\n'\n if contents.startswith(BOM):\n # Ignore BOM at start of input\n contents = contents[1:]\n\n else:\n with open(pathname, _READ_MODE) as fp:\n contents = fp.read() + '\\n'\n\n co_ast = compile(contents, pathname, 'exec', ast.PyCF_ONLY_AST, True)\n co = compile(co_ast, pathname, 'exec', 0, True)\n m = self.createNode(Script, pathname)\n self._updateReference(caller, m, None)\n self._scan_code(m, co, co_ast)\n m.code = co\n if self.replace_paths:\n m.code = self._replace_paths_in_code(m.code)\n return m",
"def exec_file(self, path):\n assert os.path.isabs(path)\n\n source = None\n\n try:\n with open(path, 'rt') as fd:\n source = fd.read()\n except Exception as e:\n raise SandboxLoadError(self._context.source_stack,\n sys.exc_info()[2], read_error=path)\n\n self.exec_source(source, path)",
"def execute(self, code, environment = dict()):\r\n if not self.config.get('scripting', 'enable') and type(code) == str:\r\n self.send(code, log = False)\r\n else:\r\n if type(code) == str:\r\n c = compile(code, 'errors.log', 'exec')\r\n else:\r\n c = code\r\n eval(c, self.getEnvironment(environment))",
"def call_script(self, script):\n filename, callable = script.rsplit(':', 1)\n filename = os.path.abspath(filename)\n module = imp.load_source('script', filename)\n script = getattr(module, callable.strip())\n\n try:\n script(self.options, self.buildout, self.augmented_environment())\n except TypeError:\n # BBB: Support hook scripts that do not take the environment as\n # the third parameter\n script(self.options, self.buildout)",
"def exec_file(path: str, global_vars: Dict[str, Any]) -> None:\n with open(path) as file:\n exec(compile(file.read(), path, \"exec\"), global_vars) # pylint: disable=exec-used",
"def run(filename):\n try:\n with open(filename) as f:\n interp.runcode(f.read())\n except IOError as e:\n self.perror(e)",
"def run_workdir(self, path):\n pass",
"def run(path):\n config = conf.get_yaml_field(gl.configFile)\n exe_con = config['ENABLE_EXECUTION']\n exe_num = config['EXECUTION_NUM']\n rerun = config['ENABLE_RERUN']\n reruns_nums = config['RERUN_NUM']\n repeat = config['ENABLE_REPEAT']\n repeat_num = config['REPEAT_NUM']\n exec_mode = config['ENABLE_EXEC_MODE']\n debug_mode = config['ENABLE_DEBUG_MODE']\n last_failed = config['ENABLE_LAST_FAILED']\n failed_first = config['ENABLE_FAILED_FIRST']\n\n # custom function\n RunTestCase.copy_custom_function()\n\n # failed first\n failed_first_args = (' --ff ' if failed_first else '') if not last_failed else ''\n\n # last failed\n last_failed_args = (' --lf ' if last_failed else '') if not failed_first else ''\n\n # Enable repeat case.\n repeat_args = ' --count={} '.format(repeat_num) if repeat else ''\n\n # Enable CPU concurrency\n py_args = ' -n {} '.format(exe_num) if exe_con else ''\n\n # Enable failed retry\n reruns_args = ' --reruns {} '.format(reruns_nums) if rerun else ''\n\n # debug mode print debug info.\n debug = '' if debug_mode else '--tb=no'\n\n \"\"\"\n Load the pytest framework,\n which must be written here or DDT will be loaded first.\n from httptesting.case import test_load_case\n \"\"\"\n case_path = gl.loadcasePath\n # Output mode console or report.\n if exec_mode:\n cmd = 'cd {} && py.test -q -s {} {} {} {}'.format(\n case_path, reruns_args, 'test_load_case.py',\n repeat_args, debug\n )\n else:\n cmd = 'cd {} && py.test {} {} {} {} {} {} --html={} {} --self-contained-html'.format(\n case_path,\n py_args,\n reruns_args,\n last_failed_args,\n failed_first_args,\n 'test_load_case.py',\n repeat_args,\n path,\n debug\n )\n try:\n os.system(cmd)\n except (KeyboardInterrupt, SystemExit):\n print('已终止执行.')",
"def main():\n if os.path.isdir(path):\n for filename in os.listdir(path):\n if filename.endswith('.asm'):\n execute_asm_file(path + '/' + filename, filename)\n else:\n execute_asm_file(path, path[path.rfind(\"/\") + 1:])",
"def run_script(func):\n try:\n sys.exit(func(sys.argv[1:], STDIN, sys.stdout))\n except KeyboardInterrupt:\n logger.error(\"Interrupted\")\n sys.exit(EXIT_ERROR)"
] | [
"0.72653073",
"0.6912623",
"0.68354243",
"0.67718136",
"0.67703193",
"0.65939707",
"0.6413898",
"0.63070154",
"0.61949843",
"0.61937946",
"0.6164765",
"0.6151639",
"0.60480416",
"0.60265625",
"0.60191596",
"0.6006073",
"0.5994495",
"0.59873414",
"0.5959248",
"0.58620226",
"0.5859723",
"0.58492535",
"0.5796345",
"0.5794666",
"0.57743543",
"0.5767361",
"0.57512784",
"0.572017",
"0.5686311",
"0.5680942"
] | 0.7851395 | 0 |
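The record above executes a user script by loading it as `__main__` from a file path and trapping both `SystemExit` and ordinary exceptions. A minimal, framework-free sketch of that pattern follows; the function name `run_script_at_path` and the plain traceback reporting are illustrative stand-ins for the Pyto-specific error handling in the original.

```python
import importlib.util
import sys
import traceback


def run_script_at_path(path: str) -> None:
    """Execute the Python file at `path` as __main__ and report errors instead of crashing."""
    spec = importlib.util.spec_from_file_location("__main__", path)
    module = importlib.util.module_from_spec(spec)
    try:
        spec.loader.exec_module(module)
    except SystemExit:
        print("SystemExit")
    except Exception:
        # Print only the traceback; the original also maps the failing line back to the editor.
        traceback.print_exc()


if __name__ == "__main__":
    run_script_at_path(sys.argv[1])
```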
Expected defaults when no project exists | def test_no_project_defaults(self):
ep = exposed.ExposedProject()
self.assertIsNone(ep.display)
self.assertIsNone(ep.shared)
self.assertIsNone(ep.settings)
self.assertIsNone(ep.title)
self.assertIsNone(ep.id)
self.assertIsNone(ep.path())
with self.assertRaises(RuntimeError):
ep.title = 'Some Title' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_project(self):\n pass",
"def _determine_default_project(project=None):\n if project is None:\n project = _get_gcd_project()\n\n if project is None:\n project = _helpers._determine_default_project(project=project)\n\n return project",
"def test_create_project(self):\n pass",
"def test_create_project(self):\n pass",
"def test_create_project(self):\n pass",
"def project():",
"def project():",
"def project():",
"def configure_project():\n pass",
"def test_read_project(self):\n pass",
"def test_read_project(self):\n pass",
"def test_replace_project(self):\n pass",
"def test_get_projects(self):\n pass",
"def test_create_project_request(self):\n pass",
"def test_add_project(self):\n pass",
"def test_missing_project(self):\n task = Task({\n 'name': 'test',\n 'id': 1,\n 'stage_id' : [1, 'name'],\n 'date_deadline': False,\n 'date_start': False,\n 'date_end': False,\n 'partial_messages': [{'date':'2018-10-21 12:00:00'}],\n 'kanban_state': 'blocked',\n 'planned_hours': 100,\n 'priority': '1'\n })\n self.assertIsNotNone(task)\n self.assertEqual(task.project, 'Not assigned to project')",
"def test_no_such_project(self):\n project = cd.project.get_internal_project()\n cd.project.load(None)\n\n with self.assertRaises(Exception):\n self.run_step('FAKE')\n\n cd.project.load(project)",
"def project(project_no_init: Project) -> Project:\n from pdm.cli.utils import merge_dictionary\n\n data = {\n \"project\": {\n \"name\": \"test-project\",\n \"version\": \"0.0.0\",\n \"description\": \"\",\n \"authors\": [],\n \"license\": {\"text\": \"MIT\"},\n \"dependencies\": [],\n \"requires-python\": \">=3.7\",\n },\n \"build-system\": DEFAULT_BACKEND.build_system(),\n }\n\n merge_dictionary(project_no_init.pyproject._data, data)\n project_no_init.pyproject.write()\n # Clean the cached property\n project_no_init._environment = None\n return project_no_init",
"def init_project(self,project_name,project_dir):\n projectkey = id_generator(10)\n if \"towercrane\" not in os.listdir(project_dir):\n print(f'Initializing project:\"{project_name}\" with projectkey: \"{projectkey}\" ')\n self.TowercraneConfig = {\"project_name\":project_name,\n \"projectkey\":projectkey,\n \"publicurl\":\"private_project\"\n }\n write_config(project_dir,self.TowercraneConfig)\n project_insert_report = self.db.create_project(project_name,project_dir,projectkey)\n print(project_insert_report)\n \n elif \"towercrane\" in os.listdir(project_dir):\n self.TowercraneConfig = read_config(project_dir)\n print(f'project:\"{self.TowercraneConfig[\"project_name\"]}\" with projectkey: \"{self.TowercraneConfig[\"projectkey\"]}\" Already Exists')",
"def project_default(tmp_path):\n from nitpick.constants import NITPICK_STYLE_TOML\n from tests.helpers import ProjectMock, tomlstring\n\n nitpick_style = Path(__file__).parent.parent / NITPICK_STYLE_TOML\n return ProjectMock(tmp_path).pyproject_toml(\n f\"\"\"\n [tool.nitpick]\n style = {tomlstring(nitpick_style)}\n \"\"\"\n )",
"def test_patch_project(self):\n pass",
"def init():\n defaults = _project_defaults()\n\n if Project.prompt:\n defaults['name'] = prompt(\"Enter the project's name:\", defaults['name'])\n defaults['package'] = prompt(\"Enter the project's package:\", defaults['package'])\n defaults['author'] = prompt(\"Enter the project's author:\", defaults['author'])\n defaults['author_email'] = prompt(\"Enter the project's author's email:\", defaults['author_email'])\n defaults['description'] = prompt(\"Enter the project's description:\", defaults['description'])\n\n # print(\"defaults:\\n{defaults}\".format(defaults=pformat(defaults)))\n\n if Project.use_templates:\n\n template = Template()\n\n for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))\n for herringlib in HerringFile.herringlib_paths]:\n\n info(\"template directory: %s\" % template_dir)\n # noinspection PyArgumentEqualDefault\n template.generate(template_dir, defaults, overwrite=False)",
"def test_list_project(self):\n pass",
"def default_context(project_name: str) -> None:\n return BuilderContext(\n project_name=project_name,\n kube_name=project_name.replace(\"_\", \"-\"),\n project_description=\"Generated by pytest.\",\n ci_type=CIType.none,\n db=DatabaseType.none,\n db_info=DB_INFO[DatabaseType.none],\n enable_redis=False,\n enable_migrations=False,\n enable_kube=False,\n enable_routers=True,\n add_dummy=False,\n self_hosted_swagger=False,\n force=True,\n )",
"def test_no_project_id(self):\n\n self.assertRaises(Exception, kaput.init, 'abc', None)",
"def get_project(con):\n try:\n return con.project_read(fq_name=conf.get('default_project', 'UNEXPECTED_VALUE'))\n except:\n log.debug('Unable to find project default-domain, admin:', exc_info=True)\n return None",
"def test_not_github(self):\n project_src_path = 'project-src'\n os.environ['PROJECT_SRC_PATH'] = project_src_path\n generic_ci_env = platform_config.BasePlatformConfig()\n self.assertEqual(generic_ci_env.project_src_path, project_src_path)",
"def test_default_init_parameters(isolated_runner, mocker, project_init, template):\n create_from_template = mocker.patch(\"renku.core.commands.init.create_from_template\")\n mocker.patch(\"renku.cli.githooks.install\")\n\n data, commands = project_init\n\n new_project = Path(data[\"test_project\"])\n assert not new_project.exists()\n result = isolated_runner.invoke(cli, commands[\"init_test\"] + commands[\"id\"], commands[\"confirm\"])\n assert 0 == result.exit_code, format_result_exception(result)\n create_from_template.assert_called_once()\n metadata = create_from_template.call_args[1][\"metadata\"]\n assert {\n \"__template_source__\",\n \"__template_ref__\",\n \"__template_id__\",\n \"__namespace__\",\n \"__repository__\",\n \"__project_slug__\",\n \"__sanitized_project_name__\",\n } <= set(metadata.keys())\n assert metadata[\"__template_source__\"] == \"renku\"\n assert metadata[\"__template_ref__\"] is None\n assert metadata[\"__template_id__\"] == template[\"id\"]\n assert metadata[\"__namespace__\"] == \"\"\n assert metadata[\"__repository__\"] == \"\"\n assert metadata[\"__project_slug__\"] == \"\"\n assert metadata[\"__sanitized_project_name__\"] == \"\"",
"def test_get_project(self):\n self.assertEqual(self.remote_project.get_project(), self.project)",
"def test_set_project_default_power_schedule(self):\n pass"
] | [
"0.7302375",
"0.69367903",
"0.6899856",
"0.6899856",
"0.6899856",
"0.6896533",
"0.6896533",
"0.6896533",
"0.68203735",
"0.6759572",
"0.6759572",
"0.67091924",
"0.6589286",
"0.65550405",
"0.6552097",
"0.65425485",
"0.6540739",
"0.6533683",
"0.63593394",
"0.6352195",
"0.6341002",
"0.63120073",
"0.6301329",
"0.6293345",
"0.6279331",
"0.6270914",
"0.62369347",
"0.61655974",
"0.615675",
"0.6149929"
] | 0.7610206 | 0 |
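The test above expects every property of an exposed project to default to `None` and title assignment to raise `RuntimeError` while no internal project is loaded. A self-contained sketch of that guard-clause design, using a hypothetical `ExposedProject` stand-in rather than the real Cauldron class:

```python
import unittest


class ExposedProject:
    """Minimal stand-in: properties resolve to None until an internal project is loaded."""

    def __init__(self, internal=None):
        self._internal = internal

    @property
    def title(self):
        return self._internal.title if self._internal else None

    @title.setter
    def title(self, value):
        if self._internal is None:
            raise RuntimeError("A project must be loaded before setting the title")
        self._internal.title = value


class TestDefaults(unittest.TestCase):
    def test_no_project_defaults(self):
        project = ExposedProject()
        self.assertIsNone(project.title)
        with self.assertRaises(RuntimeError):
            project.title = 'Some Title'


if __name__ == "__main__":
    unittest.main()
```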
Should abort stopping and not raise an error when no internal step is available to stop. | def test_step_stop_aborted(self, _step: PropertyMock):
_step.return_value = None
es = exposed.ExposedStep()
es.stop() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _gracefully_stop(self):\n pass",
"def halt(*_, **kwargs):\n raise ExecutionFinished(\"Reached halt\")",
"def stop() -> None:",
"def abort() -> NoReturn:\n raise AbortSignal",
"def stop(self) -> None:\n ...",
"def stop(self) -> None:",
"def stop(self) -> None:",
"def abort(self):\n try:\n self.acqRunning = False\n except:\n print('Cannot abort properly')",
"def abort(self):\n raise NotImplementedError",
"def _stop(self):",
"def aborting(self):\n \n pass",
"def __exit__(self, exc_type, exc_val, exc_tb) -> None:\n self.stop()",
"def need_stop(self, path):",
"def abort(self):\n print(\"abort\")",
"def _prepare_to_stop(self):\n pass",
"def stop(self):\r\n self.terminating = True",
"def do_abort(self):\n self.abort = True\n if self.monitor: self.monitor.stop( )",
"def stop():",
"def stop():",
"def stop():",
"def stop():",
"def stop(self):\n self.halt = True",
"def test_stop_step_no_halt(self):\n support.create_project(self, 'homer2')\n support.add_step(self, contents='\\n'.join([\n 'import cauldron as cd',\n 'cd.shared.test = 0',\n 'cd.shared.other = 0',\n 'cd.step.breathe()',\n 'cd.shared.test = 1',\n 'cd.step.stop()',\n 'cd.shared.test = 2'\n ]))\n support.add_step(self, contents='\\n'.join([\n 'import cauldron as cd',\n 'cd.shared.other = 1'\n ]))\n\n support.run_command('run')\n project = cd.project.get_internal_project()\n step = project.steps[1]\n\n self.assertEqual(project.shared.fetch('test'), 1)\n self.assertEqual(project.shared.fetch('other'), 1)\n self.assertNotEqual(-1, step.dom.find('cd-StepStop'))",
"def stop(self) -> None:\n pass",
"def stop(self) -> None:\n raise NotImplementedError()",
"def stop(self) -> None:\n raise NotImplementedError()",
"def stop(self):\n raise NotImplementedError",
"def stop(self):\n raise NotImplementedError",
"def stop(self):\n raise NotImplementedError()",
"def stop(self):\n raise NotImplementedError()"
] | [
"0.7060672",
"0.6969455",
"0.6947691",
"0.6831138",
"0.6816764",
"0.6798415",
"0.6798415",
"0.6757007",
"0.6750949",
"0.6735412",
"0.6730144",
"0.66933066",
"0.66833615",
"0.6682303",
"0.6665194",
"0.6663481",
"0.66407984",
"0.6631349",
"0.6631349",
"0.6631349",
"0.6631349",
"0.66267425",
"0.66251135",
"0.65959656",
"0.65954393",
"0.65954393",
"0.65833724",
"0.65833724",
"0.6577739",
"0.6577739"
] | 0.7588589 | 0 |
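The test replaces the step property with a `PropertyMock` returning `None` and expects `stop()` to exit quietly. A hedged sketch of both sides of that contract, with an illustrative `interrupt()` call standing in for whatever the real step does when stopped:

```python
import unittest
from unittest.mock import MagicMock, PropertyMock, patch


class ExposedStep:
    @property
    def _step(self):
        return None  # normally resolved from the currently running project

    def stop(self):
        step = self._step
        if step is None:
            return  # nothing to stop; abort silently instead of raising
        step.interrupt()


class TestStop(unittest.TestCase):
    def test_stop_aborted(self):
        with patch.object(ExposedStep, '_step', new_callable=PropertyMock) as mocked:
            mocked.return_value = None
            ExposedStep().stop()  # must not raise

    def test_stop_delegates(self):
        fake_step = MagicMock()
        with patch.object(ExposedStep, '_step', new_callable=PropertyMock) as mocked:
            mocked.return_value = fake_step
            ExposedStep().stop()
        fake_step.interrupt.assert_called_once()


if __name__ == "__main__":
    unittest.main()
```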
Should abort stopping and not raise an error when no internal project is available to stop. | def test_project_stop_aborted(self, get_internal_project: MagicMock):
get_internal_project.return_value = None
ep = exposed.ExposedProject()
ep.stop() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stopBuild(reason=\"<no reason given>\"):",
"def _gracefully_stop(self):\n pass",
"def abort(self):\n try:\n self.acqRunning = False\n except:\n print('Cannot abort properly')",
"def stop() -> None:",
"def test_stop_project(self):\n support.create_project(self, 'homer3')\n support.add_step(self, contents='\\n'.join([\n 'import cauldron as cd',\n 'cd.shared.test = 0',\n 'cd.step.breathe()',\n 'cd.shared.test = 1',\n 'cd.project.stop()',\n 'cd.shared.test = 2'\n ]))\n support.add_step(self, contents='\\n'.join([\n 'import cauldron as cd',\n 'cd.shared.test = 3'\n ]))\n\n support.run_command('run')\n project = cd.project.get_internal_project()\n step = project.steps[1]\n\n self.assertEqual(project.shared.fetch('test'), 1)\n self.assertNotEqual(-1, step.dom.find('cd-StepStop'))",
"def _stop(self):",
"def stop(self) -> None:\n ...",
"def stop():",
"def stop():",
"def stop():",
"def stop():",
"def need_stop(self, path):",
"def stop(self) -> None:",
"def stop(self) -> None:",
"def stopclean(self):\n raise Exception(\"Not implemented\")",
"def _prepare_to_stop(self):\n pass",
"def shutdown(self):\n rospy.loginfo(\"Stopping Project\")\n rospy.sleep(1)",
"def stop(self):\n\t\tself._run_flag = False\n\t\tself.wait()",
"def do_abort(self):\n self.abort = True\n if self.monitor: self.monitor.stop( )",
"def exit_engine(self):\n self.stop_flag = True",
"def aborting(self):\n \n pass",
"def terminate(self):\n self._running = False",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def abort(self):\n print(\"abort\")",
"def stop_run(arn=None):\n pass"
] | [
"0.7033246",
"0.6999605",
"0.68412125",
"0.6814739",
"0.66516757",
"0.6600153",
"0.6580697",
"0.6565686",
"0.6565686",
"0.6565686",
"0.6565686",
"0.65105325",
"0.6509005",
"0.6509005",
"0.6490828",
"0.64628285",
"0.6435273",
"0.6418074",
"0.640065",
"0.6397539",
"0.63974357",
"0.6396683",
"0.63787335",
"0.63787335",
"0.63787335",
"0.63787335",
"0.63787335",
"0.63787335",
"0.6371794",
"0.6360139"
] | 0.7577744 | 0 |
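The project-level variant stubs out the lookup function itself rather than a property. A sketch of the same no-op behaviour, assuming a module-level `get_internal_project` helper like the one the test patches:

```python
from unittest.mock import MagicMock, patch


def get_internal_project():
    raise RuntimeError("real project lookup is not available in this sketch")


class ExposedProject:
    def stop(self):
        project = get_internal_project()
        if project is None:
            return  # no project loaded; abort without raising
        project.stop()


def test_project_stop_aborted():
    with patch(__name__ + ".get_internal_project", MagicMock(return_value=None)):
        ExposedProject().stop()  # must not raise


if __name__ == "__main__":
    test_project_stop_aborted()
    print("ok")
```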
Should fail to get internal project and return None after eventually timing out. | def test_get_internal_project_fail(
self,
sleep: MagicMock,
time_time: MagicMock,
internal_project: PropertyMock
):
project = exposed.ExposedProject()
time_time.side_effect = range(20)
internal_project.return_value = None
result = project.get_internal_project()
self.assertIsNone(result)
self.assertEqual(10, sleep.call_count) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_internal_project(\n self,\n sleep: MagicMock,\n internal_project: PropertyMock\n ):\n project = exposed.ExposedProject()\n internal_project.side_effect = [None, None, None, 'test']\n result = project.get_internal_project()\n self.assertEqual('test', result)\n self.assertEqual(2, sleep.call_count)",
"def test_get_project(self):\n pass",
"def get_project(con):\n try:\n return con.project_read(fq_name=conf.get('default_project', 'UNEXPECTED_VALUE'))\n except:\n log.debug('Unable to find project default-domain, admin:', exc_info=True)\n return None",
"def Project(self):\n\n if not self.connected:\n return None\n\n try:\n return _ReadNoProxy(GOOGLE_GCE_METADATA_PROJECT_URI)\n except urllib2.HTTPError as e:\n raise MetadataServerException(e)\n except urllib2.URLError as e:\n raise CannotConnectToMetadataServerException(e)",
"def test_get_project(self):\n self.assertEqual(self.remote_project.get_project(), self.project)",
"def get_current_project():\n return get_from_session(KEY_PROJECT)",
"def test_project_stop_aborted(self, get_internal_project: MagicMock):\n get_internal_project.return_value = None\n ep = exposed.ExposedProject()\n ep.stop()",
"def get_project(self, name=None):\n if not name:\n if not self.select_project:\n log.error(\"no default project name specified\")\n return\n name = self.select_project\n\n if name in self.projects:\n return self.projects[name]\n\n log.debug( \"project {} not found in {} projects \".format(name, len(self.projects)) )\n return None",
"def get_project(self):\n raise NotImplementedError(\"get_project is not implemented\")",
"def getMain(self):\n\n if self.__projects:\n return self.__projects[0]\n else:\n return None",
"def test_no_such_project(self):\n project = cd.project.get_internal_project()\n cd.project.load(None)\n\n with self.assertRaises(Exception):\n self.run_step('FAKE')\n\n cd.project.load(project)",
"def test_get_projects(self):\n pass",
"def get_project(self, id):\n for project in self.projects:\n if project.id == int(id):\n ret_val = project\n break\n else:\n ret_val = None\n\n return ret_val",
"def get_project(db, id):\n \n for element in db:\n if element['project_no'] == id:\n return element\n return None",
"def get_project(self, project_name):\n raise self._get_notimplementederror(\"get_project\")",
"def test_read_project(self):\n pass",
"def test_read_project(self):\n pass",
"def test_returns_404_if_project_doesnt_exist(self):\n # Act\n response = self.client.get(\n \"/api/v2/projects/queries/999/similar-projects/\",\n headers={\"Authorization\": self.user_session_token},\n )\n self.assertEqual(response.status_code, 404)",
"def GetProject(args):\n return args.project or properties.VALUES.core.project.GetOrFail()",
"def get_current_project(self):\n\n try:\n command = self._oc_command([\"project\", \"-q\"])\n output = run_cmd(command, return_output=True)\n except subprocess.CalledProcessError as ex:\n raise ConuException(\"Failed to obtain current project name : %s\" % ex)\n\n try:\n return output.rstrip() # remove '\\n'\n except IndexError:\n raise ConuException(\"Failed to obtain project name\")",
"def test_get_status_no_project(\n get_internal_project: MagicMock,\n step_writer_serialize: MagicMock,\n):\n get_internal_project.return_value = None\n\n response = statuses.get_status(0, force=True)\n\n assert response['success'], \"\"\"\n Expect the status process to be successful.\n \"\"\"\n assert response['data']['project'] is None, \"\"\"\n Expect there to be no project data.\n \"\"\"\n assert 0 == step_writer_serialize.call_count, \"\"\"\n Expect no step serialization to be carried out.\n \"\"\"\n assert [] == response['data']['step_changes'], \"\"\"\n Expect no step changes to exist without project data.\n \"\"\"\n assert response['hash'].startswith('forced-'), \"\"\"\n Expect a forced call to have a forced hash.\n \"\"\"",
"def test_returns_404_if_project_doesnt_exist(self):\n # Act\n response = self.client.get(\"/api/v2/projects/999/queries/aoi/\")\n self.assertEqual(response.status_code, 404)",
"def test_returns_404_if_project_doesnt_exist(self):\n # Act\n response = self.client.get(\n \"/api/v2/projects/999/queries/notasks/\",\n headers={\"Authorization\": self.user_session_token},\n )\n self.assertEqual(response.status_code, 404)",
"def test_projects_get(self):\n response = self.client.open('/project-tracker/projects',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def get(self, name):\n try:\n return self.projects[name]\n except KeyError:\n print(\"No project called %s was found\" %name)",
"def __get_project_version__(self):\n api = FortifyApi(self.ssc_server, token=self.token, verify_ssl=False)\n try:\n response = api.get_project_versions() # api should support a search expression here. alas...\n if response.success:\n for project_version in response.data['data']:\n if project_version['project']['name'] == self.application_name:\n if project_version['name'] == self.fortify_version:\n # we have a matching project version\n Logger.app.debug(\"Found existing project version {0}\".format(project_version['id']))\n return project_version['id']\n # Didn't find a matching project version, verify that our project exists\n for project_version in response.data['data']:\n if project_version['project']['name'] == self.application_name:\n # Our project exsits, so create a new version\n return self.__create_project_version__()\n # Let upload_scan know that our project doesn't exist\n return -2\n elif \"401\" in response.message:\n # Avoid printing error for invalid token. Return -1 to reauth\n return -1\n else:\n Logger.app.critical(\"Failed to get project version. {0}\".format(response.message))\n except Exception as e:\n Logger.app.critical(\"Exception trying to get project version. {0}\".format(e.message))\n\n return None",
"def _determine_default_project(project=None):\n if project is None:\n project = _get_gcd_project()\n\n if project is None:\n project = _helpers._determine_default_project(project=project)\n\n return project",
"def get_project(self):\n if self.api_version == 2:\n return self.creds.get('tenant_id') or self.creds.get('tenant_name')\n else:\n return self.creds.get('project_id') or self.creds.get('project_name')",
"def test_get_projects_throws_if_project_does_not_exist(fc: fetcher.Fetcher):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_projects(\"BadProject\")\n assert \"An error occured while getting projects.\" in str(exc.value)",
"def get_project(arn=None):\n pass"
] | [
"0.70699006",
"0.68557197",
"0.66254395",
"0.64616627",
"0.63413024",
"0.62891513",
"0.62088645",
"0.61985654",
"0.612253",
"0.60614514",
"0.60256624",
"0.6006625",
"0.5999078",
"0.59616834",
"0.5941112",
"0.5911068",
"0.5911068",
"0.59038454",
"0.5900037",
"0.5870759",
"0.58625996",
"0.58564776",
"0.5852726",
"0.5851352",
"0.584494",
"0.58329433",
"0.58195937",
"0.58105063",
"0.57972395",
"0.5792592"
] | 0.7359438 | 0 |
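The timeout test fakes `time.time` and `time.sleep` so the lookup gives up after roughly ten polls. The production-side idea is an ordinary poll-until-deadline loop; the helper below is a generic sketch of it, with the ten-second default being an assumption rather than the project's actual value:

```python
import time


def wait_for(getter, timeout=10.0, interval=1.0):
    """Poll `getter` until it returns a non-None value or the deadline passes."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        value = getter()
        if value is not None:
            return value
        time.sleep(interval)
    return None  # timed out without ever seeing a value


# Times out quickly because the getter never produces a value.
print(wait_for(lambda: None, timeout=0.05, interval=0.01))  # -> None
```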
Should write to the console using a write_source function call on the internal step report's stdout_interceptor. | def test_write_to_console(self, _step: PropertyMock):
trials = [2, True, None, 'This is a test', b'hello']
for message in trials:
_step_mock = MagicMock()
write_source = MagicMock()
_step_mock.report.stdout_interceptor.write_source = write_source
_step.return_value = _step_mock
step = exposed.ExposedStep()
step.write_to_console(message)
args, kwargs = write_source.call_args
self.assertEqual('{}'.format(message), args[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_render_to_console(self, _step: PropertyMock):\n message = ' {{ a }} is not {{ b }}.'\n\n _step_mock = MagicMock()\n write_source = MagicMock()\n _step_mock.report.stdout_interceptor.write_source = write_source\n _step.return_value = _step_mock\n step = exposed.ExposedStep()\n step.render_to_console(message, a=7, b='happy')\n\n args, kwargs = write_source.call_args\n self.assertEqual('7 is not happy.', args[0])",
"def stdout(self):\n pass",
"def test_PrintSmoke(self):\n stage = self.ConstructStage()\n with self.OutputCapturer():\n stage._Print('hi there')\n self.AssertOutputContainsLine('hi there', check_stderr=True)",
"def setUp(self):\n self.actualstdout = sys.stdout\n sys.stdout = StringIO.StringIO()",
"def setUp(self):\n\t\tself.output = self.switchstdout()",
"def enable(self):\n self.out = StringIO()\n self._stdout = sys.stdout\n sys.stdout = self.out",
"def p(self):\n self.printstdout = True",
"def javaScriptConsoleMessage(self, message, line_number, source_id):\n print 'Console:', message, line_number, source_id",
"def print_cmd_line(s, target, src, env):\n sys.stdout.write(\" Making %s...\\n\"% (' and '.join([str(x) for x in target])))",
"def capture_stdout(sq, method):\n capture = io.StringIO()\n sys.stdout = capture\n if method == \"print\":\n print(sq)\n else:\n sq.display()\n sys.stdout = sys.__stdout__\n return capture",
"def test_capture_stdout():\n\n sys.stdout.write('Print to stdout')\n\n assert False",
"def log_stdout(self, function):\n return function()",
"def stdout(self):\n if not hasattr(self, \"my_stdout_proxy\"):\n self.my_stdout_proxy = self.outfile_proxy()\n self.my_stdout_proxy_created = 1\n return self.my_stdout_proxy",
"def test_export_custom(self): # pylint: disable=no-self-use\n mock_record_str = Mock(str)\n\n def formatter(record): # pylint: disable=unused-argument\n return mock_record_str\n\n mock_stdout = Mock()\n exporter = ConsoleLogExporter(out=mock_stdout, formatter=formatter)\n log_data = LogData(\n log_record=LogRecord(),\n instrumentation_scope=InstrumentationScope(\n \"first_name\", \"first_version\"\n ),\n )\n exporter.export([log_data])\n mock_stdout.write.assert_called_once_with(mock_record_str)",
"def hook_print():\n sys.stdout = PrintHook()",
"def write_line(self, line):\n # TODO(iannucci): have step_runner log the step metadata as a protobuf\n # and/or put it in the Step proto message.\n return self.logging.write_line(line)",
"def test_stdout_log(self, logger: Logger) -> None:\n task = OctaveTask()\n task.session_id = \"123\"\n handler = OutputHandler(task)\n logger.addHandler(handler)\n\n # Write something to the log\n msg = \"I am a message\"\n logger.info(msg)\n\n assert len(handler.contents) == 1\n assert handler.messages() == msg",
"def test_stdout(self):\n stdout = StringIO()\n self.patch(sys, 'stdout', stdout)\n\n # Suppress warnings so that if there are any old-style plugins that\n # lore queries for don't confuse the assertion below. See #3070.\n self.patch(warnings, 'warn', lambda *a, **kw: None)\n self.test_buildTeX()\n self.assertEqual(stdout.getvalue(), '')",
"def redirect_stdout():\n save_stdout = sys.stdout\n sys.stdout = _TQDMFile(sys.stdout)\n yield\n sys.stdout = save_stdout",
"def print_out():\n pass",
"def stdout2Log(self):\n sys.stdout = self\n sys.stderr = self\n return",
"def do_print(self, line):\n cmd_args = io.parse_cmd_args(line, io.output_cmd_pattern)\n if cmd_args:\n success = self.manager.print_to_console(\n cmd_args.get('target'), \n cmd_args.get('filters')\n )\n if success:\n self.console_print(\"There, you asked for it!\", settings.INFO_FORMAT)\n else:\n self.console_print(\"Sorry, something kinda went wrong! You can try again.\", settings.ERROR_FORMAT)\n else:\n self.console_print(settings.COMMMAND_ARGS_ERROR_MSG, settings.ERROR_FORMAT)",
"def test_PrintLoudlySmoke(self):\n stage = self.ConstructStage()\n with self.OutputCapturer():\n stage._PrintLoudly('hi there')\n self.AssertOutputContainsLine(r'\\*{10}', check_stderr=True)\n self.AssertOutputContainsLine('hi there', check_stderr=True)",
"def tprint(self, cmd, end='\\n'):\n if ENABLE_DEBUG:\n stackIndex = 0\n for index, stackFrame in enumerate(stack()):\n caller = getframeinfo(stackFrame[0])\n if caller.filename == fullPath:\n stackIndex = index\n break \n caller = getframeinfo(stack()[stackIndex][0])\n self.fileHandle.write(\"# \" + targetFile + \":\" + str(caller.lineno) + '\\n')\n self.tprint_raw(cmd, end)",
"def test_debug_output(self):\n assert output(self.msg) is not None",
"def write_output(self):",
"def dumps(self) -> str:\n code_file_path = os.path.join(\n self.project.source_directory,\n self.filename\n )\n code = dict(\n filename=self.filename,\n path=code_file_path,\n code=render.code_file(code_file_path)\n )\n\n if not self.is_running:\n # If no longer running, make sure to flush the stdout buffer so\n # any print statements at the end of the step get included in\n # the body\n self.report.flush_stdout()\n\n # Create a copy of the body for dumping\n body = self.report.body[:]\n\n if self.is_running:\n # If still running add a temporary copy of anything not flushed\n # from the stdout buffer to the copy of the body for display. Do\n # not flush the buffer though until the step is done running or\n # it gets flushed by another display call.\n body.append(self.report.read_stdout())\n\n body = ''.join(body)\n\n has_body = len(body) > 0 and (\n body.find('<div') != -1 or\n body.find('<span') != -1 or\n body.find('<p') != -1 or\n body.find('<pre') != -1 or\n body.find('<h') != -1 or\n body.find('<ol') != -1 or\n body.find('<ul') != -1 or\n body.find('<li') != -1\n )\n\n std_err = (\n self.report.read_stderr()\n if self.is_running else\n self.report.flush_stderr()\n ).strip('\\n').rstrip()\n\n dom = templating.render_template(\n 'step-body.html',\n last_display_update=self.report.last_update_time,\n elapsed_time=self.get_elapsed_timestamp(),\n code=code,\n body=body,\n has_body=has_body,\n id=self.definition.name,\n title=self.report.title,\n subtitle=self.report.subtitle,\n summary=self.report.summary,\n error=self.error,\n index=self.index,\n is_running=self.is_running,\n progress_message=self.progress_message,\n progress=int(round(max(0, min(100, 100 * self.progress)))),\n sub_progress_message=self.sub_progress_message,\n sub_progress=int(round(max(0, min(100, 100 * self.sub_progress)))),\n std_err=std_err\n )\n\n if not self.is_running:\n self.dom = dom\n return dom",
"def testStdoutReadDuringCapture(self):\n with self.OutputCapturer():\n print('foo')\n self.AssertOutputContainsLine('foo')\n print('bar')\n self.AssertOutputContainsLine('bar')\n self.AssertOutputContainsLine('foo')\n self.AssertOutputContainsLine('bar')",
"def javaScriptConsoleMessage(self, message, line, source):\n\n super(GRobotWebPage, self).javaScriptConsoleMessage(message, line,\n source)\n log_type = \"error\" if \"Error\" in message else \"info\"\n getattr(logger, log_type)(\"%s(%d): %s\" % (source or '<unknown>', line, message))",
"def test_main_output(self, capsys):\n args = self.args.copy()\n args[\"out_file\"] = \"text.txt\"\n UI.main(**args)\n captured = capsys.readouterr().out\n assert \"Results written to text.txt\" in captured"
] | [
"0.7060722",
"0.6297328",
"0.62191683",
"0.6036808",
"0.59491473",
"0.5853387",
"0.58372104",
"0.5825977",
"0.5810231",
"0.5803809",
"0.5773342",
"0.57336247",
"0.5707204",
"0.5692095",
"0.56633615",
"0.5617709",
"0.5614963",
"0.55947864",
"0.55731905",
"0.5567739",
"0.55644125",
"0.55444604",
"0.5526443",
"0.54957384",
"0.5490355",
"0.54782814",
"0.5461037",
"0.5456289",
"0.5430993",
"0.5425914"
] | 0.7509089 | 0 |
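The console test swaps a `MagicMock` in for the report's stdout interceptor and checks that every value, whatever its type, arrives as a string. A compact sketch of that `call_args` inspection pattern, with a simplified `Step` stand-in:

```python
from unittest.mock import MagicMock


class Step:
    def __init__(self, interceptor):
        self._interceptor = interceptor

    def write_to_console(self, message):
        # Coerce everything to str before it reaches the interceptor.
        self._interceptor.write_source('{}'.format(message))


write_source = MagicMock()
step = Step(MagicMock(write_source=write_source))

for message in (2, True, None, 'This is a test', b'hello'):
    step.write_to_console(message)
    args, kwargs = write_source.call_args
    assert args[0] == '{}'.format(message)

print("every message was stringified before writing")
```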
Should render to the console using a write_source function call on the internal step report's stdout_interceptor. | def test_render_to_console(self, _step: PropertyMock):
message = ' {{ a }} is not {{ b }}.'
_step_mock = MagicMock()
write_source = MagicMock()
_step_mock.report.stdout_interceptor.write_source = write_source
_step.return_value = _step_mock
step = exposed.ExposedStep()
step.render_to_console(message, a=7, b='happy')
args, kwargs = write_source.call_args
self.assertEqual('7 is not happy.', args[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_write_to_console(self, _step: PropertyMock):\n trials = [2, True, None, 'This is a test', b'hello']\n\n for message in trials:\n _step_mock = MagicMock()\n write_source = MagicMock()\n _step_mock.report.stdout_interceptor.write_source = write_source\n _step.return_value = _step_mock\n step = exposed.ExposedStep()\n step.write_to_console(message)\n\n args, kwargs = write_source.call_args\n self.assertEqual('{}'.format(message), args[0])",
"def stdout(self):\n pass",
"def render_entry_log(self):\n self.render_log(self.selenium_testcase_entry_template)",
"def test_PrintSmoke(self):\n stage = self.ConstructStage()\n with self.OutputCapturer():\n stage._Print('hi there')\n self.AssertOutputContainsLine('hi there', check_stderr=True)",
"def enable(self):\n self.out = StringIO()\n self._stdout = sys.stdout\n sys.stdout = self.out",
"def setUp(self):\n\t\tself.output = self.switchstdout()",
"def setUp(self):\n self.actualstdout = sys.stdout\n sys.stdout = StringIO.StringIO()",
"def capture_stdout(sq, method):\n capture = io.StringIO()\n sys.stdout = capture\n if method == \"print\":\n print(sq)\n else:\n sq.display()\n sys.stdout = sys.__stdout__\n return capture",
"def render_exit_log(self):\n self.render_log(self.selenium_testcase_exit_template)",
"def log_stdout(self, function):\n return function()",
"def hook_print():\n sys.stdout = PrintHook()",
"def p(self):\n self.printstdout = True",
"def test_stdout(self):\n stdout = StringIO()\n self.patch(sys, 'stdout', stdout)\n\n # Suppress warnings so that if there are any old-style plugins that\n # lore queries for don't confuse the assertion below. See #3070.\n self.patch(warnings, 'warn', lambda *a, **kw: None)\n self.test_buildTeX()\n self.assertEqual(stdout.getvalue(), '')",
"def dumps(self) -> str:\n code_file_path = os.path.join(\n self.project.source_directory,\n self.filename\n )\n code = dict(\n filename=self.filename,\n path=code_file_path,\n code=render.code_file(code_file_path)\n )\n\n if not self.is_running:\n # If no longer running, make sure to flush the stdout buffer so\n # any print statements at the end of the step get included in\n # the body\n self.report.flush_stdout()\n\n # Create a copy of the body for dumping\n body = self.report.body[:]\n\n if self.is_running:\n # If still running add a temporary copy of anything not flushed\n # from the stdout buffer to the copy of the body for display. Do\n # not flush the buffer though until the step is done running or\n # it gets flushed by another display call.\n body.append(self.report.read_stdout())\n\n body = ''.join(body)\n\n has_body = len(body) > 0 and (\n body.find('<div') != -1 or\n body.find('<span') != -1 or\n body.find('<p') != -1 or\n body.find('<pre') != -1 or\n body.find('<h') != -1 or\n body.find('<ol') != -1 or\n body.find('<ul') != -1 or\n body.find('<li') != -1\n )\n\n std_err = (\n self.report.read_stderr()\n if self.is_running else\n self.report.flush_stderr()\n ).strip('\\n').rstrip()\n\n dom = templating.render_template(\n 'step-body.html',\n last_display_update=self.report.last_update_time,\n elapsed_time=self.get_elapsed_timestamp(),\n code=code,\n body=body,\n has_body=has_body,\n id=self.definition.name,\n title=self.report.title,\n subtitle=self.report.subtitle,\n summary=self.report.summary,\n error=self.error,\n index=self.index,\n is_running=self.is_running,\n progress_message=self.progress_message,\n progress=int(round(max(0, min(100, 100 * self.progress)))),\n sub_progress_message=self.sub_progress_message,\n sub_progress=int(round(max(0, min(100, 100 * self.sub_progress)))),\n std_err=std_err\n )\n\n if not self.is_running:\n self.dom = dom\n return dom",
"def stdout(self):\n if not hasattr(self, \"my_stdout_proxy\"):\n self.my_stdout_proxy = self.outfile_proxy()\n self.my_stdout_proxy_created = 1\n return self.my_stdout_proxy",
"def javaScriptConsoleMessage(self, message, line_number, source_id):\n print 'Console:', message, line_number, source_id",
"def __execute_reporter(self):\n if not self.__args.report:\n return\n reporter.HTMLReporter().generate_report_from_file(\n self.__lst_json_files)",
"def test_PrintLoudlySmoke(self):\n stage = self.ConstructStage()\n with self.OutputCapturer():\n stage._PrintLoudly('hi there')\n self.AssertOutputContainsLine(r'\\*{10}', check_stderr=True)\n self.AssertOutputContainsLine('hi there', check_stderr=True)",
"def _print_source(f):\n\n @_wraps(f)\n def wrapper(*args, **kwargs):\n source = _getsource(f)\n print(_clean_source(source))\n return f(*args, **kwargs)\n\n return wrapper",
"def print_cmd_line(s, target, src, env):\n sys.stdout.write(\" Making %s...\\n\"% (' and '.join([str(x) for x in target])))",
"def setup(self) -> \"None\":\n # Patch the renderer to extend the output height\n renderer._output_screen_diff = _patched_output_screen_diff\n\n if config.page and sys.stdout.isatty():\n # Use a temporary file as display output if we are going to page the output\n from tempfile import TemporaryFile\n\n self.out_file = TemporaryFile(\"w+\")\n\n else:\n if config.page:\n log.warning(\"Cannot page output because standard output is not a TTY\")\n # If we are not paging output, determine when to print it\n if config.dump_file is None or str(config.dump_file) in (\n \"-\",\n \"/dev/stdout\",\n ):\n self.out_file = sys.stdout\n elif str(config.dump_file) == \"/dev/stderr\":\n self.out_file = sys.stderr\n else:\n try:\n self.out_file = open(config.dump_file, \"w+\")\n except (\n FileNotFoundError,\n PermissionError,\n io.UnsupportedOperation,\n ) as error:\n log.error(error)\n log.error(\n f\"Output file `{config.dump_file}` cannot be opened. \"\n \"Standard output will be used.\"\n )\n self.out_file = sys.stdout\n\n # Ensure we do not recieve the \"Output is not a terminal\" message\n Vt100_Output._fds_not_a_terminal.add(self.out_file.fileno())\n # Do not use stderr instead of stdout if stdout is not a tty\n self.out_file = cast(\"TextIO\", self.out_file)\n self.output = create_output(self.out_file, always_prefer_tty=False)\n\n # Use the width and height of stderr (this gives us the terminal size even if\n # output is being piped to a non-tty)\n # if hasattr(self.output, '_get_size'):\n setattr(self.output, \"get_size\", create_output(stdout=sys.stderr).get_size)\n\n # Disable character position requests when dumping output to stop extra output\n # This also speeds things up as we do not need to wait for the response\n # Ignore typing here as mypy does not understand __class__\n class DumpingOutput(self.output.__class__): # type: ignore\n # Disable character position requests when dumping output\n responds_to_cpr = False\n\n # Patch the output to prevent CPR detection\n self.output.__class__ = DumpingOutput\n\n # Set pre-run commands\n self.pre_run.append(self.post_dump)",
"def test_capture_stdout():\n\n sys.stdout.write('Print to stdout')\n\n assert False",
"def do_print(self, line):\n cmd_args = io.parse_cmd_args(line, io.output_cmd_pattern)\n if cmd_args:\n success = self.manager.print_to_console(\n cmd_args.get('target'), \n cmd_args.get('filters')\n )\n if success:\n self.console_print(\"There, you asked for it!\", settings.INFO_FORMAT)\n else:\n self.console_print(\"Sorry, something kinda went wrong! You can try again.\", settings.ERROR_FORMAT)\n else:\n self.console_print(settings.COMMMAND_ARGS_ERROR_MSG, settings.ERROR_FORMAT)",
"def render_log(self, template):\n\n # only write to the log file if it exists\n if self._selenium_log_file:\n\n id = self.id()\n description = self.shortDescription()\n\n # grab the stack frame info from test_* method\n (obj, filename, lineno, function, code_context, index) \\\n = self.get_test_frame()\n\n # render the test case debug\n html = render_to_string(\n template, {\n 'id': id,\n 'description': description,\n 'filename': filename,\n 'lineno': lineno,\n 'function': function,\n 'code_context': code_context,\n 'index': index,\n 'png': self.get_image_uri(),\n 'text': self.get_visible_text()})\n\n # write it to the file\n self._selenium_log_file.write(html.encode('utf8'))",
"def test_export_custom(self): # pylint: disable=no-self-use\n mock_record_str = Mock(str)\n\n def formatter(record): # pylint: disable=unused-argument\n return mock_record_str\n\n mock_stdout = Mock()\n exporter = ConsoleLogExporter(out=mock_stdout, formatter=formatter)\n log_data = LogData(\n log_record=LogRecord(),\n instrumentation_scope=InstrumentationScope(\n \"first_name\", \"first_version\"\n ),\n )\n exporter.export([log_data])\n mock_stdout.write.assert_called_once_with(mock_record_str)",
"def javaScriptConsoleMessage(self, message, line, source):\n\n super(GRobotWebPage, self).javaScriptConsoleMessage(message, line,\n source)\n log_type = \"error\" if \"Error\" in message else \"info\"\n getattr(logger, log_type)(\"%s(%d): %s\" % (source or '<unknown>', line, message))",
"def test_debug_output(self):\n assert output(self.msg) is not None",
"def redirect_stdout():\n save_stdout = sys.stdout\n sys.stdout = _TQDMFile(sys.stdout)\n yield\n sys.stdout = save_stdout",
"def display_stdout_and_err_in_curr_cell(self):\n ipy_display(self.output_widget)",
"def render_source(self, filename, obj):\n raise NotImplementedError()"
] | [
"0.7027907",
"0.6089574",
"0.59871966",
"0.5953501",
"0.5823776",
"0.58008",
"0.5722767",
"0.569426",
"0.5688539",
"0.5679437",
"0.56706893",
"0.5645537",
"0.5632503",
"0.5632419",
"0.5579383",
"0.55504066",
"0.54777354",
"0.5475465",
"0.547519",
"0.5462693",
"0.5446832",
"0.54348505",
"0.5432767",
"0.5431265",
"0.5427429",
"0.54177105",
"0.5372156",
"0.53701395",
"0.53401256",
"0.53392524"
] | 0.75739104 | 0 |
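The render variant substitutes keyword arguments into a `'{{ a }} is not {{ b }}.'` style template before writing. The real project presumably uses a full template engine; the regex-based stand-in below only mimics the placeholder syntax seen in the test:

```python
import re


def render(template: str, **kwargs) -> str:
    """Replace each '{{ name }}' placeholder with the matching keyword argument."""
    def substitute(match):
        return str(kwargs[match.group(1)])

    return re.sub(r"\{\{\s*(\w+)\s*\}\}", substitute, template).strip()


print(render(' {{ a }} is not {{ b }}.', a=7, b='happy'))  # -> "7 is not happy."
```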
Should raise a ValueError when there is no current step to operate upon by the write function call. | def test_write_to_console_fail(self, _step: PropertyMock):
_step.return_value = None
step = exposed.ExposedStep()
with self.assertRaises(ValueError):
step.write_to_console('hello') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _step(self) -> None:",
"def step(self):\r\n raise NotImplementedError",
"def step(self):\n raise NotImplementedError()",
"def step(self):\n raise NotImplementedError()",
"def step(self):\n raise NotImplementedError()",
"def step(self):\n raise NotImplementedError(\n f'{self.__class__.__name__} must implement a `step` method.'\n )",
"def step(self):\n raise NotImplementedError",
"def _step(self):\n pass",
"def after_step():\n raise NotImplementedError",
"def record(self, step):",
"def do_step(self) -> None:",
"def _report_step(self, learning_rate, step, train_stats=None,\n valid_stats=None):\n if self.report_manager is not None:\n return self.report_manager.report_step(\n learning_rate, step, train_stats=train_stats,\n valid_stats=valid_stats)",
"def _handle_write(self):\n pass",
"def result(self, step):\n indent_extra = 0\n if self.current_rule:\n indent_extra = self.indent_size\n\n step = self.steps.pop(0)\n indent = make_indentation(2 * self.indent_size + indent_extra)\n if self.show_aligned_keywords:\n # -- RIGHT-ALIGN KEYWORDS (max. keyword width: 6):\n text = u\"%s%6s %s ... \" % (indent, step.keyword, step.name)\n else:\n text = u\"%s%s %s ... \" % (indent, step.keyword, step.name)\n self.stream.write(text)\n\n status_text = step.status.name\n if self.show_timings:\n status_text += \" in %0.3fs\" % step.duration\n\n unicode_errors = 0\n if step.error_message:\n try:\n self.stream.write(u\"%s\\n%s\\n\" % (status_text, step.error_message))\n except UnicodeError as e:\n unicode_errors += 1\n self.stream.write(u\"%s\\n\" % status_text)\n self.stream.write(u\"%s while writing error message: %s\\n\" % \\\n (e.__class__.__name__, e))\n if self.RAISE_OUTPUT_ERRORS:\n raise\n else:\n self.stream.write(u\"%s\\n\" % status_text)\n\n if self.show_multiline:\n if step.text:\n try:\n self.doc_string(step.text)\n except UnicodeError as e:\n unicode_errors += 1\n self.stream.write(u\"%s while writing docstring: %s\\n\" % \\\n (e.__class__.__name__, e))\n if self.RAISE_OUTPUT_ERRORS:\n raise\n if step.table:\n self.table(step.table)",
"def step(self):\n raise TaskError(\"Task %s: subclass should override step() method!\" %\n self)",
"def step(self, step=None):\n pass",
"def test_save_npy_with_invalid_step(temp_dir):\n data = np.array([[1, 2, 3], [4, 5, 6]])\n\n with pytest.raises(ValueError):\n save_npy(temp_dir, data, step={\"invalid\": \"dict\"})",
"def report_step_progress(self, step):\n dot_status = self.dot_status[step.status.name]\n if step.status == Status.failed:\n if (step.exception and\n not isinstance(step.exception, AssertionError)):\n # -- ISA-ERROR: Some Exception\n dot_status = self.dot_status[\"error\"]\n step.feature = self.current_feature\n step.scenario = self.current_scenario\n self.failures.append(step)\n self.stream.write(dot_status)\n self.stream.flush()",
"def getCurrentStep():",
"def simulation_step(self):\n if self.data_valid.get():\n print(\"Output pin %s writing %s\" % (self.name, self.debug_data.get()))",
"def test_failed_glue(self):\n sink = self.tool.glue(self.line, self.head, (90, 50))\n self.assertTrue(sink is None)",
"def step(self):\n\n pass",
"def _step(self, whence):\n pass",
"def handle_write(self):\n pass",
"def test_write_to_console(self, _step: PropertyMock):\n trials = [2, True, None, 'This is a test', b'hello']\n\n for message in trials:\n _step_mock = MagicMock()\n write_source = MagicMock()\n _step_mock.report.stdout_interceptor.write_source = write_source\n _step.return_value = _step_mock\n step = exposed.ExposedStep()\n step.write_to_console(message)\n\n args, kwargs = write_source.call_args\n self.assertEqual('{}'.format(message), args[0])",
"def on_step_end(self, step, logs={}):\n self.total_steps += 1\n if self.total_steps % self.interval != 0:\n # Nothing\n return\n\n filepath = self.filepath.format(step=self.total_steps, **logs)\n if self.verbose > 0:\n print('\\nStep {}: saving kmodel to {}'.format(self.total_steps,\n filepath))\n self.kmodel.save(filepath)",
"def test_write_skips_invalid_rows(self):\n writer = BaseTSVWriter([\n ('Prop1', Mock(side_effect=InvalidTsvRowException)),\n ])\n\n row = NonCallableMock()\n\n valid, invalid = writer.write(self.tsv_file, [row])\n\n assert valid == []\n assert invalid == [row]\n assert self.tsv_value == \"Prop1\\r\\n\"",
"def report_step_progress(self, step):\n pass",
"def test_step_out_of_bounds_indices(self):\n _, backend = _collect_episode_data(num_episodes=6)\n data_reader = in_memory_backend.InMemoryBackendReader(backend)\n self.assertRaises(IndexError, operator.getitem, data_reader.steps,\n len(data_reader.steps))\n self.assertRaises(IndexError, operator.getitem, data_reader.steps,\n -len(data_reader.steps) - 1)",
"def create_step(self, step):\n raise NotImplementedError"
] | [
"0.60865235",
"0.6002841",
"0.59607214",
"0.59607214",
"0.59607214",
"0.59400934",
"0.5931165",
"0.59239656",
"0.59183896",
"0.5840517",
"0.57957006",
"0.5792233",
"0.57350117",
"0.57000464",
"0.5628943",
"0.5625073",
"0.5619831",
"0.56141627",
"0.5597384",
"0.5548896",
"0.5536715",
"0.55229527",
"0.5506704",
"0.55008084",
"0.54638463",
"0.5455872",
"0.5454955",
"0.5438352",
"0.54267335",
"0.5387193"
] | 0.6328565 | 0 |
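The failure case expects a `ValueError` when no step is available to receive the write. A short sketch of asserting that with `assertRaises`; the stand-in class simply hard-codes `_step = None` instead of mocking the property:

```python
import unittest


class ExposedStep:
    """Stand-in whose _step is always None, mirroring the mocked property in the test."""

    _step = None

    def write_to_console(self, message):
        if self._step is None:
            raise ValueError("No step is currently running")
        self._step.report.stdout_interceptor.write_source(str(message))


class TestWriteFailure(unittest.TestCase):
    def test_raises_without_step(self):
        with self.assertRaises(ValueError):
            ExposedStep().write_to_console('hello')


if __name__ == "__main__":
    unittest.main()
```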
Should render an empty stack frame when the stack data is invalid. | def test_render_stop_display_error(
self,
get_formatted_stack_frame: MagicMock,
render_template: MagicMock
):
get_formatted_stack_frame.return_value = None
step = MagicMock()
exposed.render_stop_display(step, 'FAKE')
self.assertEqual({}, render_template.call_args[1]['frame']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_empty_stack() -> None:\n with raises(GrammarParseError):\n grammar_parser.parse(\"ab}\", lexer_mode=\"VALUE_MODE\")",
"def empty(self):\r\n return len(self.stack) == 0",
"def show_stack(self) -> None:\n print(\"Show stack: \")\n ok = 1\n for i in reversed(self.items):\n print(i)\n ok = 0\n if ok:\n print(\"The stack is empty!\")\n print(\"\\n\")",
"def empty(self) -> bool:\n if len(self.stack)==0:\n return True\n else:\n return False",
"def is_empty(self):\n return len(self.the_stack) == 0",
"def empty(self) -> bool:\n if len(self.input_stack)==0 and len(self.output_stack)==0:\n return True\n else:\n return False",
"def test_empty_stack_has_no_value(empty_stack):\n assert empty_stack.top is None",
"def is_empty(self):\n return len(self.stack) == 0",
"def empty(self):\n return len(self.stack) == 0",
"def empty(self):\n return len(self.stack) == 0",
"def empty(self) -> bool:\n if len(self.stackOut) == 0 and len(self.stackIn) == 0:\n return True",
"def empty(self):\r\n return self.stack == []",
"def empty(self) -> bool:\n return len(self.input_stack) == 0 and len(self.output_stack) == 0",
"def empty(self):\n return len(self.stack1) == 0 and len(self.stack2) == 0",
"def empty(self):\n return len(self.stack1) == 0 and len(self.stack2) == 0",
"def empty(self):\n return len(self.stacks[self.activeStackIndex]) == 0",
"def empty(self):\n return self.stack == []",
"def undo_stack_not_empty(self):\n return self.undo_stack.stack_not_empty()",
"def empty(self) -> bool:\n return not self._input_stack and not self._output_stack",
"def empty(self):\n return self.input_stack == [] and self.output_stack == []",
"def empty(self) -> bool:\n return len(self.stack) == 0",
"def empty(self) -> bool:\n return not self.stack",
"def empty(self) -> bool:\n return not self.stack",
"def isEmpty(self):\n return len(self.stack) == 0",
"def is_empty(self):\n\n return not self._stack",
"def isEmpty(self):\n return not self.stack",
"def make_empty_stack():\n return Stack(0, None)",
"def empty(self) -> bool:\n return 1 if not self.stack else 0",
"def test_peek_empty():\n test_stack = stack.Stack()\n\n with pytest.raises(stack.StackEmptyError):\n test_stack.peek()",
"def empty_stack(stack):\n if stack.top is None:\n return True\n else:\n return False"
] | [
"0.6172748",
"0.6088951",
"0.6082694",
"0.6078208",
"0.6071998",
"0.6070815",
"0.604263",
"0.6012433",
"0.5990902",
"0.5990902",
"0.5970938",
"0.5947907",
"0.58997005",
"0.58715206",
"0.58715206",
"0.5864916",
"0.58454406",
"0.58197826",
"0.58059406",
"0.5792144",
"0.5775396",
"0.57690877",
"0.57690877",
"0.5745798",
"0.573892",
"0.5702028",
"0.5697813",
"0.56972295",
"0.56751746",
"0.5668252"
] | 0.61049175 | 1 |
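The final test forces the stack-frame helper to return `None` and checks that the renderer still passes an empty dict as the `frame` keyword. A sketch of that defensive default together with the `call_args[1]` inspection; the two module-level mocks stand in for functions the test patches, and their real signatures are assumptions:

```python
from unittest.mock import MagicMock

render_template = MagicMock()
get_formatted_stack_frame = MagicMock(return_value=None)


def render_stop_display(step, message):
    # Fall back to an empty frame so the template never receives None.
    frame = get_formatted_stack_frame(step) or {}
    return render_template('stop-display.html', message=message, frame=frame)


render_stop_display(MagicMock(), 'FAKE')
assert render_template.call_args[1]['frame'] == {}
print("empty frame rendered when stack data is invalid")
```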
Calculates % of alphanumeric characters in string. | def _alnum_percent(line):
total = len(line)
test_set = set()
for letter in string.ascii_letters:
test_set.add(letter)
test_set.add(' ')
# Return a failure (no good characters) if there are no characters
if total < 1:
return 0
alnum_count = 0
star_count = 0
bar_count = 0
for letter in line:
# if letter.isalnum():
if letter in test_set:
alnum_count += 1
if letter == '*':
star_count += 1
if letter == 'I' or letter == 'i' or letter == 'l' or letter == '|':
bar_count += 1
# TODO(searow): properly implement this, but sticking this here for now.
if star_count / total > 0.1:
return 0
if bar_count / total > 0.5:
return 0
return alnum_count / total | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def letter_percent(s):\r\n\r\n alpha = 'abcdefghijklmnopqrstuvwxyz'\r\n s_lower = s.lower()\r\n s_length = 0\r\n letter_count = {} # empty dictionary\r\n keys = letter_count.keys()\r\n\r\n for char in s_lower:\r\n if char in alpha:\r\n s_length = s_length + 1\r\n if char in letter_count:\r\n letter_count[char] = letter_count[char] + 1\r\n else:\r\n letter_count[char] = 1\r\n\r\n for char in sorted(keys):\r\n letter_count[char] = (letter_count[char] / s_length) * 100\r\n print(char, \"{:.1f}%\".format(letter_count[char]))",
"def percent_without_letter(l):\n\treturn len(words_without_letter(l)) / len(word_set)",
"def escPercent(text):\n pat = re.compile(r'%(?!\\()')\n return pat.sub('%%', text)",
"def token_percentage(word, text):\n word_count = text.count(word)\n text_len = len(text)\n return percentage(word_count, text_len)",
"def count_string_indiv(num, num_patients):\n output = \"%.0f/\" % num\n output += str(num_patients)\n if num_patients is not 0:\n percentage = (num / num_patients) * 100\n else:\n percentage = 0.0\n output += ' (%.1f%%)' % percentage\n return output",
"def analyze_text(text):\n # Your code here\n total=0\n total_es = 0\n\n for c in text:\n if c.isalpha():\n total += 1\n\n total_es = text.lower().count('e')\n percentage = float(total_es/total)\n\n print(total, total_es, percentage * 100)",
"def alphanum_score(words):\n\n\t# Add your code here\n\treturn",
"def percent_of(part, whole):\n return part * 100 / whole",
"def frequencyLetterDic(s):\n pass",
"def convert_percent_str(x):\n if x:\n return float(str(x).strip(\"% \"))\n return 0",
"def calc_percent(byte_counter, data_len):\n if data_len is None or not data_len:\n # case where length is not present in metadata or zero\n return '---.-%'\n return '%6s' % ('%3.1f%%'\n % (float(byte_counter) / float(data_len) * 100.0))",
"def myHash(string, base=91, mod=1000000321):\n value = 0\n for pos, elem in enumerate(string[::-1]): # считаем значение полинома\n value += ord(elem) * base**pos # в последней задаче сделано с помощью массива (динамика)\n return value % mod",
"def percent_encode(src):\n\tdst = ''\n\treserved = re.compile('[A-Za-z0-9-._~]')\n\tfor char in src:\n\t\tif reserved.search(char):\n\t\t\tdst += char\n\t\telse:\n\t\t\t# Percent encode needed chars\n\t\t\t# Convert each byte to hex\n\t\t\tfor byte in list(char.encode('UTF-8')):\n\t\t\t\tif reserved.search(chr(byte)):\n\t\t\t\t\tdst += chr(byte)\n\t\t\t\telse:\n\t\t\t\t\tdst += '%'+hex(byte)[2:].upper()\n\treturn dst",
"def at_content(seq):\n result = float(str(seq).count('A') + str(seq).count('T'))/len(seq) *100\n return result",
"def getGCpercentage(DNA):\n dnaLength = len(DNA) #counts the length of the DNA string\n findG = DNA.count(\"G\") #finds the letter G in DNA string\n findC = DNA.count(\"C\") #finds the letter C in DNA string\n print(findG)\n print(findC)\n print(dnaLength)\n GCpercent = ((findC + findG)/dnaLength) * 100 #calculates percentage of Gs and Cs\n print(\"Percentage of G and C:\",\" %6.2f\" % GCpercent)\n \n return getGCpercentage",
"def percent_str(part, total):\n return str(round(100 * float(part) / float(total), 2)) + '%'",
"def format_percentage(num):\n return \"{}%\".format(num)",
"def customHashFunc(str):\n return sum(ord(chr) for chr in str)%128",
"def gc_rate(dna: str, percent=False):\n c = Counter(dna)\n result = (c[\"G\"] + c[\"C\"]) / len(dna)\n return result * 100 if percent else result",
"def _get_accuracy(text):\n sta_obj = [m.start() for m in re.finditer('%',text)]\n return([float(text[x-3:x:1]) for x in sta_obj])",
"def readable_percent(value, d):\n return \"%s %%\" % (str(round(100.0*float(value), int(d))))",
"def random_characters(alpha, numeric_percent_chance=20):\n\n random.seed()\n string_length = len(alpha)\n alphanumeric = ''\n\n for i in range(0, string_length):\n check_int = random.randrange(1, 100)\n\n if check_int <= numeric_percent_chance:\n alphanumeric += str(alpha_to_leet(alpha[i]))\n else:\n alphanumeric += alpha[i]\n\n return alphanumeric",
"def letter_freq( text ):\n\tchars = string.ascii_uppercase\n\ttext = text.upper()\n\tresult = get_letter_dict()\n\ttotal = 0\n\tfor char in chars:\n\t\tcount = text.count(char)\n\t\tresult[char] = count\n\t\ttotal += count\n\tif total != 0:\n\t\tfor char in chars:\n\t\t\tresult[char] = (result[char]*10000 / total) / float(100)\n\treturn result",
"def get_freq(string:str) -> float:\n import numpy\n try:\n freq = float(string.replace(\"%\", \"\")) / 100\n except AttributeError as e:\n # if string is np.nan\n freq = numpy.nan\n return freq",
"def h_ascii(key, N):\n if type(key) == str:\n if type(N) == int:\n s = 0\n for i in range(len(key)):\n s += ord(key[i])\n return s % N\n else:\n raise ValueError\n else:\n raise ValueError",
"def percentCommand(self):\n if self.digits[\"text\"] == '0':\n return\n else:\n number = float(self.digits[\"text\"])\n number /= 100\n self.digits[\"text\"] = str(number)\n return self.digits[\"text\"]",
"def per(a):\n return a * 100",
"def alpha_percent_normalize(perc):\n\n alpha_float = clamp(float(perc.strip('%')), 0.0, 100.0) / 100.0\n alpha_dec = fmt_float(alpha_float, 3)\n alpha = \"%02X\" % round_int(alpha_float * 255.0)\n return alpha, alpha_dec",
"def percent(value):\n return f\"{value:,.2f} %\"",
"def add_percentage(grade):\n\tif type(grade) == float:\n\t\tperc_grade = str(grade) + '%'\n\t\treturn perc_grade\n\telse:\n\t\treturn grade"
] | [
"0.73372185",
"0.6893399",
"0.6517647",
"0.63861024",
"0.61528224",
"0.6047942",
"0.60439736",
"0.5990186",
"0.58984005",
"0.588371",
"0.5831391",
"0.58165365",
"0.57671547",
"0.5763353",
"0.57626456",
"0.57338387",
"0.5726077",
"0.5723785",
"0.5721825",
"0.57174385",
"0.5693655",
"0.5690244",
"0.562634",
"0.56194204",
"0.5619242",
"0.56012535",
"0.5545487",
"0.554227",
"0.5524246",
"0.5515458"
] | 0.75368613 | 0 |
Analyzes text lines, in the order read from OCR processing. Populates the MailFields object with information gathered from OCR. Uses information from each of the lines to best figure out who is the main addressee and which box it is trying to reach. | def parse_text_lines(self, text_lines):
self.__fields = mail_fields.MailFields()
alphanum_threshold = 0.5
# Only evaluate lines that are predominantly alphanumeric
for line in text_lines:
if _alnum_percent(line) > alphanum_threshold:
try:
parsed = usaddress.tag(line)[0]
except usaddress.RepeatedLabelError as e:
# If usaddress gets confused, just throw away the answer as if
# we got nothing for now.
# TODO(searow): fix this to handle multiple tags and labels.
parsed = {}
for tag in parsed:
self._add_to_fields(tag, parsed[tag])
return self.__fields | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process(self) -> None:\n self.parsed = email.message_from_bytes(self.rawmailcontent, policy=email.policy.EmailPolicy()) # type: email.message.EmailMessage\n\n self.subject = self.parsed[\"subject\"]\n\n if self.parsed[\"X-Jicket-Initial-ReplyID\"] is not None and self.parsed[\"X-Jicket-Initial-ReplyID\"] == self.parsed[\"In-Reply-To\"]:\n self.threadstarter = True\n elif self.config.ticketAddress in self.parsed[\"From\"]: # Take more heuristic approach\n self.threadstarter = True\n\n self.rawmailcontent = None # No need to store after processing\n\n self.get_text_bodies(self.parsed)\n self.textfrombodies()",
"def parse(self):\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)",
"def process(Email):\n # convert to lower case\n email = Email.read().lower()\n # strip any HTML\n temp = regx.sub(\"<.*?>\", \" \", email)\n # replace numbers for 0-9 with \"number\"\n temp = regx.sub(\"[0-9]+\", \"number\", temp)\n # replace Http adress to \"httpaddr\"\n temp = regx.sub(\"(http|https)://[^\\s]*\", \"httpaddr\", temp)\n # replace email adress with \"emailaddr\"\n temp = regx.sub(\"[^\\s]+@.*?\\s+\", \"emailaddr\", temp)\n # replace currency sign\n temp = regx.sub(\"[$]+\", \"dollar\", temp)\n temp = regx.sub(\"[']\", \" \", temp)\n # ========================== Tokenize Email ===========================\n # temp = regx.sub(\">+|:+|#+|[$]+|[.]+|@+|/+|-+|&+|[*]+|[+]+|=+|[]]+|[?]+|[()]+|[{}]+|,+|[']+|<+|_+|;+|%+\", \"\", temp)\n\n # remove punctuation\n temp = temp.translate(str.maketrans('', '', string.punctuation))\n\n # split the string in list of words\n tokenized_list = temp.split()\n stemmer = PorterStemmer()\n a = []\n vocab = VocabArray.getVocab()\n extracted_features = mat.zeros((1, len(vocab)))\n\n i = 0\n print(\"========================== Processed Email =========================\")\n for w in range(len(tokenized_list)):\n if len(tokenized_list[w]) < 1:\n continue\n\n # stem the word\n word = stemmer.stem(tokenized_list[w])\n print(word, end=\" \")\n if i > 20:\n i = 0\n print(\"\\n\")\n # get index of the word from vocab list\n indices = mat.where(vocab == word)[0]\n i += 1\n if len(indices) == 0:\n continue\n\n a.append(indices)\n extracted_features[:, indices] = 1\n\n word_indices = mat.c_[mat.array(a)]\n print(\"\\n\")\n return word_indices, extracted_features",
"def process(self):\n\n linelang = defaultdict(int)\n wordlang = defaultdict(int)\n\n linefont = defaultdict(int)\n wordfont = defaultdict(int)\n\n inputfiles = self.input_files\n for input_file in inputfiles:\n\n alignurl = input_file.url\n pcgts = parse(alignurl, True)\n page = pcgts.get_Page()\n regions = page.get_TextRegion()\n\n for region in regions:\n lines = region.get_TextLine()\n\n for line in lines:\n try:\n llang = line.primaryLanguage\n linelang[llang] += 1\n except TypeError:\n pass\n\n try:\n lfont = line.fontFamily\n linefont[lfont] += 1\n except TypeError:\n pass\n\n words = line.get_Word()\n for word in words:\n try:\n wlang = word.language\n wordlang[wlang] += 1\n except TypeError:\n pass\n\n try:\n wfont = word.get_TextStyle().fontFamily\n wordfont[wfont] += 1\n except TypeError:\n pass\n\n #predominant language\n try:\n lang = max(linelang, key=lambda k: linelang[k])\n except TypeError:\n try:\n lang = max(wordlang, key=lambda k: wordlang[k])\n except TypeError:\n lang = 'German'\n\n #predominant font\n try:\n font = max(linefont, key=lambda k: linefont[k])\n except TypeError:\n try:\n font = max(wordfont, key=lambda k: wordfont[k])\n except TypeError:\n font = 'Antiqua'\n\n\n print(lang)\n print(font)",
"def fiber_provider_parse(self, txt):\n\n #PLANNED WORK (PW) Notification\n p1 = re.compile(r'^PLANNED WORK \\(PW\\) Notification',re.MULTILINE)\n\n #PW Reference number: PWIC12345\n p2 = re.compile(r'^PW Reference number:\\s(?P<pw_ref>\\w+)',re.MULTILINE)\n \n #Start Date and Time: 2019-Apr-09 06:00 UTC\n p3 = re.compile(r'^Start Date and Time:\\s(?P<start_time>[\\w\\-]+\\s[0-9:]+)\\s(?P<start_tz>[A-Za-z]+)', re.MULTILINE)\n\n #End Date and Time: 2019-Apr-09 10:00 UTC\n p4 = re.compile(r'^End Date and Time:\\s(?P<end_time>[\\w\\-]+\\s[0-9:]+)\\s(?P<end_tz>[A-Za-z]+)', re.MULTILINE)\n \n #Service ID: IC-99999\n p5 = re.compile(r'^Service ID:\\s(?P<service>[\\w\\-]+)',re.MULTILINE)\n\n\n\n if p1.search(txt):\n #Notifcation about new planned work\n self.ticket_type = 'New maintenance'\n \n try:\n self.provider_ref = p2.search(txt).group('pw_ref')\n\n timeformat = '%Y-%b-%d %H:%M' #Date/Time format used by Fiber Provider\n \n local_start_time = datetime.strptime(p3.search(txt).group('start_time'), timeformat)\n start_tz = p3.search(txt).group('start_tz')\n self.start_time = MailParser.convert_time(local_start_time, start_tz)\n\n local_end_time = datetime.strptime(p4.search(txt).group('end_time'), timeformat)\n end_tz = p4.search(txt).group('end_tz')\n self.end_time = MailParser.convert_time(local_end_time, end_tz)\n\n self.service = p5.search(txt).group('service')\n except:\n raise Exception('Cannot parse some properties')\n\n #here would go conditions for cancelled work and modification\n # but without actual email its difficult to create\n # \n return self",
"def parseOutText(f):\n\n\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n ### split off metadata\n \n content = re.split(\"X-FileName:.*$\", all_text, flags=re.MULTILINE, maxsplit=1)\n words = \"\"\n if len(content) > 1:\n text_string = content[1]\n\n ## remove mails that are forwarded or to which are responded\n # e.g. ---------------------- Forwarded\"\n text_string = re.split(\"-*\\sForwarded\", text_string, maxsplit=1)[0]\n\n # -----Original Message-----\n text_string = re.split(\"-*\\Original\\sMessage\", text_string, maxsplit=1)[0]\n\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # To:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n # or\n # Vince J Kaminski@ECT\n # 04/30/2001 02:28 PM\n # to:\tStanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron\n # cc:\tVince J Kaminski/HOU/ECT@ECT \n \n text_string = re.split(\"((.*\\n){2})[Tt]o:\\s\", text_string, maxsplit=1)[0]\n\n ### remove punctuation\n # should be autopmatically by scikit learn\n #text_string = text_string.translate(string.maketrans(\"\", \"\"), string.punctuation)\n\n ### project part 2: comment out the line below\n #words = text_string\n\n ### split the text string into individual words, stem each word,\n ### and append the stemmed word to words (make sure there's a single\n ### space between each stemmed word)\n from nltk.stem.snowball import SnowballStemmer\n\n stemmer = SnowballStemmer(\"english\")\n words = [stemmer.stem(word) for word in text_string.split()]\n\n\n\n return \" \".join(words)",
"def _parse_contact_information(self):\n left_column = self.content.find(\"div\", class_=\"linkeSpalte40\")\n graubox = left_column.find(\n lambda tag: tag.name == \"div\" and tag[\"class\"] == [\"grauBox\"]\n )\n\n emails_raw = graubox.find_all(\"a\", class_=\"mail\")\n websites_raw = graubox.find_all(\"a\", class_=\"noDecoration\")\n telephone_raw = graubox.find_all(\"span\", class_=\"telefonnummer\")\n address_raw = [\n e.nextSibling for e in graubox.find_all(\"em\") if e.text == \"Anschrift:\"\n ]\n\n address = address_raw[0].li.get_text(\"\\n\") if address_raw else None\n emails = [re.sub(r\"^mailto:\", \"\", e.attrs[\"href\"]) for e in emails_raw]\n phone_numbers = [t.text for t in telephone_raw]\n websites = [w.attrs[\"href\"] for w in websites_raw]\n\n return {\n \"address\": address,\n \"emails\": emails,\n \"phone_numbers\": phone_numbers,\n \"websites\": websites,\n }",
"def parse_report_line(self,line):\n\n report = self.new_police_report()\n report['original_text'] = line\n \n #\n # extract month and day\n match_date = REPORT_DATE_REGEXP.search(line)\n assert(match_date)\n start_index=match_date.start('month')\n stop_index=match_date.end('month')\n report['date_month'] = int(line[start_index:stop_index])\n\n start_index=match_date.start('day')\n stop_index=match_date.end('day')\n report['date_day'] = int(line[start_index:stop_index])\n\n my_logger.debug('extracted date (%d/%d)' % (report['date_month'],report['date_day']))\n\n #############################################\n # extract location & scale\n line = line[0:match_date.start('month')-1] # truncate after start of date\n \n #\n # trim off preceding html and trailing comma\n start_index=line.rfind('>')+1\n assert(start_index>0)\n\n stop_index=line.rfind(',',start_index)\n \n if stop_index >= 2:\n #\n # found a comma, \n line = line[start_index:stop_index]\n else:\n #\n # no comma found\n line = line[start_index:]\n my_logger.debug('truncated string: (%s)' % line)\n report['address']=line\n #\n # try to determine which case:\n # a block\n # an exact address\n # an establishment\n # an intersection\n # special cases, like: \"downtown mountain view\"\n # \n\n if (BLOCK_REGEXP.match(line)!=None):\n my_logger.debug('BLOCK detected')\n report['map_scale']=mapscale.BLOCK\n elif (INTERSECTION_REGEXP.match(line)!=None):\n my_logger.debug('INTERSECTION detected')\n report['map_scale']=mapscale.INTERSECTION\n elif (EXACT_REGEXP.match(line)!=None):\n my_logger.debug('EXACT detected')\n report['map_scale']=mapscale.EXACT\n else:\n #\n # must be manually assigned\n report['map_scale']=mapscale.OTHER\n\n\n return report",
"def uniprot_txt_parser(uniprot_txt_lines):\n uniprot = {}\n entry_line = [i for i,l in enumerate(uniprot_txt_lines) if l[:2]=='ID']\n entry_line.append(len(uniprot_txt_lines))\n begin_end = [(begin,entry_line[i+1]) for i,begin in enumerate(entry_line[:-1])]\n for begin,end in begin_end:\n for line in uniprot_txt_lines[begin:end]:\n line = line.rstrip('\\r\\n')\n line = line.rstrip('.')\n line = line.replace(';',' ')\n words = line.split()\n if words[0] == 'AC':\n acc = words[1]\n uniprot[acc] = {}\n elif words[0] == 'DR' and words[1] =='InterPro':\n if uniprot[acc].has_key('interpro'):\n uniprot[acc]['interpro'].append((words[2],1))\n else:\n uniprot[acc]['interpro'] = [(words[2],1)]\n elif words[0] == 'DR' and words[1] == 'Pfam':\n if uniprot[acc].has_key('pfam'):\n uniprot[acc]['pfam'].append((words[2],int(words[-1])))\n else:\n uniprot[acc]['pfam'] = [(words[2],int(words[-1]))]\n elif words[0] == 'DR' and words[1] == 'SMART':\n if uniprot[acc].has_key('smart'):\n uniprot[acc]['smart'].append((words[2],words[-1]))\n else:\n uniprot[acc]['smart'] = [(words[2],words[-1])]\n elif words[0] == 'DR' and words[1] == 'SUPFAM':\n if uniprot[acc].has_key('supfam'):\n uniprot[acc]['supfam'].append((words[2],words[-1]))\n else:\n uniprot[acc]['supfam'] = [(words[2],words[-1])]\n elif words[0] == 'DR' and words[1] == 'PROSITE':\n if uniprot[acc].has_key('prosite'):\n uniprot[acc]['prosite'].append((words[2],words[-1]))\n else:\n uniprot[acc]['prosite'] = [(words[2],words[-1])]\n # elif words[0] == 'DR' and words[1] =='PDB':\n # w = words[-1].replace('/',' ')\n # w = w.replace('=',' ')\n # w = w.replace('-',' ')\n # w = w.split()\n # w = words[2:-1]+w\n\n # if uniprot[acc].has_key('pdb'):\n # uniprot[acc]['pdb'].append(w)\n # else:\n # uniprot[acc]['pdb'] = [w]\n\n return uniprot",
"def process_email(email_contents):\n\n # Load Vocabulary\n vocab_list = get_vocab_list();\n\n # Init return value\n word_indices = [];\n\n # ========================== Preprocess Email ===========================\n\n # Find the Headers ( \\n\\n and remove )\n # Uncomment the following lines if you are working with raw emails with the\n # full headers\n\n # hdrstart = strfind(email_contents, ([char(10) char(10)]));\n # email_contents = email_contents(hdrstart(1):end);\n\n # Lower case\n email_contents = email_contents.lower()\n\n # Strip all HTML\n # Looks for any expression that starts with < and ends with > and replace\n # and does not have any < or > in the tag it with a space\n email_contents = re.sub(\"<[^<>]+>\", \" \", email_contents)\n\n # Handle Numbers\n # Look for one or more characters between 0-9\n email_contents = re.sub(\"[0-9]+\", \"number\", email_contents)\n\n # Handle URLS\n # Look for strings starting with http:// or https://\n email_contents = re.sub(\"(http|https)://[^\\s]*\", \"httpaddr\", email_contents)\n\n # Handle Email Addresses\n # Look for strings with @ in the middle\n email_contents = re.sub(\"[^\\s]+@[^\\s]+\", \"emailaddr\", email_contents)\n\n # Handle $ sign\n email_contents = re.sub(\"[$]+\", \"dollar\", email_contents)\n\n # ========================== Tokenize Email ===========================\n\n # Output the email to screen as well\n print(\"\\n==== Processed Email ====\\n\");\n\n # Process file\n l = 0;\n\n # Tokenize and also get rid of any punctuation\n stemmer = PorterStemmer()\n email_contents = re.split(r'[@$/#.-:&\\*\\+=\\[\\]?!(){},\\'\\'\\\">_<;%\\s\\n\\r\\t]+', email_contents)\n for s in email_contents:\n\n # Remove any non alphanumeric characters\n s = re.sub(\"[^a-zA-Z0-9]\", \"\", s)\n\n # Stem the word \n # (the porter_stemmer sometimes has issues, so we use a try catch block)\n #try:\n s = stemmer.stem(s.strip())\n #except:\n # s = \"\"\n # continue\n\n # Skip the word if it is too short\n if len(s) < 1:\n continue\n\n # Look up the word in the dictionary and add to word_indices if\n # found\n # ====================== YOUR CODE HERE ======================\n # Instructions: Fill in this function to add the index of s to\n # word_indices if it is in the vocabulary. At this point\n # of the code, you have a stemmed word from the email in\n # the variable s. You should look up s in the\n # vocabulary list (vocabList). If a match exists, you\n # should add the index of the word to the word_indices\n # vector. Concretely, if s = 'action', then you should\n # look up the vocabulary list to find where in vocabList\n # 'action' appears. For example, if vocabList{18} =\n # 'action', then, you should add 18 to the word_indices \n # vector (e.g., word_indices = [word_indices ; 18]; ).\n # \n # Note: vocabList[idx] returns a the word with index idx in the\n # vocabulary list.\n # \n # Note: You can use s1 == s2 to compare two strings (s1 and\n # s2). It will return True only if the two strings are equivalent.\n #\n\n\n\n # =============================================================\n\n # Print to screen, ensuring that the output lines are not too long\n if (l + len(s)) > 78:\n print()\n l = 0\n print(f\"{s} \", end=\"\")\n l = l + len(s) + 1\n\n # Print footer\n print('\\n\\n=========================')\n return word_indices",
"def get_text_lines(instText):\n\n # Find out which part this is\n part = instText.part\n # Get the necessary parameters: lng, ext, dir\n sLng = part.corpus.get_lng_display()\n sDir = part.dir\n sName = instText.fileName\n sFormat = instText.get_format_display()\n # Now try to get the information\n oBack = get_crpp_text(sLng, sDir, sFormat, sName)\n # Prepare what we return\n if oBack == None or oBack['status'] == 'error':\n return None\n else:\n return oBack",
"def processEmail(email_contents):\n # % Load Vocabulary\n vocabList = getVocabList()\n\n # % Init return value\n word_indices = []\n\n # % ========================== Preprocess Email ===========================\n # % Find the Headers ( \\n\\n and remove )\n # % Uncomment the following lines if you are working with raw emails with the\n # % full headers\n # %\n # % hdrstart = strfind(email_contents, ([char(10) char(10)]));\n # % email_contents = email_contents(hdrstart(1):end);\n\n # % Lower case\n email_contents = email_contents.lower()\n\n # % Strip all HTML\n # % Looks for any expression that starts with < and ends with > and replace\n # % and does not have any < or > in the tag it with a space\n email_contents = re.sub(r'<[^<>]+>', ' ', email_contents)\n\n # % Handle Numbers\n # % Look for one or more characters between 0-9\n email_contents = re.sub(r'[0-9]+', 'number', email_contents)\n\n # % Handle URLS\n # % Look for strings starting with http:// or https://\n email_contents = re.sub(r'(http|https)://[^\\s]*', 'httpaddr', email_contents)\n\n # % Handle Email Addresses\n # % Look for strings with @ in the middle\n email_contents = re.sub(r'[^\\s]+@[^\\s]+', 'emailaddr', email_contents)\n\n # % Handle $ sign\n email_contents = re.sub(r'[$]+', 'dollar ', email_contents)\n\n # Pick words-like strings\n email_contents_list = re.findall(r'[\\w]+', email_contents)\n email_contents = ' '.join(email_contents_list)\n\n # % ========================== Tokenize Email ===========================\n #\n # % Output the email to screen as well\n print('\\n==== Processed Email ====\\n')\n\n # % Tokenize and also get rid of any punctuation\n porter_stemmer = PorterStemmer()\n words = word_tokenize(email_contents)\n email_contents_list = []\n for index, word in enumerate(words):\n stemmed_word = porter_stemmer.stem(word)\n email_contents_list.append(stemmed_word)\n try:\n index = vocabList.index(stemmed_word)\n except ValueError:\n continue\n else:\n word_indices.append(index)\n\n email = ' '.join(email_contents_list)\n print('Email contents:\\n', email)\n return word_indices",
"def _parse_line(self, line):\n fields = line.split('|', 4) # stop splitting after fourth | found\n line_info = {'raw_message': line}\n if len(fields) == 5:\n line_info.update(dict(zip(self._fieldnames, fields)))\n return line_info",
"def extract_information(preprocessed_sentences):\n parsed = list(map(lambda sentence: nlp(sentence), preprocessed_sentences))\n\n quantities = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'QUANTITY'), parsed))\n dates = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'DATE'), parsed))\n\n hurricane_name = eh.extract_frequent_regex_match(parsed, '[Hh]urricane ([A-Z][a-z]+)').most_common(1)[0][0]\n hurricane_category = eh.extract_frequent_regex_match(parsed, '[Cc]ategory ([0-9]+)').most_common(1)[0][0]\n\n tropical_storm_name = eh.extract_frequent_regex_match(parsed, '[Tt]ropical [Ss]torm ([A-Z][a-z]+)').most_common(1)[0][0]\n formation_date, middle_month = extract_storm_timeline(dates, hurricane_name)\n\n preperation_info = extract_preparation_information(parsed)\n prep_gpes = preperation_info[0].most_common(3)\n\n restore_info = extract_restoration_information(parsed)\n\n landfall_info = extract_landfall_information(parsed)\n\n wind_info = extract_wind_information(quantities)\n rain_info = extract_rain_information(quantities)\n size_info = extract_size_information(parsed)\n\n # formation_info = extract_formation_info(parsed)\n death_info = extract_death_damages_info(parsed)\n\n print(constants.HURRICANE_SENTENCE.format(hurricane_name, middle_month, hurricane_category))\n print(constants.LANDFALL_SENTENCE.format(hurricane_name, landfall_info[2], landfall_info[3], landfall_info[0], landfall_info[1]))\n print(constants.WIND_SENTENCE.format(wind_info[0], wind_info[1], wind_info[2]))\n print(constants.RAIN_SENTENCE.format(hurricane_name, rain_info[1], rain_info[0], rain_info[2]))\n print(constants.FORMATION_SENTENCE.format(formation_date, tropical_storm_name))\n print(constants.PREPARATION_SENTENCE.format(prep_gpes[0][0], prep_gpes[1][0], prep_gpes[2][0], preperation_info[1].\n most_common(1)[0][0]))\n print(constants.SIZE_SENTENCE.format(size_info[0], size_info[1]))",
"def _process_text_line(self, line, columns, format, lower_case, num_line,\n fill_missing=0, filter_case=None,\n strict_separator=False):\n if not isinstance(line, list) and not isinstance(\n line, tuple) and not isinstance(line, numpy.ndarray):\n if format != \"tsv\":\n raise Exception(\"unable to process format \" + format)\n line = line.strip(\"\\r\\n \").replace(\"\\n\", \" \")\n line = DatabaseCore2._split_expr.split(line)\n\n if filter_case is not None:\n line = [filter_case(s) for s in line]\n\n try:\n if fill_missing > 0:\n m = max(columns.keys())\n if m >= len(line):\n line = copy.copy(line)\n add = 0\n while m >= len(line) and add < fill_missing:\n a, b = columns[len(line)]\n if b is int:\n line.append(\"0\")\n elif b is float:\n line.append(\"0.0\")\n elif b is decimal.Decimal:\n line.append(\"0\")\n elif b is str:\n line.append(\"\")\n else:\n line.append(\"\")\n add += 1\n\n res = {}\n for c, v in columns.items():\n if \"AUTOFILL\" in v:\n res[v[0]] = \"NULL\"\n elif \"AUTOINCREMENT\" in v:\n continue\n else:\n if c >= len(line):\n self.LOG(\n \"(a)line number \",\n num_line,\n \"*unable to process a line columns \",\n c,\n \"#\",\n line,\n \" columns \",\n columns)\n return None\n\n val = line[c]\n if len(v) > 2 and v[2].lower() not in [\n \"primarykey\", \"autofill\"]:\n val = v[2](val)\n\n try:\n if isinstance(v[1], tuple):\n val = v[1][0](val)\n elif v[1] is datetime.datetime:\n if isinstance(val, datetime.datetime):\n pass\n elif isinstance(val, str):\n val = datetime.datetime.parse(val)\n else:\n raise TypeError(\n \"unable to convert %s into datetime\" % str(\n type(val)))\n else:\n val = v[1](val)\n except ValueError: # as e :\n self.LOG(\n \"(b)line number \",\n num_line,\n \"**unable to process a line columns \",\n c,\n \"#\",\n v[0],\n \" type \",\n v[1],\n \" value \",\n repr(\n line[c]))\n return None\n\n if isinstance(val, str):\n val = val.replace(\"'\", \"''\")\n if lower_case:\n val = val.lower()\n res[v[0]] = val\n\n return res\n except Exception:\n self.LOG(\"(c)line number\", num_line,\n \"***unable to process a line columns:\", line)\n return None",
"def process_line(line):\n\n name_comp_list = []\n givenname_comp_list = []\n surname_comp_list = []\n geocode_comp_list = []\n locality_comp_list = []\n date1_comp_list = []\n date2_comp_list = []\n\n # Split the line into the basic fields - - - - - - - - - - - - - - - - - - -\n #\n if (config.in_file_type in ['CSV','CSVQ','TAB','TABQ']):\n # Comma or tabulator separated\n try:\n line_list = config.line_parser.parse(line)\n except:\n log_message('CSV line parsing failed with inout: '+line,'err')\n\n if (len(line_list) < config.input_len):\n log_message('Input line does not contain enough fields,' +\\\n 'fill up with empty fields','warn')\n while (len(line_list) < config.input_len):\n line_list.append('')\n\n config.curr_line_list = line_list # Save current line list\n\n # Extract fields into different component lists - - - - - - - - - - - - - -\n #\n if (config.input_component['name'] != []): # Extract name fields\n for i in config.input_component['name']:\n name_comp_list.append(line_list[i])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for i in config.input_component['givenname']:\n givenname_comp_list.append(line_list[i])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for i in config.input_component['surname']:\n surname_comp_list.append(line_list[i])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for i in config.input_component['geocode']:\n geocode_comp_list.append(line_list[i])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for i in config.input_component['locality']:\n locality_comp_list.append(line_list[i])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for i in config.input_component['date1']:\n date1_comp_list.append(line_list[i])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for i in config.input_component['date2']:\n date2_comp_list.append(line_list[i])\n\n elif (config.in_file_type == 'COL'): # Column based input file - - - - - - -\n\n if (len(line) < config.input_len):\n log_message('Input line is not long enough, fill up with spaces','warn')\n line += ' '*(config.input_len-len(line))\n\n if (config.input_component['name'] != []): # Extract name fields\n for (col_start,length) in config.input_component['name']:\n name_comp_list.append(line[col_start,col_start+length])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for (col_start,length) in config.input_component['givenname']:\n givenname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for (col_start,length) in config.input_component['surname']:\n surname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for (col_start,length) in config.input_component['geocode']:\n geocode_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for (col_start,length) in config.input_component['locality']:\n locality_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for (col_start,length) in config.input_component['date1']:\n date1_comp_list.append(line[col_start,col_start+length])\n\n 
if (config.input_component['date2'] != []): # Extract date2 fields\n for (col_start,length) in config.input_component['date2']:\n date2_comp_list.append(line[col_start,col_start+length])\n\n # elif (config.in_file_type == 'SQL'): # - - - - - - - - - - - - - - - - - -\n\n ################################\n # Add later: SQL database access\n ################################\n\n msg = [' Component basic field lists:', \\\n ' Name: '+str(name_comp_list), \\\n ' Given name: '+str(givenname_comp_list), \\\n ' Surname: '+str(surname_comp_list), \\\n ' Geocode: '+str(geocode_comp_list), \\\n ' Locality: '+str(locality_comp_list), \\\n ' Date1: '+str(date1_comp_list), \\\n ' Date2: '+str(date2_comp_list)]\n log_message(msg,'v2')\n\n name_comp = ''\n givenname_comp = ''\n surname_comp = ''\n geocode_comp = ''\n locality_comp = ''\n date1_comp = ''\n date2_comp = ''\n\n # Now clean and then concatenate component lists into strings - - - - - - - -\n #\n if (name_comp_list != []): # Name component\n name_comp = name_comp_list[0] # Start with first field in list\n\n for f in name_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['name'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['name'] == 1):\n sep = check_field_spill(name_comp, f)\n\n name_comp = name_comp+sep+f # Append separator and field\n\n if (givenname_comp_list != []): # Givenname component - - - - - - - - - - -\n givenname_comp = givenname_comp_list[0] # Start with first field in list\n\n for f in givenname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['givenname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['givenname'] == 1):\n sep = check_field_spill(givenname_comp, f)\n\n givenname_comp = givenname_comp+sep+f # Append separator and field\n\n if (surname_comp_list != []): # Surname component - - - - - - - - - - - - -\n surname_comp = surname_comp_list[0] # Start with first field in list\n\n for f in surname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['surname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['surname'] == 1):\n sep = check_field_spill(surname_comp, f)\n\n surname_comp = surname_comp+sep+f # Append separator and field\n\n if (geocode_comp_list != []): # Geocode component - - - - - - - - - - - - -\n geocode_comp = geocode_comp_list[0] # Start with first field in list\n\n for f in geocode_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['geocode'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['geocode'] == 1):\n sep = check_field_spill(geocode_comp, f)\n\n geocode_comp = geocode_comp+sep+f # Append separator and field\n\n if (locality_comp_list != []): # Locality component - - - - - - - - - - - -\n locality_comp = 
locality_comp_list[0] # Start with first field in list\n\n for f in locality_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['locality'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['locality'] == 1):\n sep = check_field_spill(locality_comp, f)\n\n locality_comp = locality_comp+sep+f # Append separator and field\n\n if (date1_comp_list != []): # Date1 component - - - - - - - - - - - - - - -\n date1_comp = date1_comp_list[0] # Start with first field in list\n\n for f in date1_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date1'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date1'] == 1):\n if (date1_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date1_comp = date1_comp+sep+f # Append separator and field\n\n if (date2_comp_list != []): # Date2 component - - - - - - - - - - - - - - -\n date2_comp = date2_comp_list[0] # Start with first field in list\n\n for f in date2_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date2'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date2'] == 1):\n if (date2_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date2_comp = date2_comp+sep+f # Append separator and field\n\n # Check if name component is given or givenname and surname separately - - -\n #\n if (config.input_component['givenname'] != []) or \\\n (config.input_component['surname'] != []):\n name_comp = [givenname_comp, surname_comp]\n\n msg = [' Components:', \\\n ' Name: \"'+str(name_comp)+'\"', \\\n ' Geocode: \"'+geocode_comp+'\"', \\\n ' Locality: \"'+locality_comp+'\"', \\\n ' Date1: \"'+date1_comp+'\"', \\\n ' Date2: \"'+date2_comp+'\"']\n log_message(msg,'v1')\n\n return [name_comp, geocode_comp, locality_comp, date1_comp, date2_comp]",
"def extractLineData(sentEnPath, sentFrPath, sentRefPath, sentAnnotPath,\n enList=[], frList=[], refList=[], annotList=[]):\n # get the sentences and annotations\n with open(sentEnPath) as enFile:\n enList = enList + [s.replace(u'\\n', u'') for s in enFile.readlines()]\n with open(sentFrPath) as frFile:\n frList = frList + [s.replace(u'\\n', u'') for s in frFile.readlines()]\n with open(sentRefPath) as refFile:\n refList = refList + [s.replace(u'\\n', u'') for s in refFile.readlines()]\n with open(sentAnnotPath) as annotFile:\n sentAnnotList = annotFile.readlines()\n dic = {u'0\\n': u'0.0', u'1\\n': u'1.0', u'1.1.0\\n': u'1.1', u'0.1.0\\n': u'0.1'}\n tempList = []\n for annot in sentAnnotList:\n if annot in dic:\n tempList.append(dic[annot])\n else:\n tempList.append(annot.replace(u'\\n', u''))\n annotList = annotList + tempList\n return enList, frList, refList, annotList",
"def read_enron_emails(input_file, start_line_number,data):\n\n with open(input_file, 'r') as file:\n lines = file.readlines()\n\n count = 1\n for line in lines:\n print (count, line.strip())\n read_enron_email(line.strip(), start_line_number,data)\n count = count + 1\n\n file.close()",
"def extract_text(infile):\n # Get text from mudraw\n text = subprocess.check_output(['mudraw', '-F', 'txt', infile])\n\n # Cleanup raw text\n match = re.search(\n r'.*?Activity \\/ Remarks(?P<table1>.*?)Activities not shown on the ' +\n r'DABS Chart Side:.*?Activity \\/ Remarks(?P<table2>.*?)For detailed ' +\n r'information regarding the DABS',\n text,\n re.MULTILINE | re.DOTALL)\n if not match:\n raise ExtractionError('Could not extract text from PDF.')\n false_or_none_string = lambda x: bool(x) and x.lower() != 'none'\n data = '\\n\\n\\n'.join(match.groups())\n raw_parts = re.sub(r'\\n[ \\t]+\\n', '\\n\\n', data).split('\\n\\n\\n')\n parts = filter(false_or_none_string, map(lambda x: x.strip(), raw_parts))\n\n # Write CSV\n headers = (\n b'Firing-Nr\\nD-/R-Area\\nNOTAM-Nr',\n b'Validity UTC',\n b'Lower Limit\\nAMSL or FL',\n b'Upper Limit\\nAMSL or FL',\n b'Location',\n b'Center Point',\n b'Covering Radius',\n b'Activity / Remarks',\n )\n rows = []\n for i, part in enumerate(parts):\n # Regexes\n multiple_newlines_re = re.compile(r'\\n+')\n height_re = re.compile(r'(GND|[0-9]+m \\/ [0-9]+ft|FL[0-9]{2,3}|REF AIP)')\n center_radius_re = re.compile(r'([0-9]{6}N [0-9]{7}E)\\s+?(.*?NM)')\n\n # Separate columns (warning: hackish code ahead!)\n row = {}\n step1 = re.split(r'([0-2][0-9][0-6][0-9] - [0-2][0-9][0-6][0-9])', part)\n row['nr'] = step1[0].strip()\n timestring = '\\n'.join(step1[1:-1])\n row['validity'] = multiple_newlines_re.sub('\\n', timestring)\n step2 = filter(None, height_re.split(step1[-1].strip()))\n row['lower'] = step2[0]\n row['upper'] = step2[2]\n step3 = filter(None, center_radius_re.split(step2[-1].strip()))\n row['location'] = step3[0].strip()\n row['center'] = step3[1].strip()\n row['radius'] = step3[2].strip()\n row['activity'] = multiple_newlines_re.sub('\\n', step3[3].strip())\n\n # Add to list of rows\n rows.append((\n row['nr'].encode('utf8'),\n row['validity'].encode('utf8'),\n row['lower'].encode('utf8'),\n row['upper'].encode('utf8'),\n row['location'].encode('utf8'),\n row['center'].encode('utf8'),\n row['radius'].encode('utf8'),\n row['activity'].encode('utf8'),\n ))\n\n return tablib.Dataset(*rows, headers=headers)",
"def parseLine(self, line):\n\n # Bail out on lines with a malformed timestamp\n try:\n timestamp = time.mktime(time.strptime(line[1:25], \"%a %b %d %H:%M:%S %Y\"))\n except:\n return\n \n text = line[27:]\n \n if self.myname: \n self.attendance.mark(timestamp, self.myname)\n text = self.re_myname.sub(self.myname + ' ', text) \n \n damage = self.re_damage.search(text)\n #damage = False\n death = self.re_death.search(text)\n #death = False\n miss = self.re_miss.search(text)\n #miss = False\n #defensive = self.re_defensive.search(text)\n defensive = False\n loot = self.re_loot.search(text)\n attendance = self.re_attendance.search(text)\n if damage:\n (attacker, atktype, defender, amount, nonmelee) = damage.groups()\n if nonmelee:\n atktype = 'non-melee'\n if self.extract and (self.extract == attacker or self.extract == defender):\n self.fights.getFight(timestamp, attacker, defender).addAttack(timestamp, atktype, int(amount))\n if attacker.count(' ') == 0:\n self.attendance.mark(timestamp, attacker)\n if defender.count(' ') == 0:\n self.defender.mark(timestamp, defender)\n elif miss:\n (attacker, atktype, defender) = miss.groups()\n if self.extract and (self.extract == attacker or self.extract == defender):\n self.fights.getFight(timestamp, attacker, defender).addAttack(timestamp, atktype, 'miss')\n if attacker.count(' ') == 0:\n self.attendance.mark(timestamp, attacker)\n if defender.count(' ') == 0:\n self.defender.mark(timestamp, defender)\n elif defensive:\n (attacker, atktype, defender, defensetype) = defensive.groups()\n if self.extract and (self.extract == attacker or self.extract == defender):\n self.fights.getFight(timestamp, attacker, defender).addAttack(timestamp, atktype, defensetype)\n if attacker.count(' ') == 0:\n self.attendance.mark(timestamp, attacker)\n if defender.count(' ') == 0:\n self.defender.mark(timestamp, defender)\n elif death:\n (defender, junk, attacker) = death.groups()\n if junk.count('have slain'):\n (defender, attacker) = (attacker, defender)\n # Use PC deaths to track their attendance\n if defender.count(' ') == 0:\n self.attendance.mark(timestamp, defender)\n elif attacker.count(' ') == 0:\n self.kills.addKill(timestamp, defender)\n if self.extract and (self.extract == attacker or self.extract == defender):\n self.fights.addDeath(timestamp, attacker, defender)\n if attacker.count(' ') == 0:\n self.attendance.mark(timestamp, attacker)\n elif loot:\n (looter, item) = loot.groups()\n self.loot.addLoot(timestamp, looter, item)\n self.attendance.mark(timestamp, looter)\n elif attendance:\n attendee = attendance.group(1)\n self.attendance.mark(timestamp, attendee)",
"def process_line(self, line):\n find_result = re.findall(LINE_REGEX, line)\n line_data = {r[0]: r[1] for r in find_result}\n self.process_url(line_data.get('request_to'))\n self.process_status_code(line_data.get('response_status'))",
"def loadText(self,textFileName):\n #--Text File\n infoKey = None\n text = None\n texts = {}\n reHeader = re.compile('^#')\n reInfo = re.compile('@ +(\\d) +\"(.+?)\" +(\\d+)')\n reSingleQuote = re.compile('[\\x91\\x92]')\n reDoubleQuote = re.compile('[\\x93\\x94]')\n reEllipsis = re.compile('\\x85')\n reEolSpaces = re.compile(r' +\\r\\n')\n reExtraSpaces = re.compile(r' +')\n reIllegalChars = re.compile(r'[@#]')\n #--Read file\n textFile = file(textFileName,'rb')\n for line in textFile:\n if reHeader.match(line): continue\n maInfo = reInfo.match(line)\n if maInfo:\n infoKey = (int(maInfo.group(1)),maInfo.group(2),maInfo.group(3))\n texts[infoKey] = text = []\n else:\n text.append(line)\n textFile.close()\n #--Strip and clean texts\n updated = []\n unmatched = []\n trimmed = {}\n for infoKey in texts.keys():\n if infoKey not in self.infos:\n unmatched.append(infoKey)\n continue\n text = ''.join(texts[infoKey])\n #--Required Subs\n text = text.strip(' \\r\\n')\n text = reSingleQuote.sub('\\'',text)\n text = reDoubleQuote.sub('\"',text)\n text = reEllipsis.sub('...',text)\n text = reIllegalChars.sub('',text)\n #--Optional subs\n text = reEolSpaces.sub('\\r\\n',text)\n text = reExtraSpaces.sub(' ',text)\n #--Trim?\n if len(text) > 511:\n trimmed[infoKey] = (text[:511],text[511:])\n text = text[:511]\n info = self.infos[infoKey]\n if text != info.text:\n info.text = text\n info.setChanged()\n updated.append(infoKey)\n #--Report\n buff = cStringIO.StringIO()\n for header,infoKeys in ((_('Updated'),updated),(_('Unmatched'),unmatched)):\n if infoKeys:\n buff.write('=== %s\\n' % (header,))\n for infoKey in infoKeys:\n buff.write('* %s\\n' % (infoKey,))\n if trimmed:\n buff.write('=== %s\\n' % (_('Trimmed'),))\n for infoKey,(preTrim,postTrim) in trimmed.items():\n buff.write(`infoKey`+'\\n'+preTrim+'<<<'+postTrim+'\\n\\n')\n return buff.getvalue()",
"def extract_features(tlc):\n text = clean_text(tlc['body'])\n fields = dict()\n # add features here #\n fields['Top_comment_word_count'] = len(text.split(' '))\n fields['Top_comment_text'] = text\n\n # Extract time-based features\n def get_day_of_week(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1\n\n def get_day_of_month(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day\n\n def get_time_of_day(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour\n time_local = time.localtime(tlc['created_utc'])\n time_local = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n fields['Top_comment_day'] = get_day_of_month(time_local)\n fields['Top_comment_day_of_week'] = get_day_of_week(time_local)\n fields['Top_comment_hour'] = get_time_of_day(time_local)\n\n # Extract gender value\n gp = GenderPerformr()\n probs, _ = gp.predict(tlc['author'])\n # Rescale it from [0,1] to [-1,1]\n fields['Top_comment_author_gender_value'] = 2 * probs - 1\n\n # Extract percentage of mispellings\n check = SpellChecker(\"en_US\")\n tokenizer = get_tokenizer(\"en_US\")\n # Prevent the denominator from 0\n def weird_division(n, d):\n return n / d if d else 0\n\n def get_mispellings_percentage(text):\n mispelling_count = 0\n total_count = 0\n if text == 'nan':\n return total_count\n else:\n check.set_text(text)\n for err in check:\n mispelling_count = mispelling_count + 1\n for w in tokenizer(text):\n total_count = total_count + 1\n value = weird_division(mispelling_count, total_count)\n return value\n fields['Top_comment_mispellings'] = get_mispellings_percentage(text)\n\n # Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]\n ar = Agreementr()\n pr = Politenessr()\n sr = Supportr()\n fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5\n fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5\n fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5\n\n # Get toxicity scores\n KEY = \"yourkey.txt\" # os.getenv(\"GOOGLE_API_KEY\")\n service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)\n\n def get_results(request_id, response, exception):\n toxicity_scores.append((request_id, response))\n\n toxicity_scores = []\n count = 0\n batch = service.new_batch_http_request(callback=get_results)\n analyze_request = {\n 'comment': {'text': text},\n \"requestedAttributes\": {\n \"TOXICITY\": {},\n \"SEVERE_TOXICITY\": {},\n \"ATTACK_ON_COMMENTER\": {}\n }\n }\n batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))\n batch.execute()\n toxic_score = toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']\n attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']\n if toxic_score > 0.5:\n fields['Top_comment_untuned_toxicity'] = 1\n else:\n fields['Top_comment_untuned_toxicity'] = 0\n if toxic_score > 0.8 and attack_score > 0.5:\n fields['Top_comment_tuned_toxicity'] = 1\n else:\n fields['Top_comment_tuned_toxicity'] = 0\n # end of feature extractions #\n return fields",
"def _populate(self):\n if not hasattr(self, 'multiline'):\n start = self.start\n end = self.end\n txt = self.filetext\n self.start_line = txt.count('\\n', 0, start) + 1\n self.start_column = start - txt.rfind('\\n', 0, start) - 1\n self.end_line = txt.count('\\n', start, end) + self.start_line\n self.end_column = end - txt.rfind('\\n', 0, end) - 1\n self.multiline = self.start_line != self.end_line",
"def processEmail(email_contents):\n # Lower case\n email_contents = email_contents.lower()\n # Strip all HTML\n email_contents = re.sub('<[^<>]+>', ' ', email_contents)\n # Handle Numbers\n email_contents = re.sub('[0-9]+', 'number', email_contents)\n # Handle URLS\n email_contents = re.sub('(http|https)://[^\\s]*', 'httpaddr', email_contents)\n # Handle Email Addresses\n email_contents = re.sub('[^\\s]+@[^\\s]+', 'emailaddr', email_contents)\n # Handle $ sign\n email_contents = re.sub('[$]+', 'dollar', email_contents)\n # Remove any non alphanumeric characters\n email_contents = re.sub('[^a-zA-Z]', ' ', email_contents)\n # Tokenize ane remove single characters\n ps = PorterStemmer()\n email_contents = [ps.stem(token) for token\n in email_contents.split(\" \") if len(token) > 1]\n\n vocabList = getVocabList()\n word_indices = []\n for word in email_contents:\n ind = vocabList[vocabList.vocab == word].index\n if ind.any():\n word_indices.append(ind[0])\n print(word, '\\t', ind[0])\n\n return email_contents, word_indices",
"def _parse_records(self, customization=None):\n def _add_parsed_record(record, records):\n \"\"\"\n Atomic function to parse a record\n and append the result in records\n \"\"\"\n if record != \"\":\n logger.debug('The record is not empty. Let\\'s parse it.')\n parsed = self._parse_record(record, customization=customization)\n if parsed:\n logger.debug('Store the result of the parsed record')\n records.append(parsed)\n else:\n logger.debug('Nothing returned from the parsed record!')\n else:\n logger.debug('The record is empty')\n\n records = []\n record = \"\"\n # read each line, bundle them up until they form an object, then send for parsing\n for linenumber, line in enumerate(self.bibtex_file_obj):\n logger.debug('Inspect line %s', linenumber)\n if line.strip().startswith('@'):\n # Remove leading whitespaces\n line = line.lstrip()\n logger.debug('Line starts with @')\n # Parse previous record\n _add_parsed_record(record, records)\n # Start new record\n logger.debug('The record is set to empty')\n record = \"\"\n # Keep adding lines to the record\n record += line\n\n # catch any remaining record and send it for parsing\n _add_parsed_record(record, records)\n logger.debug('Set the list of entries')\n self.bib_database.entries = records",
"def process_message(mail):\n\tmessage = email.message_from_string(mail)\t#parsing metadata\n\tdatetuple = email.utils.parsedate_tz(message.__getitem__('Date'))\n\tfiledirectory = basedirectory\n\tif not datetuple:\n\t\tdatetuple = email.utils.parsedate_tz(message.__getitem__('Delivery-date'))\n\tif directory_for_year: \n\t\tfiledirectory = os.path.join(filedirectory, str(datetuple[0]))\n\tif directory_for_month:\n\t\tfiledirectory = os.path.join(filedirectory, str(datetuple[1])) \n\tdateposix = email.utils.mktime_tz(datetuple)\n\tlocaldate = datetime.datetime.fromtimestamp(dateposix)\n\tdatestring = localdate.strftime('%Y%m%d-%H%M') # +'-'+'-'.join(time.tzname) #\n\tsender = email.utils.parseaddr(message['To'])[1].replace('@','_').replace('.','-')\n\tsubject = email.header.decode_header(message['Subject'])[0][0]\n\tfilename = datestring + '_' + sender[:60] + '_' + subject[:60]\n\n\t# parsing mail content\n\tmailstring = ''\n\tfor headername, headervalue in message.items():\n\t\tmailstring += headername + ': ' + headervalue + '\\r\\n'\t# add \\r\\n or\n\tif message.get_content_maintype() == 'text':\n\t\tmailstring += message.get_payload(decode=True)\n\n\t# handle multipart: \n\telif message.get_content_maintype() == 'multipart':\n\t\tpartcounter = 0\n\t\tfor part in message.walk():\n\t\t\tif part.get_content_maintype() == 'text':\t# also: text/html\n\t\t\t\tfor header, value in part.items():\n\t\t\t\t\tmailstring += header + ': ' + value + '\\r\\n'\n\t\t\t\t\tmailstring += '\\r\\n' + part.get_payload(decode=True) + '\\r\\n'\n\t\t\t# skip multipart containers\n\t\t\telif part.get_content_maintype() != 'multipart':\n\t\t\t\tpartcounter += 1\n\t\t\t\ttry:\n\t\t\t\t\tattachmentname = email.header.decode_header(part.get_filename())[0][0]\n\t\t\t\texcept:\n\t\t\t\t\tattachmentname = \"\"\n\t\t\t\t\tprint(\"Error when parsing filename.\")\n\t\t\t\tif not attachmentname:\n\t\t\t\t\text = mimetypes.guess_extension(part.get_content_type())\n\t\t\t\t\tif not ext:\n\t\t\t\t\t\text = '.bin'\t# use generic if unknown extension\n\t\t\t\t\tattachmentname = 'attachment' + str(partcounter) + ext\n\t\t\t\tattfilename = filename + '_' + attachmentname\n\t\t\t\twrite_to_file(filedirectory, attfilename, part.get_payload(decode=True))\n\twrite_to_file(filedirectory, filename+'.txt', mailstring)",
"def _process_incoming_mail(raw_message, recipients):\n recipients = [x[1] for x in email.utils.getaddresses([recipients])]\n\n incoming_msg = mail.InboundEmailMessage(raw_message)\n\n if 'X-Google-Appengine-App-Id' in incoming_msg.original:\n raise InvalidIncomingEmailError('Mail sent by App Engine')\n\n # Use the subject to find the issue number.\n # Originally the tag was (issueNNN).\n # Then we changed it to be (issue NNN by WHO).\n # We want to match either of these, and we need to deal with\n # the fact that some mail readers will fold the long subject,\n # turning a single space into \"\\r\\n \".\n # We use \"issue\\s*\" to handle all these forms,\n # and we omit the closing ) to accept both the original and the \"by WHO\" form.\n subject = incoming_msg.subject or ''\n match = re.search(r'\\(issue\\s*(?P<id>\\d+)', subject)\n if match is None:\n raise InvalidIncomingEmailError('No issue id found: %s', subject)\n issue_id = int(match.groupdict()['id'])\n issue = models.Issue.get_by_id(issue_id)\n if issue is None:\n raise InvalidIncomingEmailError('Unknown issue ID: %d' % issue_id)\n sender = email.utils.parseaddr(incoming_msg.sender)[1]\n\n body = None\n for _, payload in incoming_msg.bodies('text/plain'):\n # FIXME(andi): Remove this when issue 2383 is fixed.\n # 8bit encoding results in UnknownEncodingError, see\n # http://code.google.com/p/googleappengine/issues/detail?id=2383\n # As a workaround we try to decode the payload ourselves.\n if payload.encoding == '8bit' and payload.charset:\n body = payload.payload.decode(payload.charset)\n # If neither encoding not charset is set, but payload contains\n # non-ASCII chars we can't use payload.decode() because it returns\n # payload.payload unmodified. The later type cast to db.Text fails\n # with a UnicodeDecodeError then.\n elif payload.encoding is None and payload.charset is None:\n # assume utf-8 but set replace flag to go for sure.\n body = payload.payload.decode('utf-8', 'replace')\n else:\n body = payload.decode()\n break\n if body is None or not body.strip():\n raise InvalidIncomingEmailError('Ignoring empty message.')\n elif len(body) > django_settings.RIETVELD_INCOMING_MAIL_MAX_SIZE:\n # see issue325, truncate huge bodies\n trunc_msg = '... (message truncated)'\n end = django_settings.RIETVELD_INCOMING_MAIL_MAX_SIZE - len(trunc_msg)\n body = body[:end]\n body += trunc_msg\n\n # If the subject is long, this might come wrapped into more than one line.\n subject = ' '.join([x.strip() for x in subject.splitlines()])\n msg = models.Message(issue_key=issue.key, parent=issue.key,\n subject=subject,\n sender=sender,\n recipients=[x for x in recipients],\n date=datetime.datetime.now(),\n text=body,\n draft=False)\n\n # Add sender to reviewers if needed.\n all_emails = [str(x).lower()\n for x in ([issue.owner.email()] +\n issue.reviewers +\n issue.cc +\n issue.collaborator_emails())]\n if sender.lower() not in all_emails:\n query = models.Account.query(models.Account.lower_email == sender.lower())\n account = query.get()\n if account is not None:\n issue.reviewers.append(account.email) # e.g. account.email is CamelCase\n else:\n issue.reviewers.append(db.Email(sender))\n\n issue.calculate_updates_for(msg)\n issue.put()\n msg.put()",
"def transform(self, email_path):\n mail = open(email_path, 'r')\n content = mail.read(self.max_read_len)\n i = 0\n while not(content[i] == '\\n' and content[i + 1] == '\\n') and i < len(content) - self.ngram:\n i += 1\n header = content[:i]\n # TODO find a smarter way deal with the header-body problem\n body = content[i + 2:]\n if len(body) + len(header) > self.max_read_len:\n body = body[:max(1000, self.max_read_len - len(header))]\n header_set = self.tokenize(header)\n body_set = self.tokenize(body)\n mail.close()\n return (header_set, body_set)",
"def process(self, processors) -> MultiLineString:"
] | [
"0.60554934",
"0.58570105",
"0.5800822",
"0.57903785",
"0.5709253",
"0.5683452",
"0.56657594",
"0.5564154",
"0.54445773",
"0.5323824",
"0.52994883",
"0.5294524",
"0.5293484",
"0.5284312",
"0.52493984",
"0.5248615",
"0.5206312",
"0.51865774",
"0.5184688",
"0.5175195",
"0.5164879",
"0.5164168",
"0.5132815",
"0.51286614",
"0.51051915",
"0.51050574",
"0.50853634",
"0.5054095",
"0.50410324",
"0.50316143"
] | 0.7433099 | 0 |
Test the clear method works for posixbased systems | def test_clear_posix(self):
with mock.patch("hangman.cli.screen.os.system") as mock_system:
hangman.cli.screen.Screen.clear()
mock_system.assert_called_with("clear") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear():\n\n # windows \n if os.name == \"nt\": \n _ = os.system(\"cls\") \n # mac and linux\n else: \n _ = os.system(\"clear\")",
"def clear(): \n if os.name == \"nt\":\n os.system(\"cls\")\n else:\n os.system(\"clear\")",
"def clear():\r\n if name == 'nt':\r\n _ = system('cls')\r\n else:\r\n _ = system('clear')",
"def clear():\n if os.name == 'nt': \n os.system('cls') \n else: \n os.system('clear')",
"def clear():\n if platform.system() == \"Windows\":\n os.system('cls')\n elif platform.system() == \"Linux\":\n os.system('clear')",
"def clear():\n\n os.system(\"clear\")",
"def clear():\n if \"Windows\" in system():\n call(\"cls\")\n else:\n call(\"clear\")",
"def reset():\n if os.name == \"posix\": #In linux\n os.system(\"clear\")\n elif os.name == (\"ce\", \"nt\", \"dos\"): #In windows\n os.system(\"cls\")",
"def clear() -> None:\n\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clear() -> None:\n\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clear():\r\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clear():\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clear():\n os.system('cls' if os.name == 'nt' else 'clear')",
"def clean():\n if system() == 'Windows':\n os.system('cls')\n else:\n os.system('clear')",
"def do_clear(self, line):\n\t if os.name == 'nt':\n\t os.system('cls')\n\t else:\n\t os.system('clear')",
"def clearscreen():\n if os.name == 'nt':\n os.system('cls')\n elif os.name == 'posix':\n os.system('clear')\n else:\n print \"Untested OS. Please tell the developer you're on: %s\" % os.name \n sys.exit(0)",
"def do_clear(self, arg):\r\n if platform.system == \"Windows\":\r\n os.system(\"cls\")\r\n else:\r\n os.system(\"clear\")",
"def clear():",
"def test_clear_windows(self):\n with mock.patch(\"hangman.cli.screen.os.system\") as mock_system:\n hangman.cli.screen.Screen.clear()\n mock_system.assert_called_with(\"cls\")",
"def clear_screen():\n if name == \"nt\":\n system('cls')\n else:\n system('clear')",
"def clear():\n try:\n try:\n # For Macs and Linux\n os.system('clear');\n except:\n # For Windows REPORTED BUG: Sometimes does not work on 64 bit Windows\n os.system('cls');\n except:\n # If nothing else works, a hacky, non optimal solution\n for i in range(50): print(\"\")",
"def clear():\n sub.call('cls', shell=True)",
"def clear_screen():\n if os.name == 'nt':\n os.system(\"cls\")\n else:\n os.system(\"clear\")",
"def clear_screen():\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')",
"def _clear_screen():\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')",
"def clearTerminal():\r\n os.system('cls' if os.name == 'nt' else 'clear')",
"def do_clear(self, args):\n if (len(args.split()) > 0):\n self.__bad_arguments(\"clear\")\n else:\n os.system('clear')",
"def screen_clear():\n from subprocess import call\n import os\n call('clear' if os.name == 'posix' else 'cls')",
"def test_clear(self):\n self.assertTrue(self.ec.clear())",
"def clear_screen():\n\n # Clear command as function of OS\n command = \"cls\" if system_name().lower()==\"windows\" else \"clear\"\n\n # Action\n system_call(command)"
] | [
"0.7761506",
"0.7746805",
"0.7741762",
"0.772958",
"0.7596242",
"0.74570656",
"0.7336103",
"0.7284548",
"0.72207433",
"0.72207433",
"0.71165365",
"0.7097402",
"0.7097402",
"0.70822984",
"0.6909185",
"0.68754077",
"0.6858489",
"0.6781232",
"0.6764691",
"0.6755633",
"0.67161864",
"0.6657783",
"0.6607532",
"0.6605387",
"0.6574682",
"0.6562531",
"0.65448666",
"0.6538169",
"0.64280653",
"0.6423562"
] | 0.79437184 | 0 |
Test the goodbye method | def test_goodbye(self):
with mock.patch("builtins.print") as mock_print:
hangman.cli.screen.Screen.goodbye()
output = ",".join([str(x) for x in mock_print.call_args_list])
self.assertTrue("Goodbye" in output) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def goodbye(self, args):\n\t\tself.write_line(\"GOODBYE\")\n\t\tself.close();",
"async def testgoodbye(self, ctx, *, member = None):\n\n # Check if we're suppressing @here and @everyone mentions\n if self.settings.getServerStat(ctx.message.guild, \"SuppressMentions\"):\n suppress = True\n else:\n suppress = False\n\n isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator\n if not isAdmin:\n checkAdmin = self.settings.getServerStat(ctx.message.guild, \"AdminArray\")\n for role in ctx.message.author.roles:\n for aRole in checkAdmin:\n # Get the role that corresponds to the id\n if str(aRole['ID']) == str(role.id):\n isAdmin = True\n\n # Only allow admins to change server stats\n if not isAdmin:\n await ctx.channel.send('You do not have sufficient privileges to access this command.')\n return\n\n if member == None:\n member = ctx.message.author\n if type(member) is str:\n memberName = member\n member = DisplayName.memberForName(memberName, ctx.message.guild)\n if not member:\n msg = 'I couldn\\'t find *{}*...'.format(memberName)\n # Check for suppress\n if suppress:\n msg = Nullify.clean(msg)\n await ctx.channel.send(msg)\n return\n # Here we have found a member, and stuff.\n # Let's make sure we have a message\n message = self.settings.getServerStat(ctx.message.guild, \"Goodbye\")\n if message == None:\n await ctx.channel.send('Goodbye message not setup. You can do so with the `{}setgoodbye [message]` command.'.format(ctx.prefix))\n return\n await self._goodbye(member, ctx.message.guild, ctx.message.channel)\n\n # Print the goodbye channel\n welcomeChannel = self.settings.getServerStat(ctx.message.guild, \"WelcomeChannel\")\n if welcomeChannel:\n for channel in ctx.message.guild.channels:\n if str(channel.id) == str(welcomeChannel):\n welcomeChannel = channel\n break\n if welcomeChannel:\n msg = 'The current goodbye channel is **{}**.'.format(welcomeChannel.mention)\n else:\n if self._getDefault(ctx.guild):\n msg = 'The current goodbye channel is the default channel (**{}**).'.format(self._getDefault(ctx.guild).mention)\n else:\n msg = 'There is *no channel* set for goodbye messages.'\n await ctx.channel.send(msg)",
"def test_quit_game(run):\n out, _ = run(dork.cli.quit_game)\n assert \"Thank you\" in out",
"def test_teardown(self):\n with pytest.raises(NotImplementedError):\n self.behaviour.teardown()",
"def teardown(self, exception):",
"def _bye(self):\n self.get(\"BYE\",'')\n self.send()",
"def teardown_function(self):\r\n raise AppModule.Unimplemented()",
"def test_teardown(self):\n with pytest.raises(NotImplementedError):\n self.handler.teardown()",
"def teardown(self) -> None:",
"def teardown(self) -> None:",
"def teardown(self) -> None:",
"def teardown(self):",
"def teardown(self):",
"def teardown(self):",
"def test_terminate_run(self):\n pass",
"def __exit__(self, *args):\n if self.teardown:\n super().__exit__(*args)",
"def teardown_method(self):",
"def test_teardown(self):\n assert self.http_handler.teardown() is None\n self.assert_quantity_in_outbox(0)",
"def test_teardown(self):\n assert self.search_behaviour.teardown() is None\n self.assert_quantity_in_outbox(0)",
"def die(self):\n pass",
"def test_uninstall(self):\n pass",
"def quit(self, reason=\"\", *args, **kwargs):\n pass",
"def stopTest(self, test):",
"def test_do_quit(self):\n for string in self.random_strings:\n self.assertTrue(self.CommandParser.do_quit(string))",
"def teardown(self):\n pass",
"def teardown(self):\n pass",
"def teardown(self):\n pass",
"async def goodbyemessage(self, ctx):\n await util.command_group_help(ctx)",
"def teardown(self,**kwargs):\n pass",
"def do_exit(self,*args):\r\n return True"
] | [
"0.7509933",
"0.6987562",
"0.6891158",
"0.6829135",
"0.68261707",
"0.68175083",
"0.6816748",
"0.6797453",
"0.6768984",
"0.6768984",
"0.6768984",
"0.67623085",
"0.67623085",
"0.67623085",
"0.67440975",
"0.66107154",
"0.658389",
"0.6566832",
"0.65569544",
"0.65374553",
"0.6533507",
"0.6510029",
"0.6507859",
"0.6487468",
"0.64665747",
"0.64665747",
"0.64665747",
"0.6445463",
"0.644296",
"0.64395785"
] | 0.7486902 | 1 |
Returns the total number of hives in this apiary. | def hives_count(self) -> int:
return self.hives.count() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count(self) -> float:\n return pulumi.get(self, \"count\")",
"def count(self) -> int:\n return pulumi.get(self, \"count\")",
"def total_number_of_animals(self):\n animals = self.animal()\n print 'Total number of animals on island: {:4}'.format(\n animals[\"Herbivores\"] + animals[\"Carnivores\"])",
"def totalCount(self):\n return sum(self.values())",
"def totalCount(self):\n return sum(self.values())",
"def totalCount(self):\n return sum(self.values())",
"def getHP(self):\n return len(self.deck)",
"def count(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'count')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def total_count(self) -> int:\n return self.__total_count",
"def total(self):\n return self._evaluate()['hits']['total']",
"def tally(self):\n return self.count",
"def count(self):\n return self.get_count()",
"def get_total_count(self):\n return self.total_count",
"def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")",
"def get_inventory_count(self):\n resp = self.app.get('/inventories')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)",
"def GetCount(self):\n return self._server.get_count()",
"def total_cards(self):\n amount = 0\n for palo in self._cards:\n amount = amount + len(self._cards[palo])\n\n return amount",
"def num_herbs(self):\n return self._num_herbs",
"def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total",
"def Count(self):\r\n\t\treturn self._get_attribute('count')",
"def Count(self):\r\n\t\treturn self._get_attribute('count')",
"def get_count(self):\n\n\t\treturn self.__count",
"def total(self) -> int:\n return self._total",
"def num_animals(self):\n return self._num_herbs + self._num_carns",
"def herb_count(self):\n return len(self.herbivores)",
"def get_total_instruments(self):\n\n total = 0\n for exchange in self.exchanges:\n total += len(exchange.symbols)\n return total",
"def get_count(self):\r\n return self.count",
"def get_total_expenses(self):\n return sum(self.expenses.values())",
"def get_TotalCount(self):\n return self._output.get('TotalCount', None)",
"def Count(self):\n return self._get_attribute('count')"
] | [
"0.6977683",
"0.6883911",
"0.6836276",
"0.6753868",
"0.6753868",
"0.6753868",
"0.6739675",
"0.67286235",
"0.67053115",
"0.66854024",
"0.6670815",
"0.66571957",
"0.6634862",
"0.6606549",
"0.65562564",
"0.6529486",
"0.6525879",
"0.65013546",
"0.64641476",
"0.64448",
"0.64448",
"0.6438567",
"0.64284086",
"0.642345",
"0.6422671",
"0.64137036",
"0.63925606",
"0.6389981",
"0.6388301",
"0.63857096"
] | 0.8205529 | 0 |
Convenience method which overrides the call method to call the getExpansion function | def __call__(self, data):
return self.getExpansion(data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getExpansion(self, data):\n pass",
"def expansion_method(self, expansion_method):\n\n self._expansion_method = expansion_method",
"def expand(self) -> List[TOKEN]:\n return [self.function, *self.args]",
"def get_expansion(block, expansion=None):\n if isinstance(expansion, int):\n assert expansion > 0\n elif expansion is None:\n if hasattr(block, 'expansion'):\n expansion = block.expansion\n elif issubclass(block, ViPNAS_Bottleneck):\n expansion = 1\n else:\n raise TypeError(f'expansion is not specified for {block.__name__}')\n else:\n raise TypeError('expansion must be an integer or None')\n return expansion",
"def call(self):\n self.call() # Call a function",
"def __call__(self):\n return self.fn()",
"def __call__(self, *args, **kwargs):\n return self.call(*args, **kwargs)",
"def __call__():",
"def __call__():",
"def __call__():",
"def __call__():",
"def __call__():",
"def run_whatis(self, expanded, unexpanded) :\n\t\treturn self.run_man(expanded, unexpanded)",
"def call(self):",
"def run_call(self, expanded, unexpanded) : \n\t\tif not expanded :\n\t\t\treturn self.errormessage(\"Needs an object id to call\")\n\n\t\t# Michel@DC: you should factor the object out of this eval and\n\t\t# validate it with\n\t\t# SecurityManager.checkPermission('View', object).\n\t\t# Also, 'eval' without an namespace qualifying 'in'\n\t\t# clause can be bad! Try and do this without eval.\n\n\t\t# Jerome: Don't know how without eval !\n\t\t# new code looks very ugly and accessing to object's\n\t\t# properties doesn't work anymore, unfortunately.\n\n\t\tobjectstr = string.join(unexpanded, ' ')\n\t\tpos = string.find(objectstr, '(')\n\t\tif pos == -1 :\n\t\t\t# called without arguments\n\t\t\tobjpath = objectstr\n\t\t\tobjargs = \"\"\n\t\telse :\n\t\t\t# called with arguments, skip them\n\t\t\t# because we only want the object name\n\t\t\tobjpath = objectstr[:pos]\n\t\t\tobjargs = objectstr[pos:]\n\n\t\tobjpath = string.replace(objpath, '.', '/')\n\t\tobject = self.toObject(self.__context, objpath)\n\t\tif object is None :\n\t\t\t# maybe should do something to re-allow properties to be used\n\t\t\treturn self.errormessage(\"Object %s not found\" % objectstr)\n\t\telse :\n\t\t\tif not self.HasPerms(object, 'View') :\n\t\t\t\treturn -1\n\t\t\telse :\n\t\t\t\t_ = context = self.__context\n\t\t\t\tcallresult = str(eval(\"object%s\" % objargs))\n\t\t\t\tself.printf(\"%s\" % callresult)\n\t\t\t\tself.htmlmessage(callresult, safe=1)",
"def get_expansion(block, expansion=None):\n if isinstance(expansion, int):\n assert expansion > 0\n elif expansion is None:\n if hasattr(block, 'expansion'):\n expansion = block.expansion\n elif issubclass(block, BasicBlock):\n expansion = 1\n elif issubclass(block, Bottleneck):\n expansion = 4\n else:\n raise TypeError(f'expansion is not specified for {block.__name__}')\n else:\n raise TypeError('expansion must be an integer or None')\n return expansion",
"def test_get_systems_expanded(self):\n pass",
"def call(self) -> global___Snippet.ClientCall:",
"def call(self) -> global___Snippet.ClientCall:",
"def __call__(self):\n context = Context()\n return self.recipe.execute(context, self.cmd, self.cmd_args)",
"def expand(self, element:Element, context:ExpansionContext):\n\n raise NotImplementedError()",
"def __call__(self, *args, **kw):\n return self.transform(term.__call__(self, *args, **kw))",
"def test_call_wrapped_function(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {'0': 'R27DLI_4812',\r\n '1': 'U1PLI_7889',\r\n '2': 'W3Cecum_4858',\r\n '3': 'R27DLI_3243',\r\n }\r\n app = GenericRepSetPicker(params={'Algorithm': 'most_abundant',\r\n 'ChoiceF': make_most_abundant, 'ChoiceFRequiresSeqs': True})\r\n obs = app(self.tmp_seq_filepath, self.tmp_otu_filepath)\r\n self.assertEqual(obs, exp)",
"def run_help(self, expanded, unexpanded) :\n\t\treturn self.run_man(expanded, unexpanded)",
"def __call__(self, *arg, **kwargs):\n return self._fun(*arg, **kwargs)",
"def call(self, *args, **kwargs):",
"def get_expansion(self, prec, padic_num):\n padic_expansion = list(padic_num.expansion())\n if isinstance(padic_expansion[0], list):\n return padic_expansion\n else:\n # Eistenstein extension case.\n padic_list = []\n for i in range(0, len(padic_expansion), 2):\n term = [padic_expansion[i]]\n padic_list.append(term)\n\n # Fill the rest of the list to the sufficient precision.\n for i in range(prec - len(padic_list)):\n padic_list.append([]) \n return padic_list",
"def __call__(self, *args, **kwargs):\n return self._func(*args, **kwargs)",
"def call(self, **kwargs):\n return getattr(self.resource, self.function)(**kwargs)",
"def display(self):\n from sage.tensor.modules.format_utilities import FormattedExpansion\n from sage.misc.latex import latex\n resu_txt = str(self.parent()._chart[:]) + ' |--> ' + \\\n str(ExpressionNice(self._express))\n resu_latex = latex(self.parent()._chart[:]) + r' \\mapsto' + \\\n latex(ExpressionNice(self._express))\n return FormattedExpansion(resu_txt, resu_latex)"
] | [
"0.74891585",
"0.5920144",
"0.54512185",
"0.5370277",
"0.53289384",
"0.5261651",
"0.5257707",
"0.5253625",
"0.5253625",
"0.5253625",
"0.5253625",
"0.5253625",
"0.525186",
"0.5245593",
"0.51983786",
"0.5187937",
"0.51806074",
"0.51613045",
"0.51613045",
"0.5146855",
"0.51071787",
"0.50719315",
"0.5056144",
"0.50152665",
"0.50062287",
"0.50025994",
"0.49969396",
"0.49856463",
"0.4985423",
"0.49798384"
] | 0.775161 | 0 |
Imports a database from the tmp directory. Use very carefully! (or just to remind yourself how to import mysql data) Modify this code directly if needed, as it hardwires the username, db name and filename. | def mysql_import():
# first make another copy of the db
run("mysqldump -u database_user database_name -p > ~/tmp/exported_db_temp.sql")
# then import from the backup
run("mysql -u database_user -p -D database_name < ~/tmp/exported_db.sql") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_db(import_file):\n import_data(import_file)",
"def test_load_database_from_path(tmp_path):\n path = tmp_path / \"test.db\"\n database = load_database(path_or_database=path, fast_logging=False)\n assert isinstance(database, DataBase)\n assert database.path is not None\n assert database.fast_logging is False",
"def importToSQLITE(self, db_file, sqlite_db_name):\n\n command = \"{} {} {} {}\".format('cat', db_file, '| sqlite3', sqlite_db_name)\n call(command, shell = True)",
"def initdb():\n\tc, conn = connect()\n\tsql = []\n\twith open('data\\\\database.sql') as f:\n\t\tfor line in f:\n\t\t\tsql.append(line.strip())\n\t\n\tfor query in sql:\n\t\tc.execute(query)\n\tconn.commit()",
"def load_db(path_to_db):\n db_run = db(path_to_db) # Instantiates the DB by reading the file\n db_run.import_config_db() # Imports configuration DB\n db_run.conn.row_factory = sqlite3.Row # Better select results\n return(db_run)",
"def import_file(filepath, db):\n # Logging\n log_main = logging.getLogger(__name__)\n log_import = log_main.getChild('import_files')\n log_import = log_import.getChild(filepath.split('/')[-1])\n log_import.info('started')\n start = time()\n\n # Variables used in data processing\n memory_buff = StringIO()\n curr = None\n cols = ['tweetID', 'date', 'message', 'username', 'userID', 'language',\n 'longitude', 'latitude', 'retweet']\n sql = \"\"\"COPY \"raw_tweets\" (\"tweetID\", \"date\", \"message\", \"username\", \"userID\", \"language\", \"longitude\", \"latitude\", \"retweet\") \n FROM STDIN \n WITH (FORMAT CSV, HEADER TRUE, DELIMITER '\\t');\n \"\"\"\n \n # Try reading the file\n try:\n df = pd.read_csv(filepath, \n usecols=cols, engine='c', \n memory_map=True, low_memory=False,\n dtype={'userID': np.int64, 'tweetID': np.int64})\n except Exception as e:\n log_import.warn('error on read_csv')\n memory_buff.close()\n print (e)\n return\n\n # Attempt to open up a connection to database.\n try:\n connn = db.connect()\n conn = db.raw_connection()\n curr = conn.cursor()\n except (Exception) as e:\n log_import.warn('error on server connection')\n memory_buff.close()\n if curr is not None:\n curr.close()\n print (e)\n return\n\n # Try copying the files to table.\n try:\n # Save to our buffer\n df[cols].to_csv(memory_buff, sep='\\t',\n header=True, index=False, encoding='utf-8')\n\n # Point buffer to start of memory block\n memory_buff.seek(0)\n\n # Copy records using native Postgres COPY command (FAST)\n curr.copy_expert(sql, memory_buff)\n\n # Save transaction and commit to DB\n conn.commit()\n except (Exception) as e:\n log_import.warn('error while copying to database')\n memory_buff.close()\n if curr is not None:\n curr.close()\n print (e)\n return\n finally:\n memory_buff.close()\n if curr is not None:\n curr.close()\n log_import.info('finished ({:.2f})'.format(time() - start))\n return",
"def copy_db():\n with cd(\"/tmp\"), lcd(\"/tmp\"):\n sudo(\"pg_dump gsi > /tmp/latest.sql\", user=\"postgres\")\n run(\"tar zcvf latest.sql.tgz latest.sql\")\n get(\"/tmp/latest.sql.tgz\", \"latest.sql.tgz\")\n sudo(\"rm /tmp/latest.sql.tgz /tmp/latest.sql\")",
"def createDataBase(mysql,dbase):\n\tsql = 'CREATE DATABASE IF NOT EXISTS '+ dbase + ';'\n\tmysql.query(sql)\n\tmysql.select_db(dbase)\n\tsql = 'DROP TABLE IF EXISTS names;'\n\tmysql.query(sql)\n\tsql = 'DROP TABLE IF EXISTS files;'\n\tmysql.query(sql)\n\tsql = 'DROP TABLE IF EXISTS linesinfile;'\n\tmysql.query(sql)\n\tsql = 'DROP TABLE IF EXISTS allfiles;'\n\tmysql.query(sql)\n\tsql = 'DROP TABLE IF EXISTS allnames;'\n\tmysql.query(sql)\n\tsql = 'CREATE TABLE names (id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY, \\\n\tname TINYTEXT NOT NULL);'\n\tmysql.query(sql)\n\tsql = 'CREATE TABLE files (id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY, \\\n\t\tname BIGINT NOT NULL, \\\n\t\tfilename TEXT NOT NULL \\\n\t\tREFERENCES names(id));'\n\tmysql.query(sql)\n\tsql = 'CREATE TABLE linesinfile (id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY, \\\n\t\tfilename BIGINT NOT NULL, \\\n\t\tnumber TEXT NOT NULL, \\\n\t\tref TINYTEXT NOT NULL \\\n\t\tREFERENCES files(id));'\n\tmysql.query(sql) \n\tsql = 'CREATE TABLE allfiles (id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY, \\\n\tfile TEXT NOT NULL);'\n\tmysql.query(sql)\n\tsql = 'CREATE TABLE allnames (id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY, \\\n\t\tfile BIGINT NOT NULL, \\\n\t\tname TEXT NOT NULL \\\n\t\tREFERENCES allfiles(id));'\n\tmysql.query(sql)",
"def prepare_db():\n conn = sqlite.connect(\"temp.db\")\n sql = conn.cursor()\n sql.execute(\"SELECT sql FROM sqlite_master WHERE name='points'\")\n rows = sql.fetchall()\n if len(rows) == 0:\n print \"Database does not exist. Creating Database...\"\n sql.execute('''CREATE TABLE points\n (date datetime, humidity real, temp_c real, temp_f real, index_c real, index_f)''')\n print \"Database created\"\n conn.close()",
"def load_tables(query_root, data_dir, host, port, db_name, user, password):\n try:\n conn = PGDB(host, port, db_name, user, password)\n try:\n for table in TABLES:\n filepath = os.path.join(data_dir, LOAD_DIR, table.lower() + \".tbl.csv\")\n conn.copyFrom(filepath, separator=\"|\", table=table)\n conn.commit()\n except Exception as e:\n print(\"unable to run load tables. %s\" %e)\n return 1\n conn.close()\n return 0\n except Exception as e:\n print(\"unable to connect to the database. %s\" % e)\n return 1",
"def load_testdb(c, dbname=\"test_template\", fpath=\"tests/test_db.sql\"):\n default_env = {\n \"PATH\": os.environ[\"PATH\"],\n \"PYTHONPATH\": os.path.abspath(os.path.dirname(__file__)),\n \"LANG\": \"en_US.UTF-8\",\n \"POSTGRES_DB\": dbname,\n \"POSTGRES_HOST\": \"localhost\",\n \"POSTGRES_USER\": \"postgres\",\n \"POSTGRES_PORT\": \"5432\",\n }\n\n env = os.environ\n env.update(default_env)\n\n psql_command = (\n f'psql -h {default_env[\"POSTGRES_HOST\"]} '\n f'-p {default_env[\"POSTGRES_PORT\"]} '\n f'-U {default_env[\"POSTGRES_USER\"]}'\n )\n\n c.run(f'{psql_command} postgres -c \"drop database if exists {dbname}\";', env=env)\n c.run(f'{psql_command} postgres -c \"create database {dbname}\";', env=env)\n c.run(f\"{psql_command} {dbname} < {fpath}\", env=env)\n # update test db to the latest migrations\n c.run(f\"alembic -c ./alembic.ini upgrade head\", env=env)",
"def connect_db_and_load_data(cls):\n db.connect()\n db.create_tables([Product], safe=True)\n load_data(transform_data('./inventory.csv'))",
"def db():\n\n db_obj = dump_db.DumpDB()\n db_obj.load_from_csv(CONF.BACKUP_DB_PATH)\n return db_obj",
"def connect_dataBase(db_dir, create_cmd):\n just_created = False #flag of is the db is already exist\n \n if not os.path.isfile(db_dir):\n #create the db file in the directory\n with open(db_dir , 'w') as f:\n just_created = True\n #print 'database handler created -- ' , db_dir \n\n try:\n conn = lite.connect(db_dir)\n except lite.Error, e:\n #print \"Error %s:\" % db_dir\n sys.exit(1)\n finally:\n if just_created:\n #create the table \n create_dataBase(conn, create_cmd)\n return True",
"def tempdb():\n fd, minitwit.app.config['DATABASE'] = tempfile.mkstemp()\n minitwit.init_db()\n try:\n yield\n finally:\n os.close(fd)\n os.unlink(minitwit.app.config['DATABASE'])",
"def _create_local_database(db_file_path):\n conn = sql.connect(db_file_path)\n cur = conn.cursor()\n\n table = str('CREATE TABLE app_config ('\n 'ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,'\n 'Name TEXT UNIQUE NOT NULL,'\n 'Value TEXT);')\n cur.execute(table)\n\n table = str('CREATE TABLE menu_data ('\n 'ContextId TEXT PRIMARY KEY NOT NULL,'\n 'Value TEXT);')\n cur.execute(table)\n\n table = str('CREATE TABLE profiles ('\n 'ID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,'\n 'Guid TEXT NOT NULL UNIQUE,'\n 'IsActive BOOLEAN DEFAULT (0) NOT NULL,'\n 'SortOrder INTEGER NOT NULL);')\n cur.execute(table)\n\n table = str('CREATE TABLE profiles_config ('\n 'Guid TEXT NOT NULL,'\n 'Name TEXT NOT NULL,'\n 'Value TEXT,'\n 'PRIMARY KEY (Guid, Name ),'\n 'FOREIGN KEY (Guid)'\n 'REFERENCES Profiles (Guid) ON DELETE CASCADE ON UPDATE CASCADE);')\n cur.execute(table)\n\n table = str('CREATE TABLE session ('\n 'Name TEXT PRIMARY KEY NOT NULL,'\n 'Value TEXT);')\n cur.execute(table)\n\n table = str('CREATE TABLE settings_monitor ('\n 'Name TEXT PRIMARY KEY NOT NULL,'\n 'Value TEXT);')\n cur.execute(table)\n\n table = str('CREATE TABLE search ('\n 'ID INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,'\n 'Guid TEXT NOT NULL REFERENCES profiles (Guid) ON DELETE CASCADE ON UPDATE CASCADE,'\n 'Type TEXT NOT NULL,'\n 'Value TEXT NOT NULL,'\n 'Parameters TEXT,'\n 'LastAccess TEXT);')\n cur.execute(table)\n\n if conn:\n conn.close()",
"def test_init_db(self, tmpdir):\n if ENV_DATABASE in os.environ:\n del os.environ[ENV_DATABASE]\n filename = '{}/my.db'.format(str(tmpdir))\n connect_string = 'sqlite:{}'.format(filename)\n os.environ[ENV_DATABASE] = connect_string\n # Call the init_db method to create all database tables\n DatabaseDriver.init_db()\n # Connect to the database and ensure we can run a simple query without\n # and SQL error\n con = DatabaseDriver.connect()\n assert con.execute('SELECT * from team').fetchone() is None\n con.close()",
"def load_database(db_session, fixture):\n # TODO: the fixture file path controls\n\n # load the fixture\n datas = pickle.loads(fixture)\n db_session.add_all(datas)\n db_session.commit()\n print \"load database ok\"",
"def _load_data(\n data_dir,\n script_dir,\n database: str = \"ibis_testing\",\n **_: Any,\n ) -> None:\n duckdb = pytest.importorskip(\"duckdb\")\n\n schema = (script_dir / 'schema' / 'duckdb.sql').read_text()\n\n conn = duckdb.connect(str(data_dir / f\"{database}.ddb\"))\n for stmt in filter(None, map(str.strip, schema.split(';'))):\n conn.execute(stmt)\n\n for table in TEST_TABLES:\n src = data_dir / f'{table}.csv'\n conn.execute(\n f\"COPY {table} FROM {str(src)!r} (DELIMITER ',', HEADER, SAMPLE_SIZE 1)\"\n )",
"def test_load_database_after_pickling(tmp_path):\n path = tmp_path / \"test.db\"\n database = load_database(path_or_database=path, fast_logging=False)\n database = pickle.loads(pickle.dumps(database))\n assert hasattr(database.engine, \"connect\")",
"def setup_db(filepath, tables=(), reset=False):\n \n if os.path.exists(filepath) and not reset:\n return\n \n if os.path.exists(filepath) and reset:\n os.remove(filepath)\n \n # create table with appropriate columns\n with get_conn(filepath) as conn:\n for tab in tables:\n make_table(conn, tab.name,\n tab.text_fields, tab.real_fields)",
"def initdb():\n db = getdb()\n\n with open(os.path.join(config.BASE_DIRECTORY, 'schema.sql')) as f:\n db.executescript(f.read())",
"def iDb(self):\n try:\n self.db.importDb()\n self.accept()\n except PermissionError:\n self.reject()",
"def init_db():\n # with current_app.open_resource(\"schema.sql\") as f:\n # db.executescript(f.read().decode(\"utf8\"))\n print(\"初始化数据库脚本文件!!!\")",
"def init_db():\n db = get_db()\n cur = db.cursor()\n ##读取SQL文件,获得sql语句的list\n with open(file='./flaskr/schema.sql', mode='r+') as f:\n sql_list = f.read().split(';')[:-1] # sql文件最后一行加上;\n sql_list = [x.replace('\\n', ' ') if '\\n' in x else x for x in sql_list] # 将每段sql里的换行符改成空格\n ##执行sql语句,使用循环执行sql语句\n for sql_item in sql_list:\n # print (sql_item)\n cur.execute(sql_item)",
"def import_datafile(db, infile):\n res = stat(infile)\n mtime = datetime.utcfromtimestamp(res.st_mtime)\n\n hash = md5hash(infile)\n\n data_file = db.model.data_file\n\n # Should maybe make sure error is not set\n rec = db.get(data_file, hash)\n # We are done if we've already imported\n if rec is not None:\n return False\n\n # Values to insert\n cols = dict(\n file_hash=hash,\n file_mtime=mtime,\n basename=infile.stem,\n csv_data=None)\n\n try:\n cols['csv_data'] = extract_datatable(infile)\n except NotImplementedError as e:\n secho(str(e), fg='red', dim=True)\n\n tbl = data_file.__table__\n sql = (insert(tbl)\n .values(file_path=str(infile), **cols)\n .on_conflict_do_update(\n index_elements=[tbl.c.file_path],\n set_=dict(**cols)))\n db.session.execute(sql)\n return True",
"def dburl(\n tmp_path_factory: pytest.TempPathFactory,\n person_data: pandas.DataFrame,\n student_data: pandas.DataFrame,\n school_data: pandas.DataFrame,\n ) -> str:\n path = tmp_path_factory.mktemp('alchemy') / 'test.db'\n url = f'sqlite:///{path.absolute()}'\n connection = sqlalchemy.create_engine(url)\n person_data.to_sql('person', connection, index=False)\n student_data.to_sql('student', connection, index=False)\n school_data.to_sql('school', connection, index=False)\n return url",
"def copy_db():\n local('ssh %s pg_dump -U djangoproject -c djangoproject | psql djangoproject' % env.hosts[0])",
"def init_db():\n with LoggerApi.app_context():\n db = get_db()\n with LoggerApi.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()",
"def importDatabase(self):\n db_conn.execute(\"INSERT INTO Fietsenstalling (Naam, Achternaam, Telefoon, FietsNr, PIN) VALUES \"\n \"(?, ?, ?, ?, ?);\", (naamInvoer.get(), achternaamInvoer.get(), telefoonnummerInvoer.get(), FietsNr, pincodeInvoer.get()))\n\n db_conn.commit()"
] | [
"0.68345016",
"0.64033103",
"0.6076044",
"0.6069602",
"0.60656613",
"0.6017671",
"0.59419686",
"0.59211564",
"0.5919211",
"0.5879916",
"0.57659775",
"0.5732443",
"0.5716429",
"0.5715574",
"0.5714675",
"0.5714328",
"0.568365",
"0.56806725",
"0.56564546",
"0.5650345",
"0.56437576",
"0.56302184",
"0.56176937",
"0.55936867",
"0.558858",
"0.5580476",
"0.55759877",
"0.55394083",
"0.55270535",
"0.5521717"
] | 0.7754526 | 0 |
Set up an ssh shortcut. Called by setup_ssh_keys. You can call it separately if desired. | def update_ssh_shortcut(output_keyfile, quickname=None):
if quickname:
with settings(warn_only=True):
local("touch $HOME/.ssh/config")
local(r"echo '' >> $HOME/.ssh/config")
local(r"echo 'Host %s' >> $HOME/.ssh/config" % quickname)
local(r"echo '' >> $HOME/.ssh/config")
local(r"echo 'Hostname %s' >> $HOME/.ssh/config" % host_name)
local(r"echo 'User %s' >> $HOME/.ssh/config" % user)
local(r"echo 'IdentityFile ~/.ssh/%s' >> $HOME/.ssh/config" % output_keyfile)
local(r"echo 'ServerAliveCountMax 3' >> $HOME/.ssh/config")
local(r"echo 'ServerAliveInterval 10' >> $HOME/.ssh/config") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createPuttyShortcuts(folder = \"Putty Connections\"):\n desktop = winshell.desktop()\n cpath = os.path.join(desktop, folder)\n\n if not os.path.exists(cpath):\n os.mkdir(cpath)\n \n for c in getPuttyConnections():\n if c.strip() != \"\":\n path = os.path.join(cpath, c + \".lnk\")\n target = \"C:\\\\Program Files (x86)\\\\PuTTY\\\\putty.exe\"\n args = \"-load \" + c\n wdir = \"C:\\\\Program Files (x86)\\PuTTY\\\\\"\n try:\n createShortcut(path, target, wdir = wdir, args = args)\n except Exception, e:\n print \"could not create shortcut for \" + c",
"def setup_ssh_keys(output_keyfile=\"id_rsa\", ssh_type=\"rsa\", quickname=None):\n with settings(warn_only=True):\n local(\"mkdir -p $HOME/.ssh\")\n with cd(\"$HOME/.ssh\"):\n local(\"ssh-keygen -t %s -f %s\" % (ssh_type, output_keyfile))\n for host in env.hosts:\n local(\"scp %s.pub %s:temp_id_key.pub\" % (output_keyfile, host))\n with settings(warn_only=True):\n run(\"mkdir -p $HOME/.ssh\")\n run(\"cat $HOME/temp_id_key.pub >> ~/.ssh/authorized_keys\")\n run(\"rm $HOME/temp_id_key.pub\")\n run(\"chmod 600 $HOME/.ssh/authorized_keys\")\n run(\"chmod 700 $HOME/.ssh\")\n run(\"chmod go-w $HOME\")\n if quickname:\n update_ssh_shortcut(output_keyfile, quickname)",
"def cmd_setup_ssh(public_key_file):\n\n def add_helper(key_file):\n if exists(key_file):\n try:\n fingerprint = str(check_output('ssh-keygen -lf ' + key_file, shell=True)).split(' ', 4)[1]\n key = open(key_file, 'r').read().strip()\n echo(\"Adding key '{}'.\".format(fingerprint), fg='white')\n setup_authorized_keys(fingerprint, PIKU_SCRIPT, key)\n except Exception:\n echo(\"Error: invalid public key file '{}': {}\".format(key_file, format_exc()), fg='red')\n elif public_key_file == '-':\n buffer = \"\".join(stdin.readlines())\n with NamedTemporaryFile(mode=\"w\") as f:\n f.write(buffer)\n f.flush()\n add_helper(f.name)\n else:\n echo(\"Error: public key file '{}' not found.\".format(key_file), fg='red')\n\n add_helper(public_key_file)",
"def _start_ssh(self):\n try:\n message = '\\nEnter number you want to connect: '\n num = raw_input(message)\n while not int(num) in self.instance_list:\n num = raw_input(message)\n\n message_user = 'Enter username for ssh_login(blank = %s): ' % DEFAULT_USER \n user = raw_input(message_user)\n if not user:\n user = DEFAULT_USER\n \n target = self.instance_list[int(num)]\n ssh_key_path = os.path.join(SSH_DIR, target['key'])\n if not os.path.exists(ssh_key_path):\n print 'SSH key not found! KEY_PATH[ %s ]' % ssh_key_path\n return\n\n command = COMMAND % {'sshkey' : ssh_key_path, 'user' : user, 'server' : target['dns'], 'port' : self.port}\n\n print 'Connecting to \"%s\"... [SSH COMMAND: %s ]' % (target['name'], command)\n os.system(command)\n except KeyboardInterrupt:\n print '\\nAborted!'\n finally:\n sys.exit()",
"def open_ssh():\n print('Opening SSH...')",
"def __setup_deploy(self):\r\n # Create a SSH Key-pair and push it to the robot\r\n if not self.ssh_key.exists():\r\n subprocess.run(['ssh-keygen',\r\n '-b', '4096',\r\n '-t', 'rsa',\r\n '-f', self.ssh_key,\r\n '-q', '-N', ''\r\n ])\r\n\r\n os.chmod(self.ssh_key, 0o600)\r\n os.chmod(self.ssh_pub, 0o600)\r\n print('Please enter the password if asked.')\r\n subprocess.run(\r\n ['ssh-copy-id',\r\n '-i', self.ssh_key,\r\n 'robot@{}'.format(self.settings['ip'])\r\n ], stderr=open(os.devnull, 'wb'))\r\n print('Try to log into the brick:')\r\n print('\\tssh -i {} robot@{}'.format(self.ssh_key, self.settings['ip']))",
"def setup_shortcuts():\n os.system(\"gsettings set org.gnome.shell.extensions.dash-to-dock hot-keys false\")",
"def setupSSH(key_rsa_path, key_append_path, key_gen_cmd, HostList):\n # Generate SSH key on localhost\n LocalKey = getLocalKey(key_gen_cmd, key_rsa_path)\n\n # Setup passwordless SSH with each of the specified machines\n for i in HostList:\n if i[0] != 'localhost':\n\n box_ip = i[1]\n user = i[2]\n pwd = i[3]\n\n out = subprocess.Popen(\"echo $\" + user, shell=True,\n stdout=subprocess.PIPE)\n box_user = out.stdout.read().rstrip('\\n')\n out = subprocess.Popen(\"echo $\" + pwd, shell=True,\n stdout=subprocess.PIPE)\n box_pwd = out.stdout.read().rstrip('\\n')\n try:\n\n RemoteKey = getRemoteKey(key_gen_cmd, key_rsa_path, box_ip,\n box_user, box_pwd)\n appendLocalKeyInRemote(LocalKey, key_append_path, box_ip,\n box_user, box_pwd)\n appendRemoteKeyInLocal(RemoteKey, key_append_path, box_ip)\n logging.info(\"Passwordless SSH has been setup b/w \\\n localhost & %s\", box_ip)\n\n except (paramiko.SSHException, paramiko.BadHostKeyException,\n paramiko.AuthenticationException, socket.error) as e:\n logging.info(\"Passwordless SSH setup failed b/w localhost & %s \\\n with %s, please verify host connectivity\", box_ip, e)",
"def installShortcutKeys(self):\r\n #TODO: Deal with commented out shortcuts\r\n Key_Escape = 0x01000000 # not in PythonQt\r\n Key_Space = 0x20 # not in PythonQt\r\n self.shortcuts = []\r\n keysAndCallbacks = (\r\n # ('z', self.toolsBox.undoRedo.undo),\r\n # ('y', self.toolsBox.undoRedo.redo),\r\n ('h', self.toggleCrosshair),\r\n (Key_Escape, lambda : self.editor.setActiveEffect(None)),\r\n ('e', lambda : self.editor.setActiveEffect(self.editor.effectByName('Erase'))),\r\n ('p', lambda : self.editor.setActiveEffect(self.editor.effectByName('Paint'))),\r\n ('d', lambda : self.editor.setActiveEffect(self.editor.effectByName('Draw'))),\r\n ('w', lambda : self.editor.setActiveEffect(self.editor.effectByName('Wand'))),\r\n ('r', lambda : self.editor.setActiveEffect(self.editor.effectByName('Rectangle'))),\r\n # (Key_Space, self.toolsBox.toggleFloatingMode),\r\n )\r\n for key,callback in keysAndCallbacks:\r\n shortcut = qt.QShortcut(slicer.util.mainWindow())\r\n shortcut.setKey( qt.QKeySequence(key) )\r\n shortcut.connect( 'activated()', callback )\r\n self.shortcuts.append(shortcut)",
"def create_shortcut(startup_path):\n\n startup = startup_path\n path = os.path.join(startup, \"shortcut.lnk\")\n target = os.path.dirname(os.path.dirname(__file__))+str(\"\\LEAP_MyMouse_.exe\")\n icon = os.path.dirname(os.path.dirname(__file__))+str(\"\\\\res\\icons\\leapmymouse.png\")\n\n shell = win32com.client.Dispatch(\"WScript.Shell\")\n shortcut = shell.CreateShortCut(path)\n shortcut.Targetpath = target\n shortcut.IconLocation = icon\n shortcut.WindowStyle = 7 # 7 - Minimized, 3 - Maximized, 1 - Normal\n shortcut.save()",
"def main():\n # Set these to your own details.\n myssh = connect('example.com')\n myssh.put('ssh.py')\n myssh.close()",
"def ssh_setup(existing_key: Optional[Path] = None, force: bool = False):\n\n if not shutil.which(\"ssh\"):\n raise errors.SSHNotFoundError()\n\n system_config = SystemSSHConfig()\n\n include_string = f\"Include {system_config.renku_ssh_root}/*.conf\\n\\n\"\n\n if include_string not in system_config.ssh_config.read_text():\n with system_config.ssh_config.open(mode=\"r+\") as f:\n content = f.read()\n f.seek(\n 0, 0\n ) # NOTE: We need to add 'Include' before any 'Host' entry, otherwise it is included as part of a host\n f.write(include_string + content)\n\n if not existing_key and not force and system_config.is_configured:\n communication.confirm(f\"Keys already configured for host {system_config.renku_host}. Overwrite?\", abort=True)\n\n if existing_key:\n communication.info(\"Linking existing keys\")\n existing_public_key = existing_key.parent / (existing_key.name + \".pub\")\n\n if not existing_key.exists() or not existing_public_key.exists():\n raise errors.KeyNotFoundError(\n f\"Couldn't find private key '{existing_key}' or public key '{existing_public_key}'.\"\n )\n\n if system_config.keyfile.exists():\n system_config.keyfile.unlink()\n if system_config.public_keyfile.exists():\n system_config.public_keyfile.unlink()\n\n os.symlink(existing_key, system_config.keyfile)\n os.symlink(existing_public_key, system_config.public_keyfile)\n else:\n communication.info(\"Generating keys\")\n keys = generate_ssh_keys()\n system_config.keyfile.touch(mode=0o600)\n system_config.public_keyfile.touch(mode=0o644)\n with system_config.keyfile.open(\n \"wt\",\n ) as f:\n f.write(keys.private_key)\n\n with system_config.public_keyfile.open(\"wt\") as f:\n f.write(keys.public_key)\n\n communication.info(\"Writing SSH config\")\n with system_config.jumphost_file.open(mode=\"wt\") as f:\n # NOTE: The * at the end of the jumphost name hides it from VSCode\n content = textwrap.dedent(\n f\"\"\"\n Host jumphost-{system_config.renku_host}*\n HostName {system_config.renku_host}\n Port 2022\n User jovyan\n \"\"\"\n )\n f.write(content)",
"def init():\n\n @click.command()\n @click.option('--cell', required=True,\n envvar='TREADMILL_CELL',\n callback=cli.handle_context_opt,\n expose_value=False)\n @click.option('--ssh', help='SSH client to use.',\n type=click.Path(exists=True, readable=True))\n @click.argument('app')\n @click.argument('command', nargs=-1)\n def ssh(ssh, app, command):\n \"\"\"SSH into Treadmill container.\"\"\"\n if ssh is None:\n ssh = _DEFAULT_SSH\n\n if app.find('#') == -1:\n # Instance is not specified, list matching and exit.\n raise click.BadParameter('Specify full instance name: xxx#nnn')\n\n app_discovery = discovery.Discovery(context.GLOBAL.zk.conn, app, 'ssh')\n app_discovery.sync()\n\n # Restore default signal mask disabled by python spawning new thread\n # for Zk connection.\n #\n # TODO: should this be done as part of zkutils.connect?\n for sig in range(1, signal.NSIG):\n try:\n signal.signal(sig, signal.SIG_DFL)\n except OSError:\n pass\n\n # TODO: not sure how to handle mutliple instances.\n for (app, hostport) in app_discovery.items():\n _LOGGER.info('%s :: %s', app, hostport)\n if hostport:\n host, port = hostport.split(b':')\n run_ssh(host, port, ssh, list(command))\n\n return ssh",
"def install_ssh(app):\n os.system('lxc-attach -n %s -- apk update' % app)\n os.system('lxc-attach -n %s -- apk add openssh' % app)\n # Config sshd\n config = '/var/lib/lxc/%s/rootfs/etc/ssh/sshd_config' % app\n with open(config, \"a\") as myfile:\n myfile.write(\"RSAAuthentication yes\\nPubkeyAuthentication yes\\nPermitRootLogin yes\\nPermitEmptyPasswords yes\")\n os.system('lxc-attach -n %s -- /etc/init.d/sshd start' % app)",
"def ssh():\n env['remote_port'] = env['port_map']['22']\n\n sys.stdout.write('Connecting to SSH session on remote port %(remote_port)s\\n' % env)\n\n run('chmod 600 %(pair_private_key)s' % env)\n\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.connect(\n hostname=env['relay_server'],\n port=int(env['remote_port']),\n username=env['pair_user'],\n key_filename=env['pair_private_key']\n )\n\n channel = client.invoke_shell()\n posix_shell(channel)",
"def ssh(args, config):\n print('{}'.format(ssh.__doc__))",
"def connect_instance(tag, key_name, user_name):\n inst = get_instance(tag)\n cmd = boto.manage.cmdshell.sshclient_from_instance(\n inst,\n SSH_FOLDER + key_name + \".pem\",\n user_name=user_name\n )\n return inst, cmd",
"def setupShortcuts(self):\r\n # productive\r\n profprint()\r\n macros = (\r\n (\"Ctrl+Return\", self.segmentNeedle),\r\n (\"Ctrl+z\", self.logic.deleteLastNeedle),\r\n (\"Ctrl+y\", self.acceptNeedleTipEstimate),\r\n (\"Ctrl+n\", self.rejectNeedleTipEstimate),\r\n (\"Ctrl+u\", self.acceptNeedleTipEstimateAsNewTempMarker),\r\n )\r\n\r\n for keys, f in macros:\r\n k = qt.QKeySequence(keys)\r\n s = qt.QShortcut(k, slicer.util.mainWindow())\r\n s.connect('activated()', f)\r\n s.connect('activatedAmbiguously()', f)\r\n print \"'%s' -> '%s'\" % (keys, f.__name__)\r\n # convenient for the python console\r\n globals()['nfw'] = nfw = slicer.modules.NeedleFinderWidget\r\n globals()['nfl'] = nfl = slicer.modules.NeedleFinderWidget.logic\r\n print \"nfl -> NeedleFinderLogic\"\r\n print \"nfw -> NeedleFinderWidget\"",
"def start(self):\n keyfile = self._getKeyPath()\n if j.do.getSSHKeyPathFromAgent(\"$(key.name)\", die=False) is None:\n cmd = 'ssh-add %s' % keyfile\n j.do.executeInteractive(cmd)",
"def press_on_configure_ssh(driver):\n assert wait_on_element(driver, 5, xpaths.services.ssh_Service_Button, 'clickable')\n driver.find_element_by_xpath(xpaths.services.ssh_Service_Button).click()",
"def open(self):\n logging.debug('Connecting to device %s' % self.paramiko_cfg.get('hostname'))\n self.ssh = paramiko.SSHClient()\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.ssh.connect(**self.paramiko_cfg)",
"def setup_authorized_keys(ssh_fingerprint, script_path, pubkey):\n\n authorized_keys = join(environ['HOME'], '.ssh', 'authorized_keys')\n if not exists(dirname(authorized_keys)):\n makedirs(dirname(authorized_keys))\n # Restrict features and force all SSH commands to go through our script\n with open(authorized_keys, 'a') as h:\n h.write(\"\"\"command=\"FINGERPRINT={ssh_fingerprint:s} NAME=default {script_path:s} $SSH_ORIGINAL_COMMAND\",no-agent-forwarding,no-user-rc,no-X11-forwarding,no-port-forwarding {pubkey:s}\\n\"\"\".format(**locals()))\n chmod(dirname(authorized_keys), S_IRUSR | S_IWUSR | S_IXUSR)\n chmod(authorized_keys, S_IRUSR | S_IWUSR)",
"def set_shortcut_key(self):\n self.shortcutKey = self.shortcutComboBox2.currentText()\n self.iniSettings.setValue(\"shortcut\", self.shortcutKey)\n self.register_shortcut_listener()\n if self.shortcutKey == \"ESC\":\n self.shortcutKeyHex = 0x1B\n elif self.shortcutKey == \"F1\":\n self.shortcutKeyHex = 0x70\n elif self.shortcutKey == \"F2\":\n self.shortcutKeyHex = 0x71\n elif self.shortcutKey == \"F3\":\n self.shortcutKeyHex = 0x72\n elif self.shortcutKey == \"F4\":\n self.shortcutKeyHex = 0x73\n elif self.shortcutKey == \"F5\":\n self.shortcutKeyHex = 0x74\n elif self.shortcutKey == \"F6\":\n self.shortcutKeyHex = 0x75\n elif self.shortcutKey == \"F7\":\n self.shortcutKeyHex = 0x76\n elif self.shortcutKey == \"F8\":\n self.shortcutKeyHex = 0x77\n elif self.shortcutKey == \"F9\":\n self.shortcutKeyHex = 0x78\n elif self.shortcutKey == \"F10\":\n self.shortcutKeyHex = 0x79\n elif self.shortcutKey == \"1\":\n self.shortcutKeyHex = 0x31\n elif self.shortcutKey == \"2\":\n self.shortcutKeyHex = 0x32\n elif self.shortcutKey == \"3\":\n self.shortcutKeyHex = 0x33\n elif self.shortcutKey == \"4\":\n self.shortcutKeyHex = 0x34\n elif self.shortcutKey == \"5\":\n self.shortcutKeyHex = 0x35\n elif self.shortcutKey == \"6\":\n self.shortcutKeyHex = 0x36\n elif self.shortcutKey == \"7\":\n self.shortcutKeyHex = 0x37\n elif self.shortcutKey == \"8\":\n self.shortcutKeyHex = 0x38\n elif self.shortcutKey == \"9\":\n self.shortcutKeyHex = 0x39\n elif self.shortcutKey == \"0\":\n self.shortcutKeyHex = 0x30",
"def ssh_cmd(ctx):\n pass",
"def setupShortcuts(self):\n\n macros = (\n (\"Ctrl+Return\", self.segmentNeedle),\n (\"Ctrl+z\", self.logic.deleteLastNeedle),\n )\n\n for keys,f in macros:\n k = qt.QKeySequence(keys)\n s = qt.QShortcut(k,slicer.util.mainWindow())\n s.connect('activated()', f)\n s.connect('activatedAmbiguously()', f)\n print \"SlicerRC - '%s' -> '%s'\" % (keys, f.__name__)",
"def __init__(self, settings, server=None):\n print(\"SSH Action Handler Started\")\n self.server = server\n self.active_ssh_tasks = {}\n self.key_location = settings[\"ssh_key_location\"]\n self.server_addr = settings[\"ssh_server_addr\"]\n self.server_username = settings[\"ssh_server_username\"]",
"def create_shortcut_to_desktop(target,title):\n s = os.path.basename(target)\n fname = os.path.splitext(s)[0]\n winshell.CreateShortcut(Path = os.path.join(winshell.desktop(), fname + '.lnk'),\n Target = target,\n Icon=(target, 0),\n Description=title)",
"def ssh_tunnel(self, ssh_tunnel):\n\n self._ssh_tunnel = ssh_tunnel",
"def __init__(self,\n comms_address: str,\n args: str = host_utils.DEFAULT_SSH_OPTIONS,\n key_info: Optional[data_types.KeyInfo] = None,\n log_cmd: str = \"\",\n auto_reopen: bool = True,\n open_on_start: bool = True,\n username: str = \"root\"):\n self.comms_address = comms_address\n args = host_utils.generate_ssh_args(\n comms_address,\n log_cmd,\n username,\n options=args,\n key_info=key_info)\n super().__init__(\n command=\"ssh\",\n args=args,\n auto_reopen=auto_reopen,\n open_on_start=open_on_start)",
"def openSSH(target, user):\r\n ssh = paramiko.SSHClient()\r\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n ssh.connect(target, username=user)\r\n return ssh"
] | [
"0.6442464",
"0.64414567",
"0.6348135",
"0.61858404",
"0.61858267",
"0.6147583",
"0.6134668",
"0.6058073",
"0.59874713",
"0.58345175",
"0.5792532",
"0.5744603",
"0.572959",
"0.5720658",
"0.56933945",
"0.56765187",
"0.56707656",
"0.56636703",
"0.55993026",
"0.558472",
"0.5567214",
"0.5554396",
"0.5542118",
"0.5533366",
"0.5524518",
"0.55240136",
"0.5491325",
"0.5479465",
"0.54737926",
"0.5469327"
] | 0.6833319 | 0 |
Generate a new SSH key and deliver it to the server. If quickname is provided, also set up an ssh shortcut. Use this to enable passwordless access to webfaction. | def setup_ssh_keys(output_keyfile="id_rsa", ssh_type="rsa", quickname=None):
with settings(warn_only=True):
local("mkdir -p $HOME/.ssh")
with cd("$HOME/.ssh"):
local("ssh-keygen -t %s -f %s" % (ssh_type, output_keyfile))
for host in env.hosts:
local("scp %s.pub %s:temp_id_key.pub" % (output_keyfile, host))
with settings(warn_only=True):
run("mkdir -p $HOME/.ssh")
run("cat $HOME/temp_id_key.pub >> ~/.ssh/authorized_keys")
run("rm $HOME/temp_id_key.pub")
run("chmod 600 $HOME/.ssh/authorized_keys")
run("chmod 700 $HOME/.ssh")
run("chmod go-w $HOME")
if quickname:
update_ssh_shortcut(output_keyfile, quickname) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_ssh_shortcut(output_keyfile, quickname=None):\n if quickname:\n with settings(warn_only=True):\n local(\"touch $HOME/.ssh/config\")\n local(r\"echo '' >> $HOME/.ssh/config\")\n local(r\"echo 'Host %s' >> $HOME/.ssh/config\" % quickname)\n local(r\"echo '' >> $HOME/.ssh/config\")\n local(r\"echo 'Hostname %s' >> $HOME/.ssh/config\" % host_name)\n local(r\"echo 'User %s' >> $HOME/.ssh/config\" % user)\n local(r\"echo 'IdentityFile ~/.ssh/%s' >> $HOME/.ssh/config\" % output_keyfile)\n local(r\"echo 'ServerAliveCountMax 3' >> $HOME/.ssh/config\")\n local(r\"echo 'ServerAliveInterval 10' >> $HOME/.ssh/config\")",
"def ssh_keygen(username):\n d = user_exists(username)\n assert d, fabric.colors.red(\"User does not exist: %s\" % username)\n\n home = d['home']\n if not fabric.contrib.files.exists(os.path.join(home, \".ssh/id_rsa.pub\")):\n fabric.api.run(\"mkdir -p %s\" % os.path.join(home, \".ssh/\"))\n fabric.api.run(\n \"ssh-keygen -q -t rsa -f '%s' -N ''\" %\n os.path.join(\n home, '.ssh/id_rsa'))\n run('chown indabom:indabom {}'.format(\"/home/indabom/.ssh\"))\n run('chown indabom:indabom {}'.format(\"/home/indabom/.ssh/id_rsa\"))\n run('chown indabom:indabom {}'.format(\"/home/indabom/.ssh/id_rsa.pub\"))",
"def gen_key(app):\n\tos.system('lxc-attach -n %s -- ssh-keygen -t rsa -N \"\" -f key' % app)",
"def pushkey(self, addr, passwd, keyname=\"\", pubkey=\"\", port=22, login=\"root\"):\n ExecutorSSH(addr, port=port, login=login, passwd=passwd, pushkey=keyname, pubkey=pubkey)",
"def create_ssh_keypair(keyname, comment):\n sshdir = os.path.join(util.get_homedir(), '.ssh')\n util.create_directory(sshdir, 0o700)\n keyfile = os.path.join(sshdir, keyname)\n if util.try_stat(keyfile):\n raise RuntimeError('~/.ssh/{} already exists'.format(keyname))\n subprocess.check_call(['ssh-keygen', '-f', keyfile, '-N', \"\", '-q', '-C', comment])\n os.chmod(keyfile, 0o600)\n os.chmod(keyfile + '.pub', 0o644)\n return keyfile",
"def create_key(name):\n\tinput_data = GPG.gen_key_input(\n\t\tkey_type='RSA',\n\t\tkey_length='1024',\n\t\tname_real='PGP File System',\n\t\tname_comment=create_comment(name),\n\t\tname_email='[email protected]'\n\t)\n\treturn GPG.gen_key(input_data)",
"def create_key(name):\n input_data = GPG.gen_key_input(\n key_type='RSA',\n key_length='1024',\n name_real='PGP File System',\n name_comment=create_comment(name),\n name_email='[email protected]'\n )\n return GPG.gen_key(input_data)",
"def ssh_keygen(type=\"ed25519\", keysize=None, id_file=\"\", pem=False, derivation_rounds=None, comment=None, force=False, urls=ssh_registration_urls, open_urls_for_existing_file=False):\n if not id_file:\n id_file = path.expanduser(\"~/.ssh/id_{}\".format(type))\n pub_file = id_file + \".pub\"\n \n if path.exists(id_file) and path.exists(pub_file) and not force:\n print(\"SSH key file {} already exists\".format(id_file))\n if not open_urls_for_existing_file:\n return\n else:\n params = [\"ssh-keygen\", \"-t\", type, \"-f\", id_file];\n if keysize:\n params += [\"-b\", str(keysize)]\n if not pem:\n params += [\"-o\"]\n if derivation_rounds is None:\n derivation_rounds = 100\n if derivation_rounds:\n if not pem:\n params += [\"-a\", str(derivation_rounds)]\n else:\n print(\"Using key derivation {} with PEM is not supported\".format(derivation_rounds))\n if comment is not None:\n params += [\"-C\", comment]\n print(\"SSH key file {} does not exist, creating new one with {}, format {} (with {} derivation rounds) and size {}\\n{}\".format(id_file, type, \"PEM\" if pem else \"RFC4716\", derivation_rounds or 0, keysize or \"default\", params))\n tools.run(*params)\n \n print(\"Copying SSH key into clipboard\")\n import subprocess\n subprocess.call(\"/usr/bin/pbcopy\", stdin=open(pub_file))\n for url in urls:\n print(\"Opening {}\".format(url))\n tools.run(\"open\", \"https://uberspace.de/dashboard/authentication\")",
"def gen_keys_old(name):\n d = 'keys'\n if not os.path.isdir(d):\n os.mkdir(d)\n if not os.path.isfile('%s/%s.pem'%(d,name)):\n open('%s/%s.pem'%(d,name),'w').write(Crypto.PublicKey.RSA.generate(1024,os.urandom).exportKey('PEM'))",
"def add_local_ssh_key(self):\n return\n user_ssh_key = open('/home/<$user>/.ssh/id_rsa.pub').read()\n key = digitalocean.SSHKey(token=self.secret_token,\n name='machine-name',\n public_key=user_ssh_key)\n key.create()",
"def create_key ():",
"def create_ssh_key_file(username: str, ssh_key: bytes, ip_address: str):\n\n if not os.path.exists(\"./ansible/keys\"):\n os.mkdir(\"./ansible/keys\")\n\n with open(f\"./ansible/keys/admin_{ip_address}.pem\", \"w\") as ssh_key_file:\n ssh_key_file.write(ssh_key.decode())\n\n os.system(f\"chmod 400 ./ansible/keys/admin_{ip_address}.pem\")",
"def cmd_setup_ssh(public_key_file):\n\n def add_helper(key_file):\n if exists(key_file):\n try:\n fingerprint = str(check_output('ssh-keygen -lf ' + key_file, shell=True)).split(' ', 4)[1]\n key = open(key_file, 'r').read().strip()\n echo(\"Adding key '{}'.\".format(fingerprint), fg='white')\n setup_authorized_keys(fingerprint, PIKU_SCRIPT, key)\n except Exception:\n echo(\"Error: invalid public key file '{}': {}\".format(key_file, format_exc()), fg='red')\n elif public_key_file == '-':\n buffer = \"\".join(stdin.readlines())\n with NamedTemporaryFile(mode=\"w\") as f:\n f.write(buffer)\n f.flush()\n add_helper(f.name)\n else:\n echo(\"Error: public key file '{}' not found.\".format(key_file), fg='red')\n\n add_helper(public_key_file)",
"def ssh_public_key(self, key_name: str) -> str:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )",
"def create_keypair(key_name):\n if os.path.isfile(SSH_FOLDER + key_name + \".pem\"):\n return # Key already created\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n key = ec2.create_key_pair(key_name)\n key.save(SSH_FOLDER)",
"def upload_public_key():\n log('Adicionando chave publica no servidor', green)\n ssh_file = '~/.ssh/id_rsa.pub'\n target_path = '~/.ssh/uploaded_key.pub'\n put(ssh_file, target_path)\n run('echo `cat ~/.ssh/uploaded_key.pub` >> ~/.ssh/authorized_keys && rm -f ~/.ssh/uploaded_key.pub')",
"def do_minerkey(argv):\n\n global PRIVATE_KEY\n\n if not PRIVATE_KEY:\n print(\"Error: private key is missing. Use command 'new' to generate key\")\n else:\n PRIVATE_KEY = wallet.get_private_key()\n minerkey = wallet.private_key_to_wif(PRIVATE_KEY, 0, 0)\n file = open(\"data/minerkey\", \"w\")\n file.write(minerkey)\n print(\"Minerkey was created in WIF format and saved to 'data/minerkey'\")\n file.close()",
"def download_key():\n data = check_args(('cloudProvider', ))\n provider = jobs.init_provider(data, True)\n key = encrypt_key(provider.get_key(), data['username'])\n return make_response(keyName=provider.keyname, key=key)",
"def generate_key(self):\n cmd = self.generate_key_cmd()\n self.show(cmd)\n if self.dryrun:\n return None\n s, _, _ = self.as_user(cmd)\n assert s == 0, ('failed to generate key', cmd)\n keyname = self.extract_key_name()\n return keyname",
"def _generateSSHKey(self, private_filepath, public_filepath):\n self.log.debug(\"Writing SSH keys to: \" + private_filepath + \" and \" + public_filepath)\n\n (ssh_dir, filename) = os.path.split(os.path.expanduser(private_filepath))\n if not os.path.exists(ssh_dir):\n self.log.debug(\"SSH Directory doesn't exist, creating \" + ssh_dir)\n os.makedirs(ssh_dir)\n\n key = paramiko.RSAKey.generate(1024)\n key.write_private_key_file(os.path.expanduser(private_filepath))\n \n with open(os.path.expanduser(public_filepath),\"w\") as public:\n public.write(\"%s %s\" % (key.get_name(), key.get_base64()))\n\n public.close()",
"def generate_key():\r\n # generating key\r\n key = Fernet.generate_key()\r\n\r\n key_dir = os.path.join(os.path.dirname(__file__), \"resources/key\")\r\n\r\n # writing key in file\r\n with open(key_dir, \"wb\") as keyFile:\r\n keyFile.write(key)",
"def ssh_setup(existing_key: Optional[Path] = None, force: bool = False):\n\n if not shutil.which(\"ssh\"):\n raise errors.SSHNotFoundError()\n\n system_config = SystemSSHConfig()\n\n include_string = f\"Include {system_config.renku_ssh_root}/*.conf\\n\\n\"\n\n if include_string not in system_config.ssh_config.read_text():\n with system_config.ssh_config.open(mode=\"r+\") as f:\n content = f.read()\n f.seek(\n 0, 0\n ) # NOTE: We need to add 'Include' before any 'Host' entry, otherwise it is included as part of a host\n f.write(include_string + content)\n\n if not existing_key and not force and system_config.is_configured:\n communication.confirm(f\"Keys already configured for host {system_config.renku_host}. Overwrite?\", abort=True)\n\n if existing_key:\n communication.info(\"Linking existing keys\")\n existing_public_key = existing_key.parent / (existing_key.name + \".pub\")\n\n if not existing_key.exists() or not existing_public_key.exists():\n raise errors.KeyNotFoundError(\n f\"Couldn't find private key '{existing_key}' or public key '{existing_public_key}'.\"\n )\n\n if system_config.keyfile.exists():\n system_config.keyfile.unlink()\n if system_config.public_keyfile.exists():\n system_config.public_keyfile.unlink()\n\n os.symlink(existing_key, system_config.keyfile)\n os.symlink(existing_public_key, system_config.public_keyfile)\n else:\n communication.info(\"Generating keys\")\n keys = generate_ssh_keys()\n system_config.keyfile.touch(mode=0o600)\n system_config.public_keyfile.touch(mode=0o644)\n with system_config.keyfile.open(\n \"wt\",\n ) as f:\n f.write(keys.private_key)\n\n with system_config.public_keyfile.open(\"wt\") as f:\n f.write(keys.public_key)\n\n communication.info(\"Writing SSH config\")\n with system_config.jumphost_file.open(mode=\"wt\") as f:\n # NOTE: The * at the end of the jumphost name hides it from VSCode\n content = textwrap.dedent(\n f\"\"\"\n Host jumphost-{system_config.renku_host}*\n HostName {system_config.renku_host}\n Port 2022\n User jovyan\n \"\"\"\n )\n f.write(content)",
"def upload_key():\n data = check_args(('cloudProvider', 'key'))\n provider = jobs.init_provider(data, True)\n key = decrypt_key(data['key'], data['username'])\n provider.save_key(key)\n return make_response()",
"def create_keypair(self, username):\n msg = \"create_keypair not implemented\"\n raise NotImplementedError(msg)",
"def generate_key():\n key = crypto.Key.generate_key()\n click.echo('Private Key (len {}):: \\n{}'.format(\n len(key.get_privkey()),\n hexlify(key.get_privkey())))\n click.echo('Public Key (len {})::\\n{}'.format(\n len(key.get_pubkey()),\n hexlify(key.get_pubkey())))",
"def sshkey():\n with settings( hide( 'everything' ), warn_only=True ):\n print ( '\\rChecking %s... ' % env['host'] ),\n\n try:\n dsa = open( os.getenv('HOME') + '/.ssh/id_dsa.pub', 'r' ).readline().split()\n except IOError as e:\n sys.exit( 'SSH ID file not found' )\n run( 'if [ -d .ssh ]; then true; else mkdir .ssh; fi' )\n exists = run( 'grep \\'%s\\' ~/.ssh/authorized_keys' % dsa[1] )\n if not exists.succeeded:\n run ( 'echo %s %s %s >> ~/.ssh/authorized_keys' % (dsa[0], dsa[1], dsa[2]) )\n print 'SSH key added!'\n else:\n print 'SSH key already present, no update required'",
"def new_public_key(self):\n\n option = 'new_public_key'\n _file = self.__get_option(option)\n\n if _file and not os.path.exists(_file) and not os.path.isfile(_file):\n self.log.error(\"Paramenter '%s' points to non-existing file '%s')\" % \\\n (option, _file))\n raise ConfigError('File Error', \"Paramenter '%s' points to non-existing file '%s')\" % \\\n (option, _file))\n else:\n return None",
"def create_user_key_file(username: str):\n\n user: User = UserModel().get_user(username=username)\n user_key: Key = user.public_key\n\n public_key: bytes = user_key.public_key\n\n if not os.path.exists(\"./ssh_ca\"):\n os.mkdir(\"./ssh_ca\")\n\n with open(f\"./ssh_ca/{username}.pub\") as public_key_file:\n public_key_file.write(public_key.decode())",
"def create_keypair(address_type, addresses_path, address_prefix, name):\n vkey_file = get_vkey_file(addresses_path, address_prefix, name)\n skey_file = get_skey_file(addresses_path, address_prefix, name)\n\n if(path.exists(vkey_file)) :\n print(address_prefix, \"key pair already exists for\", name)\n return\n \n makedirs(path.dirname(vkey_file), mode=0o777, exist_ok=True)\n\n run_params = ['cardano-cli', address_type, 'key-gen', '--verification-key-file', vkey_file, '--signing-key-file', skey_file]\n subprocess_run(run_params, capture_output=False, text=True)\n return",
"def makeKey( self, bSerial, sVersion, bNumcam, sMac ):\n\n\t\tbSeed = 0\n\t\tbSeed = self._setSerial( bSeed, bSerial )\n\t\tbSeed = self._setVersion( bSeed, sVersion )\n\t\tbSeed = self._setNumcam( bSeed, bNumcam )\n\t\tbSeed = self._setMac( bSeed, sMac )\n\n\t\tsKey = commands.getoutput( '/usr/local/bin/make-key -s %s' % bSeed )\n\t\tif len( sKey ) != 24:\n\t\t\traise Exception, 'make-key did not return a valid key [%s]' % sKey\n\n\t\treturn sKey"
] | [
"0.67290777",
"0.635065",
"0.6294067",
"0.62377936",
"0.61897206",
"0.5912573",
"0.58597594",
"0.5857335",
"0.5813296",
"0.5799843",
"0.57982856",
"0.5796951",
"0.5789643",
"0.56701356",
"0.56502396",
"0.5586603",
"0.5574599",
"0.5564199",
"0.5562149",
"0.55100757",
"0.5497694",
"0.54795367",
"0.5477538",
"0.54654604",
"0.5463374",
"0.5444217",
"0.5441464",
"0.5434708",
"0.54322183",
"0.5415867"
] | 0.69770044 | 0 |
Installs pip itself if needed. | def install_pip():
with settings(warn_only=True):
run('mkdir $HOME/lib/python2.7')
run('easy_install-2.7 pip') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pip_install():\n _require_environment()\n remote(PIP_INSTALL_PREFIX)",
"def pipInstall(self):\n\n print \"Does Nothing\"",
"def _setup_pip(self, context):\n # We run ensurepip in isolated mode to avoid side effects from\n # environment vars, the current directory and anything else\n # intended for the global Python environment\n cmd = [context.env_exec_cmd, '-Im', 'ensurepip', '--upgrade',\n '--default-pip']\n subprocess.check_output(cmd, stderr=subprocess.STDOUT)",
"def pip_installs():\n pip = r'pip-2.7 install --install-option=\"--install-scripts=$PWD/bin\" --install-option=\"--install-lib=$PWD/lib/python2.7\" '\n with settings(warn_only=True):\n run(\"mkdir $HOME/tmp\")\n with cd(remote_dir):\n for installation in install_list:\n run(\"export TEMP=$HOME/tmp && %s %s\" % (pip, installation))\n run(\"echo '#%s' >> $HOME/.bash_profile\" % python_add_str)",
"def install_pip(pkg_version=None):\n # FIXME: https://github.com/ansible/ansible-container/issues/919\n\n if pkg_version:\n pkg_name = \"pip==\" + pkg_version\n else:\n pkg_name = \"pip\"\n\n try:\n subprocess.check_call([\"easy_install\", \"--user\", pkg_name])\n except subprocess.CalledProcessError:\n print \"[Error] while installing pip\"",
"def __install(self):\n command = self.pipComboBox.currentText()\n if command == self.__default:\n command = \"\"\n \n packages = []\n for itm in self.resultList.selectedItems():\n packages.append(itm.text(0).strip())\n if packages:\n self.__pip.installPackages(packages, cmd=command)",
"def pip_packages():\n packages = reduce(lambda a, x: \"%s %s\" % (a, x), PIP_PACKAGES, '')\n sudo(\"pip install %s &> /dev/null\" % packages)",
"def install_pkg(pip, package):\n if not os.path.isdir(INSTALL_DIR):\n os.makedirs(INSTALL_DIR)\n pip_cmds = ['mayapy', pip, 'install', package, '--target', INSTALL_DIR, '--log', DEPENDENCY_INSTALL_LOG]\n print(pip_cmds)\n installer = subprocess.Popen(pip_cmds)\n installer.wait()\n print(\"Successfully installed package {}\".format(package))\n if installer.returncode != 0:\n raise RuntimeError(\"Failed to install package: {}, please check logs in: {}\".format(package, DEPENDENCY_INSTALL_LOG))",
"def install():\n verun('pip install -r {0}'.format(requirements))",
"def _pipInstall(self, directory: Directory) -> None:\n\n pipExec = os.path.join(os.path.dirname(sys.executable), \"pip\")\n\n pipArgs = [sys.executable, pipExec] + self.makePipArgs(directory)\n\n # The platform update is tested for dependencies when it's first uploaded\n # PIP has a bug, when you have updated packages for several dependent files\n # and try to install them all at once, some of the packages don't update.\n pipArgs += ['--no-deps']\n\n pipArgs = ' '.join(pipArgs)\n\n try:\n spawnPty(pipArgs)\n logger.info(\"Peek package update complete.\")\n\n except Exception as e:\n logSpawnException(e)\n\n # Update the detail of the exception and raise it\n e.message = \"Failed to install packages from the new release.\"\n raise",
"def update_dependencies():\n pip = env.virtualenv.child('bin', 'pip')\n reqs = env.code_dir.child('deploy-requirements.txt')\n sudo('%s -q install -U pip' % pip)\n sudo('%s -q install -r %s' % (pip, reqs))",
"def install(self):\n other_args = list(requirement_args(self._argv, want_other=True))\n archive_path = join(self._temp_path, self._downloaded_filename())\n # -U so it installs whether pip deems the requirement \"satisfied\" or\n # not. This is necessary for GitHub-sourced zips, which change without\n # their version numbers changing.\n run_pip(['install'] + other_args + ['--no-deps', '-U', archive_path])",
"def pipupdate():\n\n packages = [d for d in pkg_resources.working_set]\n subprocess.call('pip install --upgrade ' + ' '.join(packages))",
"def upgrade_pip():\n out_info(\"Upgrading pip...\")\n pipexe = [sys.executable, \"-m\", \"pip\"]\n pipexe.extend([\"install\", \"--no-cache-dir\", \"-qq\", \"--upgrade\"])\n if not IS_ADMIN and not IS_VIRTUALENV:\n pipexe.append(\"--user\")\n pipexe.append(\"pip\")\n run(pipexe)",
"def pip_requirements():\n\n require(\n \"virtualenv_path\",\n \"requirements_path\",\n \"http_proxy\",\n \"https_proxy\",\n \"sudo_user\",\n )\n cmd = \"pip install --quiet --requirement %s\" % env.requirements_path\n\n # append packages url if specified\n if env.get(\"packages_url\") is not None:\n cmd += \" -f %s\" % env.get(\"packages_url\")\n\n with context_managers.proxy(env.http_proxy, env.https_proxy):\n with context_managers.virtualenv(env.virtualenv_path):\n sudo(cmd, user=env.sudo_user)",
"def pip(c):\n\n if Path('requirements.txt').exists():\n c.run(\"pip install -r requirements.txt\")\n\n for sp_ns in ns_foreach_task_subdir():\n try:\n sp_ns.tasks.pip(c)\n except UnexpectedExit:\n pass",
"def check_pip():\n try:\n import pip\n except ImportError:\n out_error(\"Import pip failed. Please Install python3-pip \"\n \"and try again\")\n exit(1)\n upgrade_pip()\n importlib.reload(pip)\n pip_version = pip.__version__\n del pip\n\n get_installed_packages()\n out_info(\"Installed pip: {}\".format(pip_version))",
"def install(package):\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", package])",
"def install_pip():\n pip_install_txt = os.path.join(os.path.abspath(os.path.join(__file__, os.pardir)), \"build_test_dependencies.txt\")\n call_subprocess(\"python3 -m pip install -r %s\" % pip_install_txt)\n print(\"Stage install dependencies -- COMPLETED --\")",
"def pip_install(\n versioned_package: str,\n install_path: str,\n upgrade: bool = False,\n no_dependencies: bool = False\n) -> None:\n verify_pip_is_installed()\n\n additional_pip_args = []\n if upgrade:\n additional_pip_args.append('--upgrade')\n if no_dependencies:\n additional_pip_args.append('--no-dependencies')\n\n _run_pip_command([\n 'install', versioned_package, '--target', install_path\n ] + additional_pip_args)",
"def pip(command):\n with sudo(user='addok'):\n run(f'/srv/addok/venv/bin/pip {command}')",
"def pip_install(path: PathType, package_name: str) -> ContextManagerFunctionReturnType[None]:\n # Not using the function `main` from pip._internal because it assumes that once it finished,\n # the process will terminate, and thus it can failed if called multiple times. See\n # https://pip.pypa.io/en/latest/user_guide/#using-pip-from-your-program\n # It actually fails in pip==19.3.1 if called multiple times in the same process (but it works\n # in 20.0).\n # Starting a new process is slower, but it's not a problem if it's not called often.\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", str(path)])\n try:\n yield\n finally:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\", package_name])",
"def pip_install_req_file(req_file):\n pip_cmd = 'pip install -q --disable-pip-version-check --exists-action w'\n sh(f\"{pip_cmd} -r {req_file}\")",
"def install(self):\n\n self.clean_git_checkout(self.git_repo, '/src')\n\n self.__copy_config_templates();\n\n self.local(\"sudo pip install -r src/requirements.txt --upgrade\")\n\n if not self.is_local():\n PiService.install(self) #copy to remote\n\n self.sudo(\"pip install -r src/requirements.txt --upgrade\")",
"def pipinstall(packages):\n\n if isinstance(packages, str):\n if hasattr(pip, 'main'):\n pip.main(['install', packages])\n else:\n pip._internal.main(['install', packages])\n elif isinstance(packages, list):\n for i in enumerate(packages):\n if hasattr(pip, 'main'):\n pip.main(['install', i[1]])\n else:\n pip._internal.main(['install', i[1]])\n else:\n raise TypeError(\"Nor a string or a list was provided.\")",
"def pip_install(*args):\n call(WITH_VENV, '.venv', 'pip', 'install', *args)",
"def setup(ctx):\r\n ctx.run('pip3 install -r requirements.txt')",
"def pipenv(pipenv_cmd='pipenv', python_cmd='python3', use_sudo=True):\n pip(python_cmd, use_sudo)\n if not is_pipenv_installed(version=None, pipenv_cmd=pipenv_cmd):\n install_pipenv(python_cmd=python_cmd)",
"def install_requirements():\n local('. fabric_factory/ve/bin/activate; easy_install pip')\n local('. fabric_factory/ve/bin/activate; pip install -r requirements.txt')",
"def get_pip():\n return 'pip'"
] | [
"0.7695657",
"0.74213576",
"0.7209003",
"0.7197479",
"0.7096822",
"0.7030998",
"0.69859535",
"0.6862794",
"0.68612194",
"0.6831305",
"0.6793315",
"0.6789067",
"0.6607342",
"0.6605917",
"0.65682715",
"0.65639573",
"0.6522661",
"0.64693004",
"0.64424676",
"0.6357721",
"0.63213474",
"0.6316437",
"0.63062376",
"0.62823737",
"0.62380123",
"0.6219111",
"0.6211868",
"0.6110536",
"0.6106591",
"0.61049366"
] | 0.7709263 | 0 |
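The install_pip document in the row above assumes a shared-hosting layout where easy_install-2.7 is on the PATH and per-user packages live under $HOME/lib/python2.7. A minimal Fabric 1 sketch of the same idea, extended with a pip presence check, is shown below; the check itself, the pip-2.7 executable name, and the -p flag to mkdir are assumptions for illustration, not part of the original row.

from fabric.api import run, settings

def install_pip_if_missing():
    # Sketch only: assumes Fabric 1 and the per-user python2.7 layout
    # used by the row above; 'pip-2.7' is an assumed executable name.
    with settings(warn_only=True):
        existing = run('pip-2.7 --version')
    if existing.failed:
        run('mkdir -p $HOME/lib/python2.7')
        run('easy_install-2.7 pip')
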
Creates a new git repo on the server (do not include the .git ending in git_repo_name) | def create_prod_git_repo(git_repo_name):
with cd(git_dir):
run("git init --bare %s.git && cd %s.git && git config http.receivepack true" %
(git_repo_name,git_repo_name)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_storer_git_repo():\n # first make teh destination directory\n rel_repo_path = vmcheckerpaths.repository\n abs_repo_path = vmcheckerpaths.abspath(rel_repo_path)\n _mkdir_if_not_exist(abs_repo_path)\n\n # then, if missing, initialize a git repo in it.\n repo_path_git = os.path.join(abs_repo_path, '.git')\n if not(os.path.isdir(repo_path_git)):\n # no git repo found in the dir.\n try:\n env = os.environ\n env['GIT_DIR'] = repo_path_git\n check_call(['git', 'init'], env=env)\n except CalledProcessError:\n logging.error('cannot create git repo in %s' % repo_path_git)",
"def cmd_create(self):\n self.repo.create()\n\n # Add .gitignore.\n self.repo.add_files({'.gitignore': '.swp\\n'}, FIRST_COMMIT_MSG)\n\n # Create the etc and timestamps branches.\n self.repo.checkout('etc', create=True)\n self.repo.checkout('timestamps', create=True)\n\n self.repo.checkout('master')\n self.repo.init()\n self.update_repository()\n print('Git repository created at %s' % self.repodir)",
"def command_new_repo(self):\n repoinit.new_repo(*self.args())",
"def _create_github_repo(self):\n\n repo_dir = join(self.temp_dir, 'repo')\n subprocess.check_output(['git', 'init', repo_dir])\n\n subprocess.check_output(\n ['git', 'config', 'user.email', os.environ['GIT_EMAIL']],\n cwd=repo_dir\n )\n subprocess.check_output(\n ['git', 'config', 'user.name', os.environ['GIT_NAME']],\n cwd=repo_dir\n )\n\n content = statiki.get_travis_files_content(TEST_REPO, 'BOGUS', {})\n\n for info in content:\n path = join(repo_dir, info['name'])\n with open(path, 'w') as f:\n f.write(info['content'])\n\n subprocess.check_output(['git', 'add', path], cwd=repo_dir)\n subprocess.check_output(\n ['git', 'commit', '-m', '%s' % info['message']], cwd=repo_dir\n )\n\n subprocess.check_output(\n shlex.split('git remote add origin ..'), cwd=repo_dir\n )\n\n return repo_dir",
"def create_repository(cfg):\n if os.path.isdir(cfg[\"repo_dir\"]):\n shutil.rmtree(cfg[\"repo_dir\"], ignore_errors=True)\n return Repo.init(cfg[\"repo_dir\"])",
"def create_update_gitdir():\n if not os.path.exists(gitdname):\n retcode = subprocess.call('git clone '+repo, shell=True)\n if retcode != 0:\n msg = \"\"\"There was a problem cloning the repo\"\"\"\n raise Exception(msg)\n else: # directory exists, can't pull if you're not on a branch\n # just delete it and clone again. Lazy but clean solution.\n shutil.rmtree(gitdname)\n create_update_gitdir()",
"def create_code_repository(CodeRepositoryName=None, GitConfig=None):\n pass",
"def _clone_gitrepo():\n # Puts git repo in ~/.ssh/config to avoid interaction due to missing known_hosts\n git_server = urllib.splituser(urllib.splittype(env.project['git_repo'])[0])[1]\n if not files.exists('~/.ssh/config') or not files.contains('~/.ssh/config', git_server):\n files.append('~/.ssh/config', ['host %s' % git_server, ' StrictHostKeyChecking no'])\n\n branch = env.project.get('git_branch', 'master')\n if files.exists(_interpolate(DJANGO_PROJECT_DIR)):\n print _interpolate('project %(project)s already exists, updating')\n remote('git pull origin %s' % branch)\n else:\n with cd(_interpolate(VIRTUALENV_DIR)):\n run(_interpolate('git clone %(git_repo)s %(project)s'))\n if branch != 'master':\n remote('git fetch origin %s:%s' % (branch, branch))\n remote('git checkout %s' % branch)",
"def create_repo(self, repo):\n return self.user_con.create_repo(repo=repo)",
"def create_from_git(self, token: Any, repo: str):\n params = [token, repo, ]\n method = \"ProjectAPI.CreateFromGit\"\n self.__add_request(method, params, lambda payload: Definition.from_json(payload))",
"def create_repository(organization_name, repository_name, \n template_repository=None, travis_ci=True):\n\n # Create a GitHub repository.\n github_client = GitHub(os.environ.get(\"GITHUB_TOKEN\"))\n\n organization = github_client.get_organization(organization_name)\n new_repository = organization.create_repo(repository_name)\n new_repository_uri = \"/\".join([organization_name, repository_name])\n\n # Enable continuous integration.\n if travis_ci:\n enable_continuous_integration(new_repository_uri)\n\n # Copy from a template.\n if template_repository: \n template = github_client.get_repo(template_repository)\n\n temp_folder = mkdtemp()\n subprocess.Popen(\n [\"git\", \"clone\", template.clone_url], cwd=temp_folder).wait()\n\n # Remove .git directory, create new one, add files, commit and push\n commands = [\n \"rm -Rf .git/\",\n \"git init\",\n \"git add -f -A\",\n \"git remote add origin [email protected]:{uri}.git\"\\\n .format(uri=new_repository_uri),\n (\"git\", \"commit\", \"-m\", \"Initial commit using {} template\"\\\n .format(template_repository)),\n \"git push -u origin master\"\n ]\n\n cwd = glob(os.path.join(temp_folder, \"*\"))[0]\n for command in commands:\n args = command.split() if isinstance(command, str) else command\n subprocess.Popen(args, cwd=cwd).wait()\n\n return new_repository",
"def create_bare_repo(self, domain):\n\n domain_dir = self.get_domaindir(domain)\n www_dir = domain_dir + \"/www\"\n www_git = domain_dir + \"/www.git\"\n hook_post_receive_file = www_git + \"/hooks/post-receive\"\n\n if not os.path.exists(www_git):\n os.makedirs(www_git)\n git_init_command = \"cd \" + www_git\n git_init_command += \" && git init --bare\"\n subprocess.call(git_init_command, shell=True)\n\n if not os.path.isfile(hook_post_receive_file):\n with open(hook_post_receive_file, \"w\") as file:\n post_receive_content = \"#!/bin/sh\"\n post_receive_content += \"\\nGIT_WORK_TREE=\" + www_dir\n post_receive_content += \" git checkout -f\"\n file.write(post_receive_content)\n subprocess.call(\"chmod +x \" + hook_post_receive_file, shell=True)",
"def api_repo_create():\n form = NewRepoForm()\n if form.validate_on_submit():\n # On the miniscule chance we generate a non-unique access key, loop and try again.\n success = False\n while not success:\n new_repo = Repo.create(\n pass_phrase = form.pass_phrase.data,\n title = form.title.data,\n description = form.description.data,\n is_private = form.is_private.data\n )\n db.session.add(new_repo)\n try:\n db.session.commit()\n success = True\n except:\n db.session.rollback()\n success = False\n session['working_repo'] = new_repo.access_key\n return jsonify(message='success', created=new_repo.access_key)\n else:\n return jsonify(message=\"failed\", errors=form.errors_to_json()), 400",
"def create_repo_cli(api_client, url, provider, path):\n content = ReposApi(api_client).create(url, provider, path)\n click.echo(pretty_format(content))",
"def create_clowder_repo(self, url, branch, depth=0):\n\n if self.existing_git_repository(self.repo_path):\n return\n self._init_repo()\n self._create_remote(self.remote, url, remove_dir=True)\n self._checkout_new_repo_branch(branch, depth)",
"def newrepo():\n form = AddRepoForm()\n if form.validate_on_submit():\n\n # make the directory for this package\n os.mkdir(DATA + form.name.data)\n\n flash('Repo created successfully')\n\n # redirect to the login page\n return redirect(url_for('home.dashboard'))\n\n # load registration template\n return render_template('home/add.html', form=form, title='Local Repo', target=\"add\")",
"def new_repo(req, source, psp_dir, url_helper=None):\n req.content_type = 'text/html'\n repo_dir = req.filename.rsplit('/', 1)[0]\n files = [f for f in os.listdir(repo_dir) if f[-3:] == '.h5']\n top_level = psp.PSP(req, filename=psp_dir+'new_repo.psp')\n top_level.run({'context': req.uri,\n 'files': files})",
"def repo_new(request):\n if request.method != 'POST':\n form = RepoForm()\n return respond(request, 'repo_new.html', {'form': form})\n form = RepoForm(request.POST)\n errors = form.errors\n if not errors:\n try:\n repo = models.Repository(\n name=form.cleaned_data.get('name'),\n url=form.cleaned_data.get('url'),\n guid=form.cleaned_data.get('guid'),\n )\n except (db.BadValueError, ValueError) as err:\n errors['__all__'] = unicode(err)\n if errors:\n return respond(request, 'repo_new.html', {'form': form})\n repo.put()\n branch_url = repo.url\n if not branch_url.endswith('/'):\n branch_url += '/'\n branch_url += 'trunk/'\n branch = models.Branch(repo_key=repo.key, repo_name=repo.name,\n category='*trunk*', name='Trunk',\n url=branch_url)\n branch.put()\n return HttpResponseRedirect(reverse(repos))",
"def create_repo_clone(self, path, https):\n _, _, login, remote_dir = path.split('/', 3) # 3 x '/' before real path\n remote_dir = os.path.dirname(remote_dir) # final segment from clone\n print remote_dir\n cmd = ['ssh', login, 'mkdir', '-p', remote_dir]\n print cmd\n check_output(cmd)\n cmd = ['ssh', login, 'cd', remote_dir, ';', 'hg', 'clone', https]\n #cmd = ['ssh', login, 'cd {} ; hg clone {}'.format(remote_dir, path.replace('ssh:', 'https:'))]\n print cmd\n check_output(cmd)",
"def git_project(soup, github_user, github_pass, github_repo, github_name):\n giturl = 'https://{user}:{password}@github.com/{user}/{repo}.git'.format(\n user=github_user, password=github_pass, repo=github_repo\n )\n oldcwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n gitdir = os.path.join(tmpdir, github_repo)\n cmd = 'git clone {} {}'.format(shlex.quote(giturl), shlex.quote(gitdir))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(gitdir)\n rhinoscrape(soup, github_user, github_name)\n cmd = 'git add .'\n subprocess.run(shlex.split(cmd), check=False)\n msg = 'Project committed by Rhino Repo'\n cmd = 'git commit -m {}'.format(shlex.quote(msg))\n subprocess.run(shlex.split(cmd), check=False)\n cmd = 'git push {}'.format(shlex.quote(giturl))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(oldcwd)\n shutil.rmtree(tmpdir, ignore_errors=True)",
"def create(self):\n if os.path.isdir(self.repodir):\n if os.listdir(self.repodir):\n raise EmtError('%s is not empty' % self.repodir)\n else:\n os.makedirs(self.repodir)\n self.git_cmd('init')\n self.initialized = True",
"async def create_from_git(self, token: Any, repo: str) -> Definition:\n response = await self._invoke({\n \"jsonrpc\": \"2.0\",\n \"method\": \"ProjectAPI.CreateFromGit\",\n \"id\": self.__next_id(),\n \"params\": [token, repo, ]\n })\n assert response.status // 100 == 2, str(response.status) + \" \" + str(response.reason)\n payload = await response.json()\n if 'error' in payload:\n raise ProjectAPIError.from_json('create_from_git', payload['error'])\n return Definition.from_json(payload['result'])",
"def create_remote_repo(self, auth_token):\n github = Github(auth_token)\n user = github.get_user()\n try:\n return user.create_repo(self.repo)\n except GithubException as e:\n raise PermissionDenied(\n (e._GithubException__data['message'] +\n e._GithubException__data['errors'][0]['message']))",
"def init_git_repo(c, repo_name, org_name='kinecosystem', remote='origin', branch='master'):\n # clone git repo if it doesn't exist,\n # otherwise checkout master branch\n dir_name = '{}-git'.format(repo_name)\n git_url = 'https://github.com/{}/{}.git'.format(org_name, repo_name)\n\n if not os.path.isdir('{}/{}/volumes/{}'.format(os.getcwd(), c.cwd, dir_name)):\n print('%s git repository doesn\\'t exist, cloning' % repo_name)\n c.run('git clone --branch {branch} {git_url} volumes/{dir_name}'.format(branch=branch, git_url=git_url, dir_name=dir_name))\n else:\n with c.cd('volumes/{}'.format(dir_name)):\n if is_git_dir_modified(c):\n raise Exit('Stopping, please clean changes and retry')\n\n git_dir_checkout_branch(c, org_name, repo_name, remote, branch)\n\n return dir_name",
"def mkdir ():\n name = \"-\".join(parser_arguments().classes)\n if not os.path.exists(name):\n os.mkdir(name)\n print('The repository {} have been created'.format(parser_arguments().classes))\n else:\n print('The repository {} already exists.'.format(parser_arguments().classes))\n pass",
"def _make_github_repo(github_login, entity, reponame, existing,\n access_protocol, private, dryrun):\n repo = None\n access_url = None\n try:\n repo = entity.get_repo(reponame)\n access_url = get_repo_url(repo, access_protocol, github_login)\n except gh.GithubException as e:\n if e.status != 404:\n # this is not a not found message, raise\n raise e\n lgr.debug(\n 'To be created repository \"%s\" does not yet exist on Github',\n reponame)\n\n if repo is not None:\n res = dict(\n url=access_url,\n preexisted=True,\n )\n if existing in ('skip', 'reconfigure'):\n return dict(\n res,\n status='notneeded',\n preexisted=existing == 'skip',\n )\n elif existing == 'error':\n return dict(\n res,\n status='error',\n message=('repository \"%s\" already exists on Github', reponame),\n )\n elif existing == 'replace':\n _msg = ('repository \"%s\" already exists on GitHub.', reponame)\n # Since we are running in the loop trying different tokens,\n # this message might appear twice. TODO: avoid\n if ui.is_interactive:\n remove = ui.yesno(\n \"Do you really want to remove it?\",\n title=_msg[0] % _msg[1],\n default=False\n )\n else:\n return dict(\n res,\n status='impossible',\n message=(\n _msg[0] + \" Remove it manually first on GitHub or \"\n \"rerun datalad in an interactive shell to confirm \"\n \"this action.\",\n _msg[1]),\n )\n if not remove:\n return dict(\n res,\n status='impossible',\n message=_msg,\n )\n repo.delete()\n repo = None\n else:\n RuntimeError('must not happen')\n\n if repo is None and not dryrun:\n try:\n repo = entity.create_repo(\n reponame,\n # TODO description='',\n # TODO homepage='',\n private=private,\n has_issues=False,\n has_wiki=False,\n has_downloads=False,\n auto_init=False)\n except gh.GithubException as e:\n if e.status == 404:\n # can happen if credentials are not good enough!\n raise\n msg = \"Github {} ({})\".format(\n e.data.get('message', str(e) or 'unknown'),\n e.data.get('documentation_url', 'no url')\n )\n if e.data.get('errors'):\n msg += ': {}'.format(\n ', '.join(\n [\n err.get('message')\n for err in e.data.get('errors', [])\n if 'message' in err\n ]))\n return dict(\n res,\n status='error',\n message=msg,\n )\n\n if repo is None and not dryrun:\n raise RuntimeError(\n 'something went wrong, we got no Github repository')\n\n # get definitive URL:\n # - use previously determined one\n # - or query a newly created project\n # - or craft one in dryrun mode\n access_url = access_url or '{}github.com{}{}/{}.git'.format(\n 'https://' if access_protocol == 'https' else 'git@',\n '/' if access_protocol == 'https' else ':',\n # this will be the org, in case the repo will go under an org\n entity.login,\n reponame,\n ) if dryrun else get_repo_url(repo, access_protocol, github_login)\n\n return dict(\n status='ok',\n url=access_url,\n preexisted=False,\n )",
"def repository_create_hosted():\n pass",
"def clone_into_project(git_repo_name):\n repo_dir = git_dir + \"/%s.git\" % git_repo_name\n with cd(remote_dir):\n run('rm -rf myproject')\n run(\"git clone %s %s\" % (repo_dir, project_name))\n run(\"echo 'MY_ENV=\\\"prod\\\"' > %s/%s/site_settings.py\" % (project_name,project_name))\n update_conf_file()",
"def pushrepo(projectjson, repourl):\n try:\n components = projectjson['components']\n name = projectjson['name']\n reponame = name + '_sc'\n logger.debug(f\"repourl is : {repourl}\")\n bb_split = repourl.split(\"//\")\n bb_split[1] = f\"{username}:{escape_password}@\"+bb_split[1]\n newrepourl = \"//\".join(bb_split)\n local_code_setup(reponame, newrepourl)\n dst_makefile_path = f\"/tmp/{reponame}/Makefile\"\n if not os.path.exists(dst_makefile_path):\n src_makefile_path = f\"/tmp/skeleton-build/Makefile\"\n copy2(src_makefile_path, dst_makefile_path)\n print(\"Makefile added\")\n createcomponents(components, reponame, newrepourl, name)\n bitbucket.push_repo_to_bitbucket(f\"/tmp/{reponame}\")\n rmtree('/tmp/skeleton-build')\n rmtree(f'/tmp/{reponame}')\n return True\n except Exception as e:\n print(\"caught exception.: \", e)\n return False",
"def test_add_repo(self):\r\n with self.assertRaisesRegexp(GitImportError, GitImportError.NO_DIR):\r\n git_import.add_repo(self.TEST_REPO, None, None)\r\n\r\n os.mkdir(self.GIT_REPO_DIR)\r\n self.addCleanup(shutil.rmtree, self.GIT_REPO_DIR)\r\n\r\n with self.assertRaisesRegexp(GitImportError, GitImportError.URL_BAD):\r\n git_import.add_repo('foo', None, None)\r\n\r\n with self.assertRaisesRegexp(GitImportError, GitImportError.CANNOT_PULL):\r\n git_import.add_repo('file:///foobar.git', None, None)\r\n\r\n # Test git repo that exists, but is \"broken\"\r\n bare_repo = os.path.abspath('{0}/{1}'.format(settings.TEST_ROOT, 'bare.git'))\r\n os.mkdir(bare_repo)\r\n self.addCleanup(shutil.rmtree, bare_repo)\r\n subprocess.check_output(['git', '--bare', 'init', ], stderr=subprocess.STDOUT,\r\n cwd=bare_repo)\r\n\r\n with self.assertRaisesRegexp(GitImportError, GitImportError.BAD_REPO):\r\n git_import.add_repo('file://{0}'.format(bare_repo), None, None)"
] | [
"0.78584546",
"0.76648015",
"0.7265442",
"0.72380567",
"0.719079",
"0.7102667",
"0.709434",
"0.7067098",
"0.7028291",
"0.70168835",
"0.6969751",
"0.69651526",
"0.6953235",
"0.69130766",
"0.68735963",
"0.68264115",
"0.67686075",
"0.67527515",
"0.6737532",
"0.6696314",
"0.6633343",
"0.66185445",
"0.65991133",
"0.6591205",
"0.6521478",
"0.65140915",
"0.649753",
"0.64814025",
"0.6477833",
"0.64679915"
] | 0.805788 | 0 |
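The create_prod_git_repo document in the row above chains shell commands inside a single run() call. A hedged equivalent using Fabric 1's cd() context manager is sketched below; the webapps/git/repos default for git_dir is an assumption for illustration, and posixpath.join is only used to build the remote path.

import posixpath
from fabric.api import cd, run

def create_prod_git_repo_sketch(git_repo_name, git_dir='webapps/git/repos'):
    # Sketch only: same effect as the row above -- a bare repository
    # that accepts pushes over HTTP. The git_dir default is assumed.
    repo_path = posixpath.join(git_dir, '%s.git' % git_repo_name)
    run('git init --bare %s' % repo_path)
    with cd(repo_path):
        run('git config http.receivepack true')
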
Adds the git repo on the server as the local .git repo's origin, and pushes master to it. (do not include the .git ending in git_repo_name) | def add_prod_repo_as_origin_and_push(git_repo_name):
local("""echo '[remote "origin"]' >> .git/config""")
local(r"echo ' fetch = +refs/heads/*:refs/remotes/origin/*' >> .git/config")
local(r"echo ' url = %s:webapps/git/repos/%s.git' >> .git/config" % (env.hosts[0], git_repo_name))
local(r"git push origin master") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def push(self):\n origin = self.git_repo.remotes.origin\n origin.push()",
"def push(ctx):\n dufl_root = ctx.obj['dufl_root']\n git = Git(ctx.obj.get('git', '/usr/bin/git'), dufl_root)\n git.run('push', 'origin', git.working_branch())",
"def push():\n branch = git.current_branch().name\n shell.run('git push -u origin {}'.format(branch))",
"def __gitAddRemote(self):\n self.vcs.gitAddRemote(self.project.getProjectPath())",
"def push(self, base_repo, branch=\"master\"):\n base_repo.push_to(self, branch)",
"def _clone_gitrepo():\n # Puts git repo in ~/.ssh/config to avoid interaction due to missing known_hosts\n git_server = urllib.splituser(urllib.splittype(env.project['git_repo'])[0])[1]\n if not files.exists('~/.ssh/config') or not files.contains('~/.ssh/config', git_server):\n files.append('~/.ssh/config', ['host %s' % git_server, ' StrictHostKeyChecking no'])\n\n branch = env.project.get('git_branch', 'master')\n if files.exists(_interpolate(DJANGO_PROJECT_DIR)):\n print _interpolate('project %(project)s already exists, updating')\n remote('git pull origin %s' % branch)\n else:\n with cd(_interpolate(VIRTUALENV_DIR)):\n run(_interpolate('git clone %(git_repo)s %(project)s'))\n if branch != 'master':\n remote('git fetch origin %s:%s' % (branch, branch))\n remote('git checkout %s' % branch)",
"def _sync_git_origin(cache_dir, site):\n\n git_dir = '--git-dir=' + cache_dir\n\n # silently try to add origin first, to lazily handle a missing case\n GIT.execute([git_dir, 'remote', 'add', 'origin', site],\n cwd=cache_dir, quiet=True)\n\n if not GIT.execute([git_dir, 'remote', 'set-url', 'origin', site],\n cwd=cache_dir):\n err('unable to ensure origin is set on repository cache')\n return False\n\n return True",
"def push(self):\n out, err, code = self.command( [\"git\", \"push\"], self.directory )",
"def gitAdd(filename, repo_dir):\n file_path = \"%s/%s\" % (repo_dir, filename)\n git(\"add\", file_path)",
"def push(self):\n if self.forward:\n git = self.repo.git\n try:\n git.push()\n self.forward = \"pushed\"\n except:\n self.forward = \"push error - \"+self.forward",
"def _git_push(branch):\n\n local(\n 'git push -f origin %(branch)s:%(branch)s' % {'branch': branch},\n capture=True\n )\n print('Pushed to %s' % branch)",
"def create_prod_git_repo(git_repo_name):\n with cd(git_dir):\n run(\"git init --bare %s.git && cd %s.git && git config http.receivepack true\" %\n (git_repo_name,git_repo_name))",
"def pushrepo(projectjson, repourl):\n try:\n components = projectjson['components']\n name = projectjson['name']\n reponame = name + '_sc'\n logger.debug(f\"repourl is : {repourl}\")\n bb_split = repourl.split(\"//\")\n bb_split[1] = f\"{username}:{escape_password}@\"+bb_split[1]\n newrepourl = \"//\".join(bb_split)\n local_code_setup(reponame, newrepourl)\n dst_makefile_path = f\"/tmp/{reponame}/Makefile\"\n if not os.path.exists(dst_makefile_path):\n src_makefile_path = f\"/tmp/skeleton-build/Makefile\"\n copy2(src_makefile_path, dst_makefile_path)\n print(\"Makefile added\")\n createcomponents(components, reponame, newrepourl, name)\n bitbucket.push_repo_to_bitbucket(f\"/tmp/{reponame}\")\n rmtree('/tmp/skeleton-build')\n rmtree(f'/tmp/{reponame}')\n return True\n except Exception as e:\n print(\"caught exception.: \", e)\n return False",
"def push_code(repo, branch='gh-pages'):\n return repo.remotes.origin.push(branch)",
"def push(ref='origin/master'):\n from fabric.api import local, run, cd\n from fabric.contrib.project import rsync_project\n local('pelican -s %s -d' % env.config_file)\n rsync_project(\n remote_dir=env.host_site_path,\n local_dir='output/',\n delete=True\n )\n if env.host_type != 'production':\n run(\"chown -R %(user)s:%(host_webserver_user)s %(host_site_path)s \"\n \"&& chmod -R 02750 %(host_site_path)s\" % env)",
"def git_remote(git_repo):\n github_token = os.getenv(GITHUB_TOKEN_KEY)\n if github_token:\n return 'https://{0}@github.com/{1}'.format(\n github_token, git_repo)\n return '[email protected]:{0}'.format(git_repo)",
"def sync_git_repo():\n # get the current dir of this script\n current_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n repo_path = os.path.join(current_dir,REPO_NAME)\n logging.info(\"Repository path is: \"+repo_path)\n # check to see if a repo has been init already\n try: \n repo = git.Repo(repo_path)\n logging.info(\"Git repo has already been created.\")\n except (git.exc.InvalidGitRepositoryError,git.exc.NoSuchPathError):\n logging.info(\"No git repo has been initialized for this module. Cloning from github.com now.\")\n repo_url = \"https://\"+REPO_USERNAME+\":\"+REPO_PERSONAL_ACCESS_TOKEN+\"@github.com/\"+REPO_USERNAME+\"/\"+REPO_NAME+\".git\"\n git.Repo.clone_from(repo_url,repo_path)\n logging.info(\"Repo cloned successfully.\")\n repo = git.Repo(repo_path)\n # now we have a valid repo created \n # pull the latest data from the repo\n origin = repo.remotes.origin\n origin.pull()\n # create the csv output dir if it does not exist\n Path(paho_csv_reports_dir).mkdir(parents=False, exist_ok=True)\n # get all csv files in this dir\n all_paho_csv_files = glob.glob(paho_csv_reports_dir+os.path.sep+\"*.csv\")\n # add all files in this dir to the repo index\n repo.index.add(all_paho_csv_files)\n logging.info(\"Added all .csv files from \"+paho_csv_reports_dir+\" to repo index.\")\n # set the commit message\n repo.index.commit(\"Automatic commit by \"+os.path.basename(__file__))\n # git push \n origin.push()\n logging.info(\"All csv files pushed to github repo successfully.\")",
"def update_code_from_git():\n if not files.exists(REMOTE_REPO_DIR):\n with cd(HOME_DIR):\n run(\"git clone %s\" % MAIN_GITHUB_REP )\n with cd(REMOTE_REPO_DIR):\n run(\"git pull\")",
"def commit_master(do_deploy=True):\n local(\"git pull origin master\")\n commit()\n local(\"git checkout master\")\n local(\"git pull origin master\")\n local(\"git merge dev\")\n local(\"git push origin master\")\n if do_deploy:\n deploy()\n deploy_config()",
"def clone_into_project(git_repo_name):\n repo_dir = git_dir + \"/%s.git\" % git_repo_name\n with cd(remote_dir):\n run('rm -rf myproject')\n run(\"git clone %s %s\" % (repo_dir, project_name))\n run(\"echo 'MY_ENV=\\\"prod\\\"' > %s/%s/site_settings.py\" % (project_name,project_name))\n update_conf_file()",
"def __gitPush(self):\n self.vcs.gitPush(self.project.getProjectPath())",
"def deploy():\n remote_dir = os.path.abspath(os.path.join(REMOTE_BASE_DIR, REPO_NAME))\n \n with settings(warn_only=True):\n if run(\"test -d %s\" % (remote_dir)).failed:\n puts(red(\"[Repo %s does not exist on remote at: %s]\" % (REPO_NAME, remote_dir)))\n with cd(REMOTE_BASE_DIR):\n run(\"git clone %s %s\" % (REPO_URL, REPO_NAME))\n\n puts(yellow(\"[Write logs]\"))\n run(\"echo '-----------------------------' > %s\" % REMOTE_ERR_FILE)\n run(\"echo `date` >> %s\" % REMOTE_ERR_FILE)\n run(\"echo '-----------------------------' >> %s\" % REMOTE_ERR_FILE)\n run(\"echo '-----------------------------' > %s\" % REMOTE_LOG_FILE)\n run(\"echo `date` >> %s\" % REMOTE_LOG_FILE)\n run(\"echo '-----------------------------' >> %s\" % REMOTE_LOG_FILE)\n\n puts(yellow(\"[Update repo: %s]\" % REPO_NAME))\n with cd(remote_dir):\n run(\"git pull origin master >> %s 2>> %s\" %\n (REMOTE_LOG_FILE, REMOTE_ERR_FILE))\n\n # reminder new static files\n puts(yellow('Do not forget to run collect staticfiles on DJANGO server.'))",
"def add_repo(repo, rdir_in, branch=None):\r\n # pylint: disable=R0915\r\n\r\n # Set defaults even if it isn't defined in settings\r\n mongo_db = {\r\n 'host': 'localhost',\r\n 'user': '',\r\n 'password': '',\r\n 'db': 'xlog',\r\n }\r\n\r\n # Allow overrides\r\n if hasattr(settings, 'MONGODB_LOG'):\r\n for config_item in ['host', 'user', 'password', 'db', ]:\r\n mongo_db[config_item] = settings.MONGODB_LOG.get(\r\n config_item, mongo_db[config_item])\r\n\r\n if not os.path.isdir(GIT_REPO_DIR):\r\n raise GitImportError(GitImportError.NO_DIR)\r\n # pull from git\r\n if not (repo.endswith('.git') or\r\n repo.startswith(('http:', 'https:', 'git:', 'file:'))):\r\n raise GitImportError(GitImportError.URL_BAD)\r\n\r\n if rdir_in:\r\n rdir = os.path.basename(rdir_in)\r\n else:\r\n rdir = repo.rsplit('/', 1)[-1].rsplit('.git', 1)[0]\r\n log.debug('rdir = {0}'.format(rdir))\r\n\r\n rdirp = '{0}/{1}'.format(GIT_REPO_DIR, rdir)\r\n if os.path.exists(rdirp):\r\n log.info('directory already exists, doing a git pull instead '\r\n 'of git clone')\r\n cmd = ['git', 'pull', ]\r\n cwd = rdirp\r\n else:\r\n cmd = ['git', 'clone', repo, ]\r\n cwd = GIT_REPO_DIR\r\n\r\n cwd = os.path.abspath(cwd)\r\n try:\r\n ret_git = cmd_log(cmd, cwd=cwd)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Error running git pull: %r', ex.output)\r\n raise GitImportError(GitImportError.CANNOT_PULL)\r\n\r\n if branch:\r\n switch_branch(branch, rdirp)\r\n\r\n # get commit id\r\n cmd = ['git', 'log', '-1', '--format=%H', ]\r\n try:\r\n commit_id = cmd_log(cmd, cwd=rdirp)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to get git log: %r', ex.output)\r\n raise GitImportError(GitImportError.BAD_REPO)\r\n\r\n ret_git += '\\nCommit ID: {0}'.format(commit_id)\r\n\r\n # get branch\r\n cmd = ['git', 'symbolic-ref', '--short', 'HEAD', ]\r\n try:\r\n branch = cmd_log(cmd, cwd=rdirp)\r\n except subprocess.CalledProcessError as ex:\r\n # I can't discover a way to excercise this, but git is complex\r\n # so still logging and raising here in case.\r\n log.exception('Unable to determine branch: %r', ex.output)\r\n raise GitImportError(GitImportError.BAD_REPO)\r\n\r\n ret_git += '{0}Branch: {1}'.format(' \\n', branch)\r\n\r\n # Get XML logging logger and capture debug to parse results\r\n output = StringIO.StringIO()\r\n import_log_handler = logging.StreamHandler(output)\r\n import_log_handler.setLevel(logging.DEBUG)\r\n\r\n logger_names = ['xmodule.modulestore.xml_importer', 'git_add_course',\r\n 'xmodule.modulestore.xml', 'xmodule.seq_module', ]\r\n loggers = []\r\n\r\n for logger_name in logger_names:\r\n logger = logging.getLogger(logger_name)\r\n logger.setLevel(logging.DEBUG)\r\n logger.addHandler(import_log_handler)\r\n loggers.append(logger)\r\n\r\n try:\r\n management.call_command('import', GIT_REPO_DIR, rdir,\r\n nostatic=not GIT_IMPORT_STATIC)\r\n except CommandError:\r\n raise GitImportError(GitImportError.XML_IMPORT_FAILED)\r\n except NotImplementedError:\r\n raise GitImportError(GitImportError.UNSUPPORTED_STORE)\r\n\r\n ret_import = output.getvalue()\r\n\r\n # Remove handler hijacks\r\n for logger in loggers:\r\n logger.setLevel(logging.NOTSET)\r\n logger.removeHandler(import_log_handler)\r\n\r\n course_key = None\r\n location = 'unknown'\r\n\r\n # extract course ID from output of import-command-run and make symlink\r\n # this is needed in order for custom course scripts to work\r\n match = re.search(r'(?ms)===> IMPORTING course (\\S+)', ret_import)\r\n if match:\r\n course_id = 
match.group(1)\r\n try:\r\n course_key = CourseKey.from_string(course_id)\r\n except InvalidKeyError:\r\n course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n cdir = '{0}/{1}'.format(GIT_REPO_DIR, course_key.course)\r\n log.debug('Studio course dir = {0}'.format(cdir))\r\n\r\n if os.path.exists(cdir) and not os.path.islink(cdir):\r\n log.debug(' -> exists, but is not symlink')\r\n log.debug(subprocess.check_output(['ls', '-l', ],\r\n cwd=os.path.abspath(cdir)))\r\n try:\r\n os.rmdir(os.path.abspath(cdir))\r\n except OSError:\r\n log.exception('Failed to remove course directory')\r\n\r\n if not os.path.exists(cdir):\r\n log.debug(' -> creating symlink between {0} and {1}'.format(rdirp, cdir))\r\n try:\r\n os.symlink(os.path.abspath(rdirp), os.path.abspath(cdir))\r\n except OSError:\r\n log.exception('Unable to create course symlink')\r\n log.debug(subprocess.check_output(['ls', '-l', ],\r\n cwd=os.path.abspath(cdir)))\r\n\r\n # store import-command-run output in mongo\r\n mongouri = 'mongodb://{user}:{password}@{host}/{db}'.format(**mongo_db)\r\n\r\n try:\r\n if mongo_db['user'] and mongo_db['password']:\r\n mdb = mongoengine.connect(mongo_db['db'], host=mongouri)\r\n else:\r\n mdb = mongoengine.connect(mongo_db['db'], host=mongo_db['host'])\r\n except mongoengine.connection.ConnectionError:\r\n log.exception('Unable to connect to mongodb to save log, please '\r\n 'check MONGODB_LOG settings')\r\n cil = CourseImportLog(\r\n course_id=course_key,\r\n location=location,\r\n repo_dir=rdir,\r\n created=timezone.now(),\r\n import_log=ret_import,\r\n git_log=ret_git,\r\n )\r\n cil.save()\r\n\r\n log.debug('saved CourseImportLog for {0}'.format(cil.course_id))\r\n mdb.disconnect()",
"def git_push(c):\n c.run(\"git submodule foreach git push \")",
"def git_project(soup, github_user, github_pass, github_repo, github_name):\n giturl = 'https://{user}:{password}@github.com/{user}/{repo}.git'.format(\n user=github_user, password=github_pass, repo=github_repo\n )\n oldcwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n gitdir = os.path.join(tmpdir, github_repo)\n cmd = 'git clone {} {}'.format(shlex.quote(giturl), shlex.quote(gitdir))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(gitdir)\n rhinoscrape(soup, github_user, github_name)\n cmd = 'git add .'\n subprocess.run(shlex.split(cmd), check=False)\n msg = 'Project committed by Rhino Repo'\n cmd = 'git commit -m {}'.format(shlex.quote(msg))\n subprocess.run(shlex.split(cmd), check=False)\n cmd = 'git push {}'.format(shlex.quote(giturl))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(oldcwd)\n shutil.rmtree(tmpdir, ignore_errors=True)",
"def call_git_push():\n print(\"This will commit and push the git repo\")\n today = datetime.datetime.today()\n call([\"git\", \"add\", \".\"])\n call([\"git\", \"commit\", \"-m\", \"Updated notes. {:%Y-%m-%d %H:%M:%S}\".format(today)])\n call([\"git\", \"push\", \"origin\", \"master\"])",
"def push_git(store, path):\n storedir, _ = os.path.split(path)\n cmd = [\"git\", \"add\", \".\"]\n subprocess.check_call(cmd, cwd=storedir)\n cmd = [\"git\", \"commit\", \"-m\", \"regolith auto-store commit\"]\n try:\n subprocess.check_call(cmd, cwd=storedir)\n except subprocess.CalledProcessError:\n warn(\"Could not git commit to \" + storedir, RuntimeWarning)\n return\n cmd = [\"git\", \"push\"]\n try:\n subprocess.check_call(cmd, cwd=storedir)\n except subprocess.CalledProcessError:\n warn(\"Could not git push from \" + storedir, RuntimeWarning)\n return",
"def clone_github_repo(self):\n repository_local_destination = os.path.join(MODULES_PATH, 'github', self.username, self.repository_name)\n if not os.path.exists(repository_local_destination):\n Repo.clone_from(self.repo_url, repository_local_destination, branch='master')\n init_filename = os.path.join(repository_local_destination, '__init__.py')\n open(init_filename, 'a').close()",
"def git_config_setup():\n\n local('git config user.email $GIT_EMAIL')\n local('git config user.name $GIT_NAME')\n\n local(\n 'git remote set-url --push origin '\n 'https://[email protected]/$TRAVIS_REPO_SLUG.git'\n )",
"def push_sources():\n ensure_src_dir()\n push_rev = getattr(env, 'push_rev', None)\n if push_rev is None:\n push_rev = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n local(\"git tag -a {0} -m \\\"Tagged for release\\\"\".format(push_rev))\n local(\"git push origin master --tags\")\n\n with cd(SRC_DIR):\n run(\"git pull origin master\")\n run(\"git fetch -t\")\n run(\"git checkout {0}\".format(push_rev))"
] | [
"0.74367875",
"0.7217327",
"0.7001284",
"0.68715334",
"0.6737078",
"0.6531534",
"0.65219814",
"0.6468138",
"0.6464436",
"0.6363024",
"0.632618",
"0.62762666",
"0.6264901",
"0.62638634",
"0.6229483",
"0.62241775",
"0.61916083",
"0.6184975",
"0.6179584",
"0.61400473",
"0.6078943",
"0.6049489",
"0.6038741",
"0.60308397",
"0.6022022",
"0.59970856",
"0.5950285",
"0.5944758",
"0.59374416",
"0.5933768"
] | 0.81499577 | 0 |
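The add_prod_repo_as_origin_and_push document in the row above writes the [remote "origin"] section by echoing raw lines into .git/config. The git remote add command produces the same configuration in one step; the local-only sketch below is a hedged equivalent, where env.hosts[0] and the webapps/git/repos path come from the row and the function name and -u flag are illustrative assumptions.

from fabric.api import env, local

def add_origin_and_push_sketch(git_repo_name):
    # Sketch only: 'git remote add' writes the same [remote "origin"]
    # stanza that the row above appends to .git/config by hand.
    remote_url = '%s:webapps/git/repos/%s.git' % (env.hosts[0], git_repo_name)
    local('git remote add origin %s' % remote_url)
    local('git push -u origin master')
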
Updates the apache httpd.conf file to point to the new project instead of the default 'myproject'. This is called as part of clone_into_project, or you can call | def update_conf_file():
filepath = remote_dir + "/apache2/conf/httpd.conf"
fabric.contrib.files.sed(filepath, 'myproject', project_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_project():\n _require_environment()\n\n # Grants write rights on log dir for the admin group\n log_dir = '%s/log' % _interpolate(VIRTUALENV_DIR)\n if files.exists(log_dir):\n sudo('chmod -R g+w %s' % log_dir)\n\n # Updates from git, issues Django syncdb, South migrate, Collecstatic and resets Apache\n branch = env.project.get('git_branch', 'master')\n with prefix(_django_prefix()):\n with cd(_django_project_dir()):\n with settings(hide('warnings'), warn_only=True):\n run('git fetch origin %s:%s' % (branch, branch))\n run('git checkout %s' % branch)\n with settings(hide('warnings'), warn_only=True):\n run('git pull origin %s' % branch)\n run('django-admin.py syncdb --noinput')\n run('django-admin.py migrate')\n run('touch config/wsgi*')\n run('django-admin.py collectstatic --noinput')",
"def update_webserver_config():\n require('ws_config_path', provided_by=[prod])\n apache_sa = '/etc/apache2/sites-available/'\n apache_se = '/etc/apache2/sites-enabled/'\n nginx_sa = '/etc/nginx/sites-available/'\n nginx_se = '/etc/nginx/sites-enabled/'\n\n sudo('rm %s%s' % (apache_sa, env.project_name))\n sudo('rm %s%s' % (apache_se, env.project_name))\n\n sudo('rm %s%s' % (nginx_sa, env.project_name))\n sudo('rm %s%s' % (nginx_se, env.project_name))\n\n put('%sapache2/sites-available/*' % (env.ws_config_path), apache_sa, use_sudo=True)\n put('%snginx/sites-available/*' % (env.ws_config_path), nginx_sa, use_sudo=True)\n\n sudo('ln -s %s%s %s' % (apache_sa, env.project_name, apache_se))\n sudo('ln -s %s%s %s' % (nginx_sa, env.project_name, nginx_se))\n restart_webservers()",
"def touch_project():\n remote('touch config/wsgi*')",
"def config_apache():\n with lcd(env.projectroot):\n with cd(\"/etc/apache2\"):\n put(\"manage/sysconf/%(target)s/etc/apache2/sites-available/lagrummet\" % env, \"sites-available\",\n use_sudo=True)\n try:\n sudo(\"ln -s ../sites-available/lagrummet sites-enabled/lagrummet\")\n except:\n print \"Ignored failed to create symbolic link!\"",
"def configure_project():\n pass",
"def install_apache_conf():\n sudo('cp -T %(repo_path)s/apache/%(settings)s/apache %(apache_config_path)s' % env)",
"def configure_httpd_wsgi_conf(self):\n raise NotImplementedError()",
"def clone_into_project(git_repo_name):\n repo_dir = git_dir + \"/%s.git\" % git_repo_name\n with cd(remote_dir):\n run('rm -rf myproject')\n run(\"git clone %s %s\" % (repo_dir, project_name))\n run(\"echo 'MY_ENV=\\\"prod\\\"' > %s/%s/site_settings.py\" % (project_name,project_name))\n update_conf_file()",
"def config_apache_command(server_name):\n if not server_name:\n server_name = socket.getfqdn()\n print(\"\"\"# Virtual Host config for BetterWeather WSGI Server\n# Required modules: mod_wsgi\n<VirtualHost *:80>\n ServerName \"\"\", end='')\n print(server_name, end='')\n print(\"\"\"\n WSGIDaemonProcess betterweather threads=15\n WSGIScriptAlias / \"\"\", end='')\n print(app.root_path + '/wsgi.py', end='')\n print(\"\"\"\n <Directory \"\"\", end='')\n print(os.path.dirname(os.path.dirname(os.path.abspath(__file__))).__str__() + '>', end='')\n print(\"\"\"\n WSGIProcessGroup betterweather\n WSGIApplicationGroup %{GLOBAL}\n \n <IfVersion < 2.4>\n Allow from all\n Order allow,deny\n </IfVersion>\n \n <IfVersion >= 2.4>\n Require all granted\n </IfVersion>\n \n <IfModule mod_headers.c>\n Header set Cache-Control \"no-cache, no-store, must-revalidate\"\n Header set Pragma \"no-cache\"\n Header set Expires 0\n </IfModule>\n </Directory>\n</VirtualHost>\"\"\")",
"def deploy_project(self, svn_repo, co_dir, path_to_static, database_name, apache_conf_file_path, project_port, svn_username='', svn_password='', db_username='', db_password='', sql_paths_list=[], changes_dict={}):\n self.checkout(svn_repo, co_dir, svn_username, svn_password)\n self.change_static_to_pro(path_to_static)\n self.create_db(database_name, sql_paths_list, db_username, db_password)\n self.change_settings(co_dir, changes_dict)\n self.create_vhost(apache_conf_file_path, project_dir=co_dir, project_port=project_port)\n self.server_restart()",
"def update_website_configuration():\n put('config/supervisor_website.conf', \n '/etc/supervisor/conf.d/gunicorn.conf', \n use_sudo=True)\n sudo('supervisorctl update')\n sudo('supervisorctl reload')",
"def install(self, project, acl=None):\n self.config.options['project_name'] = project.name\n self.config.options['show_right_bar'] = True\n super(ForgeWikiApp, self).install(project, acl=acl)\n\n root_page_name = self.default_root_page_name\n Globals(app_config_id=c.app.config._id, root=root_page_name)\n self.upsert_root(root_page_name)",
"def apache():\n\n get_details()\n\n context = {\n \"site_name\": env.site_name,\n \"paths\": env.paths,\n \"project_name\": env.project_name,\n }\n\n apache_path = '/etc/httpd/sites-available/'\n\n if exists(apache_path):\n with cd(apache_path):\n if exists(env.site_name):\n print \"apache site configuration already exists!\"\n return\n else:\n upload_template(\"apache_conf.txt\", \n env.site_name,\n context,\n use_jinja=True,\n template_dir=JINJA_TEMPLATE_PATH,\n use_sudo=True)\n print \"Created apache site configuration file. Don't forget to enable it!\"\n return\n else:\n print \"It doesn't seem like you have apache installed.\"\n return",
"def update_project(arn=None, name=None, defaultJobTimeoutMinutes=None):\n pass",
"def _change_project(self):\n project_key = utils.prompt_string(\n 'You are currently managing Google Cloud Project {!r}.\\n'\n 'This project is currently saved as {!r}.\\n'\n 'All of the currently configured projects include: {}.\\n'\n 'Which project would you like to switch to?'.format(\n self._config.project, self._config.key,\n ', '.join(common.get_available_configs(self._config.path))))\n return _Manager.new(\n self._config.path, self._prefer_gcs, project_key=project_key,\n version=self._version)",
"def edit_files(project_name, app_name):\n SETTINGS = f'{project_name}/backend/backend/settings.py'\n PACKAGE_JSON = f'{project_name}/frontend/package.json'\n\n\n c1 = f\"\\n \\t'corsheaders', \\n\\t'rest_framework', \\n\\t'{app_name}',\\n\"\n add_to_line(SETTINGS, 32, c1 )\n\n c2 = f\"\\n \\t'corsheaders.middleware.CorsMidleware',\\n\"\n add_to_line(SETTINGS, 44, c2 )\n \n with open(SETTINGS, 'a+') as f:\n f.write(\"\\nCORS_ORIGIN_WHITELIST = ['localhost:3000/']\")\n\n c3 = '\\n\\t\"proxy\": \"http://localhost:8000\",\\n'\n add_to_line(PACKAGE_JSON, 3, c3)",
"def _prep_client_dist_for_project(project_env, project_root_dir):\n #need to make the project's index.html the index.html that tomcat will find\n clientdir = os.path.join(project_root_dir, 'client')\n index_target_fn = os.path.join(clientdir, 'index.html')\n\n index_src_base = 'index.' + project_env.get_project_name() + '.html'\n index_src_fn = os.path.join(clientdir, index_src_base)\n cmd = 'cp ' + index_src_fn + ' ' + index_target_fn\n cr = container_users.make_host_user_command_runner()\n result = cr.run(cmd)\n return result.get_exit_code()",
"def set_config_path(self, new_config_path):\n oldpath = self.get_config_path()\n cdir, cfile = os.path.split(new_config_path)\n \n if not cdir.startswith('/'):\n cdit='/'+cdir\n if not cfile:\n cfile = 'site.yaml'\n\n self.dropbox_base_dir = cdir\n self.dropbox_site_yaml = cfile\n newpath = self.get_config_path()\n if newpath !=oldpath:\n return oldpath",
"def update_project():\n with cd(env.code_dir):\n with _virtualenv():\n run('git pull origin master')\n install_requirements()\n perform_migration()\n collect_static()",
"def createFakeSphinxProject(self):\n self.sourceDir.child(\"conf.py\").setContent(self.confContent.encode())\n self.sourceDir.child(\"index.rst\").setContent(self.indexContent.encode())",
"def create_vhost(self, apache_conf_file_path, project_dir, project_port, queue=None):\n #append to the one n only httpd.conf....use apacheconfparser before to know the settings?\n import os\n #Apache conf file\n if self.SHELL == 'Local':\n if os.path.exists(apache_conf_file_path):\n httpd = apache_conf_file_path\n else:\n raise Exception('Apache Config file httpd.conf not found!')\n elif self.SHELL == 'Remote':\n get(apache_conf_file_path, 'tmp')\n httpd = os.path.join(os.path.dirname(__name__), 'tmp', 'httpd.conf')\n else:\n raise Exception('Shell type was not given properly!')\n\n #parse apache conf info here?\n import ApacheParser as AP\n from ApacheSections import parse_section\n\n #create config object and parse the file\n c = AP.ApacheConfig('httpd')\n confs = c.parse_file(httpd)\n configs = parse_section(confs.children)\n\n ports = [each[1][0] for each in configs if each[0]=='Listen']\n unneeded = [each[0] for each in configs if each[0] in ('WSGIPythonPath', 'WSGIRestrictStdin', 'WSGIRestrictStdout')]\n\n print apache_conf_file_path, project_dir, project_port\n #create the vhost conf\n try:\n project_dir = project_dir.rstrip('/')\n project_dir = project_dir.rstrip('\\\\')\n\n if ('/' in project_dir and '/' != os.path.sep) or ('\\\\' in project_dir and '\\\\' != os.path.sep):\n os.path.sep = os.path.altsep\n\n PATH_TO_PROJECT_ROOT = project_dir\n PORT = project_port\n PATH_TO_PUBLIC_DIR = os.path.sep.join([project_dir, 'public'])\n PATH_TO_STATIC = os.path.sep.join([PATH_TO_PUBLIC_DIR, 'static'])\n PATH_TO_DIVINEBA_WSGI = os.path.sep.join([PATH_TO_PUBLIC_DIR, 'divineba.wsgi'])\n\n #The PrismERP FurinaPy vhost config template\n vhost = os.path.join(os.path.dirname(__name__), 'tmp', 'PyPrismVhost.conf')\n v = open(vhost, 'r')\n temp = os.path.join(os.path.dirname(__name__), 'tmp', 'new.conf')\n n = open(temp, 'w')\n\n for each in v.readlines():\n if '{PATH_TO_PROJECT_ROOT}' in each:\n n.write(each.replace('{PATH_TO_PROJECT_ROOT}', PATH_TO_PROJECT_ROOT))\n elif '{PORT}' in each:\n if PORT in ports and 'Listen' in each:\n n.write('#Port already open...')\n n.write('\\n')\n # n.write(each.replace('{PORT}', PORT))\n else:\n n.write(each.replace('{PORT}', PORT))\n elif '{PATH_TO_STATIC}' in each:\n n.write(each.replace('{PATH_TO_STATIC}', PATH_TO_STATIC))\n elif '{PATH_TO_DIVINEBA_WSGI}' in each:\n n.write(each.replace('{PATH_TO_DIVINEBA_WSGI}', PATH_TO_DIVINEBA_WSGI))\n elif '{PATH_TO_PUBLIC_DIR}' in each:\n n.write(each.replace('{PATH_TO_PUBLIC_DIR}', PATH_TO_PUBLIC_DIR))\n else:\n try:\n if each.split()[0] in unneeded:\n continue\n else:\n n.write(each)\n except:\n n.write(each)\n\n v.close()\n n.close()\n\n #append at end of the httpd.conf....local = original....remote = delete original and put\n\n conf = open(httpd, 'a')\n n = open(temp, 'r')\n conf.write('\\n\\n')\n for each in n.readlines():\n conf.write(each)\n\n n.close()\n conf.close()\n\n if self.SHELL == 'Remote':\n cmd = 'rm -f ' + apache_conf_file_path\n self.run_task(cmd)\n put(httpd, apache_conf_file_path, mode=0755)\n os.remove(httpd)\n\n os.remove(temp)\n\n except Exception as e:\n print str(e)\n raise Exception(e)\n\n cmd = 'create virtualhost'\n out = 'Config Created Successfully!'\n res = [cmd, out.split('\\n')]\n if queue:\n queue.put(res)",
"def configure_httpd_service_ipa_conf(self):\n raise NotImplementedError()",
"def generate_wsgi_conf():\n _require_environment()\n\n # Dictionary for interpolating template\n variables = {\n 'project': env.project['project'],\n 'settings': env.project['settings'],\n 'site_packages': SITE_PACKAGES_DIR % _get_python_version(),\n }\n _generate_conf('wsgi.py', variables)",
"def redefine_airflow_workspaces(self, workspaces):\n dst = _app_config_file()\n new_config = (\n pyhocon.ConfigFactory.parse_string(\n \"aiscalator.airflow.setup.workspace_paths = [\\n\" +\n \"\\n\".join([ws for ws in workspaces]) +\n \"]\"\n )\n ).with_fallback(_app_config_file(), resolve=False)\n with open(dst, \"w\") as output:\n output.write(\n pyhocon.converter.HOCONConverter.to_hocon(new_config)\n )\n self._app_conf = new_config\n return new_config",
"def update_submodules(options, project_directory=None):\n pass",
"def update_project(self, name):\n self._log.info(\"Updating project: {}\".format(name))\n if name in self.projects:\n pass\n else:\n self.add_project(name)",
"def test_replace_project(self):\n pass",
"def _bootstrap():\r\n import os\r\n import sys\r\n \r\n pwd = os.path.dirname(__file__)\r\n \r\n (parent_directory, project_name) = os.path.split(pwd)\r\n \r\n # protect template itself from being bootstrapped\r\n if project_name == 'django_project_template':\r\n abort('bootstrap should not be run on project template!')\r\n\r\n env.project_name = project_name\r\n env.project_domain = env.project_name.split('.')[0].replace('_','-')\r\n \r\n def replace_in_files(path, find, replace):\r\n \r\n import fileinput\r\n \r\n if os.path.isfile(path):\r\n for line in fileinput.input(path, inplace=1):\r\n if find in line:\r\n line = line.replace(find, replace)\r\n sys.stdout.write(line)\r\n \r\n if os.path.isdir(path):\r\n # do not replace in virtual env\r\n if os.path.split(path)[1] == env.virtualenv_dir:\r\n return\r\n for f in os.listdir(path):\r\n replace_in_files(os.path.join(path, f), find, replace)\r\n\r\n # 'escape' placeholders here to protect them from being replaced\r\n replace_in_files(pwd, '@PROJECT_NAME' + '@', env.project_name)\r\n replace_in_files(pwd, '@PROJECT_DOMAIN' + '@', env.project_domain)",
"def sync_config():\n rsync_project(remote_dir='/apps/sharejs-rethinkdb-example/config/', local_dir='./config/')",
"def wsgi_conf():\n\n get_details()\n\n site_dir = posixpath.join(env.paths[\"sites\"], env.site_name)\n if not exists(site_dir):\n run(\"mkdir -p %s\" % site_dir)\n\n filename = \"%s_wsgi.py\" % env.project_name\n\n context = {\n \"site_name\": env.site_name,\n \"project_name\": env.project_name,\n \"python_version\": env.python_version,\n \"paths\": env.paths,\n }\n\n # Set up the wsgi dir.\n if env.app_server=='apache':\n wsgi_dir = posixpath.join(site_dir, \"apache\")\n else:\n wsgi_dir = posixpath.join(site_dir, \"src/src-%s\" % env.project_name)\n\n with cd(wsgi_dir):\n if not exists(filename):\n print \"Template path: %s\" % JINJA_TEMPLATE_PATH\n upload_template(\"wsgi_conf_%s.txt\" % env.app_server,\n filename,\n context,\n use_jinja=True,\n template_dir=JINJA_TEMPLATE_PATH)\n else:\n\t\t\t#TODO: If it exists, append to it\n print \"This file already exists.\"\n return\n run(\"chmod 654 %s\" % filename)"
] | [
"0.6472111",
"0.6156051",
"0.6123365",
"0.5980424",
"0.5979637",
"0.5878512",
"0.57706547",
"0.56331587",
"0.55369204",
"0.5507125",
"0.54162174",
"0.5402613",
"0.53572744",
"0.52999634",
"0.5277135",
"0.52048105",
"0.5179632",
"0.5174573",
"0.5156144",
"0.514356",
"0.5130455",
"0.5111259",
"0.50954354",
"0.50872135",
"0.5075139",
"0.50248396",
"0.5013426",
"0.49799138",
"0.49690223",
"0.49642625"
] | 0.72978866 | 0 |
Clones the git repo into the new webapp, deleting the default myproject project and updating the config file to point to the new project. Also adds a site_settings.py file to the project/project folder. | def clone_into_project(git_repo_name):
repo_dir = git_dir + "/%s.git" % git_repo_name
with cd(remote_dir):
run('rm -rf myproject')
run("git clone %s %s" % (repo_dir, project_name))
run("echo 'MY_ENV=\"prod\"' > %s/%s/site_settings.py" % (project_name,project_name))
update_conf_file() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_project():\n _require_environment()\n\n # Grants write rights on log dir for the admin group\n log_dir = '%s/log' % _interpolate(VIRTUALENV_DIR)\n if files.exists(log_dir):\n sudo('chmod -R g+w %s' % log_dir)\n\n # Updates from git, issues Django syncdb, South migrate, Collecstatic and resets Apache\n branch = env.project.get('git_branch', 'master')\n with prefix(_django_prefix()):\n with cd(_django_project_dir()):\n with settings(hide('warnings'), warn_only=True):\n run('git fetch origin %s:%s' % (branch, branch))\n run('git checkout %s' % branch)\n with settings(hide('warnings'), warn_only=True):\n run('git pull origin %s' % branch)\n run('django-admin.py syncdb --noinput')\n run('django-admin.py migrate')\n run('touch config/wsgi*')\n run('django-admin.py collectstatic --noinput')",
"def newproject():\n log('Criando novo projeto', yellow)\n log('Cria a conta no bitbucket com o nome do projeto vázio que o script se encarregará do resto', red)\n\n conta = raw_input('Digite o nome do projeto: ')\n\n local('echo \"clonando projeto %s\"' % bitbucket_repository)\n local('git clone {0} {1}{2}'.format(bitbucket_repository, folder_project_local, conta))\n local('cd {0}{1}'.format(folder_project_local, conta))\n local('mkvirtualenv {0}'.format(conta))\n local('setvirtualenvproject')\n local('pip install -r requirements.txt')\n local('rm -rf {0}{1}/.git'.format(folder_project_local, conta))\n local('rm -rf README.md')\n local('git init')\n local('git remote add origin [email protected]:{0}/{1}.git'.format(bitbucket_user, conta))",
"def touch_project():\n remote('touch config/wsgi*')",
"def createproject(project_name):\n app_clone_script = 'git clone https://github.com/jaarce/falcon-bp.git %s' % project_name\n subprocess.call(app_clone_script.split(' '))",
"def djangular_boilerplate():\n git = Repo()\n path = git.absolute_path\n package_name = git.package_name\n clone = git.command\n if not os.path.exists(path):\n os.system(clone)\n rename = prompt(prompt_rename)\n if rename.get(\"rename\", True):\n os.rename(package_name, input(\"Rename directory: \"))\n else:\n pass\n elif os.path.exists(path):\n ow = prompt(prompt_overwrite)\n if ow.get(\"overwrite\", True):\n shutil.rmtree(package_name)\n os.system(clone)\n rename = prompt(prompt_rename)\n if rename.get(\"rename\", True):\n os.rename(package_name, input(\"Rename directory: \"))\n else:\n exit(\"You have chosen not to overwrite. Session ended.\")",
"def clone():\n require('PROJECT_NAME')\n require('PROJECT_REPO')\n require('MERCURIAL_BIN')\n\n # Create the \"apps\" directory if it does not exist.\n run('mkdir -p {}'.format(utils.home('apps')))\n\n if files.exists(utils.home('apps', env.PROJECT_NAME)):\n delete()\n\n with cd(utils.home('apps')):\n run('{0} clone {1} {2}'.format(env.MERCURIAL_BIN,\n env.PROJECT_REPO,\n env.PROJECT_NAME))",
"def deploy_project(self, svn_repo, co_dir, path_to_static, database_name, apache_conf_file_path, project_port, svn_username='', svn_password='', db_username='', db_password='', sql_paths_list=[], changes_dict={}):\n self.checkout(svn_repo, co_dir, svn_username, svn_password)\n self.change_static_to_pro(path_to_static)\n self.create_db(database_name, sql_paths_list, db_username, db_password)\n self.change_settings(co_dir, changes_dict)\n self.create_vhost(apache_conf_file_path, project_dir=co_dir, project_port=project_port)\n self.server_restart()",
"def flush_repo():\n server = get_server()\n run(\"rm -rf %(project_name)s\" % env)\n git.clone()\n server.setup()",
"def update_site():\n site_path = os.path.join(PROJECTS_ROOT, CURRENT_SITE)\n docs_path = os.path.join(site_path, 'doc_src')\n with cd(site_path):\n run('git pull --all')\n run('workon djangopatterns && pip install -r %s/setup/requirements.txt' % site_path)\n run('workon djangopatterns && %s/manage.py syncdb' % site_path)\n # run('workon djangopatterns && %s/manage.py migrate' % site_path)\n run('workon djangopatterns && %s/manage.py collectstatic --noinput' % site_path)\n run('workon djangopatterns && %s/manage.py compress' % site_path)\n with cd(docs_path):\n run('git pull --all')\n # run('workon djangopatterns && cd doc_src && make clean')\n # run('workon djangopatterns && cd doc_src && make json')\n reload_site()",
"def _clone_gitrepo():\n # Puts git repo in ~/.ssh/config to avoid interaction due to missing known_hosts\n git_server = urllib.splituser(urllib.splittype(env.project['git_repo'])[0])[1]\n if not files.exists('~/.ssh/config') or not files.contains('~/.ssh/config', git_server):\n files.append('~/.ssh/config', ['host %s' % git_server, ' StrictHostKeyChecking no'])\n\n branch = env.project.get('git_branch', 'master')\n if files.exists(_interpolate(DJANGO_PROJECT_DIR)):\n print _interpolate('project %(project)s already exists, updating')\n remote('git pull origin %s' % branch)\n else:\n with cd(_interpolate(VIRTUALENV_DIR)):\n run(_interpolate('git clone %(git_repo)s %(project)s'))\n if branch != 'master':\n remote('git fetch origin %s:%s' % (branch, branch))\n remote('git checkout %s' % branch)",
"def deploy():\n setup()\n builddir = get_build_dir()\n if sys.platform == 'win32':\n # Support cygwin rsync on windows:\n build_path = cygpath(slashed(builddir))\n else:\n build_path = slashed(builddir)\n rsync_project(env.admin_webroot, build_path, exclude=\".*\", delete=True)\n sudo(\"chmod -R 755 %(admin_webroot)s\" % env)",
"def dev_site(live_path, dev_parent, dev_name, dev_db_name='',\n base_url='', rewrite_base=''):\n with mute():\n remote = git.get_remote_url(live_path)\n dev_path = '%s/%s' % (dev_parent, dev_name)\n if exists(dev_path):\n warning = \"\"\"\nA folder already exists at your destination path.\n\nDo you wish to overwrite it?\n\"\"\"\n confirm_overwrite(warning)\n\n with mute():\n run('rm -rf %s' % dev_path)\n with cd(dev_parent):\n run('git clone %s %s' % (remote, dev_name))\n\n with cd(dev_path):\n run('git fetch')\n run('git branch')\n\n # Determinine a branching strategy\n strategy_prompt = \"\"\"\nHow would you like to create your dev site:\n1) Use an existing Git branch\n2) Create a new Git branch\n:\n\"\"\"\n strategy = prompt(strategy_prompt,\n validate=validate_branching_strategy)\n\n # Checkout an existing branch\n if strategy == '1':\n branch_prompt = \"\"\"\nWhich existing branch would you like to use for this dev site?\n\"\"\"\n # TODO - add validation\n dev_branch = prompt(branch_prompt)\n run('git checkout %s' % dev_branch)\n run('git pull origin %s' % dev_branch)\n\n # Create new branch\n if strategy == '2':\n start_branch_prompt = \"\"\"\nWhich branch should we use to start from?\n\"\"\"\n start_branch = prompt(start_branch_prompt)\n run('git checkout %s' % start_branch)\n dev_branch_prompt = \"\"\"\nWhat would like to name the new dev branch?\n\"\"\"\n dev_branch = prompt(dev_branch_prompt)\n run('git checkout -b %s' % dev_branch)\n # Look for an git origin in the live site\n\n # cd to the dev parent dir and clone the repo from origin\n\n # switch to the develop branch\n\n # git fetch\n\n # git pull origin develop\n\n # Duplicate the live mysql db as a dev db\n # Look into cross platform ways to just do the db duplication without\n # needing to write the db dump file and then do the insert\n\n # Configure the settings.php and .htaccess files for the dev site\n\n # Copy the files folder from the live site to the dev site\n # Eventually there should be a option here for doing read only sym-links\n # Or maybe some S3 thingy\n\n # drush cc all on dev\n\n # done",
"def project_clone(request, proj_id=None):\n\n if not proj_id or not request.user.is_authenticated():\n raise Http404\n\n project = get_object_or_404(Project, id=proj_id)\n\n if project.user != request.user and project.is_private:\n raise Http404\n\n project.pk = None\n project.user = request.user\n project.save()\n\n for scenario in Scenario.objects \\\n .filter(project_id=proj_id) \\\n .order_by('created_at'):\n scenario.pk = None\n scenario.project = project\n scenario.save()\n\n return redirect('/project/{0}'.format(project.id))",
"def mkweb(project_name, mode):\n\n MAIN_FOLDER = data.get_base_path(data.WEB)\n\n if mode != 'MAIN':\n MAIN_FOLDER += f'{mode}/'\n \n webproject = folders.WebProject(project_name, MAIN_FOLDER)\n\n webproject.create_project()\n click.echo(f'Project created succesfull in {webproject.project_path}')\n cli_commands.start_git(webproject.project_path)\n cli_commands.show_dir_path(webproject.project_path)\n # cli_commands.start_vscode(webproject.project_path)\n\n click.echo('Project Path copied to clipboard...')",
"def deploy_django_project(self):\n\n if self.no_files:\n return\n\n local_dir = \"{0}\".format(self.app_dir)\n app_dir = \"{0}\".format(self.app_remote_dir)\n\n if not exists(app_dir):\n mkdir(app_dir)\n\n zip_name = make_zip(local_dir, self.app_name)\n put(zip_name, self.app_remote_dir)\n\n with cd(self.app_remote_dir):\n run(\"unzip -o {0}\".format(zip_name))\n\n os.remove(zip_name)",
"def deploy():\n build()\n rsync_project(\n local_dir=os.path.abspath(env.config['destination']) + \"/\",\n remote_dir=env.remote_dir,\n delete=True,\n extra_opts='--exclude=\".DS_Store\"',\n )",
"def _create_main_project_and_root(self): \n if len(ComicSite.objects.filter(short_name=settings.MAIN_PROJECT_NAME)) == 0:\n main = ComicSite.objects.create(short_name=settings.MAIN_PROJECT_NAME,\n description=\"main project, autocreated by comicframeworkTestCase._create_inital_project()\",\n skin=\"fakeskin.css\"\n )\n \n main.save()\n \n try:\n self.root = User.objects.get(username='root')\n except ObjectDoesNotExist:\n # A user who has created a project\n root = User.objects.create_user('root',\n '[email protected]',\n 'testpassword') \n root.is_staff = True\n root.is_superuser = True\n root.save()\n \n self.root = root\n\n call_command('check_permissions')",
"def upload():\n run('mkdir -p /srv/images/'+env.project_name+'/')\n rsync_project(\n env.project_dir, './',\n exclude=(\n '.git', '.gitignore', '__pycache__', '*.pyc', '.DS_Store', 'environment.yml',\n 'fabfile.py', 'Makefile', '.idea', 'bower_components', 'node_modules',\n '.env.example', 'README.md', 'var'\n ), delete=True)",
"def git_project(soup, github_user, github_pass, github_repo, github_name):\n giturl = 'https://{user}:{password}@github.com/{user}/{repo}.git'.format(\n user=github_user, password=github_pass, repo=github_repo\n )\n oldcwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n gitdir = os.path.join(tmpdir, github_repo)\n cmd = 'git clone {} {}'.format(shlex.quote(giturl), shlex.quote(gitdir))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(gitdir)\n rhinoscrape(soup, github_user, github_name)\n cmd = 'git add .'\n subprocess.run(shlex.split(cmd), check=False)\n msg = 'Project committed by Rhino Repo'\n cmd = 'git commit -m {}'.format(shlex.quote(msg))\n subprocess.run(shlex.split(cmd), check=False)\n cmd = 'git push {}'.format(shlex.quote(giturl))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(oldcwd)\n shutil.rmtree(tmpdir, ignore_errors=True)",
"def project_refresh(project_name):\n if not db_find_project(project_name):\n abort(404)\n analyser.add_repos(current_user.username, [project_name])\n return redirect(url_for('main.admin_manage'))",
"def create_project_form(request):\n \n # First we check to see the site has been set up, otherwise we throw the user to the config screen\n if not bool(os.path.isdir(Project.project_options.repository_directory)):\n request.user.message_set.create(message=\"The site has not been set up yet. Log in as your admin user and create your settings!\")\n return HttpResponseRedirect(reverse('site-config'))\n \n if request.is_ajax():\n template ='project/project_create_ajax.html'\n else:\n template = 'project/project_create.html'\n \n # Lets check if this form is being shown or processed\n if request.method == \"POST\":\n # We're processing the form, so lets create the instance\n form = NewProjectForm(request.POST, auto_id=False)\n # The form is correct, lets proceeed.\n if form.is_valid():\n # Lets check the user has conformed to a sites T&C's\n if form.cleaned_data['t_and_c'] == True:\n # Create the project instance\n project = Project(\n project_id = string.lower(form.cleaned_data['project_id']),\n project_name = form.cleaned_data['project_name'],\n short_description = form.cleaned_data['short_description'],\n full_description = form.cleaned_data['full_description'],\n project_manager = request.user,\n hgweb_style = form.cleaned_data.get('hgweb_style', ''),\n project_icon = form.cleaned_data['project_icon'],\n )\n # Ok, we're all good, so lets save.\n project.save()\n # We'll tell the user that there site has been saved\n request.user.message_set.create(message=_(\"The project \" + form.cleaned_data['project_name'] + \" has been created\"))\n if request.is_ajax():\n return HttpResponse(\n \"{'success': 'true', 'url': '\" + reverse('project-detail', kwargs={'slug':form.cleaned_data['project_id']}) + \"', 'project': \" + json_encode(project) + \"}\"\n , mimetype=\"application/json\")\n else:\n return HttpResponseRedirect(reverse('project-detail', kwargs={'slug': form.cleaned_data['project_id']}))\n else:\n return render_to_response(template,\n {\n 'form':form.as_table(),\n }, context_instance=RequestContext(request)\n )\n #return HttpResponseRedirect(reverse('project-detail', kwargs={'slug':form.cleaned_data['name_short']}))\n else:\n form = NewProjectForm()\n is_auth = request.user.is_authenticated()\n \n return render_to_response(template,\n {\n 'form':form.as_table(),\n 'is_auth': is_auth\n }, context_instance=RequestContext(request)\n )",
"def edit_files(project_name, app_name):\n SETTINGS = f'{project_name}/backend/backend/settings.py'\n PACKAGE_JSON = f'{project_name}/frontend/package.json'\n\n\n c1 = f\"\\n \\t'corsheaders', \\n\\t'rest_framework', \\n\\t'{app_name}',\\n\"\n add_to_line(SETTINGS, 32, c1 )\n\n c2 = f\"\\n \\t'corsheaders.middleware.CorsMidleware',\\n\"\n add_to_line(SETTINGS, 44, c2 )\n \n with open(SETTINGS, 'a+') as f:\n f.write(\"\\nCORS_ORIGIN_WHITELIST = ['localhost:3000/']\")\n\n c3 = '\\n\\t\"proxy\": \"http://localhost:8000\",\\n'\n add_to_line(PACKAGE_JSON, 3, c3)",
"def update_project():\n with cd(env.code_dir):\n with _virtualenv():\n run('git pull origin master')\n install_requirements()\n perform_migration()\n collect_static()",
"def upload():\n env.user = 'webcontent'\n rsync_project(DOCDIR, 'doc/_build/html/', delete=True)",
"def deploy():\n remote_dir = os.path.abspath(os.path.join(REMOTE_BASE_DIR, REPO_NAME))\n \n with settings(warn_only=True):\n if run(\"test -d %s\" % (remote_dir)).failed:\n puts(red(\"[Repo %s does not exist on remote at: %s]\" % (REPO_NAME, remote_dir)))\n with cd(REMOTE_BASE_DIR):\n run(\"git clone %s %s\" % (REPO_URL, REPO_NAME))\n\n puts(yellow(\"[Write logs]\"))\n run(\"echo '-----------------------------' > %s\" % REMOTE_ERR_FILE)\n run(\"echo `date` >> %s\" % REMOTE_ERR_FILE)\n run(\"echo '-----------------------------' >> %s\" % REMOTE_ERR_FILE)\n run(\"echo '-----------------------------' > %s\" % REMOTE_LOG_FILE)\n run(\"echo `date` >> %s\" % REMOTE_LOG_FILE)\n run(\"echo '-----------------------------' >> %s\" % REMOTE_LOG_FILE)\n\n puts(yellow(\"[Update repo: %s]\" % REPO_NAME))\n with cd(remote_dir):\n run(\"git pull origin master >> %s 2>> %s\" %\n (REMOTE_LOG_FILE, REMOTE_ERR_FILE))\n\n # reminder new static files\n puts(yellow('Do not forget to run collect staticfiles on DJANGO server.'))",
"def deploy(app_to_migrate=\"\"):\n mysqldump() # backup database before making changes\n with cd(code_dir):\n run(\"git pull\")\n run(python_add_str + \"python manage.py migrate %s\" % app_to_migrate)\n run(python_add_str + \"python manage.py createinitialrevisions\") # only if using reversion\n run(python_add_str + \"python manage.py collectstatic --noinput\")\n run(\"../apache2/bin/restart\")",
"def startproject(self):\n\n path = os.path.join(self.path, self.project_name)\n if os.path.exists(path):\n raise exceptions.ProjectDirectoryAlreadyExistsError(self.project_name)\n else:\n os.makedirs(path)\n\n context = {\n 'project_name': self.project_name,\n 'default_region': self.region,\n 'random': hashlib.sha1(six.text_type(random.random()).encode('utf-8')).hexdigest()[:8]\n }\n\n self._clone_defaults(\n os.path.join(self.root, 'defaults', 'project'),\n path,\n context\n )",
"def push(ref='origin/master'):\n from fabric.api import local, run, cd\n from fabric.contrib.project import rsync_project\n local('pelican -s %s -d' % env.config_file)\n rsync_project(\n remote_dir=env.host_site_path,\n local_dir='output/',\n delete=True\n )\n if env.host_type != 'production':\n run(\"chown -R %(user)s:%(host_webserver_user)s %(host_site_path)s \"\n \"&& chmod -R 02750 %(host_site_path)s\" % env)",
"def prepare_deploy():\n from fabdeploy.django import test as django_test\n django_test()\n git.add_commit_pull()\n git.push()",
"def deploy():\n _git_pull()\n _migrate()\n _collect_static_files()\n _restart_webserver()"
] | [
"0.6597053",
"0.6376039",
"0.6335648",
"0.61842006",
"0.6173729",
"0.6145058",
"0.6005152",
"0.59648216",
"0.595788",
"0.59347206",
"0.5933034",
"0.5923805",
"0.59103745",
"0.58390486",
"0.5812732",
"0.57964295",
"0.572337",
"0.56827134",
"0.5680887",
"0.5663389",
"0.56439286",
"0.5631816",
"0.5591635",
"0.5576353",
"0.55736345",
"0.5573325",
"0.55700004",
"0.5548547",
"0.5543566",
"0.55360085"
] | 0.7331329 | 0 |
Adds the "/static" and "/media" directories to the static webapp if needed, and deletes the default index.html. Also adds a project/project/static directory if there isn't one. | def add_dirs_to_static(static_webapp_name):
static_dir = '$HOME/webapps/%s' % static_webapp_name
with settings(warn_only=True):
with cd(static_dir):
run("mkdir static && mkdir media")
run("rm index.html")
run("touch index.html")
with cd(code_dir):
run("mkdir %s/static" % project_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def include_static_files(app):\n file_path = sphinx_prolog.get_static_path(STATIC_FILE)\n if file_path not in app.config.html_static_path:\n app.config.html_static_path.append(file_path)",
"def ensure_static_exists():\n for entry in html_static_path:\n static_path = os.path.join(__repo_docs__, entry)\n if not os.path.isdir(static_path):\n os.makedirs(static_path)",
"def copy_static_resources(self):\n if not hasattr(settings, 'STATIC_ROOT'):\n raise MissingStaticRoot()\n destination = os.path.join(STORAGE_PATH, 'static')\n if os.path.exists(destination):\n shutil.rmtree(destination)\n shutil.copytree(settings.STATIC_ROOT, destination)",
"def add_static_paths(app):\n app.env.book_theme_resources_changed = False\n\n output_static_folder = Path(app.outdir) / \"_static\"\n theme_static_files = resources.contents(theme_static)\n\n if (\n app.config.html_theme_options.get(\"theme_dev_mode\", False)\n and output_static_folder.exists()\n ):\n # during development, the JS/CSS may change, if this is the case,\n # we want to remove the old files and ensure that the new files are loaded\n for path in output_static_folder.glob(\"sphinx-book-theme*\"):\n if path.name not in theme_static_files:\n app.env.book_theme_resources_changed = True\n path.unlink()\n # note sphinx treats theme css different to regular css\n # (it is specified in theme.conf), so we don't directly use app.add_css_file\n for fname in resources.contents(theme_static):\n if fname.endswith(\".css\"):\n if not (output_static_folder / fname).exists():\n (output_static_folder / fname).write_bytes(\n resources.read_binary(theme_static, fname)\n )\n app.env.book_theme_resources_changed = True\n\n # add javascript\n for fname in resources.contents(theme_static):\n if fname.endswith(\".js\"):\n app.add_js_file(fname)",
"def deploy_static(): \n from fabdeploy.django import collectstatic as django_collectstatic\n# run(\"rm -rf %(root_path)s%(project_name)s/static/*\" % env) # call again git_add_commit_pull\n django_collectstatic()",
"def serve_static_files(request, path, insecure=False, **kwargs):\n\n if not settings.DEBUG and not insecure:\n raise Http404\n normalized_path = posixpath.normpath(unquote(path)).lstrip('/')\n absolute_path = finders.find(normalized_path)\n if not absolute_path:\n if path.endswith('/') or path == '':\n raise Http404(\"Directory indexes are not allowed here.\")\n raise Http404(\"'%s' could not be found\" % path)\n document_root, path = os.path.split(absolute_path)\n return static.serve(request, path, document_root=document_root, **kwargs)",
"def index():\n return flask.send_from_directory(\"static\", \"index.html\")",
"def path_static():\n return os.path.abspath(os.path.dirname(__file__))+'/_static'",
"def static_text_files():\n return send_from_directory(\"static/\", request.path[1:])",
"def install_project_structure():\n from .project import static_base, use_static\n\n with sudo():\n info('Install application directory structure')\n\n create_app_root()\n\n if use_static():\n # Create static web paths\n static_path = os.path.join(static_base(), 'static')\n media_path = os.path.join(static_base(), 'media')\n debian.mkdir(static_path, group='www-data', mode=1775)\n debian.mkdir(media_path, group='www-data', mode=1775)",
"def serve_static(request, path, document_root):\n # Clean up given path to only allow serving files below document_root.\n path = posixpath.normpath(urllib.unquote(path))\n path = path.lstrip('/')\n newpath = ''\n for part in path.split('/'):\n if not part:\n # Strip empty path components.\n continue\n drive, part = os.path.splitdrive(part)\n head, part = os.path.split(part)\n if part in (os.curdir, os.pardir):\n # Strip '.' and '..' in path.\n continue\n newpath = os.path.join(newpath, part).replace('\\\\', '/')\n if newpath and path != newpath:\n return HttpResponseRedirect(newpath)\n fullpath = os.path.join(document_root, newpath)\n if os.path.isdir(fullpath):\n #if show_indexes:\n # return directory_index(newpath, fullpath)\n raise Http404, \"Directory indexes are not allowed here.\"\n if not os.path.exists(fullpath):\n raise Http404, '\"%s\" does not exist' % fullpath\n # Respect the If-Modified-Since header.\n statobj = os.stat(fullpath)\n if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),\n statobj[stat.ST_MTIME], statobj[stat.ST_SIZE]):\n return HttpResponseNotModified()\n mimetype = mimetypes.guess_type(fullpath)[0] or 'application/octet-stream'\n # Treat the file as a django template\n template = Template(open(fullpath, 'rb').read())\n context = RequestContext(request)\n # Render the template giving the current request\n contents = template.render(context)\n response = HttpResponse(contents, mimetype=mimetype)\n response[\"Last-Modified\"] = http_date(statobj[stat.ST_MTIME])\n response[\"Content-Length\"] = len(contents)\n return response",
"def setup_output_path(self):\n self.logger.info('setting up output path')\n try:\n self.output_path.mkdir()\n except FileExistsError:\n pass\n try:\n (self.output_path / 'simple').mkdir()\n except FileExistsError:\n pass\n for filename in resource_listdir(__name__, 'static'):\n if filename == 'index.html':\n # Skip template\n continue\n with (self.output_path / filename).open('wb') as f:\n source = resource_stream(__name__, 'static/' + filename)\n f.write(source.read())\n source.close()",
"def static(path):\n return static_file(path, root='media')",
"def glr_path_static():\n return os.path.join(base_path, \"static\")",
"def deploy_static_media(env=None, asset_version='', quick=False, haus_vars={}):\n print green('Deploying static media {}'.format('__quick__' if quick else ''))\n collectstatic(no_input=True, skip_admin=quick)",
"def make_static_tween(app, handler):\n # TODO allow turning off.\n # TODO Get path from config.\n static_app = DirectoryApp(\n os.path.join(os.path.dirname(__file__), app.static_path),\n index_page=None\n )\n\n def static_tween(request):\n if request.path_info_peek() == 'static':\n request.path_info_pop()\n return static_app(request)\n return handler(request)\n\n return static_tween",
"def collect_static_files():\n with env.cd(settings.PROJECT_PATH), prefix(COMMANDS['set_environment']), \\\n prefix(COMMANDS['activate_virtualenv']):\n env.run('python rnacentral/manage.py collectstatic --noinput')",
"def add(app, url = None, path = None, endpoint=None, index='index.html'):\n url = url or app.static_url_path or ''\n path = os.path.abspath(path or app.static_folder or '.')\n endpoint = endpoint or 'static_' + os.path.basename(path)\n\n if path == app.static_folder:\n if url != app.static_url_path:\n raise ValueError('Files in `{}` path are automatically served on `{}` URL by Flask.'\n ' Use different path for serving them at `{}` URL'.format(path, app.static_url_path, url))\n else:\n @app.route(url + '/<path:filename>', endpoint = endpoint)\n def static_files(filename):\n return send_from_directory(path, filename)\n\n if index:\n @app.route(url + '/', endpoint = endpoint + '_index')\n def static_index():\n return send_from_directory(path, index)\n\n if url:\n @app.route(url, endpoint = endpoint + '_index_bare')\n def static_index_bare():\n return send_from_directory(path, index)",
"def test_does_static_directory_exist(self):\n does_static_dir_exist = os.path.isdir(self.static_dir)\n does_css_static_dir_exist = os.path.isdir(os.path.join(self.static_dir, 'css'))\n does_js_static_dir_exist = os.path.isdir(os.path.join(self.static_dir, 'js'))\n \n self.assertTrue(does_static_dir_exist, f\"{FAILURE_HEADER}The static directory was not found in the expected location. Check and try again.{FAILURE_FOOTER}\")\n self.assertTrue(does_css_static_dir_exist, f\"{FAILURE_HEADER}The css subdirectory was not found in your static directory.{FAILURE_FOOTER}\")\n self.assertTrue(does_js_static_dir_exist, f\"{FAILURE_HEADER}The js subdirectory was not found in your static directory.{FAILURE_FOOTER}\")",
"def __get_server_static__(app_path,static_dir):\n import os\n # from . import config_loader\n\n # root_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n _path = (static_dir).replace(\"/\", os.path.sep)\n return os.sep.join([app_path, _path])",
"def static(path):\n return bottle.static_file(path, root='static/')",
"def static(path):\n return bottle.static_file(path, root='static/')",
"def static(path):\n return bottle.static_file(path, root='static/')",
"def static(path):\n return bottle.static_file(path, root='static/')",
"def create_project_dir():\r\n with settings(warn_only=True):\r\n run('mkdir -p %s/packages' % (env.path,))\r\n run('mkdir %s/log' % (env.path,))\r\n run('mkdir -p %s/media/uploads' % (env.path,))\r\n run('mkdir -p %s/collected_static' % (env.path,))\r\n # change permissions for writable folder\r\n cmd = env.host_settings.get('make_folder_world_writeable','chown -R www-data:www-data')\r\n if cmd:\r\n run('%s %s/media' % (cmd, env.path))\r\n run('%s %s/collected_static' % (cmd, env.path))",
"def get_swagger_static_root():\n return os.path.join(CURDIR, \"static\")",
"def create_path_and_index(subdir: str) -> None:\n if not os.path.exists(WEBOUT_PATH + subdir):\n os.makedirs(WEBOUT_PATH + subdir)\n create_blank_index(WEBOUT_PATH + subdir + \"index.html\")",
"def copy_static(self, outdir):\n pass",
"def update_static_files(self):\n\n params = self.chose_param_value(\"--static\")\n self._check_path_availability([\"get_static_dir\", \"get_static_dir_to\"])\n if self._check_whether_has_params(params):\n self.updater.update_files(\n self.analizer.get_static_dir(),\n self.analizer.get_static_dir_to(),\n params\n )\n return self.write_debug_message(\"Static files upgrade is done!\\n\")\n return self.write_error_message(\"You haven't passed any params about static files\")",
"def cp_static_files(self,inpath,outpath): \n if inpath==self.static_dir:\n dest=os.path.join(outpath,os.path.basename(inpath))\n if os.path.exists(dest):\n logger.warning('Remove old static folder')\n shutil.rmtree(dest) #not efficient. Should do it incrementaly...\n logger.info('cp_static_files %s -> %s' %(inpath,dest))\n copyfiles(inpath,dest) \n else:\n for folder in os.listdir(inpath):\n if folder == 'static':\n logger.info('found static folder, copy all...')\n dest=os.path.join(outpath,folder)\n src=os.path.join(inpath,folder)\n if os.path.exists(dest):\n logger.warning('Remove old static folder')\n shutil.rmtree(dest) #not efficient. Should do it incrementaly...\n logger.info('cp_static_files %s -> %s' %(src,dest))\n copyfiles(src,dest)\n return 0"
] | [
"0.70106965",
"0.68311924",
"0.6625952",
"0.63648784",
"0.61918205",
"0.60434896",
"0.5981367",
"0.58381754",
"0.5810352",
"0.57726264",
"0.57547545",
"0.57248443",
"0.56464094",
"0.5618542",
"0.56139016",
"0.5593905",
"0.55286866",
"0.55245644",
"0.5519247",
"0.55121106",
"0.55010533",
"0.55010533",
"0.55010533",
"0.55010533",
"0.5496965",
"0.5491143",
"0.54505855",
"0.54126155",
"0.5390763",
"0.53661925"
] | 0.7912355 | 0 |
Initialises the database to contain the tables required for DjangoCMS with South. Runs syncdb all and migrate fake. | def initialise_database():
with cd(code_dir):
run(python_add_str + "python manage.py syncdb --all")
run(python_add_str + "python manage.py migrate --fake") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup_database():\n from django.core.management import call_command\n from django import setup\n setup()\n call_command('migrate', verbosity=0, interactive=False)\n call_command('loaddata', data('initial_data.json'), verbosity=0, interactive=False)",
"def init_db():\n db = get_db()\n Page.create_table(db)\n PageVersion.create_table(db)\n User.create_table(db)",
"def setup_database(self):\n self.db.setup_database()",
"def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()",
"def setup_db(self) -> None:\n conn = mysql.connector.connect(\n user=self.app.config[\"DATABASE_USER\"], password=self.app.config[\"DATABASE_PASSWORD\"],\n host=self.app.config[\"DATABASE_HOST\"], port=self.app.config[\"DATABASE_PORT\"], raise_on_warnings=True\n )\n try:\n cursor = conn.cursor()\n cursor.execute(\n \"CREATE DATABASE IF NOT EXISTS {} CHARACTER SET utf8\".format(self.app.config[\"DATABASE_NAME\"])\n )\n conn.commit()\n except:\n raise\n else:\n with self.DBManager(self.app) as connection:\n for model in sorted(lib.get_subclasses(lib.models.Model), key=lambda x: x.index):\n model.setup_table(connection=connection)\n finally:\n conn.close()",
"def initialize_test_db(self):\n # Create a test database and sync it with models.py\n # Handle a second test database for selenium use. Postgres uses\n # transactions which interfere with the Django server thread.\n settings.TEST_DATABASE_NAME = self.db_name\n connection.creation.create_test_db(verbosity=self.verbosity,\n autoclobber=True)\n # Hook for doing any extra initialization\n self.extra_init()\n # Load fixture data.\n call_command('loaddata', *self.fixtures, verbosity=self.verbosity)\n # Sync data and close connection\n connection.close()\n # If sqlite3 or Postgres is used, create a backup database to speed up\n # fixture reloading.\n if settings.DATABASE_ENGINE == 'postgresql_psycopg2':\n # connection.creation is used to overcome transaction management,\n # allowing to execute DROP and CREATE db commands.\n cursor = connection.cursor()\n connection.creation.set_autocommit()\n cursor.execute(\"DROP DATABASE IF EXISTS %s_backup\" % self.db_name)\n cursor.execute(\"CREATE DATABASE %s_backup WITH TEMPLATE %s\" % (\n self.db_name, self.db_name))\n if settings.DATABASE_ENGINE == 'sqlite3':\n self.db_path = os.path.join(PROJECT_PATH, settings.DATABASE_NAME)\n self.db_backup_path = '%s_backup' % self.db_path\n if self.db_path[-3:] == '.db':\n self.db_backup_path = '%s_backup.db' % self.db_path[:-3]\n shutil.copyfile(self.db_path, self.db_backup_path)\n # Restore the database names as create_test_db changed it.\n settings.TEST_DATABASE_NAME = self.test_database_name\n settings.DATABASE_NAME = self.database_name",
"def init_db():\n db.drop_all()\n db.create_all()\n seed_companies()\n seed_emission_reports()\n seed_reduction_targets()\n seed_milestones()",
"def set_up_db():\n DATABASE.drop_tables([Customer])\n DATABASE.close()\n DATABASE.create_tables([Customer])\n DATABASE.close()",
"def init_db():\n import cerbereapp.models\n Base.metadata.create_all(bind=engine)",
"def __init_database(self):\n from admin.database import init_db\n init_db()",
"def django_db_setup(django_db_setup, django_db_blocker):\n with django_db_blocker.unblock():\n # todo Now remove the --noinput just to be sure that the test database's data will be deleted\n management.call_command('flush', '--noinput')\n zakanda.db.create_initial_data()",
"def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()",
"def setup_db():\n logger.info('Setting up db')\n setup_all_db()\n setup_emails()",
"def migrate_database(self):\n\n self.db.migrate_database()",
"def db_initialise():\n generate_migration_file()\n if not MySQLScheme.fetch_one(IS_MIGRATION_TABLE,\n **{\"args\": {'schema': SCHEMA}}):\n with open(MIGRATION_FILE, 'r') as init_sql:\n data = init_sql.read()\n\n if f\"CREATE TABLE IF NOT EXISTS {MIGRATION_TABLE}\" not in data:\n when = str(int(time.time()))\n sql_file = os.path.join(MIGRATION_FOLDER, f\"{when}.sql\")\n\n with open(sql_file, 'w') as save_sql:\n up = MYSQL_MIGRATION_UP.format(f\"upgrade-{when}\", when,\n MIGRATION_TABLE)\n down = MYSQL_MIGRATION_DOWN.format(f\"downgrade-{when}\",\n MIGRATION_TABLE)\n\n save_sql.write(\"\\n\\n\".join([up, down]))\n LOGGER.info(f\"migration file: \"\n f\"{os.path.join('migrations', sql_file)}\")\n else:\n when = re.findall('[0-9]+', data)[0]\n\n generate_migration_file()\n dbi_query = anosql.from_path(MIGRATION_FILE, 'psycopg2')\n MySQLScheme.commit(getattr(dbi_query, f\"upgrade_{when}\").sql)\n LOGGER.info(f\"initial successful migration: {when}\")",
"def _initial_setup(self):\n logger.info(\"Performing initial database setup...\")\n\n # Set up the migration_version table\n self._execute(\n \"\"\"\n CREATE TABLE migration_version (\n version INTEGER PRIMARY KEY\n )\n \"\"\"\n )\n\n # Initially set the migration version to 0\n self._execute(\n \"\"\"\n INSERT INTO migration_version (\n version\n ) VALUES (?)\n \"\"\",\n (0,),\n )\n\n # Set up any other necessary database tables here\n\n logger.info(\"Database setup complete\")",
"def syncdb():\n with virtualenv():\n run('python manage.py syncdb --noinput')\n run('python manage.py migrate')",
"def setUp(self):\n db.create_all()",
"def initdb():\n db.create_all()",
"def initdb():\n db.create_all()",
"def setup_db():\n\n engine = config['tg.app_globals'].sa_engine\n # model.init_model(engine)\n # model.metadata.create_all(engine)",
"def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User], safe=True)\n DATABASE.close()",
"def init_db():\n current_app.logger.info('Creating database...')\n db.drop_all()\n db.create_all()\n db.session.commit()",
"def smart_syncdb_migrate(self):\n local('python manage.py syncdb')\n local('python manage.py migrate')\n local('python manage.py syncdb --all')",
"def init_db():\n # We are setting the module variables here for the first time, so disable the warning\n global DB_USER_TABLE # pylint: disable=global-variable-undefined\n global DB_CUSTOMER_TABLE # pylint: disable=global-variable-undefined\n global DB_USER_CUSTOMER_RELS_TABLE # pylint: disable=global-variable-undefined\n global DB_TICKET_TABLE # pylint: disable=global-variable-undefined\n global DB_COMMENT_TABLE # pylint: disable=global-variable-undefined\n\n db = TinyDB(app.config['DB_NAME'])\n\n DB_USER_TABLE = db.table('users')\n DB_CUSTOMER_TABLE = db.table('customers')\n DB_USER_CUSTOMER_RELS_TABLE = db.table('user_customer_rels')\n DB_TICKET_TABLE = db.table('tickets')\n DB_COMMENT_TABLE = db.table('comments')",
"def setUp(self):\n db.drop_all() # clean up the last tests\n db.create_all() # make our sqlalchemy tables",
"def init():\n database.create_tables([Tracker])\n database.commit()",
"def init_db():\n db.drop_all()\n db.configure_mappers()\n db.create_all()\n db.session.commit()",
"def initdb():\n db.drop_all()\n db.configure_mappers()\n db.create_all()\n db.session.commit()",
"def sync_db():\n\n check_prompt = (\n not env.prompt or\n console.confirm(\n \"Create tables for models which have not yet been installed?\",\n default=True,\n )\n )\n\n if check_prompt:\n with cd(\"%s\" % env.work_path):\n with prefix(\"source %s/bin/activate\" % env.env_path):\n run(\n \"./manage.py syncdb\"\n \" --noinput\"\n )"
] | [
"0.7663976",
"0.75398517",
"0.75250465",
"0.7471838",
"0.7441649",
"0.7391628",
"0.73528063",
"0.72446615",
"0.72148156",
"0.72110015",
"0.71857905",
"0.7177211",
"0.7132408",
"0.7070013",
"0.705777",
"0.7000869",
"0.69667584",
"0.69374496",
"0.6935844",
"0.6935844",
"0.6898643",
"0.68908525",
"0.6869744",
"0.68635553",
"0.6857782",
"0.6838732",
"0.68320835",
"0.6829788",
"0.6827978",
"0.6824524"
] | 0.8100372 | 0 |
Binary mask from cv2 styled contour (gets filled) | def make_mask(shape, contour):
mask = np.zeros(shape, np.int32)
cv2.drawContours(mask, [contour], 0, (255), -1)
return mask | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_contour_features(mask,selectcell=\"centered\"):\r\n \r\n #binarize image (everything above 0 becomes 1)\r\n mask = np.clip(mask,a_min=0,a_max=1)\r\n\r\n #for contours, dont use RETR_TREE, but RETR_EXTERNAL as we are not interested in internal objects\r\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n contours = list(contours)\r\n \r\n #in case there is no contour found, add a dummy contour\r\n if len(contours)==0:\r\n contours = [np.array([[[0, 0]],[[0, 1]],[[1, 1]],[[1, 0]]])] #generate a dummy contour\r\n\r\n #Sort contours, longest first\r\n contours.sort(key=len,reverse=True)\r\n contours = [c for c in contours if len(c)>4] #proper contour should have at least 5 points\r\n hulls = [cv2.convexHull(contour,returnPoints=True) for contour in contours]\r\n\r\n mu_origs = [cv2.moments(contour) for contour in contours]\r\n mu_hulls = [cv2.moments(hull) for hull in hulls]\r\n\r\n area_origs = [mu_orig[\"m00\"] for mu_orig in mu_origs]\r\n area_hulls = [mu_hull[\"m00\"] for mu_hull in mu_hulls]\r\n\r\n #drop events where area is zero\r\n hulls = [hulls[i] for i in range(len(hulls)) if area_origs[i]>0] \r\n contours = [contours[i] for i in range(len(contours)) if area_origs[i]>0]\r\n mu_origs = [mu_origs[i] for i in range(len(mu_origs)) if area_origs[i]>0]\r\n mu_hulls = [mu_hulls[i] for i in range(len(mu_hulls)) if area_origs[i]>0]\r\n area_hulls = [area_hulls[i] for i in range(len(area_hulls)) if area_origs[i]>0]\r\n area_origs = [area_origs[i] for i in range(len(area_origs)) if area_origs[i]>0]\r\n \r\n \r\n pos_x = [int(mu_orig['m10']/mu_orig['m00']) for mu_orig in mu_origs]\r\n pos_y = [int(mu_orig['m01']/mu_orig['m00']) for mu_orig in mu_origs]\r\n\r\n \r\n if selectcell == \"smooth\":\r\n #compute the area ratio (roughness of contour)\r\n area_ratio = np.array(area_hulls)/np.array(area_origs)\r\n #get the contour with minimum roughness (smooth contour)\r\n sorter = np.argsort(area_ratio) #smallest first\r\n\r\n if selectcell == \"centered\":\r\n #select contour that is closest to the center of the image. 
\r\n #In iPAC, cells are usually in the center.\r\n mid_x,mid_y = mask.shape[0]/2,mask.shape[1]/2 #middle of the image\r\n BB = [cv2.boundingRect(c) for c in contours] #get a bounding box around the object\r\n distances = [np.sqrt((mid_x-bb[0])**2 + (mid_y-bb[1])**2) for bb in BB]\r\n sorter = np.argsort(distances) #smallest first\r\n \r\n #sort values with respect to chosen metric (area_ratio or distance)\r\n contours = [contours[s] for s in sorter]\r\n hulls = [hulls[s] for s in sorter]\r\n pos_x = [pos_x[s] for s in sorter]\r\n pos_y = [pos_y[s] for s in sorter]\r\n mu_origs = [mu_origs[s] for s in sorter]\r\n area_origs = [area_origs[s] for s in sorter]\r\n area_hulls = [area_hulls[s] for s in sorter]\r\n \r\n # draw mask of the chosen contour\r\n mask = np.zeros_like(mask)\r\n cv2.drawContours(mask,contours,0,1,cv2.FILLED)# produce a contour that is filled inside\r\n\r\n hull = hulls[0]#[0:n_contours]\r\n pos_x = pos_x[0]\r\n pos_y = pos_y[0] \r\n mu_orig = mu_origs[0]#[0:n_contours]\r\n area_orig = area_origs[0]#[0:n_contours]\r\n area_hull = area_hulls[0]#[0:n_contours]\r\n \r\n if area_orig>0:\r\n area_ratio = area_hull/area_orig\r\n else:\r\n area_ratio = np.nan\r\n\r\n arc = cv2.arcLength(hull, True) \r\n circularity = 2.0 * np.sqrt(np.pi * mu_orig[\"m00\"]) / arc\r\n\r\n\r\n dic = {\"mask\":mask,\"pos_x\":pos_x,\"pos_y\":pos_y,\"area_orig\":area_orig,\"area_hull\":area_hull,\\\r\n \"area_ratio\":area_ratio,\"circularity\":circularity}\r\n return dic",
"def mask(self):\n mask = np.zeros((self.height, self.width))\n pts = [\n np.array(anno).reshape(-1, 2).round().astype(int)\n for anno in self.segmentation\n ]\n mask = cv2.fillPoly(mask, pts, 1)\n return mask",
"def sanitize_mask(orig_x, orig_y, mask):\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n # Draw contours:\n cv2.drawContours(mask, contours, 0, (0, 255, 0), 2)\n # Calculate image moments of the detected contour\n num_objects = (len(contours))\n #threshold\n threshold = 3\n\n center_list = []\n # print(num_objects)\n if num_objects > 1:\n for item in range(num_objects):\n M = cv2.moments(contours[item])\n try:\n center_x = round(M['m10'] / M['m00'])\n center_y = round(M['m01'] / M['m00'])\n center_list.append([center_y , center_x ])\n except:\n pass\n\n # initialize retmask\n retmask = mask\n if num_objects > 1:\n for x, y in center_list:\n if orig_x - threshold <= x <= orig_x + threshold and orig_y - threshold <= y <= orig_y + threshold:\n pass\n else:\n def dfs_removal(px , py, mask):\n R = len(mask)\n C = len(mask[0])\n if mask[px][py ] != 255: \n return\n mask[px][py] = 0\n if 0 <= px - 1 and mask[px - 1][py ] == 255: dfs_removal(px - 1 , py , mask)\n if px + 1 < R and mask[px + 1][py ] == 255: dfs_removal(px + 1 , py , mask)\n if 0 <= py - 1 and mask[px][py - 1] == 255: dfs_removal(px, py -1 , mask)\n if py + 1 < C and mask[px][py + 1] == 255: dfs_removal(px, py + 1 , mask)\n\n dfs_removal(x,y, mask)\n\n return retmask",
"def as_boolean_mask(self):\n bbox = self.bbox()\n zs = np.unique([c.image_z_position for c in self.contours])\n z_to_index = dict(zip(zs,range(len(zs))))\n\n # Get dimensions, initialize mask.\n nx,ny = np.diff(bbox[:2], axis=1).astype(int) + 1\n nx = int(nx); ny = int(ny)\n nz = int(zs.shape[0])\n mask = np.zeros((nx,ny,nz), dtype=np.bool)\n\n # We check if these points are enclosed within each contour \n # for a given slice. `test_points` is a list of image coordinate \n # points, offset by the bounding box.\n test_points = bbox[:2,0] + np.c_[ np.where(~mask[:,:,0]) ]\n\n # First we \"turn on\" pixels enclosed by inclusion contours.\n for contour in self.contours:\n if contour.inclusion:\n zi = z_to_index[contour.image_z_position]\n contour_matrix = contour.to_matrix()[:,:2]\n\n # Turn the contour closed if it's not.\n if (contour_matrix[0] != contour_matrix[-1]).all():\n contour_matrix = np.append(contour_matrix,\n contour_matrix[0].reshape(1,2),\n axis=0)\n\n # Create path object and test all pixels\n # within the contour's bounding box.\n path = mplpath.Path(contour_matrix, closed=True)\n contains_pts = path.contains_points(test_points)\n mask[:,:,zi] = contains_pts.reshape(mask.shape[:2])\n\n # Second, we \"turn off\" pixels enclosed by exclusion contours.\n for contour in self.contours:\n if not contour.inclusion:\n zi = z_to_index[contour.image_z_position]\n contour_matrix = contour.to_matrix()[:,:2]\n\n # Turn the contour closed if it's not.\n if (contour_matrix[0] != contour_matrix[-1]).all():\n contour_matrix = np.append(contour_matrix,\n contour_matrix[0].reshape(1,2),\n axis=0)\n\n path = mplpath.Path(contour_matrix, closed=True)\n not_contains_pts = ~path.contains_points(test_points)\n not_contains_pts = not_contains_pts.reshape(mask.shape[:2])\n mask[:,:,zi] = np.logical_and(mask[:,:,zi], not_contains_pts)\n\n # The first and second axes have to \n # be swapped because of the reshape.\n return mask.swapaxes(0,1), bbox[[1,0,2]]",
"def get_binary_mask(self,index):\n mask = self.load_mask_png(index)\n (rows,cols) = np.where(mask>0)[0:2] #pixels in mask disregarding the color\n new_mask = np.zeros(shape=mask.shape[0:2], dtype=np.uint8)\n new_mask[(rows,cols)] = 255\n return new_mask",
"def get_contour(self, mask):\n\n assert mask.ndim == 2\n assert mask.min() == 0\n assert mask.max() == 1\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n assert len(contours) == 1, \"Too many contours in this mask!\"\n contour = contours[0]\n # logging.debug(\"Returning {} fit contours over mask pixels\".format(len(contours)))\n return contour",
"def mask(self):\n\n mask = np.zeros(shape=(self._info.height, self._info.width), dtype=np.uint8)\n\n self.draw(image=mask, color=constants.COLOR_WHITE_MONO)\n\n mask_with_border = np.pad(mask, 1, 'constant', constant_values=255)\n\n cv2.floodFill(image=mask,\n mask=mask_with_border,\n seedPoint=(int(self.middle_point[0]), int(self.middle_point[1])),\n newVal=constants.COLOR_WHITE_MONO)\n\n return mask",
"def find_contours(mask):\n _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)\n return contours",
"def get_biomass(binary_mask):\n\n white_pixels = cv2.countNonZero(binary_mask)\n return white_pixels",
"def emit_mask_contour(self):\r\n contours = find_contours(self.final_mask, 0.5)\r\n \r\n sig = [contours, self.fillContourButton.isChecked(), self.thicknessSpinBox.value(), self.invertMaskButton.isChecked()]\r\n \r\n self.signal_DMDcontour.emit(sig)",
"def apply_mask_to_image(img, mask):\n img_size = img.shape[0]\n mask = cv2.resize(mask, dsize=(img_size, img_size))\n\n # Find contour of the mask\n imgray = mask\n ret,thresh = cv2.threshold(imgray, 127, 255, 0)\n contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # Draw contours on image\n segmented_img = cv2.drawContours(img, contours, -1, (0,255,0), 3)\n\n return segmented_img",
"def find_contours(mask):\n\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n return cnts",
"def mask_label_contour(image, seg):\n return sitk.Mask(image, sitk.LabelContour(seg+1)==0)",
"def __mask_region(self, img, vertices):\n\n mask = np.zeros_like(img) \n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n cv2.fillConvexPoly(mask, vertices, ignore_mask_color)\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image",
"def mask_creation(image, mask_path, image_index):\n # convert image to hsv color space\n image = cv2.imread(image)\n \n im_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(im_hsv)\n\n # compute the mean value of hue, saturation and value for the border of the image\n hue_mean_border = (np.mean(h[0, :]) + np.mean(h[:, 0]) + np.mean(h[-1, :]) + np.mean(h[:, -1]))/4\n saturation_mean_border = (np.mean(s[0, :]) + np.mean(s[:, 0]) + np.mean(s[-1, :]) + np.mean(s[:, -1]))/4\n value_mean_border = (np.mean(v[0, :]) + np.mean(v[:, 0]) + np.mean(v[-1, :]) + np.mean(v[:, -1]))/4\n\n # compute lower and upper limits for the mask\n # we need to find the good limits to segment the background by color\n lower_hue = (hue_mean_border - 40)\n upper_hue = (hue_mean_border + 40)\n lower_saturation = (saturation_mean_border - 20)\n upper_saturation = (saturation_mean_border + 20)\n lower_value = (value_mean_border - 200)\n upper_value = (value_mean_border + 200)\n\n lower_limit = np.array([lower_hue, lower_saturation, lower_value])\n upper_limit = np.array([upper_hue, upper_saturation, upper_value])\n\n # create mask\n mask = cv2.inRange(im_hsv, lower_limit, upper_limit)\n mask = cv2.bitwise_not(mask)\n\n # resize masks\n n_mask, m_mask = mask.shape[0], mask.shape[1]\n mask = cv2.resize(mask, (1000, 1000)) \n\n # apply mask to find contours\n mask = np.uint8(mask)\n \n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # create new mask with the contours found\n mask_contours = cv2.fillPoly(mask, contours, [255, 255, 255])\n\n # Apply morphological filter to clean\n kernel = np.ones((9, 9), np.float32)/25\n mask_erode = cv2.morphologyEx(mask_contours, cv2.MORPH_ERODE, kernel, iterations = 1)\n mask_dilate = cv2.morphologyEx(mask_erode, cv2.MORPH_DILATE, kernel, iterations = 1)\n\n # resize masks to original size\n new_mask = cv2.resize(mask_dilate, (m_mask, n_mask))\n\n # save mask image inside the same folder as the image\n # cv2.imwrite(mask_path + str(image_index).zfill(2) + \"_mask.png\", new_mask)\n\n return new_mask",
"def region_of_interest(self,img):\r\n #defining a blank mask\r\n mask = np.zeros_like(img) \r\n #checking number of image channel(color/grayscale) and applying mask\r\n if len(img.shape) > 2:\r\n ignore_mask_color = (255,255,255)\r\n else:\r\n ignore_mask_color = 255\r\n #filling color to pixels inside the polygon \r\n cv2.fillPoly(mask, self.vertices_img, ignore_mask_color)\r\n #image where mask pixels are nonzero\r\n masked_image = cv2.bitwise_and(img, mask)\r\n #cv2.imshow('',masked_image)\r\n return masked_image",
"def create_binary_image(img, s_thresh=(100, 255), sx_thresh=(10, 200), dir_thresh=(np.pi/6, np.pi/2), c_thresh=50):\n # We use a combination of gradient and direction threshold\n # convert to gray scale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Compute the combined threshold\n sobel_x = sobel_mask(gray, sx_thresh)\n dir_gradient = dir_mask(gray, dir_thresh)\n combined = ((sobel_x == 1) & (dir_gradient == 1))\n\n # Color threshold in RGB color space\n # This helps to detect yellow lanes better, which is a significant issue in the video \n G = img[:,:,1]\n R = img[:,:,2]\n r_g = (R > c_thresh) & (G > c_thresh)\n \n # color channel thresholds\n hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\n S = hls[:,:,2]\n L = hls[:,:,1]\n \n # S channel performs well for detecting bright yellow and white lanes\n s = (S > s_thresh[0]) & (S <= s_thresh[1])\n l = (L > s_thresh[0]) & (L <= s_thresh[1])\n\n # combine all the thresholds\n # The pixel we want is either white or yellow\n color_combined = np.zeros_like(R)\n color_combined[(r_g & l) & (s | combined)] = 1\n \n # apply the region of interest mask\n # This helps to remove the shadow outside the lane\n mask = np.zeros_like(color_combined)\n h, w = img.shape[0], img.shape[1]\n polygon_vertice = np.array([[0,h-1], [w//2, h//2], [w-1, h-1]], dtype=np.int32)\n cv2.fillPoly(mask, [polygon_vertice], 1)\n binary = cv2.bitwise_and(color_combined, mask)\n \n return binary",
"def _get_mask(self, anno, idx):\n coco = self.coco\n img_info = coco.loadImgs(self.img_ids[idx])[0]\n\n m = np.zeros((img_info['height'], img_info['width']), dtype=np.float32)\n\n for obj in anno:\n if 'segmentation' in obj:\n if obj['iscrowd']:\n rle = pycocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n m += pycocotools.mask.decode(rle)\n elif obj['num_keypoints'] == 0:\n rles = pycocotools.mask.frPyObjects(obj['segmentation'],\n img_info['height'],\n img_info['width'])\n for rle in rles:\n m += pycocotools.mask.decode(rle)\n\n return m < 0.5",
"def get_mask(self, img):\n raise NotImplementedError()",
"def create_binary_masks(image_path):\n mask = cv2.imread(image_path, cv2.IMREAD_ANYDEPTH)\n size = mask.shape\n for row_pixel in range(0, size[0]):\n for column_pixel in range(0, size[1]):\n if mask[row_pixel, column_pixel] == 0:\n mask[row_pixel, column_pixel] = 65535\n\n else:\n mask[row_pixel, column_pixel] = 0\n\n cv2.imwrite(image_path[:-4]+'_binary.png', mask)",
"def get_rectangles_mask(self, thresh: np.ndarray) -> np.ndarray:\r\n contours = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)[0]\r\n mask = np.zeros(thresh.shape, np.uint8)\r\n good_contours = sorted(\r\n [cnt for cnt in contours if 100000 < cv.contourArea(cnt) < 200000],\r\n key=cv.contourArea,\r\n )\r\n\r\n setattr(self, \"contour1\", good_contours[0])\r\n setattr(\r\n self,\r\n \"contour2\",\r\n good_contours[1]\r\n if cv.pointPolygonTest(\r\n good_contours[1], tuple(good_contours[0][0][0]), False\r\n )\r\n < 0\r\n else good_contours[2],\r\n )\r\n\r\n cv.drawContours(mask, [self.contour1], 0, 255, -1)\r\n cv.drawContours(mask, [self.contour2], 0, 255, -1)\r\n\r\n return mask",
"def preprocessing(self, img):\n [a, contours, c] = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n return contours",
"def find_contour(ctx: Context):\n cv2.copyTo(ctx.filter_image, np.ones_like(ctx.temp_image1), ctx.temp_image1)\n contours, _ = cv2.findContours(ctx.temp_image1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # take the 5 biggest areas\n contours = sorted(contours, key=lambda c: math.fabs(cv2.contourArea(c)), reverse=True)[:5]\n\n # approximate contours with poly line\n ctx.contours = [cv2.approxPolyDP(c, 2, True) for c in contours]",
"def compute_contour_binary_masks(\n contour1: np.ndarray,\n contour2: np.ndarray,\n max_size: int = DEFAULT_MAX_CONTOUR_MASK_SIZE,\n) -> typing.Tuple[np.ndarray, np.ndarray]:\n points = np.concatenate([contour1, contour2], axis=0)\n offset = points.min(axis=0)\n points, contour1, contour2 = [v - offset for v in [points, contour1, contour2]]\n scale = min(max_size / points.max(axis=0).min(), 1)\n if scale < 1:\n points, contour1, contour2 = [v * scale for v in [points, contour1, contour2]]\n w, h = points.max(axis=0).astype(\"int32\")\n im1, im2 = [\n cv2.drawContours(\n np.zeros((h, w), dtype=\"uint8\"),\n contours=(box[np.newaxis]).round().astype(\"int32\"),\n color=255,\n thickness=-1,\n contourIdx=0,\n )\n > 0\n for box in [contour1, contour2]\n ]\n return im1, im2",
"def inflate_mask(mask):\n kernel = np.ones((12, 12), np.uint8)\n return cv2.dilate(mask, kernel, 1)",
"def _draw_contour(self, img):\n if self.mask is None or self.contour_width == 0:\n return img\n\n mask = self._get_bolean_mask(self.mask) * 255\n contour = Image.fromarray(mask.astype(np.uint8))\n contour = contour.resize(img.size)\n contour = contour.filter(ImageFilter.FIND_EDGES)\n contour = np.array(contour)\n\n # make sure borders are not drawn before changing width\n contour[[0, -1], :] = 0\n contour[:, [0, -1]] = 0\n\n # use gaussian to change width, divide by 10 to give more resolution\n radius = self.contour_width / 10\n contour = Image.fromarray(contour)\n contour = contour.filter(ImageFilter.GaussianBlur(radius=radius))\n contour = np.array(contour) > 0\n contour = np.dstack((contour, contour, contour))\n\n # color the contour\n ret = np.array(img) * np.invert(contour)\n if self.contour_color != 'black':\n color = Image.new(img.mode, img.size, self.contour_color)\n ret += np.array(color) * contour\n\n return Image.fromarray(ret)",
"def draw_contours(self, image, maskImg):\r\n # Required variables..\r\n x, y, width, height = 0, 0, 0, 0\r\n # Find contours..\r\n contours, hierarchy = cv2.findContours(image=maskImg, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE) # Playable Parameters..\r\n # Draw the contours..\r\n for contour in contours:\r\n # Calculate the area of the contour, so can remove unnecessary contours..\r\n area = cv2.contourArea(contour=contour)\r\n if area > 3000: # Playable adjustment..!! Found Good as 3000 for current light condition.. change this if light condition changes..\r\n # Draw the contours to the image -- actual frame..\r\n if self.debug_mode:\r\n cv2.drawContours(image=image, contours=contour, contourIdx=-1, color=(255, 255, 0), thickness=4)\r\n # Find the perimeter of the markers detected...\r\n perimeter = cv2.arcLength(curve=contour, closed=True)\r\n # Approximating/Finding the corners of the image from the obtained corners..\r\n approx_corners = cv2.approxPolyDP(curve=contour, epsilon=0.02 * perimeter, closed=True)\r\n # Find the bounding box rectangle for the approximated corners..\r\n x, y, width, height = cv2.boundingRect(approx_corners)\r\n # Return the values with which a rectangle can be drawn..\r\n return x, y, width, height",
"def get_building_contour(current_building_mask):\n ret, threshed = cv.threshold(current_building_mask, 0, 2 ** 16, cv.THRESH_BINARY)\n compressed = threshed.astype(np.uint8)\n current_building_contour, hierarchy = cv.findContours(compressed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n return current_building_contour, hierarchy",
"def get_regions_mask(self, input):",
"def get_mask(self, anno, img_info) -> np.ndarray:\n m = np.zeros((img_info[\"height\"], img_info[\"width\"]), dtype=np.float32)\n\n for obj in anno:\n if obj[\"iscrowd\"]:\n rle = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n m += mask\n elif obj[\"num_keypoints\"] == 0:\n rles = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n for rle in rles:\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n\n m += mask\n\n return (m < 0.5).astype(np.float32)"
] | [
"0.7214323",
"0.70771843",
"0.694643",
"0.69156",
"0.6753374",
"0.6689051",
"0.66160184",
"0.66025215",
"0.65440816",
"0.6517318",
"0.65129876",
"0.64886975",
"0.6453866",
"0.64246404",
"0.64205784",
"0.6411343",
"0.63934743",
"0.6383716",
"0.6381961",
"0.6374727",
"0.63475114",
"0.6322776",
"0.6322223",
"0.6308395",
"0.63027936",
"0.62911636",
"0.6288618",
"0.6286578",
"0.6276172",
"0.6275158"
] | 0.7433139 | 0 |
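The retrieved snippets in the row above all circle the same OpenCV pattern: draw a polygon into a blank mask with `cv2.fillPoly`, then keep only the pixels inside it with `cv2.bitwise_and`. A minimal self-contained sketch of that pattern, with arbitrary example vertices that are not taken from any of the snippets:

```python
import numpy as np
import cv2

img = np.full((100, 100), 255, dtype=np.uint8)                   # dummy grayscale image
vertices = np.array([[(10, 90), (50, 10), (90, 90)]], np.int32)  # example triangle

mask = np.zeros_like(img)
cv2.fillPoly(mask, vertices, 255)        # paint the region of interest white
masked = cv2.bitwise_and(img, mask)      # zero out everything outside it

assert masked[50, 50] == 255 and masked[5, 5] == 0
```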
Converts either bytes or unicode to `bytes`, using utf8 encoding for text. | def as_bytes(bytes_or_text, encoding='utf-8'):
if isinstance(bytes_or_text, _six.text_type):
return bytes_or_text.encode(encoding)
elif isinstance(bytes_or_text, bytes):
return bytes_or_text
else:
raise TypeError('Expected binary or unicode string, got %r' %
(bytes_or_text,)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _to_bytes(value: Union[str, bytes]) -> bytes:\n return value if isinstance(value, bytes) else value.encode(\"utf-8\")",
"def ensure_utf8_bytes(v: Union[str, bytes]) -> bytes:\n if isinstance(v, str):\n v = v.encode(\"utf-8\")\n return v",
"def force_utf8(text):\n if isinstance(text, binary_type):\n return text\n else:\n return text.encode('utf-8')",
"def decode_to_utf8(text) -> bytes: # pragma: no cover\n try:\n return text.decode(\"utf-8\")\n except (AttributeError, UnicodeEncodeError):\n return text",
"def _as_bytes(s):\n if isinstance(s, bytes):\n return s\n return bytes(s, encoding='latin_1')",
"def to_utf8(text, errors='strict', encoding='utf8'):\n if isinstance(text, unicode):\n return text.encode('utf8')\n # do bytestring -> unicode -> utf8 full circle, to ensure valid utf8\n else:\n return unicode(text, encoding, errors=errors).encode('utf8')",
"def asbytes(s):\n if isinstance(s, bytes):\n return s\n else:\n return s.encode('utf-8')",
"def str_to_bytes(data):\n u_type = type(b''.decode('utf8'))\n if isinstance(data, u_type):\n return data.encode('utf8')\n return data",
"def to_bytes(data):\n if isinstance(data, unicode):\n return data.encode('utf-8')\n else:\n return data",
"def utf8(value):\n if isinstance(value, (bytes, type(None))):\n return value\n if not isinstance(value, unicode_type):\n raise TypeError(\n \"Expected bytes, unicode, or None; got %r\" % type(value)\n )\n return value.encode(\"utf-8\")",
"def ensure_bytes(data, encoding=\"utf8\"):\n return data if isinstance(data, bytes) else unicode_type(data).encode(encoding)",
"def to_bytes(bytes_or_str):\r\n if isinstance(bytes_or_str, str):\r\n value = bytes_or_str.encode('utf-8')\r\n else:\r\n value = bytes_or_str\r\n return value",
"def to_bytes(data):\n if isinstance(data, str):\n return data.encode(encoding='utf-8')\n else:\n return data",
"def to_bytes(something, encoding='utf8') -> bytes:\n if isinstance(something, bytes):\n return something\n if isinstance(something, str):\n return something.encode(encoding)\n elif isinstance(something, bytearray):\n return bytes(something)\n else:\n raise TypeError(\"Not a string or bytes like object\")",
"def str_to_bytes(self, data):\n if isinstance(data, bytes):\n return data\n return data.encode(\"utf-8\")",
"def to_utf8(text, charset='iso-8859-15'):\n try:\n # Do nothing if it's already utf-8\n u = unicode(text, 'utf-8')\n return text\n except UnicodeError:\n try:\n # Use the user supplied charset if possible\n u = unicode(text, charset)\n except UnicodeError:\n # This should always work\n u = unicode(text, 'iso-8859-15')\n return u.encode('utf-8')\n except TypeError:\n return text",
"def as_utf8(value):\n assert value is None or isinstance(value,types.StringTypes)\n if isinstance(value,types.UnicodeType):\n return value.encode('utf-8')\n else:\n return value",
"def give_me_bytes(string):\n return string.encode('utf8') if isinstance(string, str) else string",
"def as_text(bytes_or_text, encoding='utf-8'):\n if isinstance(bytes_or_text, _six.text_type):\n return bytes_or_text\n elif isinstance(bytes_or_text, bytes):\n return bytes_or_text.decode(encoding)\n else:\n raise TypeError(\n 'Expected binary or unicode string, got %r' % bytes_or_text\n )",
"def utf8(value):\r\n if isinstance(value, _UTF8_TYPES):\r\n return value\r\n elif isinstance(value, unicode_type):\r\n return value.encode(\"utf-8\")\r\n else:\r\n return str(value)",
"def utf8(value):\r\n if isinstance(value, six.text_type):\r\n return value.encode('utf-8')\r\n assert isinstance(value, str)\r\n return value",
"def to_bytes(string):\n assert isinstance(string, basestring)\n if sys.version_info[0] >= 3:\n if isinstance(string, str):\n return string.encode('utf-8')\n else:\n return string\n else:\n if isinstance(string, unicode):\n return string.encode('utf-8')\n else:\n return string",
"def bytes_string(text, encode=\"utf-8\"):\n if not PY3:\n if isinstance(text, unicode): # pylint: disable=undefined-variable\n result = text.encode(encode)\n else:\n result = text\n else:\n if isinstance(text, bytes):\n result = text\n else:\n result = bytes(text, encode)\n return result",
"def ensure_bytes(value: AnyStr) -> bytes:\n if isinstance(value, bytes):\n return value\n if isinstance(value, str):\n return value.encode('utf-8')\n raise TypeError(f\"input must be str or bytes, got {type(value).__name__}\")",
"def _as_bytes(s):\n if isinstance(s, bytes):\n return s\n # Assume it is a unicode string\n # Note ISO-8859-1 aka Latin-1 preserves first 256 chars\n return codecs.latin_1_encode(s)[0]",
"def test_bytes_encoding_arg(self):\n u = u'Unicode string: \\u5b54\\u5b50'\n b = py23_bytes(u, encoding='utf-8')\n self.assertEqual(b, u.encode('utf-8'))",
"def ensure_bytes(str_or_bytes, binary_type=six.binary_type,\n text_type=six.text_type):\n if isinstance(str_or_bytes, binary_type):\n return str_or_bytes\n if isinstance(str_or_bytes, text_type):\n return str_or_bytes.encode('utf-8')\n raise TypeError(\n \"input must be a text or byte string, got {}\"\n .format(type(str_or_bytes).__name__))",
"def toBytes(data):\n\tif isBytes(data):\n\t\treturn data\n\telse:\n\t\treturn data.encode(\"latin-1\")",
"def force_bytes(value):\n if IS_PY3:\n if isinstance(value, str):\n value = value.encode(\"utf-8\", \"backslashreplace\")\n else:\n if isinstance(value, unicode): # NOQA: F821\n value = value.encode(\"utf-8\")\n\n return value",
"def ensure_bytes(s, encoding):\n if isinstance(s, bytes):\n return s\n return s.encode(encoding)"
] | [
"0.78761524",
"0.78181183",
"0.78134924",
"0.77963704",
"0.7775173",
"0.77212256",
"0.7695007",
"0.76554793",
"0.76551324",
"0.76297563",
"0.7587289",
"0.75190413",
"0.74554324",
"0.74133134",
"0.74104327",
"0.74089986",
"0.73655003",
"0.73444015",
"0.72940016",
"0.72861797",
"0.72822374",
"0.7224943",
"0.7195027",
"0.7165689",
"0.7132938",
"0.7130741",
"0.7130097",
"0.71165246",
"0.706306",
"0.7029265"
] | 0.7902959 | 0 |
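The positive document above leans on the `six` compatibility layer (`_six.text_type`). On Python 3 alone, `six.text_type` is simply `str`, so a sketch of the same conversion without the dependency looks like this:

```python
# Python 3-only sketch of the as_bytes helper shown above; the behaviour is
# unchanged, only the six dependency is dropped.
def as_bytes(bytes_or_text, encoding='utf-8'):
    if isinstance(bytes_or_text, str):
        return bytes_or_text.encode(encoding)
    if isinstance(bytes_or_text, bytes):
        return bytes_or_text
    raise TypeError('Expected binary or unicode string, got %r' % (bytes_or_text,))

assert as_bytes('héllo') == b'h\xc3\xa9llo'
assert as_bytes(b'raw') == b'raw'
```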
Adds a node to the front of the list with value 'val' | def push_front(self, val):
new_node = Node(val, self.head)
if self.is_empty():
self.tail = new_node
self.head = new_node
self.size += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def push_front(self, val: Generic[T]) -> None:\n first_node = self.node.next\n\n self.node.next = Node(val)\n latest_first = self.node.next\n\n latest_first.prev = self.node #pushes the node to the front\n latest_first.next = first_node\n first_node.prev = latest_first #rearranges the list",
"def addAtHead(self, val):\n tmp = Node(val)\n tmp.nxt = self.head\n self.head = tmp\n if not self.tail:\n self.tail = self.head",
"def addAtHead(self, val):\n node = ListNode(val)\n if self.head == None:\n self.head = node\n else:\n node.next = self.head\n self.head = node",
"def push(self, val):\n self.head = Node(val, self.head)",
"def addAtHead(self, val: int) -> None:\n if(self.head == None):\n self.head = Node(val)\n else:\n new_node = Node(val)\n new_node.next = self.head \n self.head = new_node",
"def addAtHead(self, val: int) -> None:\n if self.head:\n temp_node = MyListNode(val, next_node=self.head, prev_node=None)\n self.head.prev = temp_node\n self.head = temp_node\n self.node_count += 1\n else:\n self.head = MyListNode(val)\n self.node_count += 1",
"def addNode(self, val):\n\t\tnode = self.createNode(val)\n\t\tif self.head is None:\n\t\t\tself.head = node\n\t\t\treturn node\n\t\tcur = self.head\n\t\twhile cur.getNext() is not None:\n\t\t\tcur = cur.getNext()\n\t\tcur.setNext(node)\n\t\treturn node",
"def addAtHead(self, val):\n new_node = ListNode(val)\n new_node.next = self.head\n self.head = new_node\n self.length += 1",
"def addAtHead(self, val):\n cur = linkNode(val)\n cur.next = self.head.next\n cur.prev = self.head\n\n self.head.next = cur\n if cur.next:\n cur.next.prev = cur\n\n if cur.next == None: # first node\n self.tail = cur\n # self.printList()",
"def addAtHead(self, val: int) -> None:\n pred, succ = self.head, self.head.next\n cur = Node(val)\n cur.next = succ\n cur.prev = pred\n pred.next = cur\n succ.prev = cur\n self.size += 1\n # print(\"addHead\", self.head.next.val)",
"def addAtHead(self, val):\n node = Node(val)\n node.next = self.head\n self.head = node\n\n self.size += 1",
"def addAtHead(self, val):\n node = Node(val)\n node.next = self.head\n self.head = node\n\n self.size += 1",
"def addAtHead(self, val):\n new_node = Node(val)\n new_node.next = self.head\n self.head = new_node\n self.length += 1",
"def addAtHead(self, val: int) -> None:\n new_node = Node(val)\n new_node.next = self.head\n self.head = new_node",
"def push(self, val):\n node = Node(val)\n node.next_node = self.head\n self.head = node",
"def push(self, val):\n new_head = Node(val, self.head)\n self.head = new_head\n self._counter += 1",
"def addAtHead(self, val):\n new_head = Node(val)\n if self._size == 0:\n self._head = new_head\n self._tail = self._head\n else:\n new_head.next = self._head\n self._head = new_head\n self._size += 1",
"def insert(self, val):\n new_node = Node(val)\n new_node.next = self.head\n self.head = new_node",
"def addAtHead(self, val):\n node = ListNode(val)\n node.next = self.head.next\n self.head.next = node\n if self.head is self.tail:\n self.tail = node\n self.len += 1",
"def add_node(self, val):\n if val not in self:\n self.setdefault(val, [])",
"def add_first(self, value):\n self.head = Node(value, self.head)",
"def add_front(self, key, value):\r\n\t\tnew_node = SLNode(key, value)\r\n\t\tnew_node.next = self.head\r\n\t\tself.head = new_node\r\n\t\tself.size = self.size + 1",
"def add_front(self, key, value):\n new_node = SLNode(key, value)\n new_node.next = self.head\n self.head = new_node\n self.size = self.size + 1",
"def add_front(self, key, value):\n new_node = SLNode(key, value)\n new_node.next = self.head\n self.head = new_node\n self.size = self.size + 1",
"def push_front(self, value):\n new_node = self.Node(value)\n\n # Edge Case : List is empty\n if self._size == 0:\n self._tail = new_node\n self._head = new_node\n self._size += 1\n return\n\n new_node.next = self._head\n self._head.prev = new_node\n self._head = new_node\n self._size += 1",
"def addAtTail(self, val: int) -> None:\n if(self.head == None):\n self.head = Node(val)\n else:\n cur = self.head \n while cur.next != None:\n cur = cur.next \n\n cur.next = Node(val)",
"def addAtHead(self, val):\n self.nums.insert(0, val)",
"def addAtTail(self, val: int) -> None:\n new_node = Node(val)\n temp = self.head\n if self.head is None:\n self.head = new_node\n while temp.next:\n temp = temp.next\n temp.next = new_node",
"def push_front(self, value):\n node = DLLNode(value)\n if self.head is None:\n self.tail = node \n else: \n self.head.prev_node = node \n node.next_node = self.head\n self.head = node",
"def addAtTail(self, val: int) -> None:\n pred, succ = self.tail.prev, self.tail\n cur = Node(val)\n cur.next = succ\n cur.prev = pred\n pred.next = cur\n succ.prev = cur\n self.size += 1\n # print(\"addAtTail\", self.tail.prev.val)"
] | [
"0.80221725",
"0.7826861",
"0.7751701",
"0.77292",
"0.7605654",
"0.75942093",
"0.75738394",
"0.755168",
"0.7545137",
"0.7532508",
"0.75297654",
"0.75297654",
"0.75270396",
"0.75006205",
"0.7481478",
"0.7417329",
"0.7411148",
"0.7410026",
"0.7406186",
"0.74055594",
"0.738027",
"0.7368777",
"0.7326956",
"0.7326956",
"0.73056793",
"0.72869956",
"0.7285683",
"0.72437495",
"0.7238271",
"0.72258633"
] | 0.7917012 | 1 |
Adds a node to the back of the list with value 'val' | def push_back(self, val):
new_node = Node(val)
# Update current head and tail, if necessary
if self.is_empty():
self.head = new_node
else:
self.tail.next_node = new_node
# new_node is now the tail
self.tail = new_node
self.size += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def push_back(self, val: Generic[T]) -> None:\n last_node = self.node.prev\n self.node.prev = Node(val) #pushes the node to the back\n latest_first = self.node.prev\n\n latest_first.next = self.node #rearranges the list\n latest_first.prev = last_node\n last_node.next = latest_first",
"def push(self, val):\n self.head = Node(val, self.head)",
"def push(self, val):\n node = Node(val)\n node.next_node = self.head\n self.head = node",
"def addAtTail(self, val: int) -> None:\n pred, succ = self.tail.prev, self.tail\n cur = Node(val)\n cur.next = succ\n cur.prev = pred\n pred.next = cur\n succ.prev = cur\n self.size += 1\n # print(\"addAtTail\", self.tail.prev.val)",
"def addAtTail(self, val: int) -> None:\n new_node = Node(val)\n temp = self.head\n if self.head is None:\n self.head = new_node\n while temp.next:\n temp = temp.next\n temp.next = new_node",
"def addAtTail(self, val: int) -> None:\n if(self.head == None):\n self.head = Node(val)\n else:\n cur = self.head \n while cur.next != None:\n cur = cur.next \n\n cur.next = Node(val)",
"def push(self, val):\n new_head = Node(val, self.head)\n self.head = new_head\n self._counter += 1",
"def addAtTail(self, val):\n tmp = Node(val)\n if self.tail:\n self.tail.nxt = tmp\n k = self.tail\n self.tail = tmp\n else:\n self.head = tmp\n self.tail = tmp\n k = tmp",
"def append(self, val):\n inserted_node = DblNode(val, previous_node=self.tail)\n self.tail.next_node = inserted_node\n self.tail = inserted_node",
"def add_node(self, val):\n if val in self._g:\n raise ValueError('Node already exists.')\n self._g[val] = []",
"def add_node(self, val):\n if val not in self:\n self.setdefault(val, [])",
"def push_back(self, value):\n\n # Edge Case : List is empty\n # Behave just like push_front()\n if self._size == 0:\n self.push_front(value)\n return\n\n new_node = self.Node(value)\n new_node.prev = self._tail\n self._tail.next = new_node\n self._tail = new_node\n self._size += 1",
"def push(self, val):\n new_node = Node(val)\n new_node.next = self.head\n self.head = new_node\n self.length += 1",
"def push(self, val):\n self._linkedlist.push(val)\n self._update_attr()",
"def push(self, val: str) -> None:\n if self.head is None:\n self.head = Node(val)\n self.tail = self.head\n else:\n node = Node(val)\n self.tail.next = node\n self.tail = node",
"def push(self, val):\n try:\n node = Node(val, self.top)\n except TypeError:\n return self.top\n self.top = node\n self._size += 1\n return self.top",
"def addAtTail(self, val):\n if self.head is None:\n self.addAtHead(val)\n else:\n new_node = Node(val)\n curr = self.head\n while (curr.next is not None):\n curr = curr.next\n\n curr.next = new_node\n new_node.prev = curr\n self.length += 1",
"def push(self, val):\n self.head = Node(val, self.head)\n self._length += 1",
"def addAtTail(self, val):\n cur = linkNode(val)\n if self.tail == None: # first node\n self.head.next = cur\n cur.prev = self.head\n self.tail = cur\n else:\n self.tail.next = cur\n cur.prev = self.tail\n self.tail = cur # update tail\n # self.printList()",
"def addAtTail(self, val):\n node = ListNode(val)\n if self.head == None:\n self.head = node\n else:\n cur = self.head\n while cur.next != None:\n cur = cur.next\n cur.next = node",
"def addAtTail(self, val):\n curr = self.head\n if curr is None:\n self.head = Node(val)\n else:\n while curr.next is not None:\n curr = curr.next\n curr.next = Node(val)\n\n self.size += 1",
"def addAtTail(self, val):\n curr = self.head\n if curr is None:\n self.head = Node(val)\n else:\n while curr.next is not None:\n curr = curr.next\n curr.next = Node(val)\n\n self.size += 1",
"def addNode(self, val):\n\t\tnode = self.createNode(val)\n\t\tif self.head is None:\n\t\t\tself.head = node\n\t\t\treturn node\n\t\tcur = self.head\n\t\twhile cur.getNext() is not None:\n\t\t\tcur = cur.getNext()\n\t\tcur.setNext(node)\n\t\treturn node",
"def addAtTail(self, val):\n new_tail = Node(val)\n if self._size == 0:\n self._head = new_head\n self._tail = self._head\n else:\n self._tail.next = new_tail\n self._tail = new_tail\n self._size += 1",
"def addAtTail(self, val):\n curr = self.head\n while curr.next:\n curr = curr.next\n new_node = ListNode(val)\n curr.next = new_node\n self.length += 1",
"def push_front(self, val):\n new_node = Node(val, self.head)\n if self.is_empty():\n self.tail = new_node\n self.head = new_node\n self.size += 1",
"def addAtTail(self, val):\n node = ListNode(val)\n self.tail.next = node\n self.tail = node\n self.len += 1",
"def push_front(self, val: Generic[T]) -> None:\n first_node = self.node.next\n\n self.node.next = Node(val)\n latest_first = self.node.next\n\n latest_first.prev = self.node #pushes the node to the front\n latest_first.next = first_node\n first_node.prev = latest_first #rearranges the list",
"def addAtHead(self, val):\n tmp = Node(val)\n tmp.nxt = self.head\n self.head = tmp\n if not self.tail:\n self.tail = self.head",
"def push(self, value): ################# <-\n self.top = Node(value, next=self.top)"
] | [
"0.8086445",
"0.7602821",
"0.75396514",
"0.74361414",
"0.7427383",
"0.7410255",
"0.74002004",
"0.7388654",
"0.73812497",
"0.7362498",
"0.7361017",
"0.73132956",
"0.73118097",
"0.7306703",
"0.72853166",
"0.7268352",
"0.72647965",
"0.7225639",
"0.7210625",
"0.7206143",
"0.71408105",
"0.71408105",
"0.7132019",
"0.7109238",
"0.70833707",
"0.70181113",
"0.6979405",
"0.69201744",
"0.6904901",
"0.68448794"
] | 0.80715704 | 1 |
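The push_front and push_back documents in the two rows above both assume a `Node` class and an `is_empty` helper that the rows do not show. A minimal self-contained sketch of the surrounding singly linked list, under the assumption that `Node` stores a value and a `next_node` pointer:

```python
class Node:
    def __init__(self, val, next_node=None):
        self.val = val
        self.next_node = next_node


class SinglyLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None
        self.size = 0

    def is_empty(self):
        return self.size == 0

    def push_front(self, val):
        new_node = Node(val, self.head)   # new node points at the old head
        if self.is_empty():
            self.tail = new_node          # first element is also the tail
        self.head = new_node
        self.size += 1

    def push_back(self, val):
        new_node = Node(val)
        if self.is_empty():
            self.head = new_node          # first element is also the head
        else:
            self.tail.next_node = new_node
        self.tail = new_node
        self.size += 1


lst = SinglyLinkedList()
lst.push_back(2)
lst.push_front(1)
assert (lst.head.val, lst.tail.val, lst.size) == (1, 2, 2)
```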
List all registered posts | def get(self):
return get_all_posts() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_all_posts():\n post = Post.query.all()\n\n return render_template('all-posts.html', post=post)",
"def get_posts(self):\n return self.blog_posts.all()",
"def post_list(request):\n # Only show the posts that have been published\n posts = Post.objects.filter(date_published__isnull=False)\n return render(request,\n 'blog/post_list.html',\n {'posts': posts}\n )",
"def posts_list(request):\n\n # recupera posts\n posts = Post.objects.select_related(\"owner\").filter(Q(publish_at__lte=now())).all()\n categorias = Categoria.objects.all()\n\n # prepara el contexto de la plantilla\n context = {\n 'post_objects': posts,\n 'categoria_objects': categorias\n }\n\n # renderiza y devuelve la plantilla\n return render(request, 'blogs/inicio.html', context)",
"def get_posts(self): #return list of posts that are associated with this blog_id\n return Post.find_posts_for_blog_id(self.blog_id) #this will return a list of posts objects",
"def post_list(request):\n\timage_post_list = list(ImagePost.objects.all())\n\tvideo_post_list = list(VideoPost.objects.all())\n\tall_post = image_post_list + video_post_list\n\treturn render(request,'devblog/post_list.html', {'posts':all_post})",
"def all(request):\n\n posts = Post.objects.filter()\n\n print(\"made a query\")\n return render(request, 'posts/all.html', {'posts':posts})",
"def get_posts(self):\n return Post.select().where (Post.user == self)",
"def postList(posts):\n post_list = list()\n for post in posts:\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list",
"def get_posts():\n url = app.config['POSTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_posts(response.json())\n raise RuntimeError('Error in retrieving posts.')",
"def list_posts(request):\n if request.method == 'POST':\n category = request.POST.get('category', False)\n posts = Post.objects.select_related('author')\\\n .filter(category=category)\\\n .order_by('-modified')\n # import pdb; pdb.set_trace()\n return render(request, 'posts/index.html',\n {'posts': posts})\n\n posts = Post.objects.select_related('author').order_by('-modified')\n likes = Likes.objects.select_related('post')\n\n return render(request, 'posts/index.html',\n {'posts': posts})",
"def show_posts():\n\n # set page as req.args['page'] coerced to int, or set as one if none is passed\n page = int(request.args.get('page', 1))\n\n # handle private AND public posts if user is logged in, only public if not\n if CURRENT_USER_KEY in session:\n posts = Post.query.order_by(Post.id.desc()).paginate(\n page=page, per_page=10, error_out=True)\n else:\n posts = Post.query.filter_by(is_private='f').order_by(Post.id.desc()).paginate(\n page=page, per_page=10, error_out=True)\n\n all_posts = [post.serialize() for post in posts.items]\n return jsonify(has_next=posts.has_next, posts=all_posts)",
"def index(self):\n \n return self.view.render('index.html', {\"posts\"=posts})",
"def posts_index():\n posts = Post.query.all()\n return render_template('posts.html', posts=posts, post=None)",
"def all_query() -> list:\n data = []\n posts = Posts.query.all()\n for post in posts:\n x = {\n \"title\": post.title,\n \"body\": post.body,\n \"timestamp\": post.timestamp,\n \"id\": post.id,\n \"url\": make_url_from_title(post.title),\n }\n data.append(x)\n return data",
"def get_posts(self):\r\n postList = []\r\n for tag in self.setting.imgurTags:\r\n try:\r\n req = requests.get('%s%s' % (self.setting.tagLink, tag), headers=self.setting.imgurHeaders)\r\n for post in req.json()['data']['items']:\r\n p = self.json_to_post(post, tag)\r\n if p is not None:\r\n postList.append(p)\r\n except Exception as e:\r\n self.logger.log(logger.LogLevel.CRITICAL, 'imgur.get_posts exception(%s): %s' % (tag, e))\r\n break\r\n return postList",
"def get(self):\n\n self.render_posts()",
"def get_posts(request):\n posts = Post.objects.order_by(\"created_date\")\n return render(request, \"blogposts.html\", {\"posts\": posts})",
"def education_post_list(request):\n posts = EducationBlogPost.objects.filter(published_date__lte=timezone.now()\n ).order_by('-published_date')\n return render(request, \"education_center/education_blogposts.html\", {'posts': posts})",
"def list_all(request):\n\n entries = BlogEntry.objects.all()\n data = {'entries': paginate_objects(request, entries),\n 'blog_info': get_blog_info(), 'action_str': 'All Blogs Shown'}\n\n return render_to_response('blog/list_entries.html', data,\n context_instance=get_rq(request))",
"def get_posts(wp):\n from wordpress_xmlrpc.methods.posts import GetPosts\n\n all_posts = []\n\n offset = 0\n increment = 20\n while True:\n posts = wp.call(GetPosts({'number': increment, 'offset': offset, 'post_type': 'post'}))\n if len(posts) == 0:\n break # no more posts returned\n for post in posts:\n all_posts.append(post)\n\n offset = offset + increment\n\n return all_posts",
"def getMyPosts():\n \n cur, user_id = initialise(3)\n cur.execute(\"SELECT username FROM users WHERE id = ?\", [user_id])\n name = cur.fetchall()[0][0]\n cur.execute(\"SELECT * FROM posts WHERE name = ?\", [name])\n posts = cur.fetchall()\n return posts",
"def all_posts_list(request):\n #update is_expired in all posts\n update_posts_expiration()\n #put all posts into post\n post = Post.objects.all()\n #create serializer with the posts\n serializer = ViewPostSerializer(post, many=True)\n #return serializer view\n return Response(serializer.data)",
"def get_all_posts_json():\n\n posts = [\n {\n \"postId\": post.post_id,\n \"postPrompt\" : crud.get_prompt_by_prompt_id(post.prompt_id),\n \"postText\": post.post_text,\n \"location\": post.user_facing_location,\n \"dateCreated\": post.created_at,\n \"toneQualities\": crud.get_tone_qualities_by_post_id(post.post_id),\n }\n for post in crud.get_post_by_user_id(session['user_id'])\n ]\n\n return jsonify(posts)",
"def get_queryset(self):\n try:\n posts = Hashtag.filter_posts_by_hashtag(self.kwargs['hashtag_name'])\n except Hashtag.DoesNotExist:\n raise Http404('Hashtag \"%s\" does not exist' % self.kwargs['hashtag_name'])\n return posts",
"def get_all_posts(self):\n cur = self.conn.cursor()\n\n query = 'SELECT blog.blog_id as id, blog.title as title, ' \\\n 'blog.subtitle as subtitle, ' \\\n 'blog.content as content, blog.date as date, ' \\\n 'author.name as author ' \\\n 'FROM blog, author ' \\\n 'WHERE blog.author_id = author.author_id ' \\\n 'ORDER BY blog_id DESC '\n\n posts = []\n cur.execute(query)\n\n for row in cur.fetchall():\n posts.append(dict(row))\n\n return posts",
"def register_posts(app):\n blog = Blog(app)\n for docname, posts in getattr(app.env, \"ablog_posts\", {}).items():\n for postinfo in posts:\n blog.register(docname, postinfo)",
"def get_queryset(self):\n return Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')",
"def get_queryset(self):\n return Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')",
"def all_blogs(request):\n\n posts = Post.objects.all()\n\n context = {\n 'posts': posts\n }\n\n return render(request, 'blog/blog.html', context)"
] | [
"0.743275",
"0.69572014",
"0.687439",
"0.6832434",
"0.67154205",
"0.66296536",
"0.66215444",
"0.65834826",
"0.6543941",
"0.6530415",
"0.6451439",
"0.64244354",
"0.6422649",
"0.6408737",
"0.6408558",
"0.6405169",
"0.6393117",
"0.6386442",
"0.6368651",
"0.6358445",
"0.63418925",
"0.6328895",
"0.63281816",
"0.63130015",
"0.6294807",
"0.62801933",
"0.62683195",
"0.6266921",
"0.6266921",
"0.6259066"
] | 0.7320014 | 1 |
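The one-line `get` resource above (and the title lookup in the next row) reads like a Flask-RESTX endpoint delegating to a small service layer. A hedged sketch of that wiring, with the app, route path, and in-memory store all assumed for illustration rather than taken from the dataset:

```python
from flask import Flask
from flask_restx import Api, Resource

app = Flask(__name__)
api = Api(app)

POSTS = [{"title": "hello-world", "body": "first post"}]   # stand-in storage

def get_all_posts():
    return POSTS

@api.route("/posts")
class PostList(Resource):
    def get(self):
        return get_all_posts()

# app.run() would serve GET /posts, returning the list as JSON.
```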
get a post given its title | def get(self, title):
post = get_a_post(title)
if not post:
api.abort(404)
else:
return post | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query_by_title(title: str) -> dict:\n post = Posts.query.filter_by(title=title).first()\n if post is None:\n return {\"status\": 404, \"message\": \"No Post Available\"}\n return {\n \"title\": post.title,\n \"body\": markdown.markdown(post.body),\n \"timestamp\": post.timestamp,\n \"id\": post.id,\n \"url\": make_url_from_title(post.title),\n }",
"def get_by_natural_key(self, title):\n try:\n return self.get(title=title)\n except ObjectDoesNotExist:\n logging.getLogger(self.__module__).error('%s \"%s\" does not exist',\n self.model.__name__, title)",
"def get_article(title):\n article = None\n # search for the corresponding title from the memcache\n articles = memcache.get('top_ten')\n if articles and len(articles) > 0:\n for item in articles:\n # workaround to remove all non-alphanumeric characters before comparison\n item_title = re.sub(r'\\W', \"\", item.title)\n art_title = re.sub(r'\\W', \"\", title)\n if item_title == art_title:\n article = item\n break\n # in case the article we're looking for is not in memcache:\n if not article:\n # query the DB\n query = db.Query(Article)\n query.filter('title =', title)\n article = query.get()\n return article",
"def find_movie_by_title(title):\n return Movie.objects.filter(title=title).first()",
"def get_post(post_pk):\n where = \"WHERE pk = ?\"\n values = (post_pk, )\n return Post.select_one(where, values)",
"def _get_post(self):\n post_pk = self.kwargs.get('post_pk', 0)\n return get_object_or_404(Post, pk=post_pk)",
"def get_topic(title):\n return Topic.get(Topic.title == title)",
"def get_snippet(self, title=None):\n for snippet in self.snippets:\n if snippet[\"title\"] == title:\n return snippet\n return None",
"def get_page(title):\n\n page = None\n\n try:\n page = Page.objects.get(page_title=title)\n except Page.DoesNotExist:\n pass\n\n return page",
"def get(self, post_id=None):\n\n if post_id:\n post = Post.query.filter_by(id=post_id).first()\n if not post:\n abort(404)\n return post\n else:\n args = parsers.post_get_parser.parse_args()\n page = args['page'] or 1\n\n # Return the posts with user.\n if args['user']:\n user = User.query.filter_by(username=args['user']).first()\n if not user:\n abort(404)\n posts = user.posts.order_by(\n Post.publish_date.desc()).paginate(page, 30)\n # Return the posts\n else:\n posts = Post.query.order_by(\n Post.publish_date.desc()).paginate(page, 30)\n\n return posts.items",
"def fromtitle(cls, title):\n return Collection.get_by_key_name(cls.getkeyname(title))",
"def get_object(self, id):\n try:\n return Post.objects.get(id=id)\n except Post.DoesNotExist:\n raise Http404",
"def get_post(id, check_author=True):\r\n cur = get_db().cursor()\r\n cur.execute(\r\n 'SELECT p.id, title, body, created, author_id, username'\r\n ' FROM novel.post p JOIN novel.user u ON p.author_id = u.id'\r\n ' WHERE p.id = %s',id )\r\n\r\n post = cur.fetchone()\r\n if post is None:\r\n abort(404, \"Post id {0} doesn't exist.\".format(id))\r\n\r\n if check_author and post['author_id'] != g.user['id']:\r\n abort(403)\r\n\r\n return post",
"def get_project_by_title(title):\n QUERY = \"\"\"\n SELECT * FROM Projects WHERE title = ?\n \"\"\"\n\n db_cursor.execute(QUERY, (title,))\n row = db_cursor.fetchone()\n print \"Project: %s \\nID: %s \\nTitle: %s \\nDescription: %s \\nMax Grade: %s\" % (\n title, row[0], row[1], row[2], row[3])",
"def get_by_title(self, title):\n return Field(self.context,\n ResourcePathServiceOperation(\"getByTitle\", [title], self.resource_path))",
"def title_by_id(id_: int) -> Any:\n post = Posts.query.filter_by(id=id_).first()\n if post is None:\n return \"404\"\n return post.title",
"def get(self, request, post_id):\n post = Evento.objects.get(id=post_id)\n #post = get_object_or_404(Post, id=post_id)\n self.context['post'] = post\n\n self.context['title'] = str(post)\n\n return render(request, self.template, self.context)",
"def get_article(self, slug):\n\t\tarticle = Blog.objects.get(slug=slug)\n\t\treturn article",
"def get_by_title(title):\n query = Session.query(Movie.title)\n result = query.all()\n title_list = [title for title, in result]\n one_item = process.extractOne(title, title_list)\n if one_item:\n result_title, ratio = one_item\n else:\n return None\n if ratio > 60:\n return result_title\n else:\n return None",
"def load_post_by_permalink(self, permalink):\n post = None\n posts = self.session.query(Post).filter(Post.permalink == permalink).all()\n if len(posts) > 0:\n post = posts[0]\n return post",
"def view_post(request, slug_post):\n try:\n post = Entry.objects.filter(status=2).get(slug=slug_post)\n except Entry.DoesNotExist:\n raise Http404\n return render_to_response('blog/post.html', {'post':post, 'DISQUS_SHORTNAME':settings.DISQUS_SHORTNAME}, RequestContext(request))",
"def get_post(self):\n\t\tself.post = graph.get_object(POST_ID)",
"def get(self, post_id):\n key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n post = db.get(key)\n\n # if use request a non-exist post, render 404 error\n if not post:\n self.error(404)\n return\n\n self.render(\"permalink.html\", post = post)",
"def dangerously_get_post(post_id: str):\n return Post.objects.get(eid=post_id)",
"def get_project_by_title(title):\n \n QUERY = \"\"\"SELECT title, description, max_grade FROM Projects WHERE title = ?\"\"\"\n db_cursor.execute(QUERY, (title,))\n row = db_cursor.fetchone()\n print \"Project title: %s, description: %s, and max grade is %s.\" % (\n row[0], row[1], row[2])",
"def get_post(self, postid):\n return self.execute('metaWeblog.getPost', postid, self.username, self.password)",
"def load_post_by_id(self, id):\n post = None\n posts = self.session.query(Post).filter(Post.id == id).all()\n if len(posts) > 0:\n post = posts[0]\n return post",
"def get_entry(title):\n try:\n f = default_storage.open(f\"entries/{title}.md\")\n data = f.read()\n markdowner = Markdown()\n html = markdowner.convert(data)\n return html\n except FileNotFoundError:\n return None",
"def show(self, title):\n\n return Product.query.filter_by(title=title).first()",
"def get(self):\n parser.add_argument('title-word', required=True, help='cannot be blank')\n return find_movie_by_title(parser.parse_args()['title-word'])"
] | [
"0.7257662",
"0.6715822",
"0.6635556",
"0.66111004",
"0.66026914",
"0.65306",
"0.64700407",
"0.6466437",
"0.64379615",
"0.62551534",
"0.6247926",
"0.6238597",
"0.6233855",
"0.6216439",
"0.61509603",
"0.61505276",
"0.6148375",
"0.6121378",
"0.61104965",
"0.60835755",
"0.60384434",
"0.6033756",
"0.6011574",
"0.60014844",
"0.59998286",
"0.5998279",
"0.5964593",
"0.59639657",
"0.59506696",
"0.59366554"
] | 0.8569673 | 0 |
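The row above only shows the resource method; `get_a_post` itself lives elsewhere in the project. Its contract is clear from the caller, though: return the post matching a title, or a falsy value so the resource can `api.abort(404)`. A purely illustrative stand-in backed by an in-memory dict:

```python
POSTS_BY_TITLE = {
    "hello-world": {"title": "hello-world", "body": "first post"},
}

def get_a_post(title):
    # Return the stored post, or None when the title is unknown so the
    # calling resource can abort with a 404.
    return POSTS_BY_TITLE.get(title)

assert get_a_post("hello-world")["body"] == "first post"
assert get_a_post("missing") is None
```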
Reinvite an already invited user. | def reinvite_user(self, user, email):
if self.is_moderator and self.has_perm('accounts.invite_user'):
# Reset email, set a new token and update decision datetime
user.email = email
user.auth_token = generate_unique_id()
user.decision_datetime = timezone.now()
user.save()
return user
else:
raise PermissionDenied | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def invite_user(request):\r\n params = request.params\r\n\r\n email = params.get('email', None)\r\n user = request.user\r\n\r\n if not email:\r\n # try to get it from the json body\r\n email = request.json_body.get('email', None)\r\n\r\n if not email:\r\n # if still no email, I give up!\r\n request.response.status_int = 406\r\n return _api_response(request, {\r\n 'username': user.username,\r\n 'error': \"Please submit an email address\"\r\n })\r\n\r\n email = email.lower()\r\n # first see if the user is already in the system\r\n exists = UserMgr.get(email=email.lower())\r\n if exists:\r\n request.response.status_int = 406\r\n return _api_response(request, {\r\n 'username': exists.username,\r\n 'error': \"This user is already a Bookie user!\"\r\n })\r\n\r\n new_user = user.invite(email.lower())\r\n if new_user:\r\n LOG.debug(new_user.username)\r\n # then this user is able to invite someone\r\n # log it\r\n AuthLog.reactivate(new_user.username)\r\n\r\n # and then send an email notification\r\n # @todo the email side of things\r\n settings = request.registry.settings\r\n msg = ActivationMsg(new_user.email,\r\n \"Enable your Bookie account\",\r\n settings)\r\n\r\n msg.send(\r\n request.route_url(\r\n 'reset',\r\n username=new_user.username,\r\n reset_key=new_user.activation.code))\r\n return _api_response(request, {\r\n 'message': 'You have invited: ' + new_user.email\r\n })\r\n else:\r\n # you have no invites\r\n request.response.status_int = 406\r\n return _api_response(request, {\r\n 'username': user.username,\r\n 'error': \"You have no invites left at this time.\"\r\n })",
"def invite(self,roomName,user):\n\n self.sendCommand(roomName +\" /invite\",user)",
"def invite_user(request):\n moderator = request.user\n site = get_current_site(request)\n\n invitation_form = InviteMemberForm(request.POST)\n\n if invitation_form.is_valid():\n\n # Invite user\n full_name = invitation_form.cleaned_data['full_name']\n email = invitation_form.cleaned_data['email']\n new_user = moderator.invite_new_user(email, full_name)\n\n # Log moderation event\n msg_type = ModerationLogMsg.INVITATION\n log_comment = _('{} invited {}'.format(moderator.get_full_name(),\n new_user.get_full_name()))\n log_moderator_event(msg_type=msg_type,\n user=new_user,\n moderator=moderator,\n comment=log_comment)\n\n # Send email\n subject = _('Welcome to {}'.format(site.name))\n template = 'moderation/emails/invite_new_user.html'\n token = new_user.auth_token\n url = request.build_absolute_uri(\n reverse('accounts:activate-account', args=[token]))\n send_connect_email(subject=subject,\n template=template,\n recipient=new_user,\n sender=moderator,\n site=site,\n url=url)\n\n messages.success(request, _('{} has been invited to {}.'.format(\n new_user.get_full_name(), site.name)))\n\n return redirect('moderation:moderators')\n\n else:\n return moderation_home(request, invitation_form=invitation_form)",
"def invite_user(session, invitee):\n session.invite_event.clear()\n key = b64encode(messaging.common.pkc_encrypt(\n session.get_channel_key(), session.get_encryption_cert(invitee))).decode()\n msg = {\n kk.typ: kk.add_user,\n kk.inviter: session.user,\n kk.invitee: invitee,\n kk.chid: session.chan,\n kk.chkey: key,\n }\n msg[kk.signature] = b64encode(\n messaging.common.create_msg_sig(session, msg)).decode()\n messaging.common.send_msg(session.sock, msg, key=session.symkey)",
"def test_user_invite_cant_edit_users_existing_user(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n user = fake_clients.FakeUser(name=\"[email protected]\")\n\n setup_identity_cache(projects=[project], users=[user])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"user\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"username\": \"new_user\",\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})",
"def invite(self, invite):\n\n self._invite = invite",
"def invite_user():\n\n form = InviteUserForm()\n if form.validate_on_submit():\n invited_by = db.session.query(User).filter_by(id=current_user.id).first()\n user = User(\n invited_by=invited_by.full_name,\n first_name=form.first_name.data,\n last_name=form.last_name.data,\n email=form.email.data)\n db.session.add(user)\n db.session.commit()\n token = user.generate_confirmation_token()\n invite_link = url_for(\n 'account.join_from_invite',\n user_id=user.id,\n token=token,\n _external=True)\n\n get_queue().enqueue(\n send_email,\n recipient=user.email,\n subject='You Are Invited To Join',\n template='account/email/invite',\n user=user.id,\n invited_by=invited_by,\n invite_link=invite_link,\n invite_by=invited_by\n )\n flash('User {} successfully invited'.format(user.full_name),\n 'form-success')\n return redirect(url_for('invite.index'))\n return render_template('invite/new_user.html', form=form)",
"def invite(self):\n pass",
"def revoke_invitation(request):\n site = get_current_site(request)\n\n revocation_form = RevokeInvitationForm(request.POST)\n\n if revocation_form.is_valid():\n\n user_id = revocation_form.cleaned_data['user_id']\n user = get_object_or_404(User, id=user_id)\n\n if user.is_invited_pending_activation \\\n and user.moderator == request.user:\n messages.success(request, _(\n '{} has been uninvited from {}.'.format(\n user.get_full_name(), site.name)))\n\n # Delete the user rather than deactivate it.\n # Removing the email address from the system altogether means\n # that the same email can later be used to create a new account\n # (e.g. if the user applies or is invited by another moderator).\n # Logs related to this user are also removed,\n # resulting in less junk to filter in that view.\n user.delete()\n else:\n raise PermissionDenied\n\n return redirect('moderation:moderators')\n\n else:\n return moderation_home(request, revocation_form=revocation_form)",
"def test_user_invite_cant_edit_users(self):\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"user\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"username\": \"new_user\",\n \"email\": \"[email protected]\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})",
"def test_invited(self) -> None:\n\n self._perform_background_initial_update()\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token)\n\n u2 = self.register_user(\"u2\", \"pass\")\n\n r1stats_ante = self._get_current_stats(\"room\", r1)\n assert r1stats_ante is not None\n\n self.helper.invite(r1, u1, u2, tok=u1token)\n\n r1stats_post = self._get_current_stats(\"room\", r1)\n assert r1stats_post is not None\n\n self.assertEqual(\n r1stats_post[\"current_state_events\"] - r1stats_ante[\"current_state_events\"],\n 1,\n )\n self.assertEqual(\n r1stats_post[\"invited_members\"] - r1stats_ante[\"invited_members\"], +1\n )",
"def remove_invite(self, redditor: str | praw.models.Redditor):\n data = {\"name\": str(redditor), \"type\": \"moderator_invite\"}\n url = API_PATH[\"unfriend\"].format(subreddit=self.subreddit)\n self.subreddit._reddit.post(url, data=data)",
"def _send_existing_agent_user_invite(self):\n standard_invite = self.instance\n try:\n agent_invite = AgentUserInvite.objects.get(\n agent=self._agent_user, organisation=standard_invite.organisation\n )\n except AgentUserInvite.DoesNotExist:\n agent_invite = AgentUserInvite(\n agent=self._agent_user, organisation=standard_invite.organisation\n )\n\n agent_invite.inviter = standard_invite.inviter\n agent_invite.status = AgentUserInvite.PENDING\n agent_invite.save()\n agent_invite.send_confirmation()\n return standard_invite",
"def resend_email(self, userdict):\n return self.post('resend', userdict)",
"def accept_invite(self):\n url = API_PATH[\"accept_mod_invite\"].format(subreddit=self.subreddit)\n self.subreddit._reddit.post(url)",
"def decline_invitation(self, user, group):\n if group.is_invited(user):\n group.remove_invitation(user)",
"async def invite(self, ctx):\n await ctx.send(f\"**{ctx.author.name}**, use this URL to invite me\\n<{discord.utils.oauth_url(self.bot.user.id)}>\")",
"async def invite(self, ctx):\n await ctx.send(f'🐱You can invite me to your server using the following url:\\n{self.invite_url}'\n '\\n\\nYou will need the **Manage Server** permission to add me to a server. '\n f'Run `{self.heleus.command_prefix[0]}help` to see what you can customise!')",
"def invite_users(self, roomName, users, client):\n for room in self.rooms:\n if room.get_name() == roomName:\n if room.verify_owner(client.get_socket()):\n for user in users:\n room.invite_member(user)\n if client.has_name(client):\n name = client.get_name()\n else:\n name = client.get_ip()\n self.send_message('Has sido invidado a la sala {} por '\n 'parte de {}.'.format(roomName, name), user)\n self.send_message('Todos han sido invitados.', client.get_socket())\n else:\n self.send_message('No eres dueno de la sala.', client.get_socket())",
"def put(self, id):\n payload = marshal(api.payload, invite_user)\n taskroom_service.invite_user(id, payload['email'])\n return {'Message': \"User Added to the Task Room\"}",
"def test_resend_delegate(self):\n self.invite.role = self.role_delegate\n self.invite.save()\n url = reverse(\n 'projectroles:api_invite_resend',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(url, method='POST')\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(len(mail.outbox), 1)",
"def test_revoke_delegate(self):\n self.invite.role = self.role_delegate\n self.invite.save()\n url = reverse(\n 'projectroles:api_invite_revoke',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(url, method='POST')\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.invite.refresh_from_db()\n self.assertEqual(self.invite.active, False)",
"def acceptInvite(self, user):\n invite = user if isinstance(user, MyPlexInvite) else self.pendingInvite(user, includeSent=False)\n params = {\n 'friend': int(invite.friend),\n 'home': int(invite.home),\n 'server': int(invite.server)\n }\n url = MyPlexInvite.REQUESTS + f'/{invite.id}' + utils.joinArgs(params)\n return self.query(url, self._session.put)",
"async def invite(self, ctx):\n perms = discord.Permissions.text()\n perms.update(read_messages=True, manage_messages=True,\n mention_everyone=False, send_tts_messages=False)\n await ctx.send(f'Invite me here:\\n<{discord.utils.oauth_url(self.bot.user.id, perms)}>')",
"def remind_users(self, request, pk=None):\n retreat = self.get_object()\n if not retreat.is_active:\n response_data = {\n 'detail': \"Retreat need to be activate to send emails.\"\n }\n return Response(response_data, status=status.HTTP_200_OK)\n\n # This is a hard-coded limitation to allow anonymous users to call\n # the function.\n time_limit = retreat.start_time - timedelta(days=8)\n if timezone.now() < time_limit:\n response_data = {\n 'detail': \"Retreat takes place in more than 8 days.\"\n }\n return Response(response_data, status=status.HTTP_200_OK)\n\n # Notify a user for every reserved seat\n emails = []\n for reservation in retreat.reservations.filter(\n is_active=True, pre_event_send=False):\n send_retreat_reminder_email(reservation.user, retreat)\n reservation.pre_event_send = True\n reservation.save()\n emails.append(reservation.user.email)\n\n response_data = {\n 'stop': True,\n 'emails': emails\n }\n return Response(response_data, status=status.HTTP_200_OK)",
"def invitation(id):\n invitation = get_required(Invitation, id)\n if g.user == invitation.inviter.user:\n flash(\"You can't send an invitation to yourself.\")\n return redirect(url_for('front'))\n if invitation.acceptor_member_id:\n flash(\"This invitation has already been used.\")\n return redirect(url_for('front'))\n clicked_invitation(invitation)\n db.session.commit()\n return redirect(invitation.circle.url)",
"async def rep_user(self, ctx, *, user: discord.Member = None):\n if user and user.bot:\n return await ctx.send_line(\"😔 Sorry but I just can't do that.\")\n if user and user.id == ctx.author.id:\n return await ctx.send_line(\"🙂 Nice try but wouldn't that be unfair?\")\n author_profile = await self.cache.get_profile(ctx.author.id)\n if user is None:\n if author_profile.can_rep:\n res = \"👌 You can rep someone now.\"\n else:\n res = f\"⏳ You can rep again {author_profile.next_rep.humanize()}.\"\n return await ctx.send_line(res)\n\n if author_profile.can_rep:\n target_profile = await self.cache.get_profile(user.id)\n if not target_profile:\n res = self.plugin.data.responses.no_profile.format(user_name=user.name)\n return await ctx.send_line(res)\n await target_profile.rep(author_profile)\n res = f\"You added one reputation point to {user.name}.\"\n await ctx.send_line(res, ctx.author.avatar_url)\n else:\n res = f\"⏳ You can rep again {author_profile.next_rep.humanize()}.\"\n await ctx.send_line(res)",
"async def invite(self, ctx):\n invite = f\"https://discordapp.com/api/oauth2/authorize?client_id={self.bot.user.id}&permissions=67584&scope=bot\"\n await ctx.send(embed=discord.Embed(\n color=discord.colour.Colour.teal(),\n description=f\":mailbox_with_mail: [Invite]({invite}) me to your server!\"))",
"def upgradeFixInvitations():\n from base import get_group_database, get_user_database\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n for user in user_db.root.values():\n for g in group_db.users_groups(user):\n g.remove_invitation(user)\n group_db.invitations.remove_all_user_invitations(user, g)",
"async def resign(self, ctx):\n currency = await bank.get_currency_name(ctx.guild)\n await self.config.user(ctx.author).gameRole.set(\"User\")\n await ctx.send(\n f\"{ctx.author} has spent 10,000 {currency}- to resign from their current job.\"\n )"
] | [
"0.6742321",
"0.65622705",
"0.6341691",
"0.618192",
"0.6178779",
"0.6154837",
"0.61355495",
"0.60448194",
"0.5970474",
"0.59555095",
"0.592783",
"0.59243935",
"0.59088224",
"0.590702",
"0.5906705",
"0.5898827",
"0.577978",
"0.5737973",
"0.5707441",
"0.57018846",
"0.56371075",
"0.5636754",
"0.5630471",
"0.5615821",
"0.55907476",
"0.55854684",
"0.55674404",
"0.5539243",
"0.55353385",
"0.5527742"
] | 0.7661665 | 0 |
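Both the reinvite row above and the approval row below call a `generate_unique_id` helper to mint a fresh auth token; the dataset does not show it. One plausible implementation, assuming the token only needs to be a hard-to-guess opaque string:

```python
import uuid

def generate_unique_id():
    # 32-character random hex string, usable as a single-use auth token.
    return uuid.uuid4().hex

token = generate_unique_id()
assert len(token) == 32 and token != generate_unique_id()
```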
Approve a user's application | def approve_user_application(self, user):
if self.is_moderator and \
self.has_perm('accounts.approve_user_application'):
user.moderator = self
user.moderator_decision = user.APPROVED
user.decision_datetime = timezone.now()
user.auth_token = generate_unique_id()
user.save()
return user
else:
raise PermissionDenied | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def approve(self):\n self._check_if_open()\n data = {\"approved\": True}\n return self.post(\"approve\", data)",
"def can_approve(self, user, **data):\n raise Return(False)",
"def approve_me(message):\n users = hf.get_users()\n for user in users:\n if user[\"id\"] == message._get_user_id():\n if user[\"approval_level\"] == \"unapproved\": # Unknown\n message.reply(Strings['APPROVER_REQUEST'])\n admins = hf.get_admins()\n names = []\n for admin in admins:\n names.append(admin[\"name\"])\n\n approval_message = Strings[\n 'APPROVER_REQUEST_DETAIL'].format(\">, <@\".join(names), user[\"name\"])\n\n #message._client.send_message(config.AUTH_CHANNEL, approval_message)\n message._client.send_message(public_channel, approval_message)\n else:\n message.reply(\":x: Your approval level is already: \" + str(user[\"approval_level\"]))",
"def approve_me(message):\n load_users(message._client.users)\n sender_id = message._get_user_id()\n target = user_list[sender_id].details['name']\n if (user_list[sender_id].is_unknown):\n message.reply(Strings['APPROVER_REQUEST'])\n names = list_to_names(user_list.admin_list)\n approval_message = Strings[\n 'APPROVER_REQUEST_DETAIL'].format(\">, <@\".join(names), target)\n message._client.send_message(config.AUTH_CHANNEL, approval_message)\n else:\n message.reply(\n \"Your status is already: \" + user_list[sender_id].level.name)",
"def action_approve(self):\n if not self.date_approve:\n self.date_approve = fields.Datetime.now()\n\n config = self.env['ka_hr_payroll.config'].default_config()\n if check_rapel_status(self, config):\n self.action_rapel()\n else:\n self.action_done()",
"def test_approve(self):\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.approve(TOOLNAME,TOOLLICENSEDATA)",
"def review_applications(request):\n moderator = request.user\n site = get_current_site(request)\n\n pending = User.objects.filter(registration_method='REQ',\n decision_datetime=None,\n is_active=False)\n\n form = ModerateApplicationForm()\n\n if request.method == 'POST':\n\n form = ModerateApplicationForm(request.POST)\n user = get_object_or_404(User, id=request.POST['user_id'])\n\n if form.is_valid():\n decision = form.cleaned_data['decision']\n comments = form.cleaned_data['comments']\n\n if decision == 'APP':\n confirmation_message = _(\"{}'s account application \"\n \"has been approved.\".format(\n user.get_full_name().title()))\n\n moderator.approve_user_application(user)\n\n # Set log and email settings\n msg_type = ModerationLogMsg.APPROVAL\n url = request.build_absolute_uri(\n reverse('accounts:activate-account',\n args=[user.auth_token]))\n subject = _('Welcome to {}'.format(site.name))\n template = 'moderation/emails/approve_user.html'\n\n elif decision == 'REJ':\n confirmation_message = _(\"{}'s account application \"\n \"has been rejected.\".format(\n user.get_full_name().title()))\n\n moderator.reject_user_application(user)\n\n # Set log and email settings\n msg_type = ModerationLogMsg.REJECTION\n url = ''\n subject = _(('Unfortunately, your application to {} '\n 'was not successful').format(site.name))\n template = 'moderation/emails/reject_user.html'\n\n # Log moderation event\n log_comment = '{}'.format(comments)\n log_moderator_event(msg_type=msg_type,\n user=user,\n moderator=moderator,\n comment=log_comment)\n\n # Send moderation email\n send_connect_email(subject=subject,\n template=template,\n recipient=user,\n sender=moderator,\n site=site,\n url=url)\n\n messages.success(request, confirmation_message)\n\n return redirect('moderation:review-applications')\n\n context = {\n 'pending': pending,\n 'form': form,\n }\n\n return render(request, 'moderation/review_applications.html', context)",
"def approve(user):\n if user.approved:\n logging.warn('noop - User %d already approved', user.id)\n return user\n user.approved = True\n for message in user.messages:\n if message.text == config.MSG_WELCOME:\n session.delete(message)\n session.add(user)\n session.commit()\n return user",
"def Approve(self, request, global_params=None):\n config = self.GetMethodConfig('Approve')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def userapprove_admin(user_id):\n # take the supplied user_id and use that to access a given user.\n\n # User objects list which includes list of all users which can be broken down into editors and sponsors\n # get individual user\n user = db.session.query(User).filter(User.id==user_id).first()\n # update status to approved\n user.user_status = 'approved'\n # commit to database\n db.session.commit()\n\n return redirect(url_for('admin_bp.usersview_admin'))",
"def approve(self):\n self.approved = True\n self.quest_node['approved'] = True\n graph.push(self.quest_node)\n self.payout()",
"def approve_project(cls, project):\n project.status = Project.APPROVED\n project.save()",
"def approve_person(message, target):\n load_users(message._client.users)\n if target == 'me':\n return\n user = user_list.find_user(target)\n\n approver = message._get_user_id()\n if user_list[approver].is_admin:\n if user is not None:\n target_name = user.details['name']\n if user.is_unknown:\n message.reply(\"Approving user: '{}'\".format(target_name))\n user_list[user.details['id']].level = Level.Approved\n user_list.save()\n elif user.is_denied:\n message.reply(Strings['MARKED_DENIED'])\n else:\n message.reply(\"{} is already: {}.\".format(target_name,\n user.level.name))\n else:\n message.reply(Strings['USER_NOT_FOUND'].format(target))\n else:\n message.reply(Strings['CANT_APPROVE'])",
"def approve(self,toolname,data):\n\n self.logger.info(\"approving the tool '%s'\" % (toolname))\n\n po = self.catalog.load_pageobject('ToolsStatusInstalledPage',toolname)\n po.goto_page()\n\n # click the approve link\n po.flip_status_to_approved()\n\n\n po = self.catalog.load_pageobject('ToolsStatusApproveConfirmVersionPage',toolname)\n\n # check for error on page\n err = po.get_error_info()\n if err:\n # update the version information\n old_version = po.version_form.version.value\n new_version = str(float(old_version) + 0.01)\n po.version_form.submit_form({'version':new_version})\n\n # check for error on page\n err = po.get_error_info()\n if err:\n raise RuntimeError('error found on page: %s' % (err))\n\n # check for the success message\n ok = po.get_success_info()\n if not ok:\n raise RuntimeError('missing success message after updating version')\n\n # click the approve link again ?!?\n po = self.catalog.load_pageobject('ToolsStatusInstalledPage',toolname)\n po.flip_status_to_approved()\n\n # confirm the version\n po = self.catalog.load_pageobject('ToolsStatusApproveConfirmVersionPage',toolname)\n po.version_form.submit_form()\n\n # confirm the license\n po = self.catalog.load_pageobject('ToolsStatusApproveConfirmLicensePage',toolname)\n po.submit_form(data)\n\n # check for error on page\n err = po.get_error_info()\n if err:\n raise RuntimeError('error found on page: %s' % (err))\n\n # confirm the tool info\n po = self.catalog.load_pageobject('ToolsStatusApproveConfirmToolInfoPage',toolname)\n po.approve_tool()\n\n # check for the success message\n po = self.catalog.load_pageobject('ToolsStatusApprovedPage',toolname)\n ok = po.get_success_info()\n if not ok:\n raise RuntimeError('missing success message after approving tool info')",
"def approve_person(message, target):\n users = hf.get_users()\n if target == 'me':\n return\n for user in users:\n if user[\"name\"] == target:\n approver = message._get_user_id()\n admins = hf.get_admins()\n for admin in admins:\n if admin[\"id\"] == approver:\n if user is not None:\n if user[\"approval_level\"] == \"unapproved\":\n message.reply(\"Approved user: <@{}>\".format(target))\n user[\"approval_level\"] = \"approved\"\n hf.save_users(users)\n return\n elif user[\"approval_level\"] == \"denied\":\n message.reply(Strings['MARKED_DENIED'])\n return\n else:\n message.reply(\":x: {} is already: {}.\".format(target,\n user[\"approval_level\"]))\n return\n else:\n message.reply(Strings['USER_NOT_FOUND'].format(target))\n return\n\n message.reply(Strings['CANT_APPROVE'])",
"def jao_approve(self):\n print \"JAO approved this form. Current state:\", self.state",
"def test_admin_approval_already_approved(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n self.assertTrue(activated)\n\n user = self.registration_profile.objects.admin_approve_user(\n profile.id, Site.objects.get_current())\n self.assertIsInstance(user, UserModel())\n self.assertIs(user.is_active, True)",
"def update_application(request):\n\n record = RegApplication.query.filter_by(email=request.form['application-email']).first()\n\n record.application_processed = True\n record.application_granted = False if request.form['application-action'] == 'reject' else True\n record.processed_date = datetime.datetime.now()\n db.session.commit()\n\n if not record.application_granted:\n\n send_message(subject='OpenAPS Access Refused',\n email=request.form['application-email'],\n content=f\"\"\"Your application for access to the OpenAPS data portal was rejected for the following reason:\n <br><br>\n '{request.form['reject-reason']}'\"\"\")\n\n return record.project_requests",
"def approve(self, approver: str, to: str, amount, key: bytes):\n raw_tx = self.approve_build_transaction(approver, to, amount)\n signed_tx = self._sign(raw_tx, key)\n self.send_and_wait(signed_tx)",
"def submit(request):\n if not request.user.is_authenticated():\n return proceed(request)\n # If dev has already agreed, continue to next step.\n user = UserProfile.objects.get(pk=request.user.id)\n if not user.read_dev_agreement:\n return redirect('submit.app.terms')\n return manifest(request)",
"def handle_application(sender, instance, **kwargs):\n if instance.accepted is not None:\n if instance.accepted:\n instance.user.userprofile.change_status_developer()\n else:\n instance.user.userprofile.change_status_player()",
"def approve(self, approved_by=\"system\"):\n\n self.confirm_state(completed=False, cancelled=False)\n\n self.is_valid(\"task invalid before approval\")\n\n # We approve the task before running actions,\n # that way if something goes wrong we know if it was approved,\n # when it was approved, and who approved it.\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n\n # approve all actions\n for action in self.actions:\n try:\n action.approve()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\"while approving task\")\n\n self.is_valid(\"task invalid after approval\")\n\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()",
"def post(self, request, *args, **kwargs):\n application = self.get_object()\n app_complete = Application.objects.filter(\n pk=self.kwargs['app_complete']\n ).first()\n if is_application_owner(self.request.user, application) and (\n application.questionnaire.status != 'complete'\n ) and app_complete is not None and (\n app_complete.authorized_email is not None\n ) and app_complete.questionnaire.completed_by_candidate and (\n app_complete.questionnaire.status == 'complete'\n ):\n\n \"\"\"Attach authorized email & questionnaire to application\"\"\"\n application.authorized_email = app_complete.authorized_email\n application.questionnaire = app_complete.questionnaire\n application.save()\n\n \"\"\"Submit application if nomination is complete too\"\"\"\n if application.nomination.status == 'complete':\n submit_application(application)\n\n return redirect(self.get_success_url())\n else:\n raise Http404(_(\"No application found matching the query\"))",
"def approve(self, approver: PrivateKey):\n sig = crypto.get_signature_for_deploy_approval(\n self.hash, approver.private_key, approver.key_algo\n )\n self._append_approval(DeployApproval(approver.account_key, sig))",
"def approve(self, request, object_id, extra_context=None):\n obj = self.get_object(request, unquote(object_id))\n title = self._approve_title(obj)\n\n AdminAddApprovalForm = self._approve_approval_form(request)\n\n form = AdminAddApprovalForm(initial={'prescription': obj})\n if request.method == 'POST':\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n if obj.approval_status == obj.APPROVAL_DRAFT and obj.can_approve:\n # create an approval\n obj.approval_status = obj.APPROVAL_SUBMITTED\n obj.approval_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully submitted for approval.\")\n return HttpResponseRedirect(url)\n elif obj.approval_status == obj.APPROVAL_SUBMITTED:\n if request.POST.get('_cancel'):\n obj.clear_approvals()\n msg = 'Delete: Clearing Approvals/Endorsements', 'Burn ID: {}, Deleted by: {}'. format(obj.burn_id, request.user.get_full_name())\n logger.warning(msg)\n support_email('Delete: Clearing Approvals/Endorsements', msg)\n\n self.message_user(\n request, \"Approval rejected. ePFP is now draft.\")\n return HttpResponseRedirect(url)\n\n form = AdminAddApprovalForm(request.POST,\n initial={'prescription': obj})\n if form.is_valid():\n approval = form.save(commit=False)\n approval.prescription = obj\n approval.creator = request.user\n approval.modifier = request.user\n approval.save()\n obj.approval_status = obj.APPROVAL_APPROVED\n obj.approval_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully approved.\")\n return HttpResponseRedirect(url)\n elif obj.is_approved:\n if obj.is_closed:\n self.message_user(\n request, \"You can't extend an approval after the \"\n \"prescribed fire plan has been closed.\")\n return HttpResponseRedirect(url)\n if request.POST.get('_cancel'):\n self.message_user(\n request, \"Didn't extend approval.\")\n return HttpResponseRedirect(url)\n else:\n approval = obj.current_approval\n if approval and approval.extension_count < 3:\n approval.extension_count = approval.extension_count + 1\n approval.valid_to = approval.next_valid_to\n approval.save()\n self.message_user(\n request, \"Successfully extended approval.\")\n else:\n self.message_user(request, \"You can't extend an \"\n \"approval more than 3 times.\")\n return HttpResponseRedirect(url)\n\n admin_form, media = self._approve_form(request, obj, form)\n\n context = {\n 'title': title,\n 'current': obj,\n 'form': admin_form,\n 'media': media,\n 'errors': None,\n }\n return TemplateResponse(request, \"admin/prescription/prescription/\"\n \"approval.html\", context,\n current_app=self.admin_site.name)",
"def hod_approve(self):\n print \"HOD approved this form. Current state:\", self.state",
"def change_approval(self, status):\r\n if status == 'approve':\r\n return self.approve()\r\n elif status == 'disapprove':\r\n return self.disapprove()",
"def authorise(data, ind):\n global approved\n global pending_sheet\n approved.append_row(data)\n ind += 1\n pending_sheet.delete_rows(ind)\n print(colored('\\nApplication authorised.\\n', 'cyan', attrs=['bold']))",
"def approved(message):\n hf.query_users(message, hf.get_users(), \"approved\")",
"def approve (self, response) :\n if 'event' in response and 'moderator' in response :\n eventId = response ['event']\n userId = response ['moderator']\n else :\n raise ModerationError (response)\n\n mod_status = 'OK'\n if 'status' in response :\n mod_status = response ['status']\n \n event = Event.object.get (id = eventId)\n approval = Approval (approved = event, moderatorId = userId, status = mod_status)\n approval.save ()\n self.editValues (event.answer, response)"
] | [
"0.7413602",
"0.71029663",
"0.67888886",
"0.6747213",
"0.67318124",
"0.67286676",
"0.6719018",
"0.66766196",
"0.6670902",
"0.65097535",
"0.64031065",
"0.63882875",
"0.63717735",
"0.63500285",
"0.634869",
"0.63414145",
"0.63078153",
"0.6297645",
"0.62773865",
"0.6240185",
"0.6192649",
"0.61336243",
"0.611977",
"0.6088279",
"0.60658",
"0.6056476",
"0.6044347",
"0.6042677",
"0.60321546",
"0.60213864"
] | 0.79233325 | 0 |
Reject a user's application | def reject_user_application(self, user):
    if self.is_moderator \
            and self.has_perm('accounts.reject_user_application'):
        user.moderator = self
        user.moderator_decision = user.REJECTED
        user.decision_datetime = timezone.now()
        user.save()
        return user
    else:
        raise PermissionDenied | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def admin_reject(user):\n if user.comments in (None or \"\"):\n return\n\n subject = \"ECE/CIS Account - Account Application rejected for %s\" % user.username\n application = \"https://www.eecis.udel.edu/NewAccount/\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n sponsor = \"%[email protected]\" % user.sponsor\n \n message = \"Your ECE/CIS Account has been rejected by ECE/CIS faculty adminstrators.\\n\" % user.sponsor\n message += \"The reason given for rejection was:\\n\\n%s\\n\\n\" % user.comments\n message += \"You may re-apply with corrected information at %s\\n\" % application\n message += \"Please don't reply to this email. If have any questions, please \\n\"\n message += \"please post a ticket as an outsider at %s\" % helprequest\n message += \"-- ECE\\CIS Labstaff\"\n\n\n send('[email protected]', 'ECE/CIS Account System', \\\n [user.email, sponsor], subject, message, MAILHOST)",
"def serverReject(self):\n self.handshake_deferred.errback(ConnectionDeny(code=403, reason=\"Access denied\"))\n self.cleanup()\n logger.debug(\"WebSocket %s rejected by application\", self.reply_channel)\n self.factory.log_action(\"websocket\", \"rejected\", {\n \"path\": self.request.path,\n \"client\": \"%s:%s\" % tuple(self.client_addr) if self.client_addr else None,\n })",
"def _reject(self, reason):\n log.error('Rejected: %s' % reason)\n\n self._remove_changes()\n self._remove_files()\n\n if self.user is not None:\n email = Email('importer_reject_maintainer')\n package = self.changes.get('Source', '')\n\n self.send_email(email, [self.user.email], package=package, message=reason)\n sys.exit(1)",
"async def deny(self, ctx: commands.Context, target: discord.Member):\n try:\n accepter = get(ctx.guild.roles, id=await self.config.guild(ctx.guild).accepter_id())\n except TypeError:\n accepter = None\n if not accepter:\n if not ctx.author.guild_permissions.administrator:\n return await ctx.send(\"Uh oh, you cannot use this command.\")\n else:\n if accepter not in ctx.author.roles:\n return await ctx.send(\"Uh oh, you cannot use this command.\")\n try:\n applicant = get(ctx.guild.roles, id=await self.config.guild(ctx.guild).applicant_id())\n except TypeError:\n applicant = None\n if not applicant:\n applicant = get(ctx.guild.roles, name=\"Staff Applicant\")\n if not applicant:\n return await ctx.send(\n \"Uh oh, the configuration is not correct. Ask the Admins to set it.\"\n )\n if applicant in target.roles:\n await ctx.send(\"Would you like to specify a reason? (yes/no)\")\n pred = MessagePredicate.yes_or_no(ctx)\n try:\n await self.bot.wait_for(\"message\", timeout=30, check=pred)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n if pred.result:\n await ctx.send(\"Please, specify your reason now.\")\n\n def check(m):\n return m.author == ctx.author\n\n try:\n reason = await self.bot.wait_for(\"message\", timeout=120, check=check)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n await target.send(\n f\"Your application in {ctx.guild.name} has been denied.\\n*Reason:* {reason.content}\"\n )\n else:\n await target.send(f\"Your application in {ctx.guild.name} has been denied.\")\n await target.remove_roles(applicant)\n await ctx.send(f\"Denied {target.mention}'s application.\")\n else:\n await ctx.send(f\"Uh oh. Looks like {target.mention} hasn't applied for anything.\")",
"async def deny(self, ctx: commands.Context, target: discord.Member):\n try:\n accepter = get(ctx.guild.roles, id = await self.config.guild(ctx.guild).accepter_id())\n except TypeError:\n accepter = None\n if not accepter:\n if not ctx.author.guild_permissions.administrator:\n return await ctx.send(\"Uh oh, you cannot use this command.\")\n else:\n if accepter not in ctx.author.roles:\n return await ctx.send(\"Uh oh, you cannot use this command.\")\n try:\n applicant = get(ctx.guild.roles, id = await self.config.guild(ctx.guild).applicant_id())\n except TypeError:\n applicant = None\n if not applicant:\n applicant = get(ctx.guild.roles, name=\"Staff Applicant\")\n if not applicant:\n return await ctx.send(\"Uh oh, the configuration is not correct. Ask the Admins to set it.\")\n if applicant in target.roles:\n await ctx.send(\"Would you like to specify a reason? (yes/no)\")\n pred = MessagePredicate.yes_or_no(ctx)\n try:\n await self.bot.wait_for(\"message\", timeout=30, check=pred)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n if pred.result:\n await ctx.send(\"Please, specify your reason now.\")\n\n def check(m):\n return m.author == ctx.author\n\n try:\n reason = await self.bot.wait_for(\n \"message\", timeout=120, check=check\n )\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n await target.send(\n f\"Your application in {ctx.guild.name} has been denied.\\n*Reason:* {reason.content}\"\n )\n else:\n await target.send(\n f\"Your application in {ctx.guild.name} has been denied.\"\n )\n await target.remove_roles(applicant)\n await ctx.send(f\"Denied {target.mention}'s application.\")\n else:\n await ctx.send(\n f\"Uh oh. Looks like {target.mention} hasn't applied for anything.\"\n )",
"def reject_appl(data, ind):\n global rejected\n global pending_sheet\n rejected.append_row(data)\n ind += 1\n pending_sheet.delete_rows(ind)\n print(colored('\\nApplication rejected.\\n', 'cyan', attrs=['bold']))",
"def reject(self):\n self.skype.conn(\"PUT\", \"{0}/users/{1}/invites/8:{2}/decline\"\n .format(SkypeConnection.API_CONTACTS, self.skype.userId, self.userId),\n auth=SkypeConnection.Auth.SkypeToken)",
"def reject(self):\n pass",
"async def reject_challenge(self, user_id, *, delay=0, lifespan=math.inf):\n await self.user_command(\n \"\", \"reject\", user_id, delay=delay, lifespan=lifespan\n )",
"def review_applications(request):\n moderator = request.user\n site = get_current_site(request)\n\n pending = User.objects.filter(registration_method='REQ',\n decision_datetime=None,\n is_active=False)\n\n form = ModerateApplicationForm()\n\n if request.method == 'POST':\n\n form = ModerateApplicationForm(request.POST)\n user = get_object_or_404(User, id=request.POST['user_id'])\n\n if form.is_valid():\n decision = form.cleaned_data['decision']\n comments = form.cleaned_data['comments']\n\n if decision == 'APP':\n confirmation_message = _(\"{}'s account application \"\n \"has been approved.\".format(\n user.get_full_name().title()))\n\n moderator.approve_user_application(user)\n\n # Set log and email settings\n msg_type = ModerationLogMsg.APPROVAL\n url = request.build_absolute_uri(\n reverse('accounts:activate-account',\n args=[user.auth_token]))\n subject = _('Welcome to {}'.format(site.name))\n template = 'moderation/emails/approve_user.html'\n\n elif decision == 'REJ':\n confirmation_message = _(\"{}'s account application \"\n \"has been rejected.\".format(\n user.get_full_name().title()))\n\n moderator.reject_user_application(user)\n\n # Set log and email settings\n msg_type = ModerationLogMsg.REJECTION\n url = ''\n subject = _(('Unfortunately, your application to {} '\n 'was not successful').format(site.name))\n template = 'moderation/emails/reject_user.html'\n\n # Log moderation event\n log_comment = '{}'.format(comments)\n log_moderator_event(msg_type=msg_type,\n user=user,\n moderator=moderator,\n comment=log_comment)\n\n # Send moderation email\n send_connect_email(subject=subject,\n template=template,\n recipient=user,\n sender=moderator,\n site=site,\n url=url)\n\n messages.success(request, confirmation_message)\n\n return redirect('moderation:review-applications')\n\n context = {\n 'pending': pending,\n 'form': form,\n }\n\n return render(request, 'moderation/review_applications.html', context)",
"def reject(self, responder):\n self._apply_decision(self.Status.REJECTED, responder)",
"def get_everyone_denied(self):",
"def on_buttonBox_rejected(self):\n self.reject()",
"async def appcheck(self, ctx: commands.Context, user_id: discord.Member):\n return await ctx.send(\n \"This command is currently being reworked, follow updates in The Kompound\"\n )",
"def reject(self):\r\n QtGui.QDialog.reject(self)",
"def no_reason(message, db):\n message.reply(Strings['GRANT_EXAMPLE'].format(db))",
"def app_permission_denied(self, request, message=None):\n if not request.successful_authenticator and not message:\n raise exceptions.NotAuthenticated()\n if message:\n raise exceptions.PermissionDenied(detail=message)\n raise exceptions.PermissionDenied(detail=message)",
"def Reject(self, request, global_params=None):\n config = self.GetMethodConfig('Reject')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def reject(self):\n print \"This form has been rejected. Current state:\", self.state",
"def sponsor_reject(user):\n if user.comments in (None or \"\"):\n return\n\n subject = \"ECE/CIS Account - Account Application rejected for %s\" % user.username\n application = \"https://www.eecis.udel.edu/NewAccount/\"\n \n message = \"Your ECE/CIS Account has been rejected by the faculty sponsor you selected (%s).\\n\" % user.sponsor\n message += \"The reason given for rejection was:\\n\\n%s\\n\\n\" % user.comments\n message += \"You may re-apply with corrected information at %s\\n\" % application\n message += \"Please don't reply to this email. If have any questions, please contact\\n\"\n message += \"the faculty member you listed as sponsor or labstaff.\\n\\n\"\n message += \"-- ECE\\CIS Labstaff\"\n \n send('[email protected]', 'ECE/CIS Account System', \\\n [user.email], subject, message, MAILHOST)",
"def on_reject(self):\n self.state = REJECTED\n self._reject()",
"def update_application(request):\n\n record = RegApplication.query.filter_by(email=request.form['application-email']).first()\n\n record.application_processed = True\n record.application_granted = False if request.form['application-action'] == 'reject' else True\n record.processed_date = datetime.datetime.now()\n db.session.commit()\n\n if not record.application_granted:\n\n send_message(subject='OpenAPS Access Refused',\n email=request.form['application-email'],\n content=f\"\"\"Your application for access to the OpenAPS data portal was rejected for the following reason:\n <br><br>\n '{request.form['reject-reason']}'\"\"\")\n\n return record.project_requests",
"def denied(message):\n hf.query_users(message, hf.get_users(), \"denied\")",
"def reject_proposal(self, widget):\n print(\"Rejecting project\")\n proposal_id = subprocess.check_output(\n ['python3', os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/Sawtooth/bin/code_smell.py',\n 'list',\n '--type', 'proposal', '--active', '1', '--url', 'http://127.0.0.1:' + self.api])\n proposal_id = proposal_id.decode('utf-8').split(' ')[0]\n try:\n clocker = open('votelock.txt', 'r').read()\n if proposal_id in clocker:\n ErrorDialog(self, \"You already voted!\")\n return\n except:\n pass\n print(['python3', os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/Sawtooth/bin/code_smell.py',\n 'vote',\n '--id', proposal_id, '--vote', 'no', '--url', 'http://127.0.0.1:' + self.api])\n subprocess.Popen(\n ['python3', os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/Sawtooth/bin/code_smell.py',\n 'vote',\n '--id', proposal_id, '--vote', 'no', '--url', 'http://127.0.0.1:' + self.api])\n locker = open('votelock.txt', 'w')\n locker.write(proposal_id)\n locker.close()\n try:\n vote = int(self.lbl_reject.get_text().split(\":\")[1][1:])+1\n self.lbl_accept.set_text(self.lbl_reject.get_text().split(\":\")[0]+\" \"+str(vote))\n except:\n pass",
"def handle_application(sender, instance, **kwargs):\n if instance.accepted is not None:\n if instance.accepted:\n instance.user.userprofile.change_status_developer()\n else:\n instance.user.userprofile.change_status_player()",
"def unaccept_offer(self, pname, matchid):\n msg = '%s declined the match' % (pname)\n self._rem_offer(matchid, msg)\n msg = '%s canceled the game' % (pname)\n self._rem_game(matchid, msg)",
"def ignore(self):\n self.accepted = False",
"def ignore(self):\n self.accepted = False",
"def reject(request, pk=None):\n # Check request is still valid or not\n friend_request = get_or_none(FriendRequest, pk=pk)\n # if request is not valid\n if friend_request is None:\n return Response({'status': '400', 'code': 'E_REQUEST_NOT_FOUND',\n 'detail': code['E_REQUEST_NOT_FOUND']}, status=400)\n # Delete request\n friend_request.delete()\n return Response({'status': '201', 'code': 'OK_REJECT_FRIEND_REQUEST',\n 'detail': code['OK_REJECT_FRIEND_REQUEST']}, status=201)",
"def no_reason(message, db):\n #message.reply(Strings['GRANT_EXAMPLE'].format(db))\n try:\n hf.grant(message, db.lower(), \"[EXTENDING ACCESS TIME]\", False)\n except Exception as e:\n message._client.send_message(errors_channel, \"```{}```\".format(e))"
] | [
"0.6770561",
"0.6585959",
"0.65521705",
"0.6491148",
"0.6487616",
"0.646831",
"0.63601846",
"0.63346696",
"0.6136011",
"0.61090565",
"0.607409",
"0.6064712",
"0.60478044",
"0.59528744",
"0.5923107",
"0.5874209",
"0.5872581",
"0.5852314",
"0.58413756",
"0.58047056",
"0.5798305",
"0.5784694",
"0.57806647",
"0.5743636",
"0.5707335",
"0.56867456",
"0.5685694",
"0.5685694",
"0.5675082",
"0.56601423"
] | 0.7710392 | 0 |
Return a user's proficiency in a particular skill as a percentage, based on the position of the proficiency in PROFICIENCY_CHOICES. | def get_proficiency_percentage(self):
    choice_values = [choice[0] for choice in self.PROFICIENCY_CHOICES]
    if '' in choice_values:
        choice_values.remove('')  # Remove the empty proficiency choice
    choice_values.sort()  # Ensure values are in the correct order
    value = choice_values.index(self.proficiency) + 1
    factor = 100 / len(choice_values)
    percentage = round(value * factor)
    return percentage | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_opinion_percent(self):\n return (self.get_percent()+100)/2",
"def profit_per_item_percentage(self, pk=None):\n total_profit_percentage = 0\n total_cost = self.item_cost + self.shipping_cost + self.listing_fee + self.final_value_fee\n total_paid = self.shipping_paid + self.item_paid\n total_profit_percentage = round(100*((total_paid - total_cost) / total_cost), 2)\n return total_profit_percentage",
"def set_effective_field_goal_percentage(self):\n bx = self.get_standard_stats()\n tcInt = float(bx[\"t2p_int\"] + bx[\"t3p_int\"])\n tcConv = float(bx[\"t2p_conv\"] + bx[\"t3p_conv\"])\n result = 0.00\n if tcInt > 0:\n result = ((tcConv + (0.5 * float(bx[\"t3p_conv\"]))) / tcInt) * 100\n self.effective_field_goal_percentage = \"%.2f\" % round(result, 2)",
"def fidelity_promo(percent: float) -> Promotion:\n return lambda order: (\n order.total() * percent / 100 if order.customer.fidelity >= 1000 else 0\n )",
"def percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"percentage\")",
"def get_percentage_practices(measure_table):\n with open(OUTPUT_DIR / \"practice_count.json\") as f:\n num_practices = json.load(f)[\"num_practices\"]\n\n num_practices_in_study = get_number_practices(measure_table)\n\n return np.round((num_practices_in_study / num_practices) * 100, 2)",
"def profesionalRecommendation(user_preferences: dict, matcher: NodeMatcher):\r\n profesional = user_preferences[\"vida_profesional\"]\r\n equal_styles = list(matcher.match(\"User\", prof = profesional))\r\n return equal_styles",
"def GetProportion(self):\r\n\r\n return self.proportion",
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)",
"def get_percentage(self):\n return self.percentage",
"def get_percentage(self):\n return self.percentage",
"def percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"percentage\")",
"def get_percentage(self):\n return self.PotTax_percentage",
"def proportional_strategy(our_hist, their_hist):\n if len(our_hist) == 0 or len(their_hist) == 0:\n return choice(CHOICES)\n freqs = count(their_hist)\n prediction_for_them = choices(CHOICES, weights=freqs)[0]\n return CHOICES[(prediction_for_them + 1) % 3]",
"def calculate_penalty(self):\n if AT.PENALTY not in self.attributes:\n return (0, 1)\n return self.attributes[AT.PENALTY].calculate(self)",
"def score_professor_conflicts(self):\n prof_conflict_score = 0\n multiplier = 4\n \n for day_num in range(self.num_days):\n \n current_day = self.days[ day_num ]\n num_conflicts = 0\n \n for prof_name in current_day.keys():\n if not self.get_prof_by_name[prof_name].available( day_num ):\n num_conflicts += 1\n \n prof_conflict_score += multiplier * ( num_conflicts ** 2 )\n \n self.prof_conflict_score = prof_conflict_score\n return self.prof_conflict_score",
"def administer(self):\n\n score = 0.0\n for question in self.questions:\n if question.ask_and_evaluate() is True:\n score += 1\n return (score / len(self.questions)) * 100",
"def envisaged_profit(self):\n profit = round(\n self.calcul_buy_nb_action() * self.take_profit - self.investment_price(),\n 2,\n )\n percent_profit = round(profit * 100 / self.capital, 2)\n return profit, percent_profit",
"def adjustment_percentage(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment_percentage\")",
"def adjustment_percentage(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"adjustment_percentage\")",
"def get_crawlera_incapsula_percent(crawlera_user):\n if crawlera_user:\n return 0\n else:\n return 100",
"def factor_in_multiple_professors(self):\n professors = [professor for professor in self.course.professors if professor.lower() != \"none\"]\n number_professors = len(set(professors))\n if number_professors > 1:\n self.score = self.score + number_professors",
"def scoreSkills(self, skills, work_hist_skills, req_skills):\n\n if work_hist_skills:\n score = len(set(work_hist_skills).intersection(req_skills))\n else:\n score = len(set(skills).intersection(req_skills))\n\n req_skills_len = len(req_skills)\n\n return score/req_skills_len if score != 0 else 0",
"def set_usg_percentage(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n tcInt = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n a = tcInt + (Decimal('0.44')*bx[\"tl_int\"]) + bx[\"turnovers\"]\n b = team[\"minutes\"]/5\n c = (team[\"t2p_int\"] + team[\"t3p_int\"]) + (Decimal('0.44')*team[\"tl_int\"]) + team[\"turnovers\"]\n result = 0.00\n if bx[\"minutes\"] > 0:\n result = ((Decimal(a)*Decimal(b))/(bx[\"minutes\"]*c))*100\n self.usg_percentage = \"%.2f\" % round(result, 2)",
"def percentage(my_list, item):\n return 100.0 * frequency(my_list, item)",
"def adjusted_pa(personal_allowance, salary):\n\t\tlo, hi = 100000, 120000\n\t\tif salary <= lo:\n\t\t\treturn personal_allowance\n\t\telif salary >= hi:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn (salary - 100000) / 2",
"def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"",
"def propabilityLVQ(self):\n self.labels = self.labelingLVQ()\n for i in range(self.labels.shape[0]):\n for j in range(self.labels.shape[1]):\n for k in range(self.labels.shape[2]):\n total = sum(self.labels[i, j, k] for i in range(self.labels.shape[0]))\n if total == 0. :\n continue\n else:\n self.propa[i, j, k] = self.labels[i, j, k] / total\n self.propa[i, j, k] = round(self.propa[i, j, k], 2)\n return self.propa",
"def get_model_profits(model, cost_benefit, X_test, y_test):\n predicted_probs = model.predict_proba(X_test)[:, 1]\n profits, thresholds = profit_curve(cost_benefit, predicted_probs, y_test)\n\n return profits, thresholds",
"def _calcProminenceMetric(self, harmonicComplexity, metricalAccentLevel):\n prominenceScores = []\n profileScores = self.chordProfile.getScores()\n MAXSCORE = float(max(profileScores.values()))\n MIDVALUE = MAXSCORE / 2\n\n # step through candidate triads and calculate prominence score\n for triad in self._candidateTriads:\n code = triad.getCode()\n score = float(profileScores[code])\n prominenceScores.append(score)\n\n # modify scores based on harmonicComplexity\n attractionRate = self._calcHarmonicComplexityImpactOnProfile(\n harmonicComplexity, metricalAccentLevel)\n prominenceScores = RhythmGenerator.compressValues(MIDVALUE, prominenceScores,\n attractionRate)\n\n return prominenceScores"
] | [
"0.624777",
"0.5969812",
"0.58800423",
"0.57505316",
"0.57186955",
"0.57097393",
"0.57036775",
"0.56423104",
"0.5622649",
"0.5582169",
"0.5582169",
"0.55723554",
"0.5554754",
"0.55056655",
"0.5428772",
"0.53997266",
"0.53909737",
"0.53796136",
"0.53647095",
"0.53647095",
"0.5341981",
"0.53225017",
"0.530566",
"0.5294536",
"0.5292997",
"0.52811795",
"0.5279743",
"0.5279741",
"0.52676153",
"0.5254112"
] | 0.8257786 | 0 |
Generate the preparation files for the projects in a run | def format_preparation_files(run_dir, sample_sheet, output_dir, pipeline,
                             verbose):
    sample_sheet = KLSampleSheet(sample_sheet)
    df_sheet = sample_sheet_to_dataframe(sample_sheet)
    if pipeline == 'atropos-and-bowtie2':
        click.echo('Stats collection is not supported for pipeline '
                   'atropos-and-bowtie2')
    else:
        stats = run_counts(run_dir, sample_sheet)
        stats['sample_name'] = \
            df_sheet.set_index('lane', append=True)['sample_name']
    # returns a map of (run, project_name, lane) -> preparation frame
    preps = preparations_for_run(run_dir, df_sheet, pipeline=pipeline)
    os.makedirs(output_dir, exist_ok=True)
    for (run, project, lane), df in preps.items():
        fp = os.path.join(output_dir, f'{run}.{project}.{lane}.tsv')
        if pipeline == 'fastp-and-minimap2':
            # stats are indexed by sample name and lane, lane is the first
            # level index. When merging, make sure to select the lane subset
            # that we care about, otherwise we'll end up with repeated rows
            df = df.merge(stats.xs(lane, level=1), how='left',
                          on='sample_name')
        # strip qiita_id from project names in sample_project column
        df['sample_project'] = df['sample_project'].map(
            lambda x: re.sub(r'_\d+$', r'', x))
        # center_project_name is a legacy column that should mirror
        # the values for sample_project.
        df['center_project_name'] = df['sample_project']
        df.to_csv(fp, sep='\t', index=False)
        if verbose:
            project_name = remove_qiita_id(project)
            # assume qiita_id is extractable and is an integer, given that
            # we have already passed error-checking.
            qiita_id = project.replace(project_name + '_', '')
            print("%s\t%s" % (qiita_id, abspath(fp))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def task_generate_tasks():\n \n yield {\n 'basename': 'generate_tasks',\n 'name': None,\n # 'doc': 'docs for X',\n 'watch': ['trains/'],\n 'task_dep': ['create_folders'],\n }\n \n for root, dirs, files in os.walk('trains/',topdown=False):\n for f in files:\n #print(f)\n yield template_train_model(os.path.join(root,f))",
"def project():",
"def project():",
"def project():",
"def generate_files(self):\n\t\tapply_stemmer, xml_file, query_file, expected_file = self.read_config_file()\n\t\tself.generate_query_file(query_file, xml_file, apply_stemmer)\n\t\tself.generate_expected_file(expected_file, xml_file)\n\t\tlogging.info('FINALIZADO: MÓDULO PROCESSADOR DE CONSULTAS')",
"def create_files(project_name, root_dir):\r\n root_dir = projectfolders.create_path(root_dir, project_name) #Modify the root\r\n \r\n write_setup(project_name, root_dir)\r\n write_inits(project_name, root_dir)\r\n write_tests(project_name, root_dir)",
"def _prepare_projects(self):\n self._projects = {}\n self._cfgs = {}\n self._plugins = {}\n\n working_bins = []\n for b in self._seed_bins:\n if any([nb in b for nb in self._ignore_bins]):\n continue\n\n log.info(\"Building %s CFG (this may take some time)\" % b.split('/')[-1])\n try:\n blob = False\n try:\n self._projects[b] = angr.Project(b, auto_load_libs=False)\n except:\n log.info(\"We got a blob\")\n self._projects[b] = angr.Project(b, auto_load_libs=False, load_options={'main_opts': {'custom_arch': self.config['arch'], 'backend': 'blob', 'custom_base_addr': int(self.config['base_addr'], 16)}})\n blob = True\n\n self._cfgs[b] = self._projects[b].analyses.CFG(collect_data_references=True, extra_cross_references=True)\n\n self._plugins[b] = []\n\n if blob:\n memcplike = find_memcmp_like(self._projects[b], self._cfgs[b])\n else:\n memcplike = []\n\n for plugin in self._enabled_plugins:\n self._plugins[b].append(plugin(self._projects[b], self._cfgs[b], self._fw_path, memcmp_like_functions=memcplike,log=log))\n working_bins.append(b)\n except Exception as e:\n log.warning(\"Skipping binary %s\" % b)\n import ipdb; ipdb.set_trace()\n self._seed_bins = list(working_bins)",
"def _build_pre_project_template(self, output_filename=\"{}_pr_p.json\"):\n template = actions.ActionsTemplate()\n\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n resource_cls.register_type_pre_project_template(self, template)\n for r in self.get_resources(resource_type):\n r.register_pre_project_template(template)\n\n if template:\n output_filename = output_filename.format(self._get_next_build_sequence_id())\n self.puts(colored.cyan(output_filename))\n with open(os.path.join(self.build_path, output_filename), 'w') as f:\n f.write(template.to_json(indent=4))",
"def main():\n global GOLIVE # If False, it's a dry run only\n global PROJECT_ROOT\n global CAD_SOURCE\n global REVIT_SOURCE\n global GENERIC_SOURCE\n global FOLDER_LIST\n global logger\n\n logger = logging.getLogger('__name__')\n stream_handler = logging.StreamHandler()\n logger.addHandler(stream_handler)\n logger.setLevel(logging.INFO)\n\n logger.debug(sys.argv)\n parser = argparse.ArgumentParser(description='Create a project')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-i', action='store_true', help=\"Show INFO messages\")\n group.add_argument('-d', action='store_true', help=\"Show DEBUG messages\")\n parser.add_argument('-t', action='store_true', help='Test: dry run only')\n parser.add_argument('-r', help=\"Set root directory\")\n parser.add_argument('project_data', nargs='+', help=\"<num>%,<name>%<type>\")\n\n args = parser.parse_args(sys.argv[1:])\n logger.debug(args)\n if args.i:\n logger.info('Setting logging level to INFO')\n logger.setLevel(logging.INFO)\n elif args.d:\n logger.info('Setting logging level to DEBUG')\n logger.setLevel(logging.DEBUG)\n if args.t:\n GOLIVE = False\n logger.info('Dry run...')\n if args.r:\n PROJECT_ROOT = args.r\n logger.info(f'Setting PROJECT_ROOT to {args.r}')\n\n CAD_SOURCE = os.path.join(PROJECT_ROOT, 'Templates', 'CAD_Template')\n REVIT_SOURCE = os.path.join(PROJECT_ROOT, 'Templates', 'Revit_Template')\n GENERIC_SOURCE = os.path.join(PROJECT_ROOT,\n 'Templates', 'Generic_Template')\n FOLDER_LIST = os.listdir(PROJECT_ROOT)\n project_info = ' '.join(args.project_data) # The parser split at spaces\n logger.debug(f'Project info: {project_info}')\n project_info = project_info.split('%') # Divide it into our 3 fields\n project_number, project_name, project_type = project_info\n assert project_type in ['Revit', 'CAD', 'Generic']\n\n if checkNewProject(project_number, project_name): # Sanity checks\n success = createProject(project_number, project_name, project_type)\n if success:\n logger.info(f'Created project {project_number} {project_name}')\n else:\n logger.error('Project creation failed.')",
"def prepare_run_directory(resource_types, trecs_root_dir):\n # Copy the executables and required files in 'run' directory.\n sources = {path.join(trecs_root_dir, 'src', 'model', 'grid'),\n path.join(trecs_root_dir, 'src', 'api'),\n path.join(trecs_root_dir, 'src', 'module'),\n path.join(trecs_root_dir, 'src', 'util'),\n path.join(trecs_root_dir, 'src', 'router')}\n\n for source in sources:\n for dirpath, _, filenames in walk(source):\n for filename in filenames:\n from_path = path.join(dirpath, filename)\n to_path = path.join(trecs_root_dir, 'run', filename)\n copy2(from_path, to_path)\n\n # From resource model repositories, copy all the directory contents as they are\n for resource in resource_types:\n copy_tree(path.join(trecs_root_dir, 'src', 'model', 'resource', resource),\n path.join(trecs_root_dir, 'run'))",
"def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, os.path.basename(name))\n print(\"Wrote contents for \" + name)",
"def prepare(self):\n import tempfile \n import evoware\n self.f_project = tempfile.mkdtemp(prefix='evoware_cherrypick_')\n evoware.plates.index.clear()",
"def cmd_generate_requirements(): \n \n for env in ('dev', 'test'):\n source = Path(ROOT, \"requirements\", f\"{env}.txt\")\n target = Path(ROOT, \"requirements\", f\"{env}.in\")\n os.system(f\"pip-compile --output-file={source} {target}\")",
"def cmd_generate_requirements(): \n \n for env in ('dev', 'test'):\n source = Path(ROOT, \"requirements\", f\"{env}.txt\")\n target = Path(ROOT, \"requirements\", f\"{env}.in\")\n os.system(f\"pip-compile --output-file={source} {target}\")",
"def setupRunDir(self):\n\n pass",
"def generate_project_files(specs_path, dst_path):\n hm_generator = HookManGenerator(hook_spec_file_path=specs_path)\n hm_generator.generate_project_files(Path(dst_path))\n return 0",
"def setup_for_compilation_testcase(self):\n os.chdir(self.tmp_work)\n\n for container in self.containers:\n self._setup_single_directory_for_compilation(container.directory)\n # Run any necessary pre_commands\n self._run_pre_commands(container.directory)",
"def generate_build_files(ctx):\n\n project_dir = Path(__file__).parent\n\n directory_of_the_tests = project_dir / \"tests/plugins\"\n directory_to_build_tests = project_dir / \"build/build_directory_for_tests\"\n\n # Clean UP\n if directory_to_build_tests.exists():\n shutil.rmtree(directory_to_build_tests)\n os.makedirs(directory_to_build_tests)\n\n # Finding hook_specs.py, each hook_specs represent a different project with different hooks\n hook_spec_paths = [\n path for path in directory_of_the_tests.glob(\"**/hook_specs.py\") if \"tmp\" not in path.parts\n ]\n\n # CMakeList.txt that includes all sub_directory with tests to be compiled\n root_cmake_list = directory_to_build_tests / \"CMakeLists.txt\"\n cmake_file_of_test_build_dir = [\n f\"add_subdirectory({i.parent.name })\\n\" for i in hook_spec_paths\n ]\n root_cmake_list.write_text(\"\".join(cmake_file_of_test_build_dir))\n\n # For each hook_specs, create a directory for the compilation and generate the files\n for project_hook_spec_path in hook_spec_paths:\n project_dir_for_build = directory_to_build_tests / project_hook_spec_path.parent.name\n project_dir_for_build.mkdir(parents=True)\n\n hm_generator = HookManGenerator(hook_spec_file_path=project_hook_spec_path)\n hm_generator.generate_project_files(dst_path=project_dir_for_build)\n\n # Find folder with Plugins\n plugins_dirs = [\n x\n for x in project_hook_spec_path.parent.iterdir()\n if x.is_dir() and (x / \"assets\").exists()\n ]\n\n # Copy all the plugins to the build dir\n for plugin in plugins_dirs:\n plugin_dir_build = project_dir_for_build / f\"plugin/{plugin.name}\"\n shutil.copytree(src=plugin, dst=plugin_dir_build)\n (plugin_dir_build / \"src/hook_specs.h\").write_text(\n hm_generator._hook_specs_header_content(plugin.stem)\n )\n\n # Create the CMakeFile on root of the project to include others CMake files.\n main_cmakelist = project_dir_for_build / \"CMakeLists.txt\"\n main_cmakelist_content = []\n main_cmakelist_content.append(\"add_subdirectory(cpp)\\nadd_subdirectory(binding)\\n\")\n main_cmakelist_content += [\n f\"add_subdirectory(plugin/{plugin.name}/src)\\n\" for plugin in plugins_dirs\n ]\n main_cmakelist.write_text(\"\".join(main_cmakelist_content))",
"def post_build(self, manager):\n if not self.output_files_dir.exists():\n return\n\n output_file_dirs = [\n d for d in self.output_files_dir.rglob(\"*\") if d.is_dir()\n ] + [self.output_files_dir]\n for output_file_dir in output_file_dirs:\n stem = output_file_dir.relative_to(self.output_files_dir)\n api_path = self.api_dir / stem / ALL_JSON\n\n yield self.task(\n name=f\"contents:{stem}\",\n doc=f\"create a Jupyter Contents API response for {stem}\",\n actions=[\n (self.one_contents_path, [output_file_dir, api_path]),\n (self.maybe_timestamp, [api_path]),\n ],\n file_dep=[p for p in output_file_dir.rglob(\"*\") if not p.is_dir()],\n targets=[api_path],\n )",
"def setups():\n setups = []\n\n # If you run this in detailed mode, you need to set --t8 to 1e8\n kotani2017_F2 = dict()\n kotani2017_F2['name'] = 'kotani2017_F2'\n kotani2017_F2['piltemplate'] = kotani2017_F2_pil\n kotani2017_F2['pilparams'] = [None]\n kotani2017_F2['pepperargs'] = {'condensed': True, 'conc': 'nM', 'release_cutoff': 10}\n kotani2017_F2['simulation'] = [\n ('pilsimulator', '--nxy', '--atol', '1e-13', '--rtol', '1e-13', '--mxstep', '10000', '--t8', '36000', '--p0', 'S1=10', 'S2=10', 'R=20', 'C1=1'),\n ('pilsimulator', '--nxy', '--atol', '1e-13', '--rtol', '1e-13', '--mxstep', '10000', '--t8', '36000', '--p0', 'S1=10', 'S2=10', 'R=20', 'C1=0.5'),\n ('pilsimulator', '--nxy', '--atol', '1e-13', '--rtol', '1e-13', '--mxstep', '10000', '--t8', '36000', '--p0', 'S1=10', 'S2=10', 'R=20', 'C1=0.05')]\n kotani2017_F2['reporter'] = 'D'\n kotani2017_F2['exp_results'] = [(7733, 7.42), (11333, 6.18), (25533, 1.40)]\n setups.append(kotani2017_F2)\n\n\n\n # If you run this in detailed mode, you need to set --t8 to 1e8\n kotani2017_F3 = dict()\n kotani2017_F3['name'] = 'kotani2017_F3'\n kotani2017_F3['piltemplate'] = kotani2017_F3_pil\n kotani2017_F3['pilparams'] = [None]\n kotani2017_F3['pepperargs'] = {'condensed': True, 'conc': 'nM', 'release_cutoff': 10}\n kotani2017_F3['simulation'] = [\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S1=10', 'S2=10', 'S3=10', 'S4=10', 'R=20', 'C1=0.1'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S1=10', 'S2=10', 'S3=10', 'S4=10', 'R=20', 'C1=0.01'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S1=10', 'S2=10', 'S3=10', 'S4=10', 'R=20', 'C1=0.001')]\n kotani2017_F3['reporter'] = 'D'\n kotani2017_F3['exp_results'] = [(21220, 7.72), (64203, 3.12), (86996, 0.69)]\n setups.append(kotani2017_F3)\n\n # If you run this in detailed mode, you need to set --t8 to 1e8\n kotani2017_F4 = dict()\n kotani2017_F4['name'] = 'kotani2017_F4'\n kotani2017_F4['piltemplate'] = kotani2017_F4_pil\n kotani2017_F4['pilparams'] = [None]\n kotani2017_F4['pepperargs'] = {'condensed': True, 'conc': 'nM', 'release_cutoff': 10}\n kotani2017_F4['simulation'] = [\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0.1'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0.01'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0.001'),\n ('pilsimulator', '--nxy', '--atol', '1e-10', '--rtol', '1e-10', '--mxstep', '10000', '--t8', '360000', '--p0', 'S5au=10', 'S6au=10', 'R=20', 'C1x=0')]\n kotani2017_F4['reporter'] = 'D'\n kotani2017_F4['exp_results'] = [(6815, 6.06), (9004, 4.78), (10278, 4.03), (10795, 3.73)]\n setups.append(kotani2017_F4)\n\n return setups",
"def run() -> ():\n if len(sys.argv) > 1:\n show_help()\n errs = get_cargo_input()\n main_stack = make_proj_stack(errs)\n while len(main_stack) > 0:\n file_stack = make_file_stack(main_stack)\n overwrite(file_stack)\n\n # FIXME",
"def prepare(skip_static=False):\n\n local('npm install')\n local('grunt build')\n\n with warn_only():\n local('git add staticfiles')\n local('git add {{ project_name }}/templates')\n local('git commit -m \"PRODUCTION ONLY: Build static files.\"')\n\n files_to_remove = (\n '.bowerrc',\n '.editorcinfig',\n '.gitignore',\n '.jshintrc',\n 'bower.json',\n 'dev-only-package.json',\n 'error.log',\n 'fabfile.py',\n 'Gruntfile.js',\n 'migrate.sh',\n 'README.md',\n 'serve.sh',\n 'flush_cache.py',\n )\n\n with warn_only():\n for file_ in files_to_remove:\n local('git rm {}'.format(file_))\n\n # store it\n local('git commit -m \"PRODUCTION ONLY: Removing files.\"')\n\n if skip_static:\n local('touch .skipDjango')\n local('git add .skipDjango')\n local('git commit -m \"PRODUCTION ONLY: Skip static files\"')",
"def setup():\n print('...')\n # Make sure dirs exist\n for directory in [DATA_DIR, DATA_INPUT_DIR, DATA_OUTPUT_DIR]:\n os.makedirs(directory, exist_ok=True)",
"def write_inits(project_name, root_dir):\r\n \r\n #Create our file paths first...\r\n test_init_path = get_file_path(root_dir, \"tests\", \"__init__.py\")\r\n project_init_path = get_file_path(root_dir, project_name, \"__init__.py\")\r\n \r\n #Write the test_init file first\r\n test_init = open(test_init_path, 'w')\r\n test_init.close()\r\n print_file(test_init_path)\r\n \r\n #Write the NAME_init second\r\n project_init = open(project_init_path, 'w')\r\n project_init.close()\r\n print_file(project_init_path)",
"def project_starter(project_name,yaml_project):\n snpt.load_snippets()\n archives = yaml.load(yaml_project)\n make_project_structure(archives,\"./\",project_name)\n make_exec(project_name + '/manage.py')",
"def test_project_generation(cookies, context, context_combination):\n result = cookies.bake(extra_context={**context, **context_combination})\n assert result.exit_code == 0\n assert result.exception is None\n assert result.project.basename == context[\"project_slug\"]\n assert result.project.isdir()\n\n paths = build_files_list(str(result.project))\n assert paths\n check_paths(paths)",
"def create_project(self,*pages,config_folder = \"config\",FunctionBased = False):\n\n self._make_initial_directories()\n self._make_initial_files(*pages,FunctionBased = FunctionBased)",
"def init(projectfolder, projectname, example):\n\n productline_dir = path.join(projectfolder, \"productline\")\n configs_path = path.join(productline_dir, \"configs\")\n bddfeatures_path = path.join(projectfolder, \"bddfeatures\")\n testreports_path = path.join(projectfolder, \"testreports\")\n\n if not path.exists(productline_dir):\n makedirs(productline_dir)\n\n if not path.exists(configs_path):\n makedirs(configs_path)\n\n if not path.exists(bddfeatures_path):\n makedirs(bddfeatures_path)\n\n if not path.exists(testreports_path):\n makedirs(testreports_path)\n\n model_src = pkg_resources.resource_filename(__name__, \"templates/model.xml\")\n model_dst = path.join(productline_dir, \"model.xml\")\n shutil.copyfile(model_src, model_dst)\n utilities.sed_inplace(model_dst, \"{{PROJECT_NAME}}\", projectname.replace(\" \", \"\"))\n\n configtemplate_src = pkg_resources.resource_filename(__name__, 'templates/aplet.yml')\n configtemplate_dst = path.join(projectfolder, \"aplet.yml\")\n shutil.copyfile(configtemplate_src, configtemplate_dst)\n utilities.sed_inplace(configtemplate_dst, \"{{PROJECT_NAME}}\", projectname)\n\n # copy docs templates from aplet application into projectfolder\n lektortemplates_path = pkg_resources.resource_filename(__name__, 'templates/lektor')\n doc_templates_path = path.join(projectfolder, \"doc_templates\")\n if not path.exists(doc_templates_path):\n shutil.copytree(lektortemplates_path, doc_templates_path)\n\n\n if example:\n examples_dir = \"templates/exampleproject\"\n model_src = pkg_resources.resource_filename(__name__, path.join(examples_dir, \"model.xml\"))\n shutil.copyfile(model_src, model_dst)\n exampleconfig_src = pkg_resources.resource_filename(__name__, path.join(examples_dir, \"ExampleProduct.config\"))\n shutil.copyfile(exampleconfig_src, path.join(configs_path, \"ExampleProduct.config\"))\n configtemplate_src = pkg_resources.resource_filename(__name__, path.join(examples_dir, \"aplet.yml\"))\n shutil.copyfile(configtemplate_src, configtemplate_dst)",
"def setup():\n require('hosts', 'project_path', provided_by=envs.ENVS)\n\n if not exists(env.project_path):\n abort(red('Project path ({project_path}) does not exist. '\n 'Create it on the server before continuing.'.format(**env)))\n\n with cd(env.project_path):\n run('mkdir -p api renderer conf markup_renderer')\n run('mkdir -p api/static api/uploads')\n\n make_release_folders('api')\n make_release_folders('renderer')",
"def main():\n for db_csv_export in current_dir.glob(\"template*.csv\"):\n data_projects = load_projects(db_csv_export)\n json_path = db_csv_export.with_suffix(\".json\")\n with open(json_path, \"w\") as fh:\n json.dump(data_projects, fh, indent=2)"
] | [
"0.64848816",
"0.6330899",
"0.6330899",
"0.6330899",
"0.6284369",
"0.62667686",
"0.6219365",
"0.620572",
"0.6156918",
"0.6099373",
"0.60919136",
"0.60415375",
"0.60354227",
"0.60354227",
"0.6011478",
"0.6007769",
"0.5993345",
"0.59776956",
"0.59592646",
"0.59452546",
"0.5938593",
"0.5932985",
"0.59248996",
"0.5923378",
"0.5913009",
"0.590542",
"0.59036285",
"0.5899277",
"0.58928245",
"0.587222"
] | 0.66563916 | 0 |
Return tokenized list of strings from raw text input using keras functionality | def tokenize_keras(raw_data):
    from keras.preprocessing.text import text_to_word_sequence
    return [text_to_word_sequence(d) for d in raw_data] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def preproc_user_input(txt, model):\r\n txt = pre_process(txt)\r\n txt_tokenized = [word for word in txt.split(\" \") if word in model.wv.vocab]\r\n return \" \".join(txt_tokenized)",
"def input_new_phrase(self, text):\n \n x_new_tokens = [word_idx[word] for word in text.split()]\n \n pred = self.model.predict(np.array([x_new_tokens]))\n pred = np.argmax(pred, axis=-1)[0]\n \n return [[word_list[w], tags[pred]] for (w, pred) in zip(range(len(x_new)), pred)]",
"def _batch_tokenize(self, text: List[str]) -> List[List[str]]:\n return self.bert_model.batch_tokenize([t.strip() for t in text])",
"def tokenize(lang):\n lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')\n lang_tokenizer.fit_on_texts(lang)\n tensor = lang_tokenizer.texts_to_sequences(lang)\n # pad zero after sequences for the same length.\n tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor,\n padding='post')\n return tensor, lang_tokenizer",
"def tokenized(self, text):\n return self.tokenizer.encode_plus(text,\n max_length=512,\n pad_to_max_length=True,\n truncation=True)[\"input_ids\"]",
"def create_model_uniform(text: str) -> List[str]:\n return str.split(text)",
"def identity_tokenizer(text):\n return text",
"def _tokenize(self, text: str) -> List[str]:\n return self.bert_model.tokenize(text.strip())",
"def batch_tokenize_fn(examples):\n sources = examples[config.source_lang]\n targets = examples[config.target_lang]\n model_inputs = config.tokenizer(sources, max_length=config.max_source_length, truncation=True)\n\n # setup the tokenizer for targets,\n # huggingface expects the target tokenized ids to be stored in the labels field\n with config.tokenizer.as_target_tokenizer():\n labels = config.tokenizer(targets, max_length=config.max_target_length, truncation=True)\n\n model_inputs[\"labels\"] = labels[\"input_ids\"]\n return model_inputs",
"def tokenize(text):\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n return [lemmatizer.lemmatize(token).lower().strip() for token in tokens]",
"def tokenize(text):\n tokens=word_tokenize(text)\n lemmatizer=WordNetLemmatizer()\n \n clean_tokens=[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens\n pass",
"def tokenize(text):\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n \n clean_tokens =[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens",
"def create_tokenizer(dataset):\n lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(char_level=True)\n lang_tokenizer.fit_on_texts([x['input'] for x in dataset])\n return lang_tokenizer",
"def tokenize(text):\n tokens = nltk.word_tokenize(text)\n lemmatizer = nltk.WordNetLemmatizer()\n \n lemmatized_words = []\n for word in tokens:\n lemmatized_words.append(lemmatizer.lemmatize(word).lower().strip())\n \n return lemmatized_words",
"def process_text(input_txt):\r\n # if input is string\r\n tidy_txt = remove_pattern(input_txt,\"@[\\w]*\")\r\n ##=============================== if input is dataframe ====================##\r\n # tidy_txt = np.vectorize(remove_pattern)(input_txt,\"@[\\w]*\") #\r\n ##==========================================================================##\r\n # remove special characters\r\n tidy_txt = tidy_txt.replace(\"[^a-zA-Z#]\",\" \")\r\n # split into words\r\n tokenized_txt = tidy_txt.split()\r\n # perform stemming\r\n stemmer = PorterStemmer()\r\n tokenized_txt = [stemmer.stem(i) for i in tokenized_txt]\r\n print(tokenized_txt)\r\n # joining words back\r\n tokenized_txt = ' '.join(tokenized_txt)\r\n return tokenized_txt",
"def preprocessing(raw_text):\n words_list = tokenize(raw_text)\n words_list = remove_stop_words(words_list)\n words_list = remove_punctuations(words_list)\n words_list = lemmatization(words_list)\n return words_list",
"def tokenize_pretraining(self, inputs):\n\n ref_ids = prepare_ref([inputs], self.tokenizer_ltp, self.tokenizer_cn)\n\n tokens = self.tokenizer_cn.tokenize(inputs)\n\n if len(tokens) > self.max_seq_length - 2:\n tokens = tokens[:(self.max_seq_length - 2)]\n ref_ids = ref_ids[:(self.max_seq_length - 2)]\n\n ref_ids = cn_whole_word_mask(tokens, ref_ids[0])\n tokens, labels = random_word_wwm(tokens, ref_ids, self.tokenizer_cn)\n\n tokens = ['[CLS]'] + tokens + ['[SEP]']\n lm_label_ids = ([-100] + labels + [-100])\n\n input_ids = self.tokenizer_cn.convert_tokens_to_ids(tokens)\n\n attention_mask = [1] * len(input_ids)\n token_type_ids = [0] * len(input_ids)\n\n while len(input_ids) < self.max_seq_length:\n input_ids.append(0)\n attention_mask.append(0)\n token_type_ids.append(0)\n lm_label_ids.append(-100)\n\n assert len(input_ids) == self.max_seq_length\n assert len(attention_mask) == self.max_seq_length\n assert len(token_type_ids) == self.max_seq_length\n assert len(lm_label_ids) == self.max_seq_length\n\n\n outputs = {'input_ids': tf.constant(input_ids), 'attention_mask': tf.constant(attention_mask), \n 'token_type_ids': tf.constant(token_type_ids), 'lm_label_ids': tf.constant(lm_label_ids)}\n\n return outputs",
"def preprocess(self,text):\n return preprocess.get_tokens(text)",
"def preprocess(text):\n\tX = []\n\tsent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n\tfor t in text:\n\t\tsents = sent_detector.tokenize(t)\n\t\tresult = ''\n\t\tfor s in sents:\n\t\t\ttokens = word_tokenize(s)\n\t\t\tresult += ' ' + ' '.join(tokens)\n\t\tX.append(result)\n\treturn X",
"def tokenize(text):\n \n text.lower() # convert to lowercase\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text) #remove punctuation\n words = word_tokenize(text) # tokenize by individual word\n words = [w for w in words if w not in stopwords.words(\"english\")] #remove stop words\n lemmed = [WordNetLemmatizer().lemmatize(w) for w in words] #lemminization\n \n return words",
"def tokenize(text):\n #Clean data, remove all character except character and number,such as punctuation etc.\n text = re.sub(r'[^a-zA-Z0-9]', ' ', text.lower())\n tokens = word_tokenize(text)\n tokens = [WordNetLemmatizer().lemmatize(word) for word in tokens if word not in ST_english]\n return tokens",
"def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for token in tokens:\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n \n return clean_tokens",
"def tokenise(sample):\n\n processed = sample.split()\n return processed",
"def generate_text(session, model, config, starting_text='<eos>',\n stop_length=100, stop_tokens=None, temp=1.0):\n state = model.initial_state.eval()\n # Imagine tokens as a batch size of one, length of len(tokens[0])\n tokens = [model.vocab.encode(word) for word in starting_text.split()]\n for i in xrange(stop_length):\n ### YOUR CODE HERE\n #print tokens\n feed = {}\n #x = np.array([tokens[-1]])\n #x.reshape(1,1)\n feed[model.input_placeholder] = [[tokens[-1]]]\n feed[model.dropout_placeholder] = 1\n feed[model.initial_state] = state\n y_pred, state = session.run([model.predictions[-1], model.final_state], feed_dict=feed)\n ### END YOUR CODE\n next_word_idx = sample(y_pred[0], temperature=temp)\n tokens.append(next_word_idx)\n if stop_tokens and model.vocab.decode(tokens[-1]) in stop_tokens:\n break\n output = [model.vocab.decode(word_idx) for word_idx in tokens]\n return output",
"def get_text_features() -> np.array:\r\n # Universal sentence encoder model\r\n # Original model by Google could be loaded from: https://tfhub.dev/google/universal-sentence-encoder/4\r\n # In this notebook the model is loaded from a public dataset on Kaggle\r\n # at https://www.kaggle.com/dimitreoliveira/universalsentenceencodermodels\r\n text_model = tf.keras.Sequential(\r\n [KerasLayer(txt_model_path, input_shape=[], dtype=tf.string, # Pretrained model\r\n output_shape=[512], trainable=False),\r\n tf.keras.layers.Layer(512, dtype='float16')] # This layer reduces precision of float numbers\r\n )\r\n\r\n # Convert all texts to vectors\r\n features = text_model.predict(data['title'],\r\n batch_size=BATCH_SIZE,\r\n use_multiprocessing=True,\r\n workers=-1)\r\n print('Text features extracted. Shape:', features.shape)\r\n\r\n return features",
"def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens",
"def tokenize(text):\n return text.split(' ')",
"def predict(self, text: list) -> list:\n\n encoded_input = self.__encode_token(text)\n dataloaders = self.__create_dataloaders(encoded_input, self.batch_size)\n\n preds = []\n for i, batch in enumerate(zip(dataloaders[0], dataloaders[1], dataloaders[2])):\n preds_batch_list = self.__predict_batch(self.model, batch)\n preds += preds_batch_list\n\n return preds",
"def tokenizer(self):\n tokenizer = RegexpTokenizer(r'\\w+')\n \n self.tweet_tokenized_train = [tokenizer.tokenize(x.lower()) for x in self.tweet_prepro_train]\n self.tweet_tokenized_test = [tokenizer.tokenize(x.lower()) for x in self.tweet_prepro_test]",
"def tokenize(self, input_string: str) -> List[str]:"
] | [
"0.70793176",
"0.6935871",
"0.6846465",
"0.6740176",
"0.66016084",
"0.6601571",
"0.65836084",
"0.6577888",
"0.656995",
"0.6567432",
"0.65455496",
"0.654399",
"0.6511661",
"0.65019155",
"0.6500425",
"0.649716",
"0.64943504",
"0.6473806",
"0.6438927",
"0.64235365",
"0.64078707",
"0.64054835",
"0.640119",
"0.63890177",
"0.63861823",
"0.6371208",
"0.63663167",
"0.63476497",
"0.6334111",
"0.6331364"
] | 0.8465603 | 0 |
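A quick illustration of the tokenize_keras document above — a sketch only, assuming a Keras version where keras.preprocessing.text.text_to_word_sequence is still available (it has been deprecated/moved in newer releases):

    from keras.preprocessing.text import text_to_word_sequence

    raw_data = ["Hello, World!", "Keras splits text into word tokens."]
    # text_to_word_sequence lowercases, strips default punctuation, and splits on whitespace
    print([text_to_word_sequence(d) for d in raw_data])
    # [['hello', 'world'], ['keras', 'splits', 'text', 'into', 'word', 'tokens']]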
Return True if word passes filter | def filter1(word):
if not word: return False
w = word.lower()
if w in STOPWORDS: return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def apply_word_filter(self, fn):\n self._apply_filter(lambda ng, f: any(fn(w) for w in ng))",
"async def wordfilter(self, ctx):\n pass",
"async def wordfilter_test(self, ctx, *, message):\n found = self.test_sentence(message)\n if found:\n await ctx.send(f\"Message contains `{found}`\")\n else:\n await ctx.send(\"Couldn't detect any filtered words\")",
"def filter_word(text):\n text = normalize(text)\n if re.match(r'^\\p{P}+$', text):\n return True\n if text.lower() in STOPWORDS:\n return True\n return False",
"def can_recept(self, text, *args, **kwargs):\n for each_cur in self.flat_norm.keys():\n if each_cur.lower() in text.lower():\n return True\n\n else:\n return False",
"def match(self, filter_text):\n\n return filter_text.lower() in self.artist.lower() or \\\n super().match(filter_text)",
"def match(self, filter_text):\n\n return filter_text.lower() in self.director.lower() or \\\n filter_text.lower() in self.actor.lower() or \\\n super().match(filter_text)",
"def match(self, filter_text):\n\n return filter_text.lower() in self.author.lower() or \\\n super().match(filter_text)",
"def check(self, word: str) -> bool:\n for s in (word, word.lower(), word.capitalize()):\n if s in self.words or s in self.ignored_words:\n return True\n return False",
"def filter_tokens(x):\n if x in _STOP_WORDS:\n return False\n if not re.search(r'\\w', x):\n # Does not contain at least one word character\n return False\n return True",
"def filter(word):\n if word.strip() not in stop: # Print word only if it is not a stop word\n print(word.strip())",
"def match(self, filter_text):\n return filter_text.lower() in self.name.lower() or \\\n filter_text.lower() == self.isbn.lower() or \\\n filter_text.lower() in (str(tag).lower() for tag in self.tags)",
"def retweet_filter(self, text):\n return not text.lower().startswith('rt')",
"def text_is_relevant(self, text):\n for word in text:\n if word in self.relevant_words:\n return True\n return False",
"def search(self, word):\n for wc in self.get_wildcards(word):\n # Don't forget word not in self.all_words\n if wc in self.wc_dict and (self.wc_dict[wc] > 1 or word not in self.all_words) :\n return True\n return False",
"def is_simple (self, phrase):\r\n\r\n return not self.contains(phrase,'()&|>#')",
"def __contains__(self, word):\n if word in self.vocab:\n return True\n else:\n char_ngrams = compute_ngrams(word, self.min_n, self.max_n)\n return any(ng in self.ngrams for ng in char_ngrams)",
"def is_unimportant(word):\n return word in ['.', '!', ',', ] or '\\'' in word or word in stop_words",
"def search(self, word):",
"def check_word(words, word):\r\n if word in words:\r\n return True\r\n else:\r\n return False",
"def two_word_finder(word1,word2,text):\r\n word1 = word1.lower()\r\n word2 = word2.lower()\r\n text = str(text).lower()\r\n if word1 and word2 in text:\r\n return True #return text to see specific tweets\r\n return False",
"def is_stop_word(word):\n return word in final_stop_words",
"def onlyuse(word, letters):\r\n truth = True\r\n for letter in word:\r\n truth = letter in letters and truth\r\n return truth",
"def is_stopword(self, word, language):",
"def isValid(text):\n return bool(re.search(r'\\b%s\\b' %new_word, text, re.IGNORECASE))",
"def text_search(self, text, stuff_to_cop):\n if any(ext in text for ext in stuff_to_cop):\n return(True)\n else:\n return(False)",
"def search(self, word):\n if len(word) not in self.length_set:\n return False\n for i in self.mutate(word):\n if i in self.s:\n return True\n return False",
"def include_word(word, chardict):\n if (all(char in chardict.keys() for char in word)) & (len(word)<=25):\n # Some word2vec entries are all capitals and generally are acronyms.\n # This is unlikely to be learnable\n if not word.isupper():\n return True\n\n return False",
"def filter(self, ffun):\n # BEGIN\n lst = []\n for item in WordSet(self.text).words():\n # if len(item) == len(ffun):\n # lst.append(item)\n if ffun(item) == True:\n lst.append(item)\n return lst\n\n # END",
"def search(self, word):\n length = len(word)\n if length == 1:\n for letter in string.ascii_lowercase:\n key = \"{}/{}\".format(1, letter)\n if key in self.origin and letter != word:\n return True\n return False\n\n key = \"{}/{}\".format(len(word), word[0])\n ls = self.origin.get(key, [])\n if len(ls) == 0:\n return False\n\n for origin in ls:\n if self.only_modify_one_char(word, origin):\n return True\n return False"
] | [
"0.74591035",
"0.7444302",
"0.7269609",
"0.71056175",
"0.7085896",
"0.69943607",
"0.6989586",
"0.68581426",
"0.6818804",
"0.67899287",
"0.6701206",
"0.6658593",
"0.66539127",
"0.6639391",
"0.6632937",
"0.66313547",
"0.6630455",
"0.6528471",
"0.65281737",
"0.6523878",
"0.6522145",
"0.6517991",
"0.6516359",
"0.64933324",
"0.6491399",
"0.6485058",
"0.6468987",
"0.6460723",
"0.6442059",
"0.6430321"
] | 0.78754514 | 0 |
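A minimal usage sketch for the filter1 document above, restating the function verbatim; STOPWORDS is referenced but never defined in that snippet, so a small placeholder set is assumed here:

    STOPWORDS = {"the", "a", "of", "and"}  # placeholder; the real stop-word set is not shown

    def filter1(word):
        if not word: return False
        w = word.lower()
        if w in STOPWORDS: return False
        return True

    tokens = ["The", "quick", "", "fox", "and", "hound"]
    print([t for t in tokens if filter1(t)])  # ['quick', 'fox', 'hound']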
Return dict of word-to-id from raw text data. If max_size is specified, the vocab is truncated to the set of highest-frequency words within that size. | def build_vocab(raw_data, max_size=None):
data = [w for doc in tokenize_keras(raw_data) for w in doc]
counter = collections.Counter(data)
count_pairs = sorted(counter.items(),
key=lambda x: (-x[1], x[0]))
if max_size: count_pairs = count_pairs[:max_size]
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
word_to_id[UNKNOWN_WORD] = len(word_to_id)
word_to_id[PAD_WORD] = len(word_to_id)
return word_to_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_vocab(self):\n word2id = {}\n for document in self.docs:\n for word in document:\n if word not in word2id.keys():\n word2id[word] = len(word2id)\n return word2id",
"def build_vocab(sentences, max_num_words):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences)).most_common()\n if max_num_words != 0 and max_num_words < len(word_counts):\n word_counts = word_counts[:max_num_words]\n\n # Mapping from index to word\n vocabulary = dict()\n index = 0\n for x in word_counts:\n vocabulary[index] = x[0]\n index += 1\n\n return vocabulary",
"def construct_vocab(lines, vocab_size):\n vocab = {}\n for line in lines:\n for word in line:\n if word not in vocab:\n vocab[word] = 1\n else:\n vocab[word] += 1\n \n word2id = {}\n id2word = {}\n word2id['<pad>'] = 0\n word2id['<unk>'] = 1\n id2word[0] = '<pad>'\n id2word[1] = '<pad>'\n \n sorted_word2id = sorted(\n vocab.items(),\n key=operator.itemgetter(1),\n reverse=True\n )\n\n sorted_words = [x[0] for x in sorted_word2id[:vocab_size]]\n\n for ind, word in enumerate(sorted_words):\n word2id[word] = ind + 2\n\n for ind, word in enumerate(sorted_words):\n id2word[ind + 2] = word\n\n return word2id, id2word",
"def build_vocab(data):\n # data = _read_words(filename)\n counter = collections.Counter(data)\n # print('counter', counter) # dictionary for the occurrence number of each word, e.g. 'banknote': 1, 'photography': 1, 'kia': 1\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n # print('count_pairs',count_pairs) # convert dictionary to list of tuple, e.g. ('ssangyong', 1), ('swapo', 1), ('wachter', 1)\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n # print(words) # list of words\n # print(word_to_id) # dictionary for word to id, e.g. 'campbell': 2587, 'atlantic': 2247, 'aoun': 6746\n return word_to_id",
"def process_text(self, text: str, max_length: int) -> Dict[str, Sequence[int]]:\n inputs = self.tokenizer(\n [c for c in text],\n return_token_type_ids=True,\n return_attention_mask=True,\n max_length=max_length,\n padding=\"max_length\",\n truncation=True,\n is_pretokenized=True,\n )\n return inputs.data",
"def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"r\") as f:\n counter = 0\n for line in f:\n counter += 1\n line = line.strip().split('\\t')[0]\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, \"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n sorted_vocab = sorted(vocab, key=vocab.get, reverse=True)\n vocab_list = _START_VOCAB + sorted_vocab\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), max_vocabulary_size, vocab[sorted_vocab[max_vocabulary_size - len(_START_VOCAB)]] ) )\n else:\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), len(vocab), 0))\n\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")",
"def build_vocab(words, vocab_size, visual_fld=None):\n utils.safe_mkdir(visual_fld)\n file = open(os.path.join(visual_fld, 'vocab.tsv'), 'w',encoding='utf8')\n\n dictionary = dict()\n count = [('UNK', -1)]\n index = 0\n count.extend(Counter(words).most_common(vocab_size - 1))\n\n for word, _ in count:\n dictionary[word] = index\n index += 1\n file.write(word + '\\n')\n\n index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n file.close()\n return dictionary, index_dictionary",
"def build_words_dataset(words, vocabulary_size=50000, printable=True):\n import collections\n count = [['UNK', -1]]\n count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n unk_count = 0\n for word in words:\n if word in dictionary:\n index = dictionary[word]\n else:\n index = 0 # dictionary['UNK']\n unk_count += 1\n data.append(index)\n count[0][1] = unk_count\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n if printable:\n print('Real vocabulary size %d' % len(collections.Counter(words).keys()))\n print('Limited vocabulary size {}'.format(vocabulary_size))\n assert len(collections.Counter(words).keys()) >= vocabulary_size , \\\n \"Read vocabulary size can be less than limited vocabulary size\"\n return data, count, dictionary, reverse_dictionary",
"def create_vocab(vocab_size):\n vocab_dict = tff.simulation.datasets.stackoverflow.load_word_counts(\n cache_dir='/tmp')\n return list(vocab_dict.keys())[:vocab_size]",
"def __init__(self, vocab_file, max_size):\n\t\tself._word_to_id = {}\n\t\tself._id_to_word = {}\n\t\tself._count = 0 # keeps track of total number of words in the Vocab\n\n\t\t# [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3.\n\t\tfor w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n\t\t\tself._word_to_id[w] = self._count\n\t\t\tself._id_to_word[self._count] = w\n\t\t\tself._count += 1\n\n\t\t# Read the vocab file and add words up to max_size\n\t\twith open(vocab_file, 'r') as vocab_f:\n\t\t\tfor line in vocab_f:\n\t\t\t\tpieces = line.split()\n\t\t\t\tif len(pieces) != 2:\n\t\t\t\t\tprint ('Warning: incorrectly formatted line in vocabulary file: %s\\n' % line)\n\t\t\t\t\tcontinue\n\t\t\t\tw = pieces[0]\n\t\t\t\tif w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n\t\t\t\t\traise Exception(\n\t\t\t\t\t\t'<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\\'t be in the vocab file, but %s is' % w)\n\t\t\t\tif w in self._word_to_id:\n\t\t\t\t\traise Exception('Duplicated word in vocabulary file: %s' % w)\n\t\t\t\tself._word_to_id[w] = self._count\n\t\t\t\tself._id_to_word[self._count] = w\n\t\t\t\tself._count += 1\n\t\t\t\tif max_size != 0 and self._count >= max_size:\n\t\t\t\t\tprint (\"max_size of vocab was specified as %i; we now have %i words. Stopping reading.\" % (\n\t\t\t\t\tmax_size, self._count))\n\t\t\t\t\tbreak\n\n\t\tprint (\"Finished constructing vocabulary of %i total words. Last word added: %s\" % (\n\t\tself._count, self._id_to_word[self._count - 1]))",
"def count_words(path, max_vocab_size=40000, tok=False):\n counts = collections.Counter()\n for words in read_file(path, tok):\n for word in words:\n counts[word] += 1\n\n vocab = [word for (word, _) in counts.most_common(max_vocab_size)]\n return vocab",
"def _create_id_map(self, word_list, max_list_length):\n\n ############ 1.5 TODO\n from collections import Counter\n \n # import pdb; pdb.set_trace()\n word_rank_list = Counter(word_list).most_common(max_list_length)\n \n id_map = {}\n for idx, (word,_) in enumerate(word_rank_list):\n id_map[word] = idx\n\n ############\n # raise NotImplementedError()\n return id_map",
"def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True,\n _DIGIT_RE=re.compile(br\"\\d\"),\n _START_VOCAB=[b\"_PAD\", b\"_GO\", b\"_EOS\", b\"_UNK\"]):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"rb\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")\n else:\n print(\"Vocabulary %s from data %s exists\" % (vocabulary_path, data_path))",
"def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"rb\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n print(\"vocab too big\")\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")",
"def make_vocab(corpus, word_vocab, char_vocab, max_len):\n\n word_id = len(word_vocab)\n char_id = len(char_vocab) + 1\n \n for words in corpus:\n words_list = words.split()+['+'] \n for word in words_list:\n if word not in word_vocab:\n word_vocab[word] = word_id\n word_id += 1\n for char in word:\n if char not in char_vocab:\n char_vocab[char] = char_id\n char_id += 1\n if max_len < len(word):\n max_len = len(word) \n\n return (word_vocab, char_vocab, max_len)",
"def make_word2id():\r\n with open(\"public_data/stats/stats_train.pkl\", 'rb') as stats:\r\n stats = pickle.load(stats)\r\n vocab = stats[\"VOCAB\"]\r\n word2id = {word: id for id, word in enumerate([\"PAD\"] + [\"UNK\"] + vocab)}\r\n with open('public_data/vocab/word2id.pkl', 'wb') as out:\r\n pickle.dump(word2id, out, protocol=4)",
"def build_vocab(self, min_count=3):\n word2count = defaultdict(int)\n for sentence in self.tokenized_corpus:\n for word in sentence:\n word2count[word] += 1\n\n word2dict = {}\n word2dict['PAD'] = {'id': 0}\n word2dict['UNK'] = {'id': 1}\n for word in word2count:\n if word2count[word] >= min_count:\n word2dict[word] = {'id': len(word2dict), 'count': word2count[word]}\n self.vocab = word2dict",
"def data_to_word_ids(self, input_data, filter=False):\n\n _buffer = list()\n for word in input_data:\n word = word.lower()\n if self.unit == \"oracle\":\n if \"+\" in word:\n tokens = word.split('+')\n word_tag = tokens[0].split(':')\n word = word_tag[1]\n if self.unit == \"morpheme\":\n word = re.sub(\"@@\", \"\", word)\n\n # flag to randomize token with frequency one\n flag = 1\n if word in self.unk_word_list:\n flag = random.randint(0, 1)\n\n if word in self.word_to_id and flag == 1:\n # if filter is True, reduce output vocabulary for softmax\n # (map words not in top self.max_vocab_size to UNK)\n if filter:\n # index start from 0\n if self.word_to_id[word] < self.max_vocab_size:\n _buffer.append(self.word_to_id[word])\n else:\n _buffer.append(self.word_to_id['<unk>'])\n else:\n _buffer.append(self.word_to_id[word])\n else:\n _buffer.append(self.word_to_id['<unk>'])\n return _buffer",
"def get_vocab_dicts(vocab_size, vocab):\n assert vocab_size == len(vocab)\n\n word2idx = {}\n idx2word = {}\n\n for idx in range(vocab_size):\n word2idx[vocab[idx]] = idx\n idx2word[idx] = vocab[idx]\n\n return word2idx, idx2word",
"def build_vocab(vocab_size, text_vector):\n vocab = Counter()\n for text in text_vector:\n for word in text.split(' '):\n vocab[word.lower()]+=1\n vocab = dict(vocab.most_common(vocab_size))\n return vocab",
"def get_vocab(data, nb_words=50000, min_nb=10, remove_stop_words = True):\n\n\n # Put everything into onw long string\n data = [item for sublist in list(data.values()) for item in sublist]\n data = \" \".join(data)\n\n # Do a bit of steaming\n data = remove_punctuations(data)\n vocab = Counter(data)\n\n # Remove the stop words\n new_vocab = vocab.copy()\n for key, value in vocab.items():\n if remove_stop_words and key in stopwords:\n del new_vocab[key]\n if value < min_nb:\n del new_vocab[key]\n\n vocab = new_vocab\n\n # Keep the most common words\n vocab = Counter(dict(vocab.most_common(nb_words)))\n\n # Extract a mapping\n mapping = {}\n mapping[1] = \"--UNK--\"\n mapping[\"--UNK--\"] = 1\n for i, word in enumerate(sorted(vocab.keys())):\n mapping[i + 2] = word\n mapping[word] = i + 2\n\n return vocab, mapping",
"def build_morpheme_vocab(self):\n max_morph_per_word = 0\n morpheme_dict = collections.defaultdict(int)\n splitter = \"@@\"\n for token in self.train_data:\n if token == self.eos or token == self.sos:\n continue\n token = '^' + token + '$'\n morphemes = token.split(splitter)\n if len(morphemes) > max_morph_per_word:\n max_morph_per_word = len(morphemes)\n for morpheme in morphemes:\n morpheme_dict[morpheme] += 1\n\n unk_morpheme_list = set()\n item_to_id = dict()\n item_to_id[constants.PAD_ITEM] = len(item_to_id)\n item_to_id[constants.UNK_ITEM] = len(item_to_id)\n sorted_dict = sorted(morpheme_dict.items(), key=operator.itemgetter(1), reverse=True)\n for token, freq in sorted_dict:\n if freq == 1:\n unk_morpheme_list.add(token)\n if token not in item_to_id:\n item_to_id[token] = len(item_to_id)\n return item_to_id, unk_morpheme_list, max_morph_per_word",
"def load_target_vocab(self):\n vocab = [line.split()[0] for line in open(os.path.join('preprocessed', 'all_vocab.txt'), 'r').read().splitlines()]\n self.word2idx = {word: idx for idx, word in enumerate(vocab)}\n self.idx2word = {idx: word for idx, word in enumerate(vocab)}\n self.vocab_size = len(self.word2idx)",
"def build_vocab(sentences, vocab_limit):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n print( 'Total size of vocab is {}'.format(len(word_counts.most_common())))\n # Mapping from index to word\n # vocabulary_inv = [x[0] for x in word_counts.most_common(vocab_limit)]\n vocabulary_inv = [x[0] for x in word_counts.most_common(vocab_limit)]\n \n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i+1 for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]",
"def tokenize_document(doc_info: dict, tokenizer: BertTokenizer, max_doc_length: int = None) -> dict:\n sub_tokens: List[str] = [] # all sub tokens of a document\n sentence_map: List[int] = [] # collected tokenized tokens -> sentence id\n subtoken_map: List[int] = [] # collected tokenized tokens -> original token id\n\n word_idx = -1\n\n for sentence_id, sentence in enumerate(doc_info['sentences']):\n for token in sentence:\n word_idx += 1\n word_tokens = tokenizer.tokenize(token)\n sub_tokens.extend(word_tokens)\n sentence_map.extend([sentence_id] * len(word_tokens))\n subtoken_map.extend([word_idx] * len(word_tokens))\n if max_doc_length:\n num_to_pad = max_doc_length - len(sub_tokens)\n sub_tokens.extend([\"[PAD]\"] * num_to_pad)\n sentence_map.extend([sentence_map[-1]+1] * num_to_pad)\n subtoken_map.extend(list(range(word_idx+1, num_to_pad+1+word_idx)))\n # global MAX_LENGTH\n # if len(sub_tokens) > MAX_LENGTH:\n # print(len(sub_tokens))\n # MAX_LENGTH = len(sub_tokens)\n # print(MAX_LENGTH)\n # todo(yuxian): need pad speakers?\n speakers = {subtoken_map.index(word_index): tokenizer.tokenize(speaker)\n for word_index, speaker in doc_info['speakers']}\n clusters = [[(subtoken_map.index(start), len(subtoken_map) - 1 - subtoken_map[::-1].index(end))\n for start, end in cluster] for cluster in doc_info['clusters']]\n tokenized_document = {'sub_tokens': sub_tokens, 'sentence_map': sentence_map, 'subtoken_map': subtoken_map,\n 'speakers': speakers, 'clusters': clusters, 'doc_key': doc_info['doc_key']}\n return tokenized_document",
"def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary",
"def load_vocab(vocab):\r\n\tvocab = [line.split()[0] for line in open(\r\n\t\t'{}{}'.format(pm.vocab_path, vocab), 'r', encoding='utf-8').read().splitlines()\r\n\t\t\t if int(line.split()[1]) >= pm.word_limit_size]\r\n\tword2idx_dic = {word: idx for idx, word in enumerate(vocab)}\r\n\tidx2word_dic = {idx: word for idx, word in enumerate(vocab)}\r\n\treturn word2idx_dic, idx2word_dic",
"def vocab_size():\n\n MAXSIZE = 10000\n\n ls = Language.objects.exclude(id=80).filter(vocabulary_size__gt=0, vocabulary_size__lte=MAXSIZE).conlangs()\n\n outliers = Language.objects.filter(vocabulary_size__gt=MAXSIZE).order_by('vocabulary_size')\n\n # Assumes unimodal distribution\n modes = [(mode['count'], mode['vocabulary_size'])\n for mode in ls.values('vocabulary_size').annotate(count=Count('vocabulary_size')).order_by('-count', '-vocabulary_size')\n if mode['count'] > 5]\n mode = modes[0][1]\n\n avg_maximum_minimum = ls.aggregate(avg=Avg('vocabulary_size'), maximum=Max('vocabulary_size'), minimum=Min('vocabulary_size'))\n avg = avg_maximum_minimum['avg']\n maximum = avg_maximum_minimum['maximum']\n minimum = avg_maximum_minimum['minimum']\n\n curve = ls.order_by('-vocabulary_size')\n rows = [v.vocabulary_size for v in curve]\n\n chart_svg = vocab_chart(rows)\n\n # median\n med = median(rows)\n\n return {'average': avg,\n 'min': minimum,\n 'max': maximum,\n 'median': med,\n 'chart_svg': chart_svg,\n 'mode': mode,\n 'common': modes,\n 'stddev': stddev(rows),\n 'outliers': outliers,\n 'upper_bound': MAXSIZE}",
"def build_pos_tag_vocab(data, vocab_size=1000, min_freq=1):\n counter = Counter()\n for d in data:\n tags = d['pos_class']\n counter.update(tags)\n\n itos = ['<pad>']\n min_freq = max(min_freq, 1)\n\n # sort by frequency, then alphabetically\n words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])\n words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)\n\n for word, freq in words_and_frequencies:\n if freq < min_freq or len(itos) == vocab_size:\n break\n itos.append(word)\n # stoi is simply a reverse dict for itos\n stoi = defaultdict()\n stoi.update({tok: i for i, tok in enumerate(itos)})\n\n return {'itos': itos, 'stoi': stoi, 'len': len(itos)}",
"def token2id(data, mode):\n vocab_path = 'vocab.' + mode\n in_path = data + '.' + mode\n out_path = data + '_ids.' + mode\n _, vocab = load_vocab(os.path.join(config.PROCESSED_PATH, vocab_path))\n in_file = open(os.path.join(config.PROCESSED_PATH, in_path), 'rb')\n out_file = open(os.path.join(config.PROCESSED_PATH, out_path), 'wb')\n\n lines = in_file.read().splitlines()\n for line in lines:\n if mode == 'dec': # we only care about '<s>' and </s> in encoder\n ids = [vocab[b'<s>']]\n else:\n ids = []\n ids.extend(sentence2id(vocab, line))\n # ids.extend([vocab.get(token, vocab['<unk>']) for token in basic_tokenizer(line)])\n if mode == 'dec':\n ids.append(vocab[b'</s>'])\n out_file.write(b' '.join(str(id_).encode('ascii') for id_ in ids) + b'\\n')"
] | [
"0.6375627",
"0.6333941",
"0.63053066",
"0.6265616",
"0.6252101",
"0.6250396",
"0.62214345",
"0.6192168",
"0.6157442",
"0.60978895",
"0.6085281",
"0.6050244",
"0.6039959",
"0.6008718",
"0.6007198",
"0.6006812",
"0.59851426",
"0.59756815",
"0.59671265",
"0.5960956",
"0.5825441",
"0.58087635",
"0.57941294",
"0.5790539",
"0.5758069",
"0.57558537",
"0.5742934",
"0.57081914",
"0.57024837",
"0.5697753"
] | 0.7943447 | 0 |
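The frequency-ranked id assignment in build_vocab can be exercised without Keras by substituting pre-tokenized documents; UNKNOWN_WORD and PAD_WORD stand in for module-level constants that the snippet assumes but does not define:

    import collections

    UNKNOWN_WORD, PAD_WORD = "<unk>", "<pad>"
    docs = [["to", "be", "or", "not", "to", "be"], ["to", "go"]]

    # count all tokens, rank by (frequency desc, token asc), keep the top max_size
    counter = collections.Counter(w for doc in docs for w in doc)
    count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))[:3]  # max_size = 3
    word_to_id = {w: i for i, (w, _) in enumerate(count_pairs)}
    word_to_id[UNKNOWN_WORD] = len(word_to_id)
    word_to_id[PAD_WORD] = len(word_to_id)
    print(word_to_id)  # {'to': 0, 'be': 1, 'go': 2, '<unk>': 3, '<pad>': 4}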
Convert raw text data into integer ids | def raw_to_ids(raw_data, word_to_id):
docs = tokenize_keras(raw_data)
uid = word_to_id[UNKNOWN_WORD]
return [[word_to_id.get(w, uid) for w in doc] for doc in docs] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def text2ids(self, text: str, length: int):\n # Tokenize\n tokens = self.tokenizer.tokenize(text)\n token_ids = self.tokenizer.tokens2ids(tokens)\n # Padding\n while len(token_ids) < length:\n token_ids.append(0)\n # Truncate\n if len(token_ids) > length:\n token_ids = token_ids[:length]\n assert len(token_ids) == length\n return token_ids",
"def get_ids(cls, text):\n tokens = TokenizerContainer.TOKENIZER.tokenize(text)\n token_ids = TokenizerContainer.TOKENIZER.convert_tokens_to_ids(tokens)\n input_ids = token_ids + [0] * (cls.MAX_LEN-len(token_ids))\n return tokens, input_ids",
"def _text_to_ids(self, *Xs, max_length=None):\n return Xs",
"def _build_data_from_text(self, text):\n # tokenize text if tokenizer is given\n if self.tokenizer is not None:\n data = self.tokenizer.text_to_ids(text)\n else:\n data = text\n\n return data",
"def encode(self, text: str) -> List[int]:\n return [self._label2id.get(char, self.oov) for char in text]",
"def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self.vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids",
"def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self._vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids",
"def texts2ids(self, texts: list, length: int):\n return [self.text2ids(text, length) for text in texts]",
"def token2id(data, mode):\n vocab_path = 'vocab.' + mode\n in_path = data + '.' + mode\n out_path = data + '_ids.' + mode\n _, vocab = load_vocab(os.path.join(config.PROCESSED_PATH, vocab_path))\n in_file = open(os.path.join(config.PROCESSED_PATH, in_path), 'rb')\n out_file = open(os.path.join(config.PROCESSED_PATH, out_path), 'wb')\n\n lines = in_file.read().splitlines()\n for line in lines:\n if mode == 'dec': # we only care about '<s>' and </s> in encoder\n ids = [vocab[b'<s>']]\n else:\n ids = []\n ids.extend(sentence2id(vocab, line))\n # ids.extend([vocab.get(token, vocab['<unk>']) for token in basic_tokenizer(line)])\n if mode == 'dec':\n ids.append(vocab[b'</s>'])\n out_file.write(b' '.join(str(id_).encode('ascii') for id_ in ids) + b'\\n')",
"def _convert_ids(self, ids):\n ids_list_int = []\n\n for id_ in ids:\n if not self._validate_identifier(id_):\n raise PhabfiveDataException(f\"Identifier '{id_}' is not valid\")\n\n id_ = id_.replace(\"P\", \"\")\n # constraints takes int\n id_ = int(id_)\n ids_list_int.append(id_)\n\n return ids_list_int",
"def text_to_int(self, text):\n int_sequence = []\n for c in text:\n if c == ' ':\n ch = self.char_map['']\n else:\n ch = self.char_map[c]\n int_sequence.append(ch)\n return int_sequence",
"def batches2IDs(batches):\n l = [ np.array( [ char2id(x) for x in characters(b) ] ) for b in batches ]\n return l",
"def data_to_int(data): \r\n data = str(data).strip().upper()\r\n if data[0]== 'B':\r\n return bin_to_int(data[1:])\r\n elif data[0]== 'H':\r\n return hex_to_int(data[1:])\r\n else:\r\n return int(data, 10)",
"def words_to_id(text, is_list=False, old_word_to_id=None):\n if is_list:\n x = \"\"\n for line in text:\n x += line + \" \"\n text = x\n \n uniq_words = set(text.split(\" \"))\n \n if old_word_to_id:\n word_to_id = old_word_to_id\n start = len(old_word_to_id)\n for word in uniq_words:\n if word not in word_to_id:\n word_to_id[word] = start\n start += 1\n else:\n word_to_id = {word:i for i, word in enumerate(uniq_words)}\n \n id_to_word = {str(v):k for k,v in word_to_id.items()}\n return word_to_id, id_to_word",
"def _preprocess(self, txt_seq):\n input = []\n for token in txt_seq.split():\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n input.append(self.word2id[\"<END>\"])\n input = torch.LongTensor(input)\n return input",
"def _natural_keys(text: str) -> list[int | str]:\n return [_atoi(c) for c in re.split(r\"(\\d+)\", text)]",
"def convert_texts_to_ids(self, batch_text):\n max_len = self.field_config.max_seq_len\n batch_fea_list = []\n name_block_len = []\n name_block_begin = []\n name_block_end = []\n for idx_batch, text in enumerate(batch_text):\n fea_str = text.split(' [SEP] ')\n fea_list = [[float(y) for y in x.split(' ')] for x in fea_str]\n\n # 加上截断策略\n if len(fea_list) > self.field_config.max_seq_len:\n logging.warn('input instance is to long: %s', text)\n fea_list = truncation_words(fea_list, self.field_config.max_seq_len, self.field_config.truncation_type)\n batch_fea_list.append(fea_list)\n\n return_list = []\n\n padded = [0] * self._feature_dim\n padded_ids = np.array([inst + list([padded] * (max_len - len(inst))) for inst in batch_fea_list])\n padded_ids = padded_ids.astype('float32').reshape([-1, max_len, self._feature_dim])\n\n return_list.append(padded_ids)\n\n return return_list",
"def text_to_id(tweets_dict):\n text_to_id_dict = {}\n for key in tweets_dict:\n # we assume that there are no retweets as this has been preprocessed before\n text_to_id_dict[key] = tweets_dict[key][\"text\"]\n return text_to_id_dict",
"def text_to_id(text, word_to_id_dict):\n return [word_to_id_dict[word] for word in text.split(\" \") if word in word_to_id_dict]",
"def words_to_word_ids(data, word_to_id):\n # if isinstance(data[0], six.string_types):\n # print(type(data[0]))\n # # exit()\n # print(data[0])\n # print(word_to_id)\n # return [word_to_id[str(word)] for word in data]\n # else:\n return [word_to_id[word] for word in data]\n\n # if isinstance(data[0], str):\n # # print('is a string object')\n # return [word_to_id[word] for word in data]\n # else:#if isinstance(s, bytes):\n # # print('is a unicode object')\n # # print(data[0])\n # return [word_to_id[str(word)] f",
"def read_lines_of_ints(text):\n ints = []\n ints_as_strs = split_line(text)\n # below is equivalent code to the following for loop\n # index = 0\n # while index < len(ints_as_strs):\n # int_as_str = ints_as_strs[index]\n # index += 1\n for ints_as_str in ints_as_strs:\n ints.append(int(int_as_str))\n return ints",
"def text2Int(text):\n return reduce(lambda x, y : (x << 8) + y, map(ord, text))",
"def ids(filename):\n with open(filename) as file:\n contents = file.read()\n return [int(x) for x in contents.split(\",\")]",
"def data_to_word_ids(self, input_data, filter=False):\n\n _buffer = list()\n for word in input_data:\n word = word.lower()\n if self.unit == \"oracle\":\n if \"+\" in word:\n tokens = word.split('+')\n word_tag = tokens[0].split(':')\n word = word_tag[1]\n if self.unit == \"morpheme\":\n word = re.sub(\"@@\", \"\", word)\n\n # flag to randomize token with frequency one\n flag = 1\n if word in self.unk_word_list:\n flag = random.randint(0, 1)\n\n if word in self.word_to_id and flag == 1:\n # if filter is True, reduce output vocabulary for softmax\n # (map words not in top self.max_vocab_size to UNK)\n if filter:\n # index start from 0\n if self.word_to_id[word] < self.max_vocab_size:\n _buffer.append(self.word_to_id[word])\n else:\n _buffer.append(self.word_to_id['<unk>'])\n else:\n _buffer.append(self.word_to_id[word])\n else:\n _buffer.append(self.word_to_id['<unk>'])\n return _buffer",
"def doc2id(self, doc):\n if isinstance(doc, string_types):\n raise TypeError(\"doc2idx expects an array of unicode tokens on input, not a single string\")\n doc = map(self.process_token, doc)\n return [self.token_to_id(token) for token in doc]",
"def line2ints(line):\n return [int(d) for d in line.strip()]",
"def intparse(text):\n return int(text, 0)",
"def test_convert_id():",
"def __ui_convert_ids_string_to_list(string_of_ids):\n if string_of_ids == \"\":\n return []\n string_of_ids = string_of_ids.strip()\n string_of_ids = string_of_ids.replace(\",\", \" \")\n\n done = False\n while not done:\n if string_of_ids.find(\" \") == -1:\n done = True\n else:\n string_of_ids = string_of_ids.replace(\" \", \" \")\n list_of_ids = string_of_ids.split(\" \")\n for id_index in range(len(list_of_ids)):\n list_of_ids[id_index] = int(list_of_ids[id_index])\n return list_of_ids",
"def _encode(self, text: str) -> List[str]:\n token_ids: List[int] = self.bert_model.encode(text.strip())\n tokens_ids_str: List[str] = [str(token_id) for token_id in token_ids]\n return tokens_ids_str"
] | [
"0.672705",
"0.6686971",
"0.66411066",
"0.66214275",
"0.6610237",
"0.6563587",
"0.650595",
"0.6494439",
"0.6406515",
"0.6315634",
"0.6188173",
"0.6168283",
"0.6166879",
"0.6148879",
"0.60975975",
"0.6084565",
"0.6077067",
"0.6060543",
"0.60536844",
"0.60036033",
"0.59868073",
"0.59835935",
"0.5962527",
"0.5944648",
"0.5933868",
"0.5907434",
"0.5895855",
"0.5890603",
"0.58717227",
"0.58454424"
] | 0.7270317 | 0 |
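Continuing the toy vocabulary above, the core of raw_to_ids is the dict.get fallback that maps unseen words to the unknown-word id (the Keras tokenization step is omitted here):

    word_to_id = {"to": 0, "be": 1, "go": 2, "<unk>": 3, "<pad>": 4}
    doc = ["to", "be", "awesome"]
    uid = word_to_id["<unk>"]
    print([word_to_id.get(w, uid) for w in doc])  # [0, 1, 3]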
Callback for when the detector has found a stop sign. Note that a distance of 0 can mean that the lidar did not pick up the stop sign at all | def stop_sign_detected_callback(self, msg):
# distance of the stop sign
corners = msg.corners
dx = corners[3] - corners[1]
dy = corners[2] - corners[0]
r = dx/dy # aspect ratio
rdist = np.array([.15, .20, .25, .30,.35, .40, .45, .50])
pixelheight = np.array([139, 102, 82, 64, 56, 50, 44, 40])
if dy > pixelheight[-1] and dy < pixelheight[0]:
dist = np.interp(dy, pixelheight[::-1], rdist[::-1])
else:
return
# Get location of camera with respect to the map
try:
(translation,rotation) = self.tf_listener.lookupTransform('/map', '/camera', rospy.Time(0))
xcam = translation[0]
ycam = translation[1]
zcam = translation[2]
euler = tf.transformations.euler_from_quaternion(rotation)
thetacam = euler[2]
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
return
# Get angle of robot with respect to the map
try:
(translation,rotation) = self.tf_listener.lookupTransform('/map', '/base_footprint', rospy.Time(0))
euler = tf.transformations.euler_from_quaternion(rotation)
thetarobot = euler[2]
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
return
# Now we have pose of robot, we want to determine stop sign angle relative
# to camera frame
thstopsign = (wrapToPi(msg.thetaright) + wrapToPi(msg.thetaleft))/2.
zstopsign = dist*np.cos(-thstopsign)
xstopsign = dist*np.sin(-thstopsign)
x = xcam + xstopsign*np.cos(thetacam) - zstopsign*np.sin(thetacam)
y = ycam + xstopsign*np.sin(thetacam) + zstopsign*np.cos(thetacam)
# Now that we have x and y coord of stop sign in world frame, append coord
found = False
for i in range(len(self.stopSigns[0])):
xcur = self.stopSigns[0][i]
ycur = self.stopSigns[1][i]
thetarobotcur = self.stopSigns[2][i]
distance = np.sqrt((x - xcur)**2 + (y - ycur)**2)
n = self.stopSignCounts[i]
if distance < .2:
if n < 100:
# We have found the same stop sign as before
xnew = (n/(n+1.))*xcur + (1./(n+1))*x
ynew = (n/(n+1.))*ycur + (1./(n+1))*y
thetarobotnew = (n/(n+1.))*thetarobotcur + (1./(n+1))*thetarobot
self.stopSigns[0][i] = xnew
self.stopSigns[1][i] = ynew
self.stopSigns[2][i] = thetarobotnew
self.stopSignCounts[i] += 1
found = True
if not found:
# Found a new one, append it
self.stopSigns[0].append(x)
self.stopSigns[1].append(y)
self.stopSigns[2].append(thetarobot)
self.stopSignCounts.append(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _detect_stop(func):\n def wrapper(*args,**kwargs):\n self = args[0]\n self.episode_length -= 1\n if self.episode_length <=0:\n \"\"\"if the episode is end\"\"\"\n self.end = True\n else:\n if self.adsorption:\n \"\"\"just stop moving and wait until the end of episode\"\"\"\n self.state = self.previous_state\n else:\n func(*args,**kwargs)\n self._detect_obstacles()\n\n # func(*args,**kwargs)\n # self._detect_obstacles()\n # if self.adsorption:\n # \"\"\"if this step update is invalid, the point will rebond\"\"\"\n # self.state = self.previous_state\n\n if self.distance <= 0.02:\n \"\"\"if the point reached the boundary around the goal, let it stop and reset the punishment(self.reward)\"\"\"\n self.end = True\n self.reward = 0\n if self.state[0] <0 or self.state[0] > 10 or self.state[1] <0 or self.state[1] > 10:\n # self.end = True\n self.reward = -800\n return np.array(self.state), self.reward, self.end, self.distance\n return wrapper",
"def _change_seg_stop(self, seg_img, depth_img, stop_signs, cam, _region_size=6): \r\n for stop in stop_signs:\r\n\r\n _dist = self._get_distance(stop.get_transform().location)\r\n \r\n _region = np.abs(depth_img - _dist)\r\n\r\n seg_img[(_region < _region_size) & (seg_img == 12)] = 26\r\n\r\n # lane markings\r\n trigger = stop.trigger_volume\r\n\r\n _trig_loc_world = self._trig_to_world(np.array([[0], [0], [0], [1.0]]).T, stop, trigger)\r\n _x = self._world_to_sensor(_trig_loc_world, self._get_sensor_position(cam))[0,0]\r\n\r\n if _x > 0: # stop is in front of camera\r\n\r\n bb = self._create_2d_bb_points(trigger, 4)\r\n trig_loc_world = self._trig_to_world(bb, stop, trigger)\r\n cords_x_y_z = self._world_to_sensor(trig_loc_world, self._get_sensor_position(cam), True)\r\n\r\n #if cords_x_y_z.size: \r\n cords_x_y_z = cords_x_y_z[:3, :]\r\n cords_y_minus_z_x = np.concatenate([cords_x_y_z[1, :], -cords_x_y_z[2, :], cords_x_y_z[0, :]])\r\n bbox = (self._sensor_data['calibration'] @ cords_y_minus_z_x).T\r\n\r\n camera_bbox = np.concatenate([bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]], axis=1)\r\n\r\n if np.any(camera_bbox[:,2] > 0):\r\n\r\n camera_bbox = np.array(camera_bbox)\r\n\r\n polygon = [(camera_bbox[i, 0], camera_bbox[i, 1]) for i in range(len(camera_bbox))]\r\n\r\n img = Image.new('L', (self._sensor_data['width'], self._sensor_data['height']), 0)\r\n ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)\r\n _region = np.array(img)\r\n\r\n seg_img[(_region == 1) & (seg_img == 6)] = 27",
"def update_trailing_stop(self, trade, instrument, distance, local=True, distance_in_percent=True):\n close_exec_price = instrument.close_exec_price(trade.direction)\n stop_loss = trade.sl\n\n if trade.direction > 0:\n # long case\n ratio = close_exec_price / trade.entry_price\n sl_ratio = (trade.entry_price - trade.sl) / trade.entry_price\n dist = (close_exec_price - trade.sl) / trade.entry_price\n step = distance\n\n if distance_in_percent:\n # @todo\n if dist > (sl_ratio + step):\n stop_loss = close_exec_price * (1.0 - distance)\n else:\n # @todo\n pass\n\n # # if dist > (sl_ratio + step):\n # # stop_loss = close_exec_price * (1.0 - sl_ratio)\n # # logger.debug(\"update SL from %s to %s\" % (trade.sl, stop_loss))\n\n # # # alternative @todo how to trigger\n # # if ratio >= 1.10:\n # # stop_loss = max(trade.sl, close_exec_price - (close_exec_price/trade.entry_price*(close_exec_price-trade.entry_price)*0.33))\n\n # # ultra large and based on the distance of the price\n # # if dist > 0.25:\n # # stop_loss = trade.entry_price + (trade.entry_price * (dist * 0.5))\n\n elif trade.direction < 0:\n # short case\n ratio = close_exec_price / trade.entry_price\n sl_ratio = (trade.sl - trade.entry_price) / trade.entry_price\n dist = (trade.sl - close_exec_price) / trade.entry_price\n step = distance\n\n if distance_in_percent:\n # @todo\n if dist > (sl_ratio - step):\n stop_loss = close_exec_price * (1.0 - distance)\n pass\n else:\n # @todo\n pass\n\n if stop_loss != trade.sl:\n if local:\n trade.sl = stop_loss\n else:\n trade.modify_stop_loss(trader, instrument, stop_loss)",
"def __stop_loss_dist_rsi(rsi):\n return (100 - rsi)/1000.0 # return value between 0 - 0.1",
"def linear_track(self, dist):\n\t\tglobal estop_flag, move_state\n\n\t\t#Disable timer interrupt, reset halfway flag, set target distance\n\t\tsignal.alarm(0) \n\t\thalfway_flag = False\n\n\t\t#Set starting position\n\t\twith self.move_state_lock:\n\t\t\tstart_x, start_y, start_z = move_state['x'], move_state['y'], move_state['z']\n\t\t#Set current position initially to start position\n\t\tcurrent_x, current_y, current_z = start_x, start_y, start_z\n\t\t#Check if the distance travelled is greater than the goal distance\n\t\twhile math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) < abs(dist):\n\t\t\t#Check if the estop flag is set, if so, kill movement\n\t\t\tif estop_flag:\n\t\t\t\tself.publisher.publish(Mover.stop_msg)\n\t\t\telse:\n\t\t\t\ttwist_msg = Twist()\n\t\t\t\tif dist < 0:\n\t\t\t\t\tif self.correction == riu.no_correction:\n\t\t\t\t\t\ttwist_msg.linear.x = -1 * riu.move_rate\n\t\t\t\t\telse:\n\t\t\t\t\t\ttwist_msg.linear.x = -1 * riu.move_rate/2\n\t\t\t\t\tif self.correction == \"left\":\n\t\t\t\t\t\ttwist_msg.angular.z = -1 * riu.turn_rate/2\n\t\t\t\t\telif self.correction == \"right\":\n\t\t\t\t\t\ttwist_msg.angular.z = riu.turn_rate/2\n\t\t\t\t#If distance goal is positive, move forward\n\t\t\t\telif dist > 0:\n\t\t\t\t\tif self.correction == riu.no_correction:\n\t\t\t\t\t\ttwist_msg.linear.x = riu.move_rate\n\t\t\t\t\telse:\n\t\t\t\t\t\ttwist_msg.linear.x = riu.move_rate/2\n\t\t\t\t\tif self.correction == \"left\":\n\t\t\t\t\t\ttwist_msg.angular.z = riu.turn_rate/2\n\t\t\t\t\telif self.correction == \"right\":\n\t\t\t\t\t\ttwist_msg.angular.z = -1 * riu.turn_rate/2\n\n\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#Check if the current movement is half completed, if so, send a Half message and set flag to avoid message duplication\n\t\t\t\tif (math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) >= abs(dist)/2\n\t\t\t\t\tand not halfway_flag):\n\t\t\t\t\thalfway_flag = True\n\t\t\t\t\tself.status_pub.publish(String(\"half\"))\n\n\t\t\t\t#update current_x, current_y, and current_z (using local variables to be thread safe)\n\t\t\t\twith self.move_state_lock:\n\t\t\t\t\tcurrent_x = move_state['x']\n\t\t\t\t\tcurrent_y = move_state['y']\n\t\t\t\t\tcurrent_z = move_state['z']\n\t\t\trospy.sleep(.2)\n\t\tself.publisher.publish(Mover.stop_msg)\n\t\tself.status_pub.publish(String(\"done\"))\n\t\tsignal.alarm(Mover.ready_message_interval)",
"def dist_to_stop(speed):\n return speed ** 2 / 4",
"def stopif(self, stop):\n if stop:\n self._stopsim = True",
"def trailing_stop(self):\n # price = self.binance.get_price(self.market)\n pos = self.get_position()\n entry_price = pos['avgEntryPrice']\n qty = pos['currentQty']\n print('Trailing stop triggered')\n order_type = 'market'\n if qty > 0:\n # long position\n price = self.ws.get_ticker()['sell']\n offset_price = float(price) - float(self.strategy.trail_offset)\n text = 'Trailing sell stop for long position'\n qty = qty * -1\n side = 'Sell'\n print(f'Trailing Stop for long position triggered: offset price {offset_price}')\n elif qty < 0:\n # short position\n price = self.ws.get_ticker()['buy']\n offset_price = float(price) + float(self.strategy.trail_offset)\n text = 'Trailing buy stop for short position'\n qty = qty * -1\n side = 'Buy'\n print(f'Trailing Stop for short position triggered: offset price {offset_price}')\n else:\n self.logger.info('No position found!')\n return False\n\n while True:\n if side == \"Sell\":\n if self.strategy.double_check or self.ws_restarting:\n quote = self.get_quote()\n self.logger.info('Bid: {} Ask: {}'.format(quote['bidPrice'], quote['askPrice']))\n price = quote['askPrice']\n else:\n price = self.ws.get_ticker()['sell']\n self.logger.info('Bid: {} Ask: {}'.format(self.ws.get_ticker['buy'], self.ws.get_ticker['sell']))\n if (float(price) - float(self.strategy.trail_offset)) > float(offset_price):\n offset_price = float(price) - float(self.strategy.trail_offset)\n print(\"New high observed: Updating stop loss to %.8f\" % offset_price)\n elif float(price) <= float(offset_price):\n price = self.ws.get_ticker()['sell']\n ret = self.execute_order(oq=qty, ot=order_type, text=text)\n self.logger.info(\"Sell triggered | Price: %.8f | Stop loss: %.8f\" % (price, offset_price))\n self.logger.debug(ret)\n if self.strategy.double_check or self.ws_restarting:\n sleep(0.5)\n break\n\n if side == \"Buy\":\n if self.strategy.double_check or self.ws_restarting:\n quote = self.get_quote()\n self.logger.info('Bid: {} Ask: {}'.format(quote['bidPrice'], quote['askPrice']))\n price = quote['bidPrice']\n else:\n price = self.ws.get_ticker()['buy']\n if (float(price) + float(self.strategy.trail_offset)) < float(offset_price):\n offset_price = float(price) + float(self.strategy.trail_offset)\n print(\"New low observed: Updating stop loss to %.8f\" % offset_price)\n elif price >= offset_price:\n price = self.ws.get_ticker()['buy']\n ret = self.execute_order(oq=qty, ot=order_type, text=text)\n self.logger.info(\"Buy triggered | Price: %.8f | Stop loss: %.8f\" % (price, offset_price))\n self.logger.debug(ret)\n if self.strategy.double_check or self.ws_restarting:\n sleep(0.5)\n break",
"def lidar_callback(self, data):\n \n proc_ranges = self.preprocess_lidar(data)\n\n closest_idx, closest_dist = self.find_closest_point(proc_ranges, data.range_max)\n \n bubble_size = int(math.floor(math.atan(0.55/closest_dist)*1080))/2\n \n print(\"Closest dist: \"+str(closest_dist)+\", Closest idx: \"+str(closest_idx)+\", No Go Zone: [\"+\n str(closest_idx-bubble_size)+\",\"+str(closest_idx+bubble_size)+\"]\")\n \n #Eliminate all points inside 'bubble' (set them to zero) \n for i in range(closest_idx-bubble_size, closest_idx+bubble_size):\n proc_ranges[i] = 0\n \n #Find max length gap \n start, end = self.find_max_gap(proc_ranges)\n\n #Find the best point in the gap \n angle, target = self.find_best_point(start, end, data.ranges)\n \n rospy.loginfo(\"Max Length Gap: [\"+str(start)+\",\"+str(end)+\"] , Target: \"+str(target))\n #if (abs(angle) > 0.2):\n #print(proc_ranges[270:810])\n \n #VELOCITY = 1\n if (abs(angle) < 0.05):\n VELOCITY = 1.5\n elif abs(angle) >= 0.05 and abs(angle) < 0.1:\n VELOCITY = 1.0\n else:\n VELOCITY = 0.5\n\n #Publish Drive message\n drive_msg = AckermannDriveStamped()\n drive_msg.header.stamp = rospy.Time.now()\n drive_msg.header.frame_id = \"laser\"\n drive_msg.drive.steering_angle = angle\n drive_msg.drive.speed = VELOCITY\n self.drive_pub.publish(drive_msg)",
"def is_stopper(self):\r\n return self.stopper",
"def stop(self):\n return _spacegrant_swig.DeNRZI_sptr_stop(self)",
"def CalcStopLevel(self,entryLevel,tradeSignal):\r\n pass",
"def test_get_stop_true(self):\n\n tt = TemperatureTracker()\n tt.stop()\n self.assertIsNotNone(tt.get_stop())",
"def stop(self):\n return _spacegrant_swig.NRZI_sptr_stop(self)",
"def stop_calibration(self):\n self.socket.send_string('c')\n return self.socket.recv_string()",
"def get_stopped_pts(gpx_track, speed_threshold=2.5):\n\n n = 0\n stopped_bool = [False]*gpx_track.get_points_no() # pre-allocate\n for segment in gpx_track.segments:\n for ida, point in enumerate(segment.points):\n stopped_bool[n] = segment.get_speed[ida] < speed_threshold\n n = n + 1\n\n _, stopped_time = gpx_track.get_moving_data(speed_threshold)\n\n return stopped_bool, stopped_time",
"def _test_if_stop_points_reached(self):\n for s in self.program.steps:\n if s.blending == 0 and s.move_type == MoveType.Frame:\n lastFrame = s.playback_frames[-1]\n expectedFramePose = get_frame_pose(s, lastFrame)\n delta = 1e-06\n msg = f\"Step {s.name} is a stop point (frame move, blending 0). Exact target position should be reached\"\n for index, value in enumerate(expectedFramePose):\n self.assertAlmostEqual(s.pose[index], value, msg=msg, delta=delta)",
"def stop(self) -> float:\n raise NotImplementedError()",
"def stopDetection(self):\n self.statusWrite(\"stop\")\n self.p.sleep()\n self.birdHere = 0",
"def test_sense_distance(self):\n\n\t\tmeasurements = [29, 29, 28]\n\t\tself.driver.us_dist.side_effect = lambda x: measurements.pop()\n\t\texpected_measurement = int(ultrasonic_sensor_error(29))\n\n\t\tself.assertEqual(self.s.sense_distance(60), expected_measurement)\n\t\tself.mount.move.assert_called_once_with(x=60)",
"def capture_stop(self):\n pass",
"def need_stop(self, path):",
"def stop_f(self, state):\n return stop(predict_correct, state, update_state,\n similarity_threshold, confidence_threshold)",
"def getSTOP(self):\n return self.listener.STOP",
"def set_linear_track_stop(self):\r\n return self._arm.set_linear_track_stop()",
"def stop(self):\n return _spacegrant_swig.invert_bit_sptr_stop(self)",
"def stop_at_detection(lag=1):\n def policy(model, hist):\n # stop if there was a positive result after lag time\n return (model.lastPositive>=0) and (model.lastPositive+lag <= model.t)\n return policy",
"def on_stop(self):\n pass",
"def on_stop(self):\n pass",
"def on_stop(self):\n pass"
] | [
"0.59943485",
"0.56399006",
"0.5583566",
"0.5494318",
"0.54302335",
"0.54221815",
"0.5411941",
"0.5367256",
"0.53257966",
"0.53016096",
"0.5254286",
"0.52245134",
"0.5204961",
"0.5185182",
"0.5171382",
"0.5162159",
"0.5156524",
"0.51438564",
"0.5138958",
"0.51210594",
"0.50905496",
"0.50705594",
"0.50698704",
"0.5066117",
"0.5061649",
"0.50589126",
"0.5058846",
"0.50330573",
"0.50330573",
"0.50330573"
] | 0.70093864 | 0 |
Custom collate_fn that is called with list of multivariate samples to yield a minibatch It preserves the data structure, e.g., if each sample is a dictionary, it outputs a dictionary with the same set of keys but batched Tensors as values (or lists if the values can not be converted into Tensors). | def collate_fn(sample_list):
x_ref_batch = []
x_pos_batch = []
x_negs_batch = []
label_batch = []
for sample in sample_list:
x_ref_batch.append(sample["x_ref"])
x_pos_batch.append(sample["x_pos"])
x_negs_batch.append(sample["x_negs"])
label_batch.append(sample["label"])
# Use torch API for RNNs to pad samples to fixed length, L, and stack them in batch-tensor of dim (B,n_dim,L).
x_ref_batch = pad_sequence(
x_ref_batch,
batch_first=True,
padding_value=0) # (B,L,n_dim)
x_ref_batch = x_ref_batch.transpose(1, 2) # (B,n_dim,L)
x_pos_batch = pad_sequence(
x_pos_batch,
batch_first=True,
padding_value=0) # (B,L,n_dim)
x_pos_batch = x_pos_batch.transpose(1, 2) # (B,n_dim,L)
# Pad neg tensors with varying length of first dim L, and produce batch (B,K,n_dim,L') where L' is padded length
x_negs_batch = pad_sequence(x_negs_batch,
batch_first=True,
padding_value=0) # (B, L', K, n_dim)
x_negs_batch = x_negs_batch.transpose(1, 2) # (B, K, L', n_dim)
x_negs_batch = x_negs_batch.transpose(2, 3) # (B, K, n_dim, L')
return {
'x_ref': x_ref_batch,
'x_pos': x_pos_batch,
'x_negs': x_negs_batch,
'label': label_batch
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def collate_fn(batch):\n\n flattened_batch = []\n for data in batch:\n num_examples = len(data['image'])\n for i in range(num_examples):\n flattened_batch.append({\n k: v[i] for k, v in data.items()\n })\n\n return default_collate(flattened_batch)",
"def collate_minibatch(list_of_blobs):\n Batch = {key: [] for key in list_of_blobs[0]}\n # Because roidb consists of entries of variable length, it can't be batch into a tensor.\n # So we keep roidb in the type of \"list of ndarray\".\n list_of_roidb = [blobs.pop('roidb') for blobs in list_of_blobs]\n for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH):\n mini_list = list_of_blobs[i:(i + cfg.TRAIN.IMS_PER_BATCH)]\n # Pad image data\n # mini_list = pad_image_data(mini_list)\n minibatch = default_collate(mini_list)\n minibatch['roidb'] = list_of_roidb[i:(i + cfg.TRAIN.IMS_PER_BATCH)]\n for key in minibatch:\n Batch[key].append(minibatch[key])\n\n return Batch",
"def collate_fn(list_samples):\n data = dict(outputs=None) # compliant with DataManager <collate_fn>\n data[\"inputs\"] = torch.stack([torch.from_numpy(sample[0]) for sample in list_samples], dim=0).float()\n data[\"labels\"] = torch.stack([torch.tensor(sample[1]) for sample in list_samples], dim=0).squeeze().float()\n return DataItem(**data)",
"def collate_minibatch(list_of_blobs):\n Batch = {key: [] for key in list_of_blobs[0]}\n\n list_of_target = [blobs.pop('target') for blobs in list_of_blobs]\n # list_of_image = [blobs.pop('image') for blobs in list_of_blobs]\n batch_size = Logo_512['numpergpu']\n\n for i in range(0, len(list_of_blobs), batch_size):\n # minibatch = {}\n mini_list = list_of_blobs[i:(i + batch_size)]\n # Pad image data\n minibatch = default_collate(mini_list)\n minibatch['target'] = list_of_target[i:(i + batch_size)]\n for key in minibatch:\n Batch[key].append(minibatch[key])\n\n return Batch",
"def collate_minibatch(self, list_of_blobs):\n def pad_image_data(list_of_blobs):\n max_shape = np.array([blobs['data'].shape[1:] for blobs in list_of_blobs]).max(axis=0)\n output_list = []\n for blobs in list_of_blobs:\n data_padded = np.zeros((3, max_shape[0], max_shape[1]), dtype=np.float32)\n _, h, w = blobs['data'].shape\n data_padded[:, :h, :w] = blobs['data']\n blobs['data'] = data_padded\n output_list.append(blobs)\n return output_list\n \n Batch = {key: [] for key in list_of_blobs[0]}\n # Because roidb consists of entries of variable length, it can't be batch into a tensor.\n # So we keep roidb in the type of \"list of ndarray\".\n list_of_roidb = [blobs.pop('roidb') for blobs in list_of_blobs]\n for i in range(0, len(list_of_blobs), CONFIG.SOLVER.IMS_PER_BATCH):\n mini_list = list_of_blobs[i:(i + CONFIG.SOLVER.IMS_PER_BATCH)]\n # Pad image data\n mini_list = pad_image_data(mini_list)\n minibatch = default_collate(mini_list)\n minibatch['roidb'] = list_of_roidb[i:(i + CONFIG.SOLVER.IMS_PER_BATCH)]\n for key in minibatch:\n Batch[key].append(minibatch[key])\n\n return Batch",
"def collate_fn(batch, samples_per_gpu=1):\n if not isinstance(batch, Sequence):\n raise TypeError(f'{batch.dtype} is not supported.')\n\n if isinstance(batch[0], list):\n batch = [item for _ in batch for item in _]\n\n if isinstance(batch[0], DataContainer):\n assert len(batch) % samples_per_gpu == 0\n stacked = []\n if batch[0].cpu_only:\n for i in range(0, len(batch), samples_per_gpu):\n stacked.append(\n [sample.data for sample in batch[i:i + samples_per_gpu]])\n return DataContainer(\n stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)\n elif batch[0].stack:\n for i in range(0, len(batch), samples_per_gpu):\n assert isinstance(batch[i].data, torch.Tensor)\n\n if batch[i].pad_dims is not None:\n ndim = batch[i].dim()\n assert ndim > batch[i].pad_dims\n max_shape = [0 for _ in range(batch[i].pad_dims)]\n for dim in range(1, batch[i].pad_dims + 1):\n max_shape[dim - 1] = batch[i].size(-dim)\n for sample in batch[i:i + samples_per_gpu]:\n for dim in range(0, ndim - batch[i].pad_dims):\n assert batch[i].size(dim) == sample.size(dim)\n for dim in range(1, batch[i].pad_dims + 1):\n max_shape[dim - 1] = max(max_shape[dim - 1],\n sample.size(-dim))\n padded_samples = []\n for sample in batch[i:i + samples_per_gpu]:\n pad = [0 for _ in range(batch[i].pad_dims * 2)]\n for dim in range(1, batch[i].pad_dims + 1):\n pad[2 * dim -\n 1] = max_shape[dim - 1] - sample.size(-dim)\n padded_samples.append(\n F.pad(\n sample.data, pad, value=sample.padding_value))\n stacked.append(collate(padded_samples))\n elif batch[i].pad_dims is None:\n stacked.append(\n collate([\n sample.data\n for sample in batch[i:i + samples_per_gpu]\n ]))\n else:\n raise ValueError(\n 'pad_dims should be either None or integers (1-3)')\n\n else:\n for i in range(0, len(batch), samples_per_gpu):\n stacked.append(\n [sample.data for sample in batch[i:i + samples_per_gpu]])\n return DataContainer(stacked, batch[0].stack, batch[0].padding_value)\n elif isinstance(batch[0], Sequence):\n transposed = zip(*batch)\n return [collate(samples, samples_per_gpu) for samples in transposed]\n\n elif isinstance(batch[0], Mapping):\n res = dict()\n for key in batch[0]:\n if isinstance(batch[0][key], torch.Tensor):\n res.update({key: collate([d[key] for d in batch], samples_per_gpu)})\n else:\n res.update({key: [d[key] for d in batch]})\n\n return res\n # return {\n # key: collate([d[key] for d in batch], samples_per_gpu)\n # for key in batch[0]\n # }\n else:\n return collate(batch)",
"def collate_fn(batch: list[dict[str, Tensor]]) -> dict[str, Any]:\n output: dict[str, Any] = {}\n output[\"image\"] = torch.stack([sample[\"image\"] for sample in batch])\n output[\"boxes\"] = [sample[\"boxes\"] for sample in batch]\n output[\"labels\"] = [torch.tensor([1] * len(sample[\"boxes\"])) for sample in batch]\n return output",
"def collate_fn(self, *args):\n return TupleMiniBatch(default_collate(*args))",
"def collate_fn(batch):\n metadata = []\n for el in batch:\n metadata.append(el[\"metadata\"])\n del el[\"metadata\"]\n\n batch = default_collate(batch)\n\n batch[\"metadata\"] = metadata\n\n return batch",
"def collate_fn(batch):\r\n transposed = zip(*batch)\r\n lbd = lambda batch:torch.cat([torch.from_numpy(b).long() for b in batch])\r\n return [lbd(samples) for samples in transposed]",
"def collate_fn(batch):\n # eliminate invalid data (where boxes is [] tensor)\n old_batch_len = len(batch)\n batch = [x for x in batch if x[1]['boxes'].shape[0] != 0]\n # try refill empty sample by other sample in current batch\n #print('batch len = ', old_batch_len)\n #print('new batch len = ', len(batch))\n new_batch_len = len(batch)\n for i in range(new_batch_len, old_batch_len):\n batch.append(copy.deepcopy(batch[i%new_batch_len]))\n #print('batch = ', batch)\n #print('filled batch len = ', len(batch))\n batch = list(zip(*batch)) # batch[0]: data tensor, batch[1]: targets dict\n\n batch[0] = nested_tensor_from_tensor_list(batch[0])\n return tuple(batch)",
"def collate_fn(self, batch: List[Dict]) -> List[Dict]:\n # package up a list of individual interventions into multiple batched interventions\n # batch may contain interventions on different locations\n high_node_to_minibatches = defaultdict(list)\n for d in batch:\n high_nodes = tuple(sorted(d[\"high_intervention\"].intervention._values.keys()))\n high_node_to_minibatches[high_nodes].append(d)\n\n minibatches = []\n for minibatch_dicts in high_node_to_minibatches.values():\n low_base_dict, low_ivn_dict, low_loc_dict = pack_interventions(\n [d[\"low_intervention\"] for d in minibatch_dicts],\n batch_dim=self.batch_dim,\n non_batch_inputs=self.low_non_batch_leaves\n )\n low_base_input = GraphInput(\n low_base_dict, batched=True, batch_dim=self.batch_dim,\n cache_results=self.cache_base_results,\n key_leaves=self.low_key_leaves,\n non_batch_leaves=self.low_non_batch_leaves\n )\n low_realizations = [d[\"low_intervention\"].realization for d in minibatch_dicts]\n if all(rzn is None for rzn in low_realizations):\n low_realizations = None\n low_ivn = Intervention.batched(\n low_base_input, low_ivn_dict, low_loc_dict,\n batch_dim=self.batch_dim, cache_base_results=self.cache_interv_results,\n realization=low_realizations\n )\n\n high_base_dict, high_ivn_dict, high_loc_dict = pack_interventions(\n [d[\"high_intervention\"] for d in minibatch_dicts],\n batch_dim=self.batch_dim,\n non_batch_inputs=self.high_non_batch_leaves\n )\n high_base_input = GraphInput(\n high_base_dict, batched=True, batch_dim=self.batch_dim,\n cache_results=self.cache_base_results,\n key_leaves=self.high_key_leaves,\n non_batch_leaves=self.high_non_batch_leaves\n )\n high_ivn = Intervention.batched(\n high_base_input, high_ivn_dict, high_loc_dict,\n batch_dim=self.batch_dim, cache_base_results=self.cache_interv_results)\n\n minibatches.append({\"low_intervention\": low_ivn,\n \"high_intervention\": high_ivn})\n\n return minibatches",
"def collate_fn(\n self,\n batch: List[\n Tuple[\n np.ndarray,\n np.ndarray,\n np.ndarray,\n np.ndarray,\n int,\n int,\n bool,\n bool,\n Optional[np.ndarray],\n Optional[np.ndarray],\n ]\n ],\n ) -> Union[\n Tuple[Tensor, Tensor, Tensor, Tensor, Any, Any, Any, Any],\n Tuple[Tensor, Tensor, Tensor, Tensor, Any, Any, Any, Any, Any, Any],\n ]:\n if not self.use_audio:\n inp_ids, segment_ids, inp_mask, st_mask, n_preceding, query_ids, is_first, is_last = zip(*batch)\n return (\n pad_sequence([torch.tensor(x) for x in inp_ids], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in segment_ids], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in inp_mask], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in st_mask], batch_first=True, padding_value=0),\n n_preceding,\n query_ids,\n is_first,\n is_last,\n )\n (\n inp_ids,\n segment_ids,\n inp_mask,\n st_mask,\n n_preceding,\n query_ids,\n is_first,\n is_last,\n features,\n features_length,\n ) = zip(*batch)\n return (\n pad_sequence([torch.tensor(x) for x in inp_ids], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in segment_ids], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in inp_mask], batch_first=True, padding_value=0),\n pad_sequence([torch.tensor(x) for x in st_mask], batch_first=True, padding_value=0),\n n_preceding,\n query_ids,\n is_first,\n is_last,\n pad_sequence([torch.tensor(x) for x in features], batch_first=True, padding_value=0).float(),\n torch.tensor(features_length, dtype=torch.long),\n )",
"def collate_without_batching_dict(batch):\n\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n out = None\n if _use_shared_memory:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = batch[0].storage()._new_shared(numel)\n out = batch[0].new(storage)\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if re.search('[SaUO]', elem.dtype.str) is not None:\n raise TypeError(error_msg.format(elem.dtype))\n\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], int_classes):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], collections.Mapping):\n return [d for d in batch]\n # return {key: collate_without_batching_dict_list([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], collections.Sequence):\n transposed = zip(*batch)\n return [collate_without_batching_dict(samples) for samples in transposed]\n\n raise TypeError((error_msg.format(type(batch[0]))))",
"def collate_fn(self, batch):\n images, boxes, categories = [], [], []\n\n for b in batch:\n images.append(b['img'])\n boxes.append(b['box'])\n categories.append(b['category'])\n\n images = torch.stack(images, dim=0)\n\n # tensor (N, 3, 300, 300), 3 lists of N tensors each\n return {\n 'imgs': images,\n 'boxes': boxes,\n 'categories': categories\n }",
"def collate_batch(self) -> Dict[str, Any]:\n pass",
"def collate_fn(data: list):\n def pad_tensor(inp):\n assert type(inp[0]) == torch.Tensor\n it = iter(inp)\n t = next(it)\n max_shape = list(t.shape)\n while True:\n try:\n t = next(it)\n for i in range(len(max_shape)):\n max_shape[i] = int(max(max_shape[i], t.shape[i]))\n except StopIteration:\n break\n max_shape = np.array(max_shape)\n\n padded_ts = []\n for t in inp:\n pad_pattern = np.zeros(2 * len(max_shape), dtype=np.int64)\n pad_pattern[::-2] = max_shape - np.array(t.shape)\n pad_pattern = tuple(pad_pattern.tolist())\n padded_ts.append(F.pad(t, pad_pattern, 'constant', 0))\n\n return padded_ts\n\n def stack(inp):\n if type(inp[0]) == list:\n ret = []\n for vs in zip(*inp):\n ret.append(stack(vs))\n elif type(inp[0]) == dict:\n ret = {}\n for kvs in zip(*[x.items() for x in inp]):\n ks, vs = zip(*kvs)\n for k in ks:\n assert k == ks[0], \"Key value mismatch.\"\n ret[k] = stack(vs)\n elif type(inp[0]) == torch.Tensor:\n new_t = pad_tensor(inp)\n ret = torch.stack(new_t, 0)\n elif type(inp[0]) == np.ndarray:\n new_t = pad_tensor([torch.from_numpy(x) for x in inp])\n ret = torch.stack(new_t, 0)\n elif type(inp[0]) == str:\n ret = inp\n else:\n raise ValueError('Cannot handle type {}'.format(type(inp[0])))\n return ret\n\n ret = stack(data)\n\n # compute CPU-intensive matrix K1, K2 here to leverage multi-processing nature of dataloader\n # if 'Gs' in ret and 'Hs' in ret and :\n # try:\n # G1_gt, G2_gt = ret['Gs']\n # H1_gt, H2_gt = ret['Hs']\n # sparse_dtype = np.float32\n # K1G = [kronecker_sparse(x, y).astype(sparse_dtype) for x, y in zip(G2_gt, G1_gt)] # 1 as source graph, 2 as target graph\n # K1H = [kronecker_sparse(x, y).astype(sparse_dtype) for x, y in zip(H2_gt, H1_gt)]\n # K1G = CSRMatrix3d(K1G)\n # K1H = CSRMatrix3d(K1H).transpose()\n #\n # ret['Ks'] = K1G, K1H #, K1G.transpose(keep_type=True), K1H.transpose(keep_type=True)\n # except ValueError:\n # pass\n\n return ret",
"def collate_fn(batch):\n all_input_ids, all_attention_mask, all_token_type_ids, all_lens, all_labels = map(torch.stack, zip(*batch))\n max_len = max(all_lens).item()\n all_input_ids = all_input_ids[:, :max_len]\n all_attention_mask = all_attention_mask[:, :max_len]\n all_token_type_ids = all_token_type_ids[:, :max_len]\n return all_input_ids, all_attention_mask, all_token_type_ids, all_labels",
"def custom_collate(batch):\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if isinstance(batch[0], torch.Tensor):\n out = None\n matched = True\n for dim in range(batch[0].dim()):\n lst = list(map(lambda x: x.size(dim), batch))\n matched = not lst or lst.count(lst[0]) == len(lst)\n if not matched:\n break\n if matched:\n return torch.stack(batch, 0, out=out)\n else:\n return pad_sequence(batch, batch_first=True)\n # indices, items = zip(*sorted(enumerate(batch), key=lambda x: x[1].size(0), reverse=True))\n # lengths = [batch[i].size(0) for i in indices]\n # logger.info(lengths)\n # return pad_sequence([batch[i] for i in indices], batch_first=True), lengths\n elif isinstance(batch[0], np.ndarray):\n matched = True\n for dim in range(batch[0].ndim):\n lst = list(map(lambda x: x.shape[dim], batch))\n matched = not lst or lst.count(lst[0]) == len(lst)\n if not matched:\n break\n if matched:\n return np.stack(batch, 0)\n else:\n raise ValueError('dimensions are not matched {}'.format(batch[0].shape))\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n raise ValueError('cannot handle numpy data')\n elif isinstance(batch[0], int_classes):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], collections.abc.Mapping):\n return {key: custom_collate([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], collections.abc.Sequence):\n transposed = zip(*batch)\n return [custom_collate(samples) for samples in transposed]\n raise TypeError((error_msg.format(type(batch[0]))))",
"def collate_fn(data, device=default_device):\n # batch.sort(key=lambda x: len(x[1]), reverse=True)\n has_mask_tensor = True if data[0][-1] is not None else False\n input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor = zip(*data)\n\n input_tensor, input_lengths = padSequence(input_tensor)\n target_tensor, target_lengths = padSequence(target_tensor)\n bs_tensor = torch.as_tensor(bs_tensor, dtype=torch.float, device=device)\n db_tensor = torch.as_tensor(db_tensor, dtype=torch.float, device=device)\n mask_tensor = torch.stack(mask_tensor).permute((1, 0, 2)) if has_mask_tensor else None\n # mask_tensor = torch.stack(mask_tensor).permute((1, 0, 2)) if mask_tensor[0] and mask_tensor[0] != [] else None\n\n # data = input_tensor, target_tensor, bs_tensor, db_tensor, mask_tensor\n # if torch.cuda.is_available():\n # data = [data[i].cuda() if isinstance(data[i], torch.Tensor) else data[i] for i in range(len(data))]\n return input_tensor, input_lengths, target_tensor, target_lengths, bs_tensor, db_tensor, mask_tensor # tensors [batch_size, *]",
"def trivial_batch_collator(batch):\n return batch",
"def trivial_batch_collator(batch):\n return batch",
"def trivial_batch_collator(batch):\n return batch",
"def basic_collate(batch):\n\n minibatch, targets = zip(*[(a, b) for (a,b) in batch])\n minibatch = stack(minibatch, dim=0)\n return minibatch, targets",
"def customize_collate(batch):\n\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, torch.Tensor):\n # this is the main part to handle varied length data in a batch\n # batch = [data_tensor_1, data_tensor_2, data_tensor_3 ... ]\n # \n batch_new = pad_sequence(batch)\n \n out = None\n if torch.utils.data.get_worker_info() is not None:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n\n # allocate the memory based on maximum numel\n numel = max([x.numel() for x in batch_new]) * len(batch_new)\n storage = elem.storage()._new_shared(numel)\n out = elem.new(storage)\n return torch.stack(batch_new, 0, out=out)\n\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(customize_collate_err_msg.format(elem.dtype))\n # this will go to loop in the last case\n return customize_collate([torch.as_tensor(b) for b in batch])\n elif elem.shape == (): # scalars\n return torch.as_tensor(batch)\n \n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(elem, int_classes):\n return torch.tensor(batch)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, container_abcs.Mapping):\n return {key: customize_collate([d[key] for d in batch]) for key in elem}\n elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple\n return elem_type(*(customize_collate(samples) \\\n for samples in zip(*batch)))\n elif isinstance(elem, container_abcs.Sequence):\n # check to make sure that the elements in batch have consistent size\n it = iter(batch)\n elem_size = len(next(it))\n if not all(len(elem) == elem_size for elem in it):\n raise RuntimeError('each element in batch should be of equal size')\n \n # zip([[A, B, C], [a, b, c]]) -> [[A, a], [B, b], [C, c]]\n transposed = zip(*batch)\n return [customize_collate(samples) for samples in transposed]\n\n raise TypeError(customize_collate_err_msg.format(elem_type))",
"def _collate(cls, inbatch, num_devices=None):\n item0 = inbatch[0]\n bsize = len(inbatch)\n if num_devices is None:\n num_devices = 1\n\n samples_per_device = int(np.ceil(bsize / num_devices))\n\n # assert bsize % samples_per_device == 0\n stacked = []\n if item0.cpu_only:\n # chunking logic\n stacked = []\n for i in range(0, bsize, samples_per_device):\n stacked.append(\n [sample.data for sample in inbatch[i:i + samples_per_device]])\n\n elif item0.stack:\n for i in range(0, bsize, samples_per_device):\n item = inbatch[i]\n pad_dims_ = item.pad_dims\n assert isinstance(item.data, torch.Tensor)\n\n if pad_dims_ is not None:\n # Note: can probably reimplement this using padded collate\n # logic\n ndim = item.dim()\n assert ndim > pad_dims_\n max_shape = [0 for _ in range(pad_dims_)]\n for dim in range(1, pad_dims_ + 1):\n max_shape[dim - 1] = item.shape[-dim]\n for sample in inbatch[i:i + samples_per_device]:\n for dim in range(0, ndim - pad_dims_):\n assert item.shape[dim] == sample.shape[dim]\n for dim in range(1, pad_dims_ + 1):\n max_shape[dim - 1] = max(max_shape[dim - 1], sample.shape[-dim])\n padded_samples = []\n for sample in inbatch[i:i + samples_per_device]:\n pad = [0 for _ in range(pad_dims_ * 2)]\n for dim in range(1, pad_dims_ + 1):\n pad[2 * dim - 1] = max_shape[dim - 1] - sample.shape[-dim]\n padded_samples.append(\n F.pad(sample.data, pad, value=sample.padding_value))\n stacked.append(default_collate(padded_samples))\n\n elif pad_dims_ is None:\n stacked.append(\n default_collate([\n sample.data\n for sample in inbatch[i:i + samples_per_device]\n ]))\n else:\n raise ValueError(\n 'pad_dims should be either None or integers (1-3)')\n\n else:\n for i in range(0, bsize, samples_per_device):\n stacked.append(\n [sample.data for sample in inbatch[i:i + samples_per_device]])\n result = BatchContainer(stacked, **item0.meta)\n return result",
"def _collate_fn(batch):\r\n batch = list(zip(*batch))\r\n batch[0] = torch.stack(batch[0])\r\n batch[1] = list(batch[1])\r\n batch[2] = torch.stack(batch[2])\r\n return tuple(batch)",
"def custom_collate_fn(data):\n features, labels = zip(*data)\n return pack_sequence(features, enforce_sorted=False), torch.tensor(labels)",
"def list_data_collate(batch: Sequence):\n elem = batch[0]\n data = [i for k in batch for i in k] if isinstance(elem, list) else batch\n key = None\n try:\n if config.USE_META_DICT:\n data = pickle_operations(data) # bc 0.9.0\n if isinstance(elem, Mapping):\n ret = {}\n for k in elem:\n key = k\n data_for_batch = [d[key] for d in data]\n ret[key] = collate_meta_tensor(data_for_batch)\n else:\n ret = collate_meta_tensor(data)\n return ret\n except RuntimeError as re:\n re_str = str(re)\n if \"equal size\" in re_str:\n if key is not None:\n re_str += f\"\\nCollate error on the key '{key}' of dictionary data.\"\n re_str += (\n \"\\n\\nMONAI hint: if your transforms intentionally create images of different shapes, creating your \"\n + \"`DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem (check its \"\n + \"documentation).\"\n )\n _ = dev_collate(data)\n raise RuntimeError(re_str) from re\n except TypeError as re:\n re_str = str(re)\n if \"numpy\" in re_str and \"Tensor\" in re_str:\n if key is not None:\n re_str += f\"\\nCollate error on the key '{key}' of dictionary data.\"\n re_str += (\n \"\\n\\nMONAI hint: if your transforms intentionally create mixtures of torch Tensor and numpy ndarray, \"\n + \"creating your `DataLoader` with `collate_fn=pad_list_data_collate` might solve this problem \"\n + \"(check its documentation).\"\n )\n _ = dev_collate(data)\n raise TypeError(re_str) from re",
"def collater(self, samples):\r\n return collate(\r\n samples, self.src_dict, self.tgt_dict,\r\n left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target,\r\n max_sent_len=self.max_sent_len,\r\n mask_other_sents=self.mask_other_sents\r\n )"
] | [
"0.71019036",
"0.6699454",
"0.66386545",
"0.6537352",
"0.65236694",
"0.6492617",
"0.64010304",
"0.6400702",
"0.6383652",
"0.6318756",
"0.62709737",
"0.6211533",
"0.6105718",
"0.59736997",
"0.59524226",
"0.5950687",
"0.5934938",
"0.5909209",
"0.5867133",
"0.58421636",
"0.5756922",
"0.5756922",
"0.5756922",
"0.5754641",
"0.5752697",
"0.57488185",
"0.5743005",
"0.5713209",
"0.57090425",
"0.5707598"
] | 0.67624515 | 1 |
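A minimal usage sketch for a collate function like the one described in the row above, assuming the `collate_fn` from that row's document field is importable and that `samples` is a pre-built list of dicts with `x_ref`, `x_pos`, `x_negs`, and `label` entries; `TripletSeriesDataset` is a hypothetical placeholder class introduced only for illustration.

    from torch.utils.data import DataLoader, Dataset

    # Hypothetical dataset: each item is a dict of variable-length multivariate
    # series, matching the sample structure the collate_fn above expects.
    class TripletSeriesDataset(Dataset):
        def __init__(self, samples):
            self.samples = samples  # list of dicts: x_ref, x_pos, x_negs, label

        def __len__(self):
            return len(self.samples)

        def __getitem__(self, idx):
            return self.samples[idx]

    # Wire the custom collate_fn into a standard PyTorch DataLoader.
    loader = DataLoader(TripletSeriesDataset(samples), batch_size=8,
                        shuffle=True, collate_fn=collate_fn)

    for batch in loader:
        x_ref = batch["x_ref"]    # (B, n_dim, L) padded reference windows
        x_negs = batch["x_negs"]  # (B, K, n_dim, L') padded negatives
        break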